[Jython-checkins] jython (2.2): Switch (back) to copying full CPython Lib until we get to a shared Lib.

frank.wierzbicki jython-checkins at python.org
Mon Mar 19 20:34:44 CET 2012


http://hg.python.org/jython/rev/bed9f9de4ef3
changeset:   6412:bed9f9de4ef3
branch:      2.2
parent:      5374:512dab783493
user:        Frank Wierzbicki <fwierzbicki at gmail.com>
date:        Mon Mar 19 10:45:11 2012 -0700
summary:
  Switch (back) to copying full CPython Lib until we get to a shared Lib.

files:
  .hgignore                                           |    30 +
  .hgsub                                              |     1 -
  .hgsubstate                                         |     1 -
  build.xml                                           |     4 +-
  lib-python/2.2/BaseHTTPServer.py                    |   484 +
  lib-python/2.2/Bastion.py                           |   177 +
  lib-python/2.2/CGIHTTPServer.py                     |   325 +
  lib-python/2.2/ConfigParser.py                      |   472 +
  lib-python/2.2/Cookie.py                            |   742 ++
  lib-python/2.2/FCNTL.py                             |    14 +
  lib-python/2.2/HTMLParser.py                        |   383 +
  lib-python/2.2/MimeWriter.py                        |   181 +
  lib-python/2.2/Queue.py                             |   151 +
  lib-python/2.2/SimpleHTTPServer.py                  |   198 +
  lib-python/2.2/SimpleXMLRPCServer.py                |   274 +
  lib-python/2.2/SocketServer.py                      |   576 +
  lib-python/2.2/StringIO.py                          |   239 +
  lib-python/2.2/TERMIOS.py                           |    14 +
  lib-python/2.2/UserDict.py                          |    60 +
  lib-python/2.2/UserList.py                          |    85 +
  lib-python/2.2/UserString.py                        |   182 +
  lib-python/2.2/__future__.py                        |   104 +
  lib-python/2.2/__phello__.foo.py                    |     1 +
  lib-python/2.2/aifc.py                              |   961 ++
  lib-python/2.2/anydbm.py                            |    86 +
  lib-python/2.2/asynchat.py                          |   293 +
  lib-python/2.2/asyncore.py                          |   556 +
  lib-python/2.2/atexit.py                            |    50 +
  lib-python/2.2/audiodev.py                          |   257 +
  lib-python/2.2/base64.py                            |    77 +
  lib-python/2.2/bdb.py                               |   563 +
  lib-python/2.2/binhex.py                            |   531 +
  lib-python/2.2/bisect.py                            |    78 +
  lib-python/2.2/calendar.py                          |   246 +
  lib-python/2.2/cgi.py                               |  1040 +++
  lib-python/2.2/cgitb.py                             |   205 +
  lib-python/2.2/chunk.py                             |   167 +
  lib-python/2.2/cmd.py                               |   336 +
  lib-python/2.2/code.py                              |   311 +
  lib-python/2.2/codecs.py                            |   636 +
  lib-python/2.2/codeop.py                            |   171 +
  lib-python/2.2/colorsys.py                          |   123 +
  lib-python/2.2/commands.py                          |    84 +
  lib-python/2.2/compileall.py                        |   148 +
  lib-python/2.2/compiler/__init__.py                 |    26 +
  lib-python/2.2/compiler/ast.py                      |  1241 +++
  lib-python/2.2/compiler/consts.py                   |    19 +
  lib-python/2.2/compiler/future.py                   |    72 +
  lib-python/2.2/compiler/misc.py                     |    74 +
  lib-python/2.2/compiler/pyassem.py                  |   824 ++
  lib-python/2.2/compiler/pycodegen.py                |  1388 ++++
  lib-python/2.2/compiler/symbols.py                  |   419 +
  lib-python/2.2/compiler/syntax.py                   |    46 +
  lib-python/2.2/compiler/transformer.py              |  1359 ++++
  lib-python/2.2/compiler/visitor.py                  |   121 +
  lib-python/2.2/copy.py                              |   381 +
  lib-python/2.2/copy_reg.py                          |    78 +
  lib-python/2.2/curses/__init__.py                   |    54 +
  lib-python/2.2/curses/ascii.py                      |   100 +
  lib-python/2.2/curses/has_key.py                    |   189 +
  lib-python/2.2/curses/panel.py                      |     9 +
  lib-python/2.2/curses/textpad.py                    |   167 +
  lib-python/2.2/curses/wrapper.py                    |    63 +
  lib-python/2.2/dbhash.py                            |    16 +
  lib-python/2.2/difflib.py                           |  1088 +++
  lib-python/2.2/dircache.py                          |    44 +
  lib-python/2.2/dis.py                               |   327 +
  lib-python/2.2/distutils/README                     |    18 +
  lib-python/2.2/distutils/__init__.py                |    13 +
  lib-python/2.2/distutils/archive_util.py            |   173 +
  lib-python/2.2/distutils/bcppcompiler.py            |   409 +
  lib-python/2.2/distutils/ccompiler.py               |  1046 +++
  lib-python/2.2/distutils/cmd.py                     |   486 +
  lib-python/2.2/distutils/command/__init__.py        |    24 +
  lib-python/2.2/distutils/command/bdist.py           |   139 +
  lib-python/2.2/distutils/command/bdist_dumb.py      |    96 +
  lib-python/2.2/distutils/command/bdist_rpm.py       |   488 +
  lib-python/2.2/distutils/command/bdist_wininst.py   |   570 +
  lib-python/2.2/distutils/command/build.py           |   131 +
  lib-python/2.2/distutils/command/build_clib.py      |   240 +
  lib-python/2.2/distutils/command/build_ext.py       |   630 +
  lib-python/2.2/distutils/command/build_py.py        |   401 +
  lib-python/2.2/distutils/command/build_scripts.py   |   110 +
  lib-python/2.2/distutils/command/clean.py           |    79 +
  lib-python/2.2/distutils/command/command_template   |    45 +
  lib-python/2.2/distutils/command/config.py          |   366 +
  lib-python/2.2/distutils/command/install.py         |   598 +
  lib-python/2.2/distutils/command/install_data.py    |    83 +
  lib-python/2.2/distutils/command/install_headers.py |    53 +
  lib-python/2.2/distutils/command/install_lib.py     |   213 +
  lib-python/2.2/distutils/command/install_scripts.py |    63 +
  lib-python/2.2/distutils/command/sdist.py           |   475 +
  lib-python/2.2/distutils/core.py                    |   231 +
  lib-python/2.2/distutils/cygwinccompiler.py         |   441 +
  lib-python/2.2/distutils/dep_util.py                |   115 +
  lib-python/2.2/distutils/dir_util.py                |   219 +
  lib-python/2.2/distutils/dist.py                    |  1086 +++
  lib-python/2.2/distutils/errors.py                  |    99 +
  lib-python/2.2/distutils/extension.py               |   231 +
  lib-python/2.2/distutils/fancy_getopt.py            |   504 +
  lib-python/2.2/distutils/file_util.py               |   258 +
  lib-python/2.2/distutils/filelist.py                |   367 +
  lib-python/2.2/distutils/msvccompiler.py            |   515 +
  lib-python/2.2/distutils/mwerkscompiler.py          |   217 +
  lib-python/2.2/distutils/spawn.py                   |   169 +
  lib-python/2.2/distutils/sysconfig.py               |   445 +
  lib-python/2.2/distutils/text_file.py               |   384 +
  lib-python/2.2/distutils/unixccompiler.py           |   308 +
  lib-python/2.2/distutils/util.py                    |   458 +
  lib-python/2.2/distutils/version.py                 |   301 +
  lib-python/2.2/doctest.py                           |  1173 +++
  lib-python/2.2/dospath.py                           |   341 +
  lib-python/2.2/dumbdbm.py                           |   170 +
  lib-python/2.2/email/Charset.py                     |   393 +
  lib-python/2.2/email/Encoders.py                    |    94 +
  lib-python/2.2/email/Errors.py                      |    26 +
  lib-python/2.2/email/Generator.py                   |   378 +
  lib-python/2.2/email/Header.py                      |   515 +
  lib-python/2.2/email/Iterators.py                   |    25 +
  lib-python/2.2/email/MIMEAudio.py                   |    71 +
  lib-python/2.2/email/MIMEBase.py                    |    24 +
  lib-python/2.2/email/MIMEImage.py                   |    45 +
  lib-python/2.2/email/MIMEMessage.py                 |    32 +
  lib-python/2.2/email/MIMEMultipart.py               |    37 +
  lib-python/2.2/email/MIMENonMultipart.py            |    24 +
  lib-python/2.2/email/MIMEText.py                    |    45 +
  lib-python/2.2/email/Message.py                     |   837 ++
  lib-python/2.2/email/Parser.py                      |   292 +
  lib-python/2.2/email/Utils.py                       |   340 +
  lib-python/2.2/email/__init__.py                    |    72 +
  lib-python/2.2/email/_compat21.py                   |    69 +
  lib-python/2.2/email/_compat22.py                   |    70 +
  lib-python/2.2/email/_parseaddr.py                  |   480 +
  lib-python/2.2/email/base64MIME.py                  |   184 +
  lib-python/2.2/email/quopriMIME.py                  |   323 +
  lib-python/2.2/email/test/__init__.py               |     2 +
  lib-python/2.2/email/test/data/PyBanner048.gif      |   Bin 
  lib-python/2.2/email/test/data/audiotest.au         |   Bin 
  lib-python/2.2/email/test/data/msg_01.txt           |    19 +
  lib-python/2.2/email/test/data/msg_02.txt           |   135 +
  lib-python/2.2/email/test/data/msg_03.txt           |    16 +
  lib-python/2.2/email/test/data/msg_04.txt           |    37 +
  lib-python/2.2/email/test/data/msg_05.txt           |    28 +
  lib-python/2.2/email/test/data/msg_06.txt           |    33 +
  lib-python/2.2/email/test/data/msg_07.txt           |    83 +
  lib-python/2.2/email/test/data/msg_08.txt           |    24 +
  lib-python/2.2/email/test/data/msg_09.txt           |    24 +
  lib-python/2.2/email/test/data/msg_10.txt           |    32 +
  lib-python/2.2/email/test/data/msg_11.txt           |     7 +
  lib-python/2.2/email/test/data/msg_12.txt           |    36 +
  lib-python/2.2/email/test/data/msg_13.txt           |    94 +
  lib-python/2.2/email/test/data/msg_14.txt           |    23 +
  lib-python/2.2/email/test/data/msg_15.txt           |    52 +
  lib-python/2.2/email/test/data/msg_16.txt           |   123 +
  lib-python/2.2/email/test/data/msg_17.txt           |    12 +
  lib-python/2.2/email/test/data/msg_18.txt           |     6 +
  lib-python/2.2/email/test/data/msg_19.txt           |    43 +
  lib-python/2.2/email/test/data/msg_20.txt           |    22 +
  lib-python/2.2/email/test/data/msg_21.txt           |    20 +
  lib-python/2.2/email/test/data/msg_22.txt           |    46 +
  lib-python/2.2/email/test/data/msg_23.txt           |     8 +
  lib-python/2.2/email/test/data/msg_24.txt           |    10 +
  lib-python/2.2/email/test/data/msg_25.txt           |   117 +
  lib-python/2.2/email/test/data/msg_26.txt           |    45 +
  lib-python/2.2/email/test/data/msg_27.txt           |    15 +
  lib-python/2.2/email/test/data/msg_28.txt           |    25 +
  lib-python/2.2/email/test/data/msg_29.txt           |    22 +
  lib-python/2.2/email/test/data/msg_30.txt           |    23 +
  lib-python/2.2/email/test/data/msg_31.txt           |    15 +
  lib-python/2.2/email/test/data/msg_32.txt           |    14 +
  lib-python/2.2/email/test/data/msg_33.txt           |    29 +
  lib-python/2.2/email/test/data/msg_34.txt           |    19 +
  lib-python/2.2/email/test/data/msg_35.txt           |     4 +
  lib-python/2.2/email/test/test_email.py             |  2718 ++++++++
  lib-python/2.2/email/test/test_email_codecs.py      |    68 +
  lib-python/2.2/email/test/test_email_torture.py     |   136 +
  lib-python/2.2/encodings/__init__.py                |    97 +
  lib-python/2.2/encodings/aliases.py                 |   115 +
  lib-python/2.2/encodings/ascii.py                   |    35 +
  lib-python/2.2/encodings/base64_codec.py            |    62 +
  lib-python/2.2/encodings/charmap.py                 |    51 +
  lib-python/2.2/encodings/cp037.py                   |   280 +
  lib-python/2.2/encodings/cp1006.py                  |   138 +
  lib-python/2.2/encodings/cp1026.py                  |   280 +
  lib-python/2.2/encodings/cp1140.py                  |    45 +
  lib-python/2.2/encodings/cp1250.py                  |   123 +
  lib-python/2.2/encodings/cp1251.py                  |   157 +
  lib-python/2.2/encodings/cp1252.py                  |    76 +
  lib-python/2.2/encodings/cp1253.py                  |   151 +
  lib-python/2.2/encodings/cp1254.py                  |    82 +
  lib-python/2.2/encodings/cp1255.py                  |   143 +
  lib-python/2.2/encodings/cp1256.py                  |   129 +
  lib-python/2.2/encodings/cp1257.py                  |   131 +
  lib-python/2.2/encodings/cp1258.py                  |    90 +
  lib-python/2.2/encodings/cp424.py                   |   280 +
  lib-python/2.2/encodings/cp437.py                   |   172 +
  lib-python/2.2/encodings/cp500.py                   |   280 +
  lib-python/2.2/encodings/cp737.py                   |   172 +
  lib-python/2.2/encodings/cp775.py                   |   172 +
  lib-python/2.2/encodings/cp850.py                   |   172 +
  lib-python/2.2/encodings/cp852.py                   |   172 +
  lib-python/2.2/encodings/cp855.py                   |   172 +
  lib-python/2.2/encodings/cp856.py                   |   172 +
  lib-python/2.2/encodings/cp857.py                   |   171 +
  lib-python/2.2/encodings/cp860.py                   |   172 +
  lib-python/2.2/encodings/cp861.py                   |   172 +
  lib-python/2.2/encodings/cp862.py                   |   172 +
  lib-python/2.2/encodings/cp863.py                   |   172 +
  lib-python/2.2/encodings/cp864.py                   |   170 +
  lib-python/2.2/encodings/cp865.py                   |   172 +
  lib-python/2.2/encodings/cp866.py                   |   172 +
  lib-python/2.2/encodings/cp869.py                   |   172 +
  lib-python/2.2/encodings/cp874.py                   |   171 +
  lib-python/2.2/encodings/cp875.py                   |   281 +
  lib-python/2.2/encodings/hex_codec.py               |    62 +
  lib-python/2.2/encodings/iso8859_1.py               |    44 +
  lib-python/2.2/encodings/iso8859_10.py              |    90 +
  lib-python/2.2/encodings/iso8859_13.py              |   100 +
  lib-python/2.2/encodings/iso8859_14.py              |    75 +
  lib-python/2.2/encodings/iso8859_15.py              |    52 +
  lib-python/2.2/encodings/iso8859_2.py               |   101 +
  lib-python/2.2/encodings/iso8859_3.py               |    79 +
  lib-python/2.2/encodings/iso8859_4.py               |    94 +
  lib-python/2.2/encodings/iso8859_5.py               |   138 +
  lib-python/2.2/encodings/iso8859_6.py               |   137 +
  lib-python/2.2/encodings/iso8859_7.py               |   124 +
  lib-python/2.2/encodings/iso8859_8.py               |   112 +
  lib-python/2.2/encodings/iso8859_9.py               |    50 +
  lib-python/2.2/encodings/koi8_r.py                  |   172 +
  lib-python/2.2/encodings/koi8_u.py                  |    54 +
  lib-python/2.2/encodings/latin_1.py                 |    35 +
  lib-python/2.2/encodings/mac_cyrillic.py            |   167 +
  lib-python/2.2/encodings/mac_greek.py               |   170 +
  lib-python/2.2/encodings/mac_iceland.py             |   166 +
  lib-python/2.2/encodings/mac_latin2.py              |   170 +
  lib-python/2.2/encodings/mac_roman.py               |   167 +
  lib-python/2.2/encodings/mac_turkish.py             |   167 +
  lib-python/2.2/encodings/mbcs.py                    |    36 +
  lib-python/2.2/encodings/quopri_codec.py            |    58 +
  lib-python/2.2/encodings/raw_unicode_escape.py      |    30 +
  lib-python/2.2/encodings/rot_13.py                  |   105 +
  lib-python/2.2/encodings/undefined.py               |    34 +
  lib-python/2.2/encodings/unicode_escape.py          |    30 +
  lib-python/2.2/encodings/unicode_internal.py        |    30 +
  lib-python/2.2/encodings/utf_16.py                  |    61 +
  lib-python/2.2/encodings/utf_16_be.py               |    31 +
  lib-python/2.2/encodings/utf_16_le.py               |    31 +
  lib-python/2.2/encodings/utf_7.py                   |    27 +
  lib-python/2.2/encodings/utf_8.py                   |    31 +
  lib-python/2.2/encodings/uu_codec.py                |   112 +
  lib-python/2.2/encodings/zlib_codec.py              |    63 +
  lib-python/2.2/filecmp.py                           |   331 +
  lib-python/2.2/fileinput.py                         |   349 +
  lib-python/2.2/fnmatch.py                           |   107 +
  lib-python/2.2/formatter.py                         |   454 +
  lib-python/2.2/fpformat.py                          |   142 +
  lib-python/2.2/ftplib.py                            |   804 ++
  lib-python/2.2/getopt.py                            |   144 +
  lib-python/2.2/getpass.py                           |   123 +
  lib-python/2.2/gettext.py                           |   311 +
  lib-python/2.2/glob.py                              |    56 +
  lib-python/2.2/gopherlib.py                         |   205 +
  lib-python/2.2/gzip.py                              |   390 +
  lib-python/2.2/hmac.py                              |    99 +
  lib-python/2.2/hotshot/__init__.py                  |    41 +
  lib-python/2.2/hotshot/log.py                       |   194 +
  lib-python/2.2/hotshot/stats.py                     |    93 +
  lib-python/2.2/htmlentitydefs.py                    |   257 +
  lib-python/2.2/htmllib.py                           |   475 +
  lib-python/2.2/httplib.py                           |  1238 +++
  lib-python/2.2/ihooks.py                            |   511 +
  lib-python/2.2/imaplib.py                           |  1208 +++
  lib-python/2.2/imghdr.py                            |   154 +
  lib-python/2.2/imputil.py                           |   720 ++
  lib-python/2.2/inspect.py                           |   785 ++
  lib-python/2.2/keyword.py                           |    97 +
  lib-python/2.2/knee.py                              |   126 +
  lib-python/2.2/lib-old/Para.py                      |   343 +
  lib-python/2.2/lib-old/addpack.py                   |    67 +
  lib-python/2.2/lib-old/cmp.py                       |    63 +
  lib-python/2.2/lib-old/cmpcache.py                  |    64 +
  lib-python/2.2/lib-old/codehack.py                  |    81 +
  lib-python/2.2/lib-old/dircmp.py                    |   202 +
  lib-python/2.2/lib-old/dump.py                      |    63 +
  lib-python/2.2/lib-old/find.py                      |    26 +
  lib-python/2.2/lib-old/fmt.py                       |   623 +
  lib-python/2.2/lib-old/grep.py                      |    79 +
  lib-python/2.2/lib-old/lockfile.py                  |    15 +
  lib-python/2.2/lib-old/newdir.py                    |    73 +
  lib-python/2.2/lib-old/ni.py                        |   433 +
  lib-python/2.2/lib-old/packmail.py                  |   111 +
  lib-python/2.2/lib-old/poly.py                      |    52 +
  lib-python/2.2/lib-old/rand.py                      |    13 +
  lib-python/2.2/lib-old/tb.py                        |   177 +
  lib-python/2.2/lib-old/util.py                      |    25 +
  lib-python/2.2/lib-old/whatsound.py                 |     1 +
  lib-python/2.2/lib-old/zmod.py                      |    94 +
  lib-python/2.2/lib-tk/Canvas.py                     |   188 +
  lib-python/2.2/lib-tk/Dialog.py                     |    49 +
  lib-python/2.2/lib-tk/FileDialog.py                 |   273 +
  lib-python/2.2/lib-tk/FixTk.py                      |    37 +
  lib-python/2.2/lib-tk/ScrolledText.py               |    43 +
  lib-python/2.2/lib-tk/SimpleDialog.py               |   111 +
  lib-python/2.2/lib-tk/Tix.py                        |  1626 ++++
  lib-python/2.2/lib-tk/Tkconstants.py                |   103 +
  lib-python/2.2/lib-tk/Tkdnd.py                      |   321 +
  lib-python/2.2/lib-tk/Tkinter.py                    |  3141 +++++++++
  lib-python/2.2/lib-tk/tkColorChooser.py             |    74 +
  lib-python/2.2/lib-tk/tkCommonDialog.py             |    65 +
  lib-python/2.2/lib-tk/tkFileDialog.py               |   129 +
  lib-python/2.2/lib-tk/tkFont.py                     |   191 +
  lib-python/2.2/lib-tk/tkMessageBox.py               |   120 +
  lib-python/2.2/lib-tk/tkSimpleDialog.py             |   313 +
  lib-python/2.2/lib-tk/turtle.py                     |   385 +
  lib-python/2.2/linecache.py                         |   101 +
  lib-python/2.2/locale.py                            |   743 ++
  lib-python/2.2/macpath.py                           |   242 +
  lib-python/2.2/macurl2path.py                       |    95 +
  lib-python/2.2/mailbox.py                           |   313 +
  lib-python/2.2/mailcap.py                           |   255 +
  lib-python/2.2/markupbase.py                        |   317 +
  lib-python/2.2/mhlib.py                             |  1003 +++
  lib-python/2.2/mimetools.py                         |   226 +
  lib-python/2.2/mimetypes.py                         |   435 +
  lib-python/2.2/mimify.py                            |   464 +
  lib-python/2.2/multifile.py                         |   160 +
  lib-python/2.2/mutex.py                             |    51 +
  lib-python/2.2/netrc.py                             |   108 +
  lib-python/2.2/nntplib.py                           |   575 +
  lib-python/2.2/ntpath.py                            |   482 +
  lib-python/2.2/nturl2path.py                        |    66 +
  lib-python/2.2/os.py                                |   613 +
  lib-python/2.2/pdb.doc                              |   192 +
  lib-python/2.2/pdb.py                               |   979 ++
  lib-python/2.2/pickle.py                            |   986 +++
  lib-python/2.2/pipes.py                             |   297 +
  lib-python/2.2/plat-aix3/IN.py                      |   126 +
  lib-python/2.2/plat-aix3/regen                      |     8 +
  lib-python/2.2/plat-aix4/IN.py                      |   165 +
  lib-python/2.2/plat-aix4/regen                      |     8 +
  lib-python/2.2/plat-beos5/IN.py                     |   327 +
  lib-python/2.2/plat-beos5/regen                     |     7 +
  lib-python/2.2/plat-darwin/IN.py                    |   357 +
  lib-python/2.2/plat-darwin/regen                    |     3 +
  lib-python/2.2/plat-freebsd2/IN.py                  |   187 +
  lib-python/2.2/plat-freebsd2/regen                  |     3 +
  lib-python/2.2/plat-freebsd3/IN.py                  |   189 +
  lib-python/2.2/plat-freebsd3/regen                  |     4 +
  lib-python/2.2/plat-freebsd4/IN.py                  |   355 +
  lib-python/2.2/plat-freebsd4/regen                  |     3 +
  lib-python/2.2/plat-freebsd5/IN.py                  |   355 +
  lib-python/2.2/plat-freebsd5/regen                  |     3 +
  lib-python/2.2/plat-generic/regen                   |     3 +
  lib-python/2.2/plat-irix5/AL.py                     |    61 +
  lib-python/2.2/plat-irix5/CD.py                     |    34 +
  lib-python/2.2/plat-irix5/CL.py                     |    24 +
  lib-python/2.2/plat-irix5/CL_old.py                 |   236 +
  lib-python/2.2/plat-irix5/DEVICE.py                 |   400 +
  lib-python/2.2/plat-irix5/ERRNO.py                  |   147 +
  lib-python/2.2/plat-irix5/FILE.py                   |   239 +
  lib-python/2.2/plat-irix5/FL.py                     |   289 +
  lib-python/2.2/plat-irix5/GET.py                    |    59 +
  lib-python/2.2/plat-irix5/GL.py                     |   393 +
  lib-python/2.2/plat-irix5/GLWS.py                   |    12 +
  lib-python/2.2/plat-irix5/IN.py                     |   141 +
  lib-python/2.2/plat-irix5/IOCTL.py                  |   233 +
  lib-python/2.2/plat-irix5/SV.py                     |   120 +
  lib-python/2.2/plat-irix5/WAIT.py                   |    14 +
  lib-python/2.2/plat-irix5/cddb.py                   |   206 +
  lib-python/2.2/plat-irix5/cdplayer.py               |    89 +
  lib-python/2.2/plat-irix5/flp.doc                   |   117 +
  lib-python/2.2/plat-irix5/flp.py                    |   451 +
  lib-python/2.2/plat-irix5/jpeg.py                   |   111 +
  lib-python/2.2/plat-irix5/panel.py                  |   281 +
  lib-python/2.2/plat-irix5/panelparser.py            |   128 +
  lib-python/2.2/plat-irix5/readcd.doc                |   104 +
  lib-python/2.2/plat-irix5/readcd.py                 |   244 +
  lib-python/2.2/plat-irix5/regen                     |    10 +
  lib-python/2.2/plat-irix5/torgb.py                  |    98 +
  lib-python/2.2/plat-irix6/AL.py                     |    61 +
  lib-python/2.2/plat-irix6/CD.py                     |    34 +
  lib-python/2.2/plat-irix6/CL.py                     |    24 +
  lib-python/2.2/plat-irix6/DEVICE.py                 |   400 +
  lib-python/2.2/plat-irix6/ERRNO.py                  |   180 +
  lib-python/2.2/plat-irix6/FILE.py                   |   674 ++
  lib-python/2.2/plat-irix6/FL.py                     |   289 +
  lib-python/2.2/plat-irix6/GET.py                    |    59 +
  lib-python/2.2/plat-irix6/GL.py                     |   393 +
  lib-python/2.2/plat-irix6/GLWS.py                   |    12 +
  lib-python/2.2/plat-irix6/IN.py                     |   385 +
  lib-python/2.2/plat-irix6/IOCTL.py                  |   233 +
  lib-python/2.2/plat-irix6/SV.py                     |   120 +
  lib-python/2.2/plat-irix6/WAIT.py                   |   335 +
  lib-python/2.2/plat-irix6/cddb.py                   |   206 +
  lib-python/2.2/plat-irix6/cdplayer.py               |    89 +
  lib-python/2.2/plat-irix6/flp.doc                   |   117 +
  lib-python/2.2/plat-irix6/flp.py                    |   450 +
  lib-python/2.2/plat-irix6/jpeg.py                   |   111 +
  lib-python/2.2/plat-irix6/panel.py                  |   281 +
  lib-python/2.2/plat-irix6/panelparser.py            |   128 +
  lib-python/2.2/plat-irix6/readcd.doc                |   104 +
  lib-python/2.2/plat-irix6/readcd.py                 |   244 +
  lib-python/2.2/plat-irix6/regen                     |    11 +
  lib-python/2.2/plat-irix6/torgb.py                  |    98 +
  lib-python/2.2/plat-linux1/IN.py                    |   239 +
  lib-python/2.2/plat-linux1/regen                    |     8 +
  lib-python/2.2/plat-linux2/CDROM.py                 |   207 +
  lib-python/2.2/plat-linux2/DLFCN.py                 |    83 +
  lib-python/2.2/plat-linux2/IN.py                    |   603 +
  lib-python/2.2/plat-linux2/TYPES.py                 |   171 +
  lib-python/2.2/plat-linux2/regen                    |     8 +
  lib-python/2.2/plat-netbsd1/IN.py                   |    57 +
  lib-python/2.2/plat-netbsd1/regen                   |     3 +
  lib-python/2.2/plat-next3/regen                     |     6 +
  lib-python/2.2/plat-riscos/riscosenviron.py         |    43 +
  lib-python/2.2/plat-riscos/riscospath.py            |   375 +
  lib-python/2.2/plat-riscos/rourl2path.py            |    69 +
  lib-python/2.2/plat-sunos4/IN.py                    |    59 +
  lib-python/2.2/plat-sunos4/SUNAUDIODEV.py           |    38 +
  lib-python/2.2/plat-sunos4/WAIT.py                  |    13 +
  lib-python/2.2/plat-sunos4/regen                    |     9 +
  lib-python/2.2/plat-sunos5/CDIO.py                  |    73 +
  lib-python/2.2/plat-sunos5/DLFCN.py                 |    27 +
  lib-python/2.2/plat-sunos5/IN.py                    |  1421 ++++
  lib-python/2.2/plat-sunos5/STROPTS.py               |  1813 +++++
  lib-python/2.2/plat-sunos5/SUNAUDIODEV.py           |    40 +
  lib-python/2.2/plat-sunos5/TYPES.py                 |   314 +
  lib-python/2.2/plat-sunos5/regen                    |     9 +
  lib-python/2.2/plat-unixware7/IN.py                 |   836 ++
  lib-python/2.2/plat-unixware7/STROPTS.py            |   328 +
  lib-python/2.2/plat-unixware7/regen                 |     9 +
  lib-python/2.2/popen2.py                            |   199 +
  lib-python/2.2/poplib.py                            |   335 +
  lib-python/2.2/posixfile.py                         |   240 +
  lib-python/2.2/posixpath.py                         |   414 +
  lib-python/2.2/pprint.py                            |   310 +
  lib-python/2.2/pre.py                               |   656 ++
  lib-python/2.2/profile.doc                          |   702 ++
  lib-python/2.2/profile.py                           |   556 +
  lib-python/2.2/pstats.py                            |   641 +
  lib-python/2.2/pty.py                               |   167 +
  lib-python/2.2/py_compile.py                        |    82 +
  lib-python/2.2/pyclbr.py                            |   337 +
  lib-python/2.2/pydoc.py                             |  2112 ++++++
  lib-python/2.2/quopri.py                            |   237 +
  lib-python/2.2/random.py                            |   779 ++
  lib-python/2.2/re.py                                |    33 +
  lib-python/2.2/reconvert.py                         |   192 +
  lib-python/2.2/regex_syntax.py                      |    53 +
  lib-python/2.2/regsub.py                            |   198 +
  lib-python/2.2/repr.py                              |    95 +
  lib-python/2.2/rexec.py                             |   592 +
  lib-python/2.2/rfc822.py                            |  1010 +++
  lib-python/2.2/rlcompleter.py                       |   122 +
  lib-python/2.2/robotparser.py                       |   262 +
  lib-python/2.2/sched.py                             |   106 +
  lib-python/2.2/sgmllib.py                           |   516 +
  lib-python/2.2/shelve.py                            |   158 +
  lib-python/2.2/shlex.py                             |   209 +
  lib-python/2.2/shutil.py                            |   138 +
  lib-python/2.2/site-packages/README                 |     2 +
  lib-python/2.2/site.py                              |   330 +
  lib-python/2.2/smtpd.py                             |   543 +
  lib-python/2.2/smtplib.py                           |   729 ++
  lib-python/2.2/sndhdr.py                            |   228 +
  lib-python/2.2/socket.py                            |   256 +
  lib-python/2.2/sre.py                               |   311 +
  lib-python/2.2/sre_compile.py                       |   455 +
  lib-python/2.2/sre_constants.py                     |   259 +
  lib-python/2.2/sre_parse.py                         |   738 ++
  lib-python/2.2/stat.py                              |    86 +
  lib-python/2.2/statcache.py                         |    77 +
  lib-python/2.2/statvfs.py                           |    15 +
  lib-python/2.2/string.py                            |   387 +
  lib-python/2.2/stringold.py                         |   430 +
  lib-python/2.2/sunau.py                             |   474 +
  lib-python/2.2/sunaudio.py                          |    44 +
  lib-python/2.2/symbol.py                            |    95 +
  lib-python/2.2/symtable.py                          |   255 +
  lib-python/2.2/tabnanny.py                          |   327 +
  lib-python/2.2/telnetlib.py                         |   593 +
  lib-python/2.2/tempfile.py                          |   244 +
  lib-python/2.2/test/README                          |   372 +
  lib-python/2.2/test/__init__.py                     |     1 +
  lib-python/2.2/test/audiotest.au                    |   Bin 
  lib-python/2.2/test/autotest.py                     |     6 +
  lib-python/2.2/test/badsyntax_future3.py            |    10 +
  lib-python/2.2/test/badsyntax_future4.py            |    10 +
  lib-python/2.2/test/badsyntax_future5.py            |    12 +
  lib-python/2.2/test/badsyntax_future6.py            |    10 +
  lib-python/2.2/test/badsyntax_future7.py            |    11 +
  lib-python/2.2/test/badsyntax_nocaret.py            |     2 +
  lib-python/2.2/test/data/PyBanner048.gif            |   Bin 
  lib-python/2.2/test/data/msg_01.txt                 |    19 +
  lib-python/2.2/test/data/msg_02.txt                 |   135 +
  lib-python/2.2/test/data/msg_03.txt                 |    16 +
  lib-python/2.2/test/data/msg_04.txt                 |    37 +
  lib-python/2.2/test/data/msg_05.txt                 |    28 +
  lib-python/2.2/test/data/msg_06.txt                 |    33 +
  lib-python/2.2/test/data/msg_07.txt                 |    83 +
  lib-python/2.2/test/data/msg_08.txt                 |    24 +
  lib-python/2.2/test/data/msg_09.txt                 |    24 +
  lib-python/2.2/test/data/msg_10.txt                 |    32 +
  lib-python/2.2/test/data/msg_11.txt                 |     7 +
  lib-python/2.2/test/data/msg_12.txt                 |    36 +
  lib-python/2.2/test/data/msg_13.txt                 |    94 +
  lib-python/2.2/test/data/msg_14.txt                 |    23 +
  lib-python/2.2/test/data/msg_15.txt                 |    52 +
  lib-python/2.2/test/data/msg_16.txt                 |   123 +
  lib-python/2.2/test/data/msg_17.txt                 |    12 +
  lib-python/2.2/test/data/msg_18.txt                 |     6 +
  lib-python/2.2/test/data/msg_19.txt                 |    43 +
  lib-python/2.2/test/data/msg_20.txt                 |    22 +
  lib-python/2.2/test/data/msg_21.txt                 |    22 +
  lib-python/2.2/test/data/msg_22.txt                 |    46 +
  lib-python/2.2/test/data/msg_23.txt                 |     8 +
  lib-python/2.2/test/double_const.py                 |    30 +
  lib-python/2.2/test/greyrgb.uue                     |  1547 ++++
  lib-python/2.2/test/output/test_MimeWriter          |   110 +
  lib-python/2.2/test/output/test_asynchat            |     3 +
  lib-python/2.2/test/output/test_augassign           |    51 +
  lib-python/2.2/test/output/test_binascii            |    29 +
  lib-python/2.2/test/output/test_builtin             |    53 +
  lib-python/2.2/test/output/test_cfgparser           |     9 +
  lib-python/2.2/test/output/test_cgi                 |    29 +
  lib-python/2.2/test/output/test_charmapcodec        |    16 +
  lib-python/2.2/test/output/test_class               |   101 +
  lib-python/2.2/test/output/test_coercion            |  1054 +++
  lib-python/2.2/test/output/test_compare             |   101 +
  lib-python/2.2/test/output/test_compile             |     7 +
  lib-python/2.2/test/output/test_cookie              |    32 +
  lib-python/2.2/test/output/test_exceptions          |    52 +
  lib-python/2.2/test/output/test_extcall             |   112 +
  lib-python/2.2/test/output/test_frozen              |     4 +
  lib-python/2.2/test/output/test_future              |     9 +
  lib-python/2.2/test/output/test_gettext             |    46 +
  lib-python/2.2/test/output/test_global              |     5 +
  lib-python/2.2/test/output/test_grammar             |    66 +
  lib-python/2.2/test/output/test_httplib             |    10 +
  lib-python/2.2/test/output/test_linuxaudiodev       |     7 +
  lib-python/2.2/test/output/test_longexp             |     2 +
  lib-python/2.2/test/output/test_math                |    26 +
  lib-python/2.2/test/output/test_md5                 |     9 +
  lib-python/2.2/test/output/test_mimetools           |     5 +
  lib-python/2.2/test/output/test_mmap                |    34 +
  lib-python/2.2/test/output/test_new                 |     7 +
  lib-python/2.2/test/output/test_nis                 |     2 +
  lib-python/2.2/test/output/test_opcodes             |     6 +
  lib-python/2.2/test/output/test_openpty             |     2 +
  lib-python/2.2/test/output/test_operations          |     6 +
  lib-python/2.2/test/output/test_pkg                 |    45 +
  lib-python/2.2/test/output/test_poll                |    17 +
  lib-python/2.2/test/output/test_popen2              |     9 +
  lib-python/2.2/test/output/test_posixpath           |     2 +
  lib-python/2.2/test/output/test_pow                 |    25 +
  lib-python/2.2/test/output/test_profile             |    17 +
  lib-python/2.2/test/output/test_pty                 |     3 +
  lib-python/2.2/test/output/test_pwd                 |     7 +
  lib-python/2.2/test/output/test_pyexpat             |   110 +
  lib-python/2.2/test/output/test_re                  |     2 +
  lib-python/2.2/test/output/test_regex               |    29 +
  lib-python/2.2/test/output/test_rgbimg              |     2 +
  lib-python/2.2/test/output/test_richcmp             |   187 +
  lib-python/2.2/test/output/test_rotor               |     5 +
  lib-python/2.2/test/output/test_sax                 |    42 +
  lib-python/2.2/test/output/test_scope               |    24 +
  lib-python/2.2/test/output/test_signal              |     2 +
  lib-python/2.2/test/output/test_socket              |     2 +
  lib-python/2.2/test/output/test_string              |     3 +
  lib-python/2.2/test/output/test_thread              |     6 +
  lib-python/2.2/test/output/test_threadedtempfile    |     5 +
  lib-python/2.2/test/output/test_tokenize            |   648 +
  lib-python/2.2/test/output/test_types               |    16 +
  lib-python/2.2/test/output/test_ucn                 |     7 +
  lib-python/2.2/test/output/test_unicode             |    21 +
  lib-python/2.2/test/output/test_unicode_file        |     2 +
  lib-python/2.2/test/output/test_unicodedata         |     5 +
  lib-python/2.2/test/output/test_urlparse            |    47 +
  lib-python/2.2/test/output/test_winreg              |     3 +
  lib-python/2.2/test/output/test_winsound            |     2 +
  lib-python/2.2/test/output/test_xreadline           |     4 +
  lib-python/2.2/test/output/test_zlib                |    14 +
  lib-python/2.2/test/pickletester.py                 |   285 +
  lib-python/2.2/test/pydocfodder.py                  |   210 +
  lib-python/2.2/test/pystone.py                      |   252 +
  lib-python/2.2/test/re_tests.py                     |   661 ++
  lib-python/2.2/test/regex_tests.py                  |   287 +
  lib-python/2.2/test/regrtest.py                     |   832 ++
  lib-python/2.2/test/reperf.py                       |    23 +
  lib-python/2.2/test/sortperf.py                     |   141 +
  lib-python/2.2/test/string_tests.py                 |   265 +
  lib-python/2.2/test/test.xml                        |   115 +
  lib-python/2.2/test/test.xml.out                    |   115 +
  lib-python/2.2/test/test_MimeWriter.py              |   170 +
  lib-python/2.2/test/test_StringIO.py                |   113 +
  lib-python/2.2/test/test___all__.py                 |   158 +
  lib-python/2.2/test/test___future__.py              |    59 +
  lib-python/2.2/test/test_al.py                      |    23 +
  lib-python/2.2/test/test_array.py                   |   192 +
  lib-python/2.2/test/test_asynchat.py                |    58 +
  lib-python/2.2/test/test_atexit.py                  |    66 +
  lib-python/2.2/test/test_audioop.py                 |   264 +
  lib-python/2.2/test/test_augassign.py               |   261 +
  lib-python/2.2/test/test_b1.py                      |   632 +
  lib-python/2.2/test/test_b2.py                      |   365 +
  lib-python/2.2/test/test_base64.py                  |    53 +
  lib-python/2.2/test/test_bastion.py                 |     3 +
  lib-python/2.2/test/test_binascii.py                |   119 +
  lib-python/2.2/test/test_binhex.py                  |    50 +
  lib-python/2.2/test/test_binop.py                   |   328 +
  lib-python/2.2/test/test_bisect.py                  |   127 +
  lib-python/2.2/test/test_bsddb.py                   |    76 +
  lib-python/2.2/test/test_bufio.py                   |    60 +
  lib-python/2.2/test/test_builtin.py                 |    13 +
  lib-python/2.2/test/test_calendar.py                |    61 +
  lib-python/2.2/test/test_call.py                    |   131 +
  lib-python/2.2/test/test_capi.py                    |    16 +
  lib-python/2.2/test/test_cd.py                      |    26 +
  lib-python/2.2/test/test_cfgparser.py               |   284 +
  lib-python/2.2/test/test_cgi.py                     |   188 +
  lib-python/2.2/test/test_charmapcodec.py            |    43 +
  lib-python/2.2/test/test_cl.py                      |    78 +
  lib-python/2.2/test/test_class.py                   |   317 +
  lib-python/2.2/test/test_cmath.py                   |    35 +
  lib-python/2.2/test/test_codecs.py                  |    31 +
  lib-python/2.2/test/test_codeop.py                  |   190 +
  lib-python/2.2/test/test_coercion.py                |   118 +
  lib-python/2.2/test/test_commands.py                |    52 +
  lib-python/2.2/test/test_compare.py                 |    56 +
  lib-python/2.2/test/test_compile.py                 |   129 +
  lib-python/2.2/test/test_complex.py                 |    68 +
  lib-python/2.2/test/test_contains.py                |   171 +
  lib-python/2.2/test/test_cookie.py                  |    47 +
  lib-python/2.2/test/test_copy_reg.py                |    30 +
  lib-python/2.2/test/test_cpickle.py                 |   100 +
  lib-python/2.2/test/test_crypt.py                   |    11 +
  lib-python/2.2/test/test_curses.py                  |   210 +
  lib-python/2.2/test/test_dbm.py                     |    43 +
  lib-python/2.2/test/test_descr.py                   |  3276 ++++++++++
  lib-python/2.2/test/test_descrtut.py                |   501 +
  lib-python/2.2/test/test_difflib.py                 |     2 +
  lib-python/2.2/test/test_dircache.py                |    74 +
  lib-python/2.2/test/test_dl.py                      |    33 +
  lib-python/2.2/test/test_doctest.py                 |     2 +
  lib-python/2.2/test/test_doctest2.py                |   108 +
  lib-python/2.2/test/test_dospath.py                 |    61 +
  lib-python/2.2/test/test_dumbdbm.py                 |    79 +
  lib-python/2.2/test/test_email.py                   |    13 +
  lib-python/2.2/test/test_email_codecs.py            |    11 +
  lib-python/2.2/test/test_errno.py                   |    49 +
  lib-python/2.2/test/test_exceptions.py              |   206 +
  lib-python/2.2/test/test_extcall.py                 |   268 +
  lib-python/2.2/test/test_fcntl.py                   |    53 +
  lib-python/2.2/test/test_file.py                    |    63 +
  lib-python/2.2/test/test_fileinput.py               |   159 +
  lib-python/2.2/test/test_fnmatch.py                 |    46 +
  lib-python/2.2/test/test_fork1.py                   |    75 +
  lib-python/2.2/test/test_format.py                  |   218 +
  lib-python/2.2/test/test_fpformat.py                |    75 +
  lib-python/2.2/test/test_frozen.py                  |    26 +
  lib-python/2.2/test/test_funcattrs.py               |   379 +
  lib-python/2.2/test/test_future.py                  |    47 +
  lib-python/2.2/test/test_future1.py                 |    11 +
  lib-python/2.2/test/test_future2.py                 |    10 +
  lib-python/2.2/test/test_future3.py                 |    11 +
  lib-python/2.2/test/test_gc.py                      |   346 +
  lib-python/2.2/test/test_gdbm.py                    |    46 +
  lib-python/2.2/test/test_generators.py              |  1386 ++++
  lib-python/2.2/test/test_getargs.py                 |    21 +
  lib-python/2.2/test/test_getopt.py                  |   110 +
  lib-python/2.2/test/test_gettext.py                 |   200 +
  lib-python/2.2/test/test_gl.py                      |   150 +
  lib-python/2.2/test/test_glob.py                    |   115 +
  lib-python/2.2/test/test_global.py                  |    51 +
  lib-python/2.2/test/test_grammar.py                 |   732 ++
  lib-python/2.2/test/test_grp.py                     |    27 +
  lib-python/2.2/test/test_gzip.py                    |    82 +
  lib-python/2.2/test/test_hash.py                    |    36 +
  lib-python/2.2/test/test_hmac.py                    |   108 +
  lib-python/2.2/test/test_hotshot.py                 |   117 +
  lib-python/2.2/test/test_htmllib.py                 |    42 +
  lib-python/2.2/test/test_htmlparser.py              |   294 +
  lib-python/2.2/test/test_httplib.py                 |    58 +
  lib-python/2.2/test/test_imageop.py                 |   171 +
  lib-python/2.2/test/test_imgfile.py                 |   116 +
  lib-python/2.2/test/test_import.py                  |    71 +
  lib-python/2.2/test/test_inspect.py                 |   363 +
  lib-python/2.2/test/test_iter.py                    |   779 ++
  lib-python/2.2/test/test_largefile.py               |   162 +
  lib-python/2.2/test/test_linuxaudiodev.py           |    89 +
  lib-python/2.2/test/test_locale.py                  |    44 +
  lib-python/2.2/test/test_long.py                    |   410 +
  lib-python/2.2/test/test_long_future.py             |    55 +
  lib-python/2.2/test/test_longexp.py                 |    12 +
  lib-python/2.2/test/test_mailbox.py                 |   104 +
  lib-python/2.2/test/test_marshal.py                 |    44 +
  lib-python/2.2/test/test_math.py                    |   195 +
  lib-python/2.2/test/test_md5.py                     |    30 +
  lib-python/2.2/test/test_mhlib.py                   |   340 +
  lib-python/2.2/test/test_mimetools.py               |    18 +
  lib-python/2.2/test/test_mimetypes.py               |    59 +
  lib-python/2.2/test/test_minidom.py                 |   649 +
  lib-python/2.2/test/test_mmap.py                    |   317 +
  lib-python/2.2/test/test_multifile.py               |    66 +
  lib-python/2.2/test/test_mutants.py                 |   285 +
  lib-python/2.2/test/test_netrc.py                   |    42 +
  lib-python/2.2/test/test_new.py                     |   108 +
  lib-python/2.2/test/test_nis.py                     |    32 +
  lib-python/2.2/test/test_ntpath.py                  |   114 +
  lib-python/2.2/test/test_opcodes.py                 |   101 +
  lib-python/2.2/test/test_openpty.py                 |    21 +
  lib-python/2.2/test/test_operations.py              |    52 +
  lib-python/2.2/test/test_operator.py                |   218 +
  lib-python/2.2/test/test_os.py                      |   187 +
  lib-python/2.2/test/test_parser.py                  |   383 +
  lib-python/2.2/test/test_pep247.py                  |    50 +
  lib-python/2.2/test/test_pickle.py                  |    40 +
  lib-python/2.2/test/test_pkg.py                     |   259 +
  lib-python/2.2/test/test_pkgimport.py               |    84 +
  lib-python/2.2/test/test_poll.py                    |   172 +
  lib-python/2.2/test/test_popen2.py                  |    72 +
  lib-python/2.2/test/test_posixpath.py               |    40 +
  lib-python/2.2/test/test_pow.py                     |   125 +
  lib-python/2.2/test/test_pprint.py                  |   104 +
  lib-python/2.2/test/test_profile.py                 |    86 +
  lib-python/2.2/test/test_profilehooks.py            |   360 +
  lib-python/2.2/test/test_pty.py                     |    98 +
  lib-python/2.2/test/test_pwd.py                     |    71 +
  lib-python/2.2/test/test_pyclbr.py                  |   154 +
  lib-python/2.2/test/test_pyexpat.py                 |   202 +
  lib-python/2.2/test/test_queue.py                   |   158 +
  lib-python/2.2/test/test_quopri.py                  |   157 +
  lib-python/2.2/test/test_random.py                  |    19 +
  lib-python/2.2/test/test_re.py                      |   392 +
  lib-python/2.2/test/test_regex.py                   |   113 +
  lib-python/2.2/test/test_repr.py                    |   275 +
  lib-python/2.2/test/test_rfc822.py                  |   211 +
  lib-python/2.2/test/test_rgbimg.py                  |    63 +
  lib-python/2.2/test/test_richcmp.py                 |   261 +
  lib-python/2.2/test/test_rotor.py                   |    28 +
  lib-python/2.2/test/test_sax.py                     |   707 ++
  lib-python/2.2/test/test_scope.py                   |   524 +
  lib-python/2.2/test/test_select.py                  |    62 +
  lib-python/2.2/test/test_sgmllib.py                 |   314 +
  lib-python/2.2/test/test_sha.py                     |    52 +
  lib-python/2.2/test/test_signal.py                  |    66 +
  lib-python/2.2/test/test_socket.py                  |   170 +
  lib-python/2.2/test/test_socket_ssl.py              |    27 +
  lib-python/2.2/test/test_socketserver.py            |   165 +
  lib-python/2.2/test/test_sre.py                     |   434 +
  lib-python/2.2/test/test_strftime.py                |   146 +
  lib-python/2.2/test/test_string.py                  |    83 +
  lib-python/2.2/test/test_strop.py                   |   133 +
  lib-python/2.2/test/test_struct.py                  |   441 +
  lib-python/2.2/test/test_structseq.py               |    28 +
  lib-python/2.2/test/test_sunaudiodev.py             |    28 +
  lib-python/2.2/test/test_sundry.py                  |   102 +
  lib-python/2.2/test/test_support.py                 |   233 +
  lib-python/2.2/test/test_symtable.py                |     8 +
  lib-python/2.2/test/test_tempfile.py                |    10 +
  lib-python/2.2/test/test_thread.py                  |   117 +
  lib-python/2.2/test/test_threaded_import.py         |    56 +
  lib-python/2.2/test/test_threadedtempfile.py        |    86 +
  lib-python/2.2/test/test_threading.py               |    55 +
  lib-python/2.2/test/test_time.py                    |    50 +
  lib-python/2.2/test/test_timing.py                  |    21 +
  lib-python/2.2/test/test_tokenize.py                |     9 +
  lib-python/2.2/test/test_trace.py                   |   219 +
  lib-python/2.2/test/test_traceback.py               |    49 +
  lib-python/2.2/test/test_types.py                   |   428 +
  lib-python/2.2/test/test_ucn.py                     |   113 +
  lib-python/2.2/test/test_unary.py                   |    58 +
  lib-python/2.2/test/test_unicode.py                 |   782 ++
  lib-python/2.2/test/test_unicode_file.py            |    95 +
  lib-python/2.2/test/test_unicodedata.py             |   125 +
  lib-python/2.2/test/test_unpack.py                  |   144 +
  lib-python/2.2/test/test_urllib.py                  |   109 +
  lib-python/2.2/test/test_urllib2.py                 |    31 +
  lib-python/2.2/test/test_urlparse.py                |    94 +
  lib-python/2.2/test/test_userdict.py                |   120 +
  lib-python/2.2/test/test_userlist.py                |   201 +
  lib-python/2.2/test/test_userstring.py              |    43 +
  lib-python/2.2/test/test_uu.py                      |   158 +
  lib-python/2.2/test/test_wave.py                    |    34 +
  lib-python/2.2/test/test_weakref.py                 |   573 +
  lib-python/2.2/test/test_winreg.py                  |   151 +
  lib-python/2.2/test/test_winsound.py                |     6 +
  lib-python/2.2/test/test_xmllib.py                  |    35 +
  lib-python/2.2/test/test_xmlrpc.py                  |    37 +
  lib-python/2.2/test/test_xreadline.py               |    43 +
  lib-python/2.2/test/test_zipfile.py                 |    78 +
  lib-python/2.2/test/test_zlib.py                    |   226 +
  lib-python/2.2/test/testall.py                      |     4 +
  lib-python/2.2/test/testcodec.py                    |    48 +
  lib-python/2.2/test/testimg.uue                     |  1170 +++
  lib-python/2.2/test/testimgr.uue                    |  1170 +++
  lib-python/2.2/test/testrgb.uue                     |   971 ++
  lib-python/2.2/test/tokenize_tests.py               |   175 +
  lib-python/2.2/this.py                              |    28 +
  lib-python/2.2/threading.py                         |   698 ++
  lib-python/2.2/toaiff.py                            |   106 +
  lib-python/2.2/token.py                             |   140 +
  lib-python/2.2/tokenize.py                          |   287 +
  lib-python/2.2/traceback.py                         |   301 +
  lib-python/2.2/tty.py                               |    36 +
  lib-python/2.2/types.py                             |    86 +
  lib-python/2.2/tzparse.py                           |    98 +
  lib-python/2.2/unittest.py                          |   723 ++
  lib-python/2.2/urllib.py                            |  1465 ++++
  lib-python/2.2/urllib2.py                           |  1144 +++
  lib-python/2.2/urlparse.py                          |   276 +
  lib-python/2.2/user.py                              |    45 +
  lib-python/2.2/uu.py                                |   195 +
  lib-python/2.2/warnings.py                          |   258 +
  lib-python/2.2/wave.py                              |   489 +
  lib-python/2.2/weakref.py                           |   280 +
  lib-python/2.2/webbrowser.py                        |   330 +
  lib-python/2.2/whichdb.py                           |    87 +
  lib-python/2.2/whrandom.py                          |   140 +
  lib-python/2.2/xdrlib.py                            |   285 +
  lib-python/2.2/xml/__init__.py                      |    42 +
  lib-python/2.2/xml/dom/__init__.py                  |   125 +
  lib-python/2.2/xml/dom/domreg.py                    |    76 +
  lib-python/2.2/xml/dom/minidom.py                   |   970 ++
  lib-python/2.2/xml/dom/pulldom.py                   |   341 +
  lib-python/2.2/xml/parsers/__init__.py              |     8 +
  lib-python/2.2/xml/parsers/expat.py                 |    13 +
  lib-python/2.2/xml/sax/__init__.py                  |   108 +
  lib-python/2.2/xml/sax/_exceptions.py               |   126 +
  lib-python/2.2/xml/sax/expatreader.py               |   333 +
  lib-python/2.2/xml/sax/handler.py                   |   321 +
  lib-python/2.2/xml/sax/saxutils.py                  |   260 +
  lib-python/2.2/xml/sax/xmlreader.py                 |   378 +
  lib-python/2.2/xmllib.py                            |   929 ++
  lib-python/2.2/xmlrpclib.py                         |  1019 +++
  lib-python/2.2/zipfile.py                           |   586 +
  836 files changed, 175696 insertions(+), 4 deletions(-)


diff --git a/.hgignore b/.hgignore
new file mode 100644
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,30 @@
+syntax: glob
+*.class
+*.pyc
+*.pyd
+*.pyo
+*.orig
+*.rej
+*.swp
+\#*
+*~
+# IntelliJ files
+*.ipr
+*.iml
+*.iws
+.idea/misc.xml
+.idea/workspace.xml
+
+.AppleDouble
+.DS_Store
+.classpath
+.externalToolBuilders
+.project
+.settings
+__pycache__
+ant.properties
+bin
+build
+cachedir
+dist
+profile.txt
diff --git a/.hgsub b/.hgsub
--- a/.hgsub
+++ b/.hgsub
@@ -1,1 +0,0 @@
-CPythonLib = [svn] http://svn.python.org/projects/python/branches/release22-maint/Lib/
diff --git a/.hgsubstate b/.hgsubstate
--- a/.hgsubstate
+++ b/.hgsubstate
@@ -1,1 +0,0 @@
-70085 CPythonLib
diff --git a/build.xml b/build.xml
--- a/build.xml
+++ b/build.xml
@@ -216,7 +216,7 @@
         <property name="jython.base.dir" value="${basedir}" />
         <property name="source.dir" value="${basedir}/src" />
         <property name="templates.dir" value="${basedir}/src/templates" />
-        <property name="python.lib" value="${basedir}/CPythonLib" />
+        <property name="python.lib" value="${basedir}/lib-python/2.2" />
         <property name="bugtests.dir" value="${basedir}/bugtests" />
         <property name="templates.lazy" value="true" />
     </target>
@@ -236,7 +236,7 @@
         <property name="jython.base.dir" value="${svn.checkout.dir}/jython" />
         <property name="source.dir" value="${jython.base.dir}/src" />
         <property name="has.repositories.connection" value="true" />
-        <property name="python.lib" value="${jython.base.dir}/CPythonLib" />
+        <property name="python.lib" value="${jython.base.dir}/lib-python/2.2" />
         <property name="python.exe" value="${python.home}/python" />
         <condition property="do.checkout" value="true">
             <istrue value="${has.repositories.connection}" />
diff --git a/lib-python/2.2/BaseHTTPServer.py b/lib-python/2.2/BaseHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/BaseHTTPServer.py
@@ -0,0 +1,484 @@
+"""HTTP server base class.
+
+Note: the class in this module doesn't implement any HTTP request; see
+SimpleHTTPServer for simple implementations of GET, HEAD and POST
+(including CGI scripts).
+
+Contents:
+
+- BaseHTTPRequestHandler: HTTP request handler base class
+- test: test function
+
+XXX To do:
+
+- send server version
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+- are request names really case sensitive?
+
+"""
+
+
+# See also:
+#
+# HTTP Working Group                                        T. Berners-Lee
+# INTERNET-DRAFT                                            R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt>                     H. Frystyk Nielsen
+# Expires September 8, 1995                                  March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# |        host: Either the DNS name or the IP number of the remote client
+# |        rfc931: Any information returned by identd for this person,
+# |                - otherwise.
+# |        authuser: If user sent a userid for authentication, the user name,
+# |                  - otherwise.
+# |        DD: Day
+# |        Mon: Month (calendar name)
+# |        YYYY: Year
+# |        hh: hour (24-hour format, the machine's timezone)
+# |        mm: minutes
+# |        ss: seconds
+# |        request: The first line of the HTTP request as sent by the client.
+# |        ddd: the status code returned by the server, - if not available.
+# |        bbbb: the total number of bytes sent,
+# |              *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+
+__version__ = "0.2"
+
+__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
+
+import sys
+import time
+import socket # For gethostbyaddr()
+import mimetools
+import SocketServer
+
+# Default error message
+DEFAULT_ERROR_MESSAGE = """\
+<head>
+<title>Error response</title>
+</head>
+<body>
+<h1>Error response</h1>
+<p>Error code %(code)d.
+<p>Message: %(message)s.
+<p>Error code explanation: %(code)s = %(explain)s.
+</body>
+"""
+
+
+class HTTPServer(SocketServer.TCPServer):
+
+    allow_reuse_address = 1    # Seems to make sense in testing environment
+
+    def server_bind(self):
+        """Override server_bind to store the server name."""
+        SocketServer.TCPServer.server_bind(self)
+        host, port = self.socket.getsockname()
+        self.server_name = socket.getfqdn(host)
+        self.server_port = port
+
+
+class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0".  <path> is encoded
+    using the URL encoding scheme (using %xx to signify the ASCII
+    character with hex code xx).
+
+    The protocol is vague about whether lines are separated by LF
+    characters or by CRLF pairs -- for compatibility with the widest
+    range of clients, both should be accepted.  Similarly, whitespace
+    in the request line should be treated sensibly (allowing multiple
+    spaces between components and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.0 protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version (always "HTTP/1.0"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method do_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+    port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of mimetools.Message (or a derived
+    class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+    start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request line should be stored in self.raw_requestline; the
+        results are in self.command, self.path, self.request_version
+        and self.headers.
+
+        Return value is 1 for success, 0 for failure; on failure, an
+        error is sent back.
+
+        """
+        self.request_version = version = "HTTP/0.9" # Default
+        requestline = self.raw_requestline
+        # Strip the trailing CRLF (or bare LF) from the request line.
+        if requestline[-2:] == '\r\n':
+            requestline = requestline[:-2]
+        elif requestline[-1:] == '\n':
+            requestline = requestline[:-1]
+        self.requestline = requestline
+        words = requestline.split()
+        if len(words) == 3:
+            # Full request line: <command> <path> <version>
+            [command, path, version] = words
+            if version[:5] != 'HTTP/':
+                self.send_error(400, "Bad request version (%s)" % `version`)
+                return 0
+        elif len(words) == 2:
+            # HTTP/0.9 style "simple request": only GET is accepted.
+            [command, path] = words
+            if command != 'GET':
+                self.send_error(400,
+                                "Bad HTTP/0.9 request type (%s)" % `command`)
+                return 0
+        else:
+            self.send_error(400, "Bad request syntax (%s)" % `requestline`)
+            return 0
+        self.command, self.path, self.request_version = command, path, version
+        # Parse the header section that follows the request line.
+        self.headers = self.MessageClass(self.rfile, 0)
+        return 1
+
+    def handle(self):
+        """Handle a single HTTP request.
+
+        You normally don't need to override this method; see the class
+        __doc__ string for information on how to handle specific HTTP
+        commands such as GET and POST.
+
+        """
+
+        self.raw_requestline = self.rfile.readline()
+        if not self.parse_request(): # An error code has been sent, just exit
+            return
+        # Dispatch to a do_<COMMAND> method (e.g. do_GET), which
+        # subclasses are expected to provide.
+        mname = 'do_' + self.command
+        if not hasattr(self, mname):
+            self.send_error(501, "Unsupported method (%s)" % `self.command`)
+            return
+        method = getattr(self, mname)
+        method()
+
+    def send_error(self, code, message=None):
+        """Send and log an error reply.
+
+        Arguments are the error code, and a detailed message.
+        The detailed message defaults to the short entry matching the
+        response code.
+
+        This sends an error response (so it must be called before any
+        output has been generated), logs the error, and finally sends
+        a piece of HTML explaining the error to the user.
+
+        """
+
+        # Look up the short and long messages for this code in the
+        # class-level responses table; unknown codes get '???'.
+        try:
+            short, long = self.responses[code]
+        except KeyError:
+            short, long = '???', '???'
+        if not message:
+            message = short
+        explain = long
+        self.log_error("code %d, message %s", code, message)
+        self.send_response(code, message)
+        self.send_header("Content-Type", "text/html")
+        self.end_headers()
+        # The HTML body is built from the error_message_format template.
+        self.wfile.write(self.error_message_format %
+                         {'code': code,
+                          'message': message,
+                          'explain': explain})
+
+    # Template used by send_error(); must accept the %(code)d,
+    # %(message)s and %(explain)s keys.  Override to customize error pages.
+    error_message_format = DEFAULT_ERROR_MESSAGE
+
+    def send_response(self, code, message=None):
+        """Send the response header and log the response code.
+
+        Also send two standard headers with the server software
+        version and the current date.
+
+        """
+        self.log_request(code)
+        # Default the reason phrase from the responses table.
+        if message is None:
+            if self.responses.has_key(code):
+                message = self.responses[code][0]
+            else:
+                message = ''
+        # HTTP/0.9 responses carry no status line or headers at all.
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s %s %s\r\n" %
+                             (self.protocol_version, str(code), message))
+        self.send_header('Server', self.version_string())
+        self.send_header('Date', self.date_time_string())
+
+    def send_header(self, keyword, value):
+        """Send a MIME header (suppressed for HTTP/0.9 requests)."""
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s: %s\r\n" % (keyword, value))
+
+    def end_headers(self):
+        """Send the blank line ending the MIME headers."""
+        # HTTP/0.9 has no header section, hence nothing to terminate.
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("\r\n")
+
+    def log_request(self, code='-', size='-'):
+        """Log an accepted request.
+
+        This is called by send_response().
+
+        """
+
+        self.log_message('"%s" %s %s',
+                         self.requestline, str(code), str(size))
+
+    def log_error(self, *args):
+        """Log an error.
+
+        This is called when a request cannot be fulfilled.  By
+        default it passes the message on to log_message().
+
+        Arguments are the same as for log_message().
+
+        XXX This should go to the separate error log.
+
+        """
+
+        apply(self.log_message, args)
+
+    def log_message(self, format, *args):
+        """Log an arbitrary message.
+
+        This is used by all other logging functions.  Override
+        it if you have specific logging wishes.
+
+        The first argument, FORMAT, is a format string for the
+        message to be logged.  If the format string contains
+        any % escapes requiring parameters, they should be
+        specified as subsequent arguments (it's just like
+        printf!).
+
+        The client host and current date/time are prefixed to
+        every message.
+
+        """
+
+        # Writes to stderr in a "host - - [timestamp] message" layout,
+        # similar to the Common Log Format.
+        sys.stderr.write("%s - - [%s] %s\n" %
+                         (self.address_string(),
+                          self.log_date_time_string(),
+                          format%args))
+
+    def version_string(self):
+        """Return the server software version string (Server header value)."""
+        return self.server_version + ' ' + self.sys_version
+
+    def date_time_string(self):
+        """Return the current date and time formatted for a message header.
+
+        Uses GMT, e.g. 'Mon, 19 Mar 2012 10:45:11 GMT' (RFC 1123 style).
+        """
+        now = time.time()
+        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
+        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+                self.weekdayname[wd],
+                day, self.monthname[month], year,
+                hh, mm, ss)
+        return s
+
+    def log_date_time_string(self):
+        """Return the current local time formatted for logging."""
+        now = time.time()
+        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+                day, self.monthname[month], year, hh, mm, ss)
+        return s
+
+    # Name tables used by date_time_string() and log_date_time_string().
+    # monthname is 1-based (index 0 is an unused placeholder), matching
+    # the month numbers returned by time.gmtime()/time.localtime().
+    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+    monthname = [None,
+                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+    def address_string(self):
+        """Return the client address formatted for logging.
+
+        This version looks up the full hostname using gethostbyaddr(),
+        and tries to find a name that contains at least one dot.
+
+        """
+
+        # The client port is deliberately ignored; only the host part
+        # is resolved (socket.getfqdn handles the dotted-name logic).
+        host, port = self.client_address
+        return socket.getfqdn(host)
+
+    # Essentially static class variables
+
+    # The version of the HTTP protocol we support.
+    # Don't override unless you know what you're doing (hint: incoming
+    # requests are required to have exactly this version string).
+    protocol_version = "HTTP/1.0"
+
+    # The Message-like class used to parse headers
+    MessageClass = mimetools.Message
+
+    # Table mapping response codes to messages; entries have the
+    # form {code: (shortmessage, longmessage)}.
+    # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
+    responses = {
+        200: ('OK', 'Request fulfilled, document follows'),
+        201: ('Created', 'Document created, URL follows'),
+        202: ('Accepted',
+              'Request accepted, processing continues off-line'),
+        203: ('Partial information', 'Request fulfilled from cache'),
+        204: ('No response', 'Request fulfilled, nothing follows'),
+
+        301: ('Moved', 'Object moved permanently -- see URI list'),
+        302: ('Found', 'Object moved temporarily -- see URI list'),
+        303: ('Method', 'Object moved -- see Method and URL list'),
+        304: ('Not modified',
+              'Document has not changed singe given time'),
+
+        400: ('Bad request',
+              'Bad request syntax or unsupported method'),
+        401: ('Unauthorized',
+              'No permission -- see authorization schemes'),
+        402: ('Payment required',
+              'No payment -- see charging schemes'),
+        403: ('Forbidden',
+              'Request forbidden -- authorization will not help'),
+        404: ('Not found', 'Nothing matches the given URI'),
+
+        500: ('Internal error', 'Server got itself in trouble'),
+        501: ('Not implemented',
+              'Server does not support this operation'),
+        502: ('Service temporarily overloaded',
+              'The server cannot process the request due to a high load'),
+        503: ('Gateway timeout',
+              'The gateway server did not receive a timely response'),
+
+        }
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+         ServerClass = HTTPServer):
+    """Test the HTTP request handler class.
+
+    This runs an HTTP server on port 8000 (or the first command line
+    argument).
+
+    """
+
+    if sys.argv[1:]:
+        port = int(sys.argv[1])
+    else:
+        port = 8000
+    # '' means bind to all interfaces.
+    server_address = ('', port)
+
+    httpd = ServerClass(server_address, HandlerClass)
+
+    sa = httpd.socket.getsockname()
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    httpd.serve_forever()
+
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/Bastion.py b/lib-python/2.2/Bastion.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Bastion.py
@@ -0,0 +1,177 @@
+"""Bastionification utility.
+
+A bastion (for another object -- the 'original') is an object that has
+the same methods as the original but does not give access to its
+instance variables.  Bastions have a number of uses, but the most
+obvious one is to provide code executing in restricted mode with a
+safe interface to an object implemented in unrestricted mode.
+
+The bastionification routine has an optional second argument which is
+a filter function.  Only those methods for which the filter method
+(called with the method name as argument) returns true are accessible.
+The default filter method returns true unless the method name begins
+with an underscore.
+
+There are a number of possible implementations of bastions.  We use a
+'lazy' approach where the bastion's __getattr__() discipline does all
+the work for a particular method the first time it is used.  This is
+usually fastest, especially if the user doesn't call all available
+methods.  The retrieved methods are stored as instance variables of
+the bastion, so the overhead is only incurred on the first use of each
+method.
+
+Detail: the bastion class has a __repr__() discipline which includes
+the repr() of the original object.  This is precomputed when the
+bastion is created.
+
+"""
+
+__all__ = ["BastionClass", "Bastion"]
+
+from types import MethodType
+
+
+class BastionClass:
+
+    """Helper class used by the Bastion() function.
+
+    You could subclass this and pass the subclass as the bastionclass
+    argument to the Bastion() function, as long as the constructor has
+    the same signature (a get() function and a name for the object).
+
+    """
+
+    def __init__(self, get, name):
+        """Constructor.
+
+        Arguments:
+
+        get - a function that gets the attribute value (by name)
+        name - a human-readable name for the original object
+               (suggestion: use repr(object))
+
+        """
+        self._get_ = get
+        self._name_ = name
+
+    def __repr__(self):
+        """Return a representation string.
+
+        This includes the name passed in to the constructor, so that
+        if you print the bastion during debugging, at least you have
+        some idea of what it is.
+
+        """
+        return "<Bastion for %s>" % self._name_
+
+    def __getattr__(self, name):
+        """Get an as-yet undefined attribute value.
+
+        This calls the get() function that was passed to the
+        constructor.  The result is stored as an instance variable so
+        that the next time the same attribute is requested,
+        __getattr__() won't be invoked.
+
+        If the get() function raises an exception, this is simply
+        passed on -- exceptions are not cached.
+
+        """
+        attribute = self._get_(name)
+        # Cache in __dict__ so future lookups bypass __getattr__.
+        self.__dict__[name] = attribute
+        return attribute
+
+
+def Bastion(object, filter = lambda name: name[:1] != '_',
+            name=None, bastionclass=BastionClass):
+    """Create a bastion for an object, using an optional filter.
+
+    See the Bastion module's documentation for background.
+
+    Arguments:
+
+    object - the original object
+    filter - a predicate that decides whether a function name is OK;
+             by default all names are OK that don't start with '_'
+    name - the name of the object; default repr(object)
+    bastionclass - class used to create the bastion; default BastionClass
+
+    """
+
+    raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"
+
+    # NOTE(review): everything below the unconditional raise above is
+    # intentionally unreachable -- Bastion was disabled for security
+    # reasons; the original implementation is retained for reference.
+    #
+    # Note: we define *two* ad-hoc functions here, get1 and get2.
+    # Both are intended to be called in the same way: get(name).
+    # It is clear that the real work (getting the attribute
+    # from the object and calling the filter) is done in get1.
+    # Why can't we pass get1 to the bastion?  Because the user
+    # would be able to override the filter argument!  With get2,
+    # overriding the default argument is no security loophole:
+    # all it does is call it.
+    # Also notice that we can't place the object and filter as
+    # instance variables on the bastion object itself, since
+    # the user has full access to all instance variables!
+
+    def get1(name, object=object, filter=filter):
+        """Internal function for Bastion().  See source comments."""
+        if filter(name):
+            attribute = getattr(object, name)
+            if type(attribute) == MethodType:
+                return attribute
+        raise AttributeError, name
+
+    def get2(name, get1=get1):
+        """Internal function for Bastion().  See source comments."""
+        return get1(name)
+
+    if name is None:
+        name = `object`
+    return bastionclass(get2, name)
+
+
+def _test():
+    """Test the Bastion() function.
+
+    Exercises a bastion both directly and inside an rexec sandbox,
+    checking which attributes are (in)accessible.
+    """
+    class Original:
+        def __init__(self):
+            self.sum = 0
+        def add(self, n):
+            self._add(n)
+        def _add(self, n):
+            self.sum = self.sum + n
+        def total(self):
+            return self.sum
+    o = Original()
+    b = Bastion(o)
+    testcode = """if 1:
+    b.add(81)
+    b.add(18)
+    print "b.total() =", b.total()
+    try:
+        print "b.sum =", b.sum,
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    try:
+        print "b._add =", b._add,
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    try:
+        print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    \n"""
+    exec testcode
+    print '='*20, "Using rexec:", '='*20
+    import rexec
+    r = rexec.RExec()
+    m = r.add_module('__main__')
+    m.b = b
+    r.r_exec(testcode)
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/CGIHTTPServer.py b/lib-python/2.2/CGIHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/CGIHTTPServer.py
@@ -0,0 +1,325 @@
+"""CGI-savvy HTTP Server.
+
+This module builds on SimpleHTTPServer by implementing GET and POST
+requests to cgi-bin scripts.
+
+If the os.fork() function is not present (e.g. on Windows),
+os.popen2() is used as a fallback, with slightly altered semantics; if
+that function is not present either (e.g. on Macintosh), only Python
+scripts are supported, and they are executed by the current process.
+
+In all cases, the implementation is intentionally naive -- all
+requests are executed synchronously.
+
+SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
+-- it may execute arbitrary Python code or external programs.
+
+"""
+
+
+__version__ = "0.4"
+
+__all__ = ["CGIHTTPRequestHandler"]
+
+import os
+import sys
+import urllib
+import BaseHTTPServer
+import SimpleHTTPServer
+import select
+
+
+class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+    """Complete HTTP server with GET, HEAD and POST commands.
+
+    GET and HEAD also support running CGI scripts.
+
+    The POST command is *only* implemented for CGI scripts.
+
+    """
+
+    # Determine platform specifics
+    have_fork = hasattr(os, 'fork')
+    have_popen2 = hasattr(os, 'popen2')
+    have_popen3 = hasattr(os, 'popen3')
+
+    # Make rfile unbuffered -- we need to read one line and then pass
+    # the rest to a subprocess, so we can't use buffered input.
+    rbufsize = 0
+
+    def do_POST(self):
+        """Serve a POST request.
+
+        This is only implemented for CGI scripts.
+
+        """
+
+        if self.is_cgi():
+            self.run_cgi()
+        else:
+            self.send_error(501, "Can only POST to CGI scripts")
+
+    def send_head(self):
+        """Version of send_head that supports CGI scripts."""
+        if self.is_cgi():
+            return self.run_cgi()
+        else:
+            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+
+    def is_cgi(self):
+        """Test whether self.path corresponds to a CGI script.
+
+        Return 1 if self.path requires running a CGI script, 0 if
+        not.  As a side effect, on success self.cgi_info is set to
+        the tuple (dir, rest); note that rest does not include the
+        slash that separated it from dir.
+
+        The default implementation tests whether the path
+        begins with one of the strings in the list
+        self.cgi_directories (and the next character is a '/'
+        or the end of the string).
+
+        """
+
+        path = self.path
+
+        for x in self.cgi_directories:
+            i = len(x)
+            if path[:i] == x and (not path[i:] or path[i] == '/'):
+                self.cgi_info = path[:i], path[i+1:]
+                return 1
+        return 0
+
+    # URL prefixes treated as CGI script directories.
+    cgi_directories = ['/cgi-bin', '/htbin']
+
+    def is_executable(self, path):
+        """Test whether argument path is an executable file."""
+        return executable(path)
+
+    def is_python(self, path):
+        """Test whether argument path is a Python script."""
+        head, tail = os.path.splitext(path)
+        return tail.lower() in (".py", ".pyw")
+
+    def run_cgi(self):
+        """Execute a CGI script."""
+        dir, rest = self.cgi_info
+        # Split off the query string, then the script name from any
+        # extra path info that follows it.
+        i = rest.rfind('?')
+        if i >= 0:
+            rest, query = rest[:i], rest[i+1:]
+        else:
+            query = ''
+        i = rest.find('/')
+        if i >= 0:
+            script, rest = rest[:i], rest[i:]
+        else:
+            script, rest = rest, ''
+        scriptname = dir + '/' + script
+        scriptfile = self.translate_path(scriptname)
+        if not os.path.exists(scriptfile):
+            self.send_error(404, "No such CGI script (%s)" % `scriptname`)
+            return
+        if not os.path.isfile(scriptfile):
+            self.send_error(403, "CGI script is not a plain file (%s)" %
+                            `scriptname`)
+            return
+        ispy = self.is_python(scriptname)
+        if not ispy:
+            # Non-Python scripts need a subprocess mechanism and the
+            # execute permission bit.
+            if not (self.have_fork or self.have_popen2 or self.have_popen3):
+                self.send_error(403, "CGI script is not a Python script (%s)" %
+                                `scriptname`)
+                return
+            if not self.is_executable(scriptfile):
+                self.send_error(403, "CGI script is not executable (%s)" %
+                                `scriptname`)
+                return
+
+        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+        # XXX Much of the following could be prepared ahead of time!
+        env = {}
+        env['SERVER_SOFTWARE'] = self.version_string()
+        env['SERVER_NAME'] = self.server.server_name
+        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+        env['SERVER_PROTOCOL'] = self.protocol_version
+        env['SERVER_PORT'] = str(self.server.server_port)
+        env['REQUEST_METHOD'] = self.command
+        uqrest = urllib.unquote(rest)
+        env['PATH_INFO'] = uqrest
+        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+        env['SCRIPT_NAME'] = scriptname
+        if query:
+            env['QUERY_STRING'] = query
+        host = self.address_string()
+        if host != self.client_address[0]:
+            env['REMOTE_HOST'] = host
+        env['REMOTE_ADDR'] = self.client_address[0]
+        # XXX AUTH_TYPE
+        # XXX REMOTE_USER
+        # XXX REMOTE_IDENT
+        if self.headers.typeheader is None:
+            env['CONTENT_TYPE'] = self.headers.type
+        else:
+            env['CONTENT_TYPE'] = self.headers.typeheader
+        length = self.headers.getheader('content-length')
+        if length:
+            env['CONTENT_LENGTH'] = length
+        # Collect Accept headers, including RFC 822 continuation lines
+        # (which start with whitespace).
+        accept = []
+        for line in self.headers.getallmatchingheaders('accept'):
+            if line[:1] in "\t\n\r ":
+                accept.append(line.strip())
+            else:
+                accept = accept + line[7:].split(',')
+        env['HTTP_ACCEPT'] = ','.join(accept)
+        ua = self.headers.getheader('user-agent')
+        if ua:
+            env['HTTP_USER_AGENT'] = ua
+        co = filter(None, self.headers.getheaders('cookie'))
+        if co:
+            env['HTTP_COOKIE'] = ', '.join(co)
+        # XXX Other HTTP_* headers
+        if not self.have_fork:
+            # Since we're setting the env in the parent, provide empty
+            # values to override previously set values
+            for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+                      'HTTP_USER_AGENT', 'HTTP_COOKIE'):
+                env.setdefault(k, "")
+        # Note: mutates the real process environment, which the popen
+        # and in-process execution paths below inherit.
+        os.environ.update(env)
+
+        self.send_response(200, "Script output follows")
+
+        # '+' decodes to space in query strings.
+        decoded_query = query.replace('+', ' ')
+
+        if self.have_fork:
+            # Unix -- fork as we should
+            args = [script]
+            if '=' not in decoded_query:
+                args.append(decoded_query)
+            nobody = nobody_uid()
+            self.wfile.flush() # Always flush before forking
+            pid = os.fork()
+            if pid != 0:
+                # Parent
+                pid, sts = os.waitpid(pid, 0)
+                # throw away additional data [see bug #427345]
+                while select.select([self.rfile], [], [], 0)[0]:
+                    waste = self.rfile.read(1)
+                if sts:
+                    self.log_error("CGI script exit status %#x", sts)
+                return
+            # Child
+            try:
+                try:
+                    os.setuid(nobody)
+                except os.error:
+                    pass
+                # Wire the request socket to the script's stdin/stdout.
+                os.dup2(self.rfile.fileno(), 0)
+                os.dup2(self.wfile.fileno(), 1)
+                os.execve(scriptfile, args, env)
+            except:
+                self.server.handle_error(self.request, self.client_address)
+                os._exit(127)
+
+        elif self.have_popen2 or self.have_popen3:
+            # Windows -- use popen2 or popen3 to create a subprocess
+            import shutil
+            if self.have_popen3:
+                popenx = os.popen3
+            else:
+                popenx = os.popen2
+            cmdline = scriptfile
+            if self.is_python(scriptfile):
+                interp = sys.executable
+                if interp.lower().endswith("w.exe"):
+                    # On Windows, use python.exe, not pythonw.exe
+                    interp = interp[:-5] + interp[-4:]
+                cmdline = "%s -u %s" % (interp, cmdline)
+            if '=' not in query and '"' not in query:
+                cmdline = '%s "%s"' % (cmdline, query)
+            self.log_message("command: %s", cmdline)
+            # length may be None (no Content-Length header); int() then
+            # fails and nbytes stays 0.
+            try:
+                nbytes = int(length)
+            except:
+                nbytes = 0
+            files = popenx(cmdline, 'b')
+            fi = files[0]
+            fo = files[1]
+            if self.have_popen3:
+                fe = files[2]
+            if self.command.lower() == "post" and nbytes > 0:
+                data = self.rfile.read(nbytes)
+                fi.write(data)
+            # throw away additional data [see bug #427345]
+            while select.select([self.rfile._sock], [], [], 0)[0]:
+                waste = self.rfile._sock.recv(1)
+            fi.close()
+            shutil.copyfileobj(fo, self.wfile)
+            if self.have_popen3:
+                errors = fe.read()
+                fe.close()
+                if errors:
+                    self.log_error('%s', errors)
+            sts = fo.close()
+            if sts:
+                self.log_error("CGI script exit status %#x", sts)
+            else:
+                self.log_message("CGI script exited OK")
+
+        else:
+            # Other O.S. -- execute script in this process
+            save_argv = sys.argv
+            save_stdin = sys.stdin
+            save_stdout = sys.stdout
+            save_stderr = sys.stderr
+            try:
+                try:
+                    sys.argv = [scriptfile]
+                    if '=' not in decoded_query:
+                        sys.argv.append(decoded_query)
+                    sys.stdout = self.wfile
+                    sys.stdin = self.rfile
+                    execfile(scriptfile, {"__name__": "__main__"})
+                finally:
+                    # Always restore the interpreter state we hijacked.
+                    sys.argv = save_argv
+                    sys.stdin = save_stdin
+                    sys.stdout = save_stdout
+                    sys.stderr = save_stderr
+            except SystemExit, sts:
+                self.log_error("CGI script exit status %s", str(sts))
+            else:
+                # No exception: the script ran to completion.
+                self.log_message("CGI script exited OK")
+
+
+# Cached uid for the 'nobody' account; filled in lazily by nobody_uid().
+nobody = None
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    global nobody
+    if nobody:
+        return nobody
+    try:
+        import pwd
+    except ImportError:
+        # No pwd module (non-Unix platform).
+        return -1
+    try:
+        nobody = pwd.getpwnam('nobody')[2]
+    except KeyError:
+        # No 'nobody' account: pick a uid one above the highest in use.
+        nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+    return nobody
+
+
+def executable(path):
+    """Test for executable file."""
+    try:
+        st = os.stat(path)
+    except os.error:
+        return 0
+    # 0111 (octal) masks the owner/group/other execute bits of st_mode.
+    return st[0] & 0111 != 0
+
+
+def test(HandlerClass = CGIHTTPRequestHandler,
+         ServerClass = BaseHTTPServer.HTTPServer):
+    """Run the SimpleHTTPServer test harness with the CGI handler."""
+    SimpleHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/ConfigParser.py b/lib-python/2.2/ConfigParser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/ConfigParser.py
@@ -0,0 +1,472 @@
+"""Configuration file parser.
+
+A setup file consists of sections, led by a "[section]" header,
+and followed by "name: value" entries, with continuations and such in
+the style of RFC 822.
+
+The option values can contain format strings which refer to other values in
+the same section, or values in a special [DEFAULT] section.
+
+For example:
+
+    something: %(dir)s/whatever
+
+would resolve the "%(dir)s" to the value of dir.  All reference
+expansions are done late, on demand.
+
+Intrinsic defaults can be specified by passing them into the
+ConfigParser constructor as a dictionary.
+
+class:
+
+ConfigParser -- responsible for parsing a list of
+                configuration files, and managing the parsed database.
+
+    methods:
+
+    __init__(defaults=None)
+        create the parser and specify a dictionary of intrinsic defaults.  The
+        keys must be strings, the values must be appropriate for %()s string
+        interpolation.  Note that `__name__' is always an intrinsic default;
+        its value is the section's name.
+
+    sections()
+        return all the configuration section names, sans DEFAULT
+
+    has_section(section)
+        return whether the given section exists
+
+    has_option(section, option)
+        return whether the given option exists in the given section
+
+    options(section)
+        return list of configuration options for the named section
+
+    read(filenames)
+        read and parse the list of named configuration files, given by
+        name.  A single filename is also allowed.  Non-existing files
+        are ignored.
+
+    readfp(fp, filename=None)
+        read and parse one configuration file, given as a file object.
+        The filename defaults to fp.name; it is only used in error
+        messages (if fp has no `name' attribute, the string `<???>' is used).
+
+    get(section, option, raw=0, vars=None)
+        return a string value for the named option.  All % interpolations are
+        expanded in the return values, based on the defaults passed into the
+        constructor and the DEFAULT section.  Additional substitutions may be
+        provided using the `vars' argument, which must be a dictionary whose
+        contents override any pre-existing defaults.
+
+    getint(section, options)
+        like get(), but convert value to an integer
+
+    getfloat(section, options)
+        like get(), but convert value to a float
+
+    getboolean(section, options)
+        like get(), but convert value to a boolean (currently case
+        insensitively defined as 0, false, no, off for 0, and 1, true,
+        yes, on for 1).  Returns 0 or 1.
+
+    remove_section(section)
+        remove the given file section and all its options
+
+    remove_option(section, option)
+        remove the given option from the given section
+
+    set(section, option, value)
+        set the given option
+
+    write(fp)
+        write the configuration state in .ini format
+"""
+
+import re
+import types
+
+__all__ = ["NoSectionError","DuplicateSectionError","NoOptionError",
+           "InterpolationError","InterpolationDepthError","ParsingError",
+           "MissingSectionHeaderError","ConfigParser",
+           "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
+
+DEFAULTSECT = "DEFAULT"  # name of the special section whose options apply everywhere
+
+MAX_INTERPOLATION_DEPTH = 10  # max %(...)s expansion passes before InterpolationDepthError
+
+
+
+# exception classes
+class Error(Exception):  # base class for all ConfigParser exceptions
+    def __init__(self, msg=''):
+        self._msg = msg  # kept mutable so ParsingError can append detail lines
+        Exception.__init__(self, msg)
+    def __repr__(self):
+        return self._msg
+    __str__ = __repr__  # str() and repr() give the same message text
+
+class NoSectionError(Error):  # raised when a requested section is not found
+    def __init__(self, section):
+        Error.__init__(self, 'No section: %s' % section)
+        self.section = section  # offending section name, for programmatic access
+
+class DuplicateSectionError(Error):  # raised by add_section() for an existing name
+    def __init__(self, section):
+        Error.__init__(self, "Section %s already exists" % section)
+        self.section = section  # offending section name
+
+class NoOptionError(Error):  # raised when an option is missing from a section
+    def __init__(self, option, section):
+        Error.__init__(self, "No option `%s' in section: %s" %
+                       (option, section))
+        self.option = option  # offending option name
+        self.section = section  # section that was searched
+
+class InterpolationError(Error):  # raised when a %(...)s reference cannot be resolved
+    def __init__(self, reference, option, section, rawval):
+        Error.__init__(self,
+                       "Bad value substitution:\n"
+                       "\tsection: [%s]\n"
+                       "\toption : %s\n"
+                       "\tkey    : %s\n"
+                       "\trawval : %s\n"
+                       % (section, option, reference, rawval))
+        self.reference = reference  # the key that failed to resolve
+        self.option = option
+        self.section = section
+
+class InterpolationDepthError(Error):  # raised after MAX_INTERPOLATION_DEPTH passes still leave references
+    def __init__(self, option, section, rawval):
+        Error.__init__(self,
+                       "Value interpolation too deeply recursive:\n"
+                       "\tsection: [%s]\n"
+                       "\toption : %s\n"
+                       "\trawval : %s\n"
+                       % (section, option, rawval))
+        self.option = option
+        self.section = section
+
+class ParsingError(Error):  # accumulates all bogus lines found while reading a file
+    def __init__(self, filename):
+        Error.__init__(self, 'File contains parsing errors: %s' % filename)
+        self.filename = filename
+        self.errors = []  # list of (lineno, line) pairs added via append()
+
+    def append(self, lineno, line):
+        self.errors.append((lineno, line))
+        self._msg = self._msg + '\n\t[line %2d]: %s' % (lineno, line)  # grow the message in place
+
+class MissingSectionHeaderError(ParsingError):  # raised when option lines precede any [section] header
+    def __init__(self, filename, lineno, line):
+        Error.__init__(  # NOTE: deliberately skips ParsingError.__init__ (no errors list needed)
+            self,
+            'File contains no section headers.\nfile: %s, line: %d\n%s' %
+            (filename, lineno, line))
+        self.filename = filename
+        self.lineno = lineno
+        self.line = line
+
+
+
+class ConfigParser:
+    def __init__(self, defaults=None):
+        self.__sections = {}  # section name -> {option: value}; [DEFAULT] is kept separately
+        if defaults is None:
+            self.__defaults = {}
+        else:
+            self.__defaults = defaults  # stored by reference, not copied
+
+    def defaults(self):
+        return self.__defaults
+
+    def sections(self):
+        """Return a list of section names, excluding [DEFAULT]"""
+        # self.__sections will never have [DEFAULT] in it
+        return self.__sections.keys()
+
+    def add_section(self, section):
+        """Create a new section in the configuration.
+
+        Raise DuplicateSectionError if a section by the specified name
+        already exists.
+        """
+        if section in self.__sections:
+            raise DuplicateSectionError(section)
+        self.__sections[section] = {}
+
+    def has_section(self, section):
+        """Indicate whether the named section is present in the configuration.
+
+        The DEFAULT section is not acknowledged.
+        """
+        return section in self.__sections
+
+    def options(self, section):
+        """Return a list of option names for the given section name."""
+        try:
+            opts = self.__sections[section].copy()  # copy so the merge below can't mutate state
+        except KeyError:
+            raise NoSectionError(section)
+        opts.update(self.__defaults)  # option names include the defaults
+        if '__name__' in opts:
+            del opts['__name__']  # internal bookkeeping key, never reported
+        return opts.keys()
+
+    def read(self, filenames):
+        """Read and parse a filename or a list of filenames.
+
+        Files that cannot be opened are silently ignored; this is
+        designed so that you can specify a list of potential
+        configuration file locations (e.g. current directory, user's
+        home directory, systemwide directory), and all existing
+        configuration files in the list will be read.  A single
+        filename may also be given.
+        """
+        if isinstance(filenames, types.StringTypes):
+            filenames = [filenames]  # accept a bare filename as well as a list
+        for filename in filenames:
+            try:
+                fp = open(filename)
+            except IOError:
+                continue  # missing files are deliberately ignored
+            self.__read(fp, filename)
+            fp.close()
+
+    def readfp(self, fp, filename=None):
+        """Like read() but the argument must be a file-like object.
+
+        The `fp' argument must have a `readline' method.  Optional
+        second argument is the `filename', which if not given, is
+        taken from fp.name.  If fp has no `name' attribute, `<???>' is
+        used.
+
+        """
+        if filename is None:
+            try:
+                filename = fp.name
+            except AttributeError:
+                filename = '<???>'  # placeholder used only in error messages
+        self.__read(fp, filename)
+
+    def get(self, section, option, raw=0, vars=None):
+        """Get an option value for a given section.
+
+        All % interpolations are expanded in the return values, based on the
+        defaults passed into the constructor, unless the optional argument
+        `raw' is true.  Additional substitutions may be provided using the
+        `vars' argument, which must be a dictionary whose contents overrides
+        any pre-existing defaults.
+
+        The section DEFAULT is special.
+        """
+        d = self.__defaults.copy()  # lookup order: defaults, then section, then vars
+        try:
+            d.update(self.__sections[section])
+        except KeyError:
+            if section != DEFAULTSECT:
+                raise NoSectionError(section)
+        # Update with the entry specific variables
+        if vars is not None:
+            d.update(vars)
+        option = self.optionxform(option)  # normalize the option name (lowercase by default)
+        try:
+            value = d[option]
+        except KeyError:
+            raise NoOptionError(option, section)
+
+        if raw:
+            return value
+        return self._interpolate(section, option, value, d)
+
+    def _interpolate(self, section, option, rawval, vars):
+        # do the string interpolation
+        value = rawval
+        depth = MAX_INTERPOLATION_DEPTH  # bounds recursion through self-referential values
+        while depth:                    # Loop through this until it's done
+            depth -= 1
+            if value.find("%(") != -1:  # still contains at least one reference?
+                try:
+                    value = value % vars
+                except KeyError, key:
+                    raise InterpolationError(key, option, section, rawval)
+            else:
+                break
+        if value.find("%(") != -1:  # ran out of depth with references remaining
+            raise InterpolationDepthError(option, section, rawval)
+        return value
+
+    def __get(self, section, conv, option):
+        return conv(self.get(section, option))  # fetch (interpolated) then convert
+
+    def getint(self, section, option):
+        return self.__get(section, int, option)
+
+    def getfloat(self, section, option):
+        return self.__get(section, float, option)
+
+    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+                       '0': False, 'no': False, 'false': False, 'off': False}
+
+    def getboolean(self, section, option):
+        v = self.get(section, option)
+        if v.lower() not in self._boolean_states:  # comparison is case-insensitive
+            raise ValueError, 'Not a boolean: %s' % v
+        return self._boolean_states[v.lower()]
+
+    def optionxform(self, optionstr):
+        return optionstr.lower()  # override in a subclass for case-sensitive options
+
+    def has_option(self, section, option):
+        """Check for the existence of a given option in a given section."""
+        if not section or section == DEFAULTSECT:
+            option = self.optionxform(option)
+            return option in self.__defaults
+        elif section not in self.__sections:
+            return 0
+        else:
+            option = self.optionxform(option)
+            return (option in self.__sections[section]
+                    or option in self.__defaults)  # defaults count as present everywhere
+
+    def set(self, section, option, value):
+        """Set an option."""
+        if not section or section == DEFAULTSECT:
+            sectdict = self.__defaults  # empty/DEFAULT section name targets the defaults
+        else:
+            try:
+                sectdict = self.__sections[section]
+            except KeyError:
+                raise NoSectionError(section)
+        sectdict[self.optionxform(option)] = value
+
+    def write(self, fp):
+        """Write an .ini-format representation of the configuration state."""
+        if self.__defaults:
+            fp.write("[%s]\n" % DEFAULTSECT)
+            for (key, value) in self.__defaults.items():
+                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))  # embedded newlines become continuation lines
+            fp.write("\n")
+        for section in self.__sections:
+            fp.write("[%s]\n" % section)
+            for (key, value) in self.__sections[section].items():
+                if key != "__name__":
+                    fp.write("%s = %s\n" %
+                             (key, str(value).replace('\n', '\n\t')))
+            fp.write("\n")
+
+    def remove_option(self, section, option):
+        """Remove an option."""
+        if not section or section == DEFAULTSECT:
+            sectdict = self.__defaults
+        else:
+            try:
+                sectdict = self.__sections[section]
+            except KeyError:
+                raise NoSectionError(section)
+        option = self.optionxform(option)
+        existed = option in sectdict
+        if existed:
+            del sectdict[option]
+        return existed  # true if something was actually removed
+
+    def remove_section(self, section):
+        """Remove a file section."""
+        existed = section in self.__sections
+        if existed:
+            del self.__sections[section]
+        return existed  # true if the section was present
+
+    #
+    # Regular expressions for parsing section headers and options.
+    #
+    SECTCRE = re.compile(
+        r'\['                                 # [
+        r'(?P<header>[^]]+)'                  # very permissive!
+        r'\]'                                 # ]
+        )
+    OPTCRE = re.compile(
+        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
+        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
+                                              # followed by separator
+                                              # (either : or =), followed
+                                              # by any # space/tab
+        r'(?P<value>.*)$'                     # everything up to eol
+        )
+
+    def __read(self, fp, fpname):
+        """Parse a sectioned setup file.
+
+        The sections in setup file contains a title line at the top,
+        indicated by a name in square brackets (`[]'), plus key/value
+        options lines, indicated by `name: value' format lines.
+        Continuations are represented by an embedded newline then
+        leading whitespace.  Blank lines, lines beginning with a '#',
+        and just about everything else is ignored.
+        """
+        cursect = None                            # None, or a dictionary
+        optname = None
+        lineno = 0
+        e = None                                  # None, or an exception
+        while 1:
+            line = fp.readline()
+            if not line:
+                break  # EOF
+            lineno = lineno + 1
+            # comment or blank line?
+            if line.strip() == '' or line[0] in '#;':
+                continue
+            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":  # DOS-style "REM" comment
+                # no leading whitespace
+                continue
+            # continuation line?
+            if line[0].isspace() and cursect is not None and optname:
+                value = line.strip()
+                if value:
+                    cursect[optname] = "%s\n%s" % (cursect[optname], value)  # append to the previous option's value
+            # a section header or option header?
+            else:
+                # is it a section header?
+                mo = self.SECTCRE.match(line)
+                if mo:
+                    sectname = mo.group('header')
+                    if sectname in self.__sections:
+                        cursect = self.__sections[sectname]  # re-opened section: merge into it
+                    elif sectname == DEFAULTSECT:
+                        cursect = self.__defaults
+                    else:
+                        cursect = {'__name__': sectname}
+                        self.__sections[sectname] = cursect
+                    # So sections can't start with a continuation line
+                    optname = None
+                # no section header in the file?
+                elif cursect is None:
+                    raise MissingSectionHeaderError(fpname, lineno, `line`)  # backticks are pre-2.4 repr() syntax
+                # an option line?
+                else:
+                    mo = self.OPTCRE.match(line)
+                    if mo:
+                        optname, vi, optval = mo.group('option', 'vi', 'value')
+                        if vi in ('=', ':') and ';' in optval:
+                            # ';' is a comment delimiter only if it follows
+                            # a spacing character
+                            pos = optval.find(';')
+                            if pos != -1 and optval[pos-1].isspace():
+                                optval = optval[:pos]
+                        optval = optval.strip()
+                        # allow empty values
+                        if optval == '""':
+                            optval = ''
+                        optname = self.optionxform(optname.rstrip())
+                        cursect[optname] = optval
+                    else:
+                        # a non-fatal parsing error occurred.  set up the
+                        # exception but keep going. the exception will be
+                        # raised at the end of the file and will contain a
+                        # list of all bogus lines
+                        if not e:
+                            e = ParsingError(fpname)
+                        e.append(lineno, `line`)
+        # if any parsing errors occurred, raise an exception
+        if e:
+            raise e
diff --git a/lib-python/2.2/Cookie.py b/lib-python/2.2/Cookie.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Cookie.py
@@ -0,0 +1,742 @@
+#!/usr/bin/env python
+#
+
+####
+# Copyright 2000 by Timothy O'Malley <timo at alum.mit.edu>
+#
+#                All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software
+# and its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Timothy O'Malley  not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
+# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+####
+#
+# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
+#   by Timothy O'Malley <timo at alum.mit.edu>
+#
+#  Cookie.py is a Python module for the handling of HTTP
+#  cookies as a Python dictionary.  See RFC 2109 for more
+#  information on cookies.
+#
+#  The original idea to treat Cookies as a dictionary came from
+#  Dave Mitchell (davem at magnet.com) in 1995, when he released the
+#  first version of nscookie.py.
+#
+####
+
+r"""
+Here's a sample session to show how to use this module.
+At the moment, this is the only documentation.
+
+The Basics
+----------
+
+Importing is easy..
+
+   >>> import Cookie
+
+Most of the time you start by creating a cookie.  Cookies come in
+three flavors, each with slightly different encoding semantics, but
+more on that later.
+
+   >>> C = Cookie.SimpleCookie()
+   >>> C = Cookie.SerialCookie()
+   >>> C = Cookie.SmartCookie()
+
+[Note: Long-time users of Cookie.py will remember using
+Cookie.Cookie() to create a Cookie object.  Although deprecated, it
+is still supported by the code.  See the Backward Compatibility notes
+for more information.]
+
+Once you've created your Cookie, you can add values just as if it were
+a dictionary.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["fig"] = "newton"
+   >>> C["sugar"] = "wafer"
+   >>> print C
+   Set-Cookie: fig=newton;
+   Set-Cookie: sugar=wafer;
+
+Notice that the printable representation of a Cookie is the
+appropriate format for a Set-Cookie: header.  This is the
+default behavior.  You can change the header and printed
+attributes by using the .output() function
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["rocky"] = "road"
+   >>> C["rocky"]["path"] = "/cookie"
+   >>> print C.output(header="Cookie:")
+   Cookie: rocky=road; Path=/cookie;
+   >>> print C.output(attrs=[], header="Cookie:")
+   Cookie: rocky=road;
+
+The load() method of a Cookie extracts cookies from a string.  In a
+CGI script, you would use this method to extract the cookies from the
+HTTP_COOKIE environment variable.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C.load("chips=ahoy; vienna=finger")
+   >>> print C
+   Set-Cookie: chips=ahoy;
+   Set-Cookie: vienna=finger;
+
+The load() method is darn-tootin smart about identifying cookies
+within a string.  Escaped quotation marks, nested semicolons, and other
+such trickeries do not confuse it.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
+   >>> print C
+   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
+
+Each element of the Cookie also supports all of the RFC 2109
+Cookie attributes.  Here's an example which sets the Path
+attribute.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["oreo"] = "doublestuff"
+   >>> C["oreo"]["path"] = "/"
+   >>> print C
+   Set-Cookie: oreo=doublestuff; Path=/;
+
+Each dictionary element has a 'value' attribute, which gives you
+back the value associated with the key.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["twix"] = "none for you"
+   >>> C["twix"].value
+   'none for you'
+
+
+A Bit More Advanced
+-------------------
+
+As mentioned before, there are three different flavors of Cookie
+objects, each with different encoding/decoding semantics.  This
+section briefly discusses the differences.
+
+SimpleCookie
+
+The SimpleCookie expects that all values should be standard strings.
+Just to be sure, SimpleCookie invokes the str() builtin to convert
+the value to a string, when the values are set dictionary-style.
+
+   >>> C = Cookie.SimpleCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   '7'
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number=7;
+   Set-Cookie: string=seven;
+
+
+SerialCookie
+
+The SerialCookie expects that all values should be serialized using
+cPickle (or pickle, if cPickle isn't available).  As a result of
+serializing, SerialCookie can save almost any Python object to a
+value, and recover the exact same object when the cookie has been
+returned.  (SerialCookie can yield some strange-looking cookie
+values, however.)
+
+   >>> C = Cookie.SerialCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   7
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number="I7\012.";
+   Set-Cookie: string="S'seven'\012p1\012.";
+
+Be warned, however, if SerialCookie cannot de-serialize a value (because
+it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
+
+
+SmartCookie
+
+The SmartCookie combines aspects of each of the other two flavors.
+When setting a value in a dictionary-fashion, the SmartCookie will
+serialize (ala cPickle) the value *if and only if* it isn't a
+Python string.  String objects are *not* serialized.  Similarly,
+when the load() method parses out values, it attempts to de-serialize
+the value.  If it fails, then it falls back to treating the value
+as a string.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   7
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number="I7\012.";
+   Set-Cookie: string=seven;
+
+
+Backwards Compatibility
+-----------------------
+
+In order to keep compatibility with earlier versions of Cookie.py,
+it is still possible to use Cookie.Cookie() to create a Cookie.  In
+fact, this simply returns a SmartCookie.
+
+   >>> C = Cookie.Cookie()
+   >>> print C.__class__.__name__
+   SmartCookie
+
+
+Finis.
+"""  #"
+#     ^
+#     |----helps out font-lock
+
+#
+# Import our required modules
+#
+import string
+from UserDict import UserDict
+
+try:
+    from cPickle import dumps, loads
+except ImportError:
+    from pickle import dumps, loads
+
+try:
+    import re
+except ImportError:
+    raise ImportError, "Cookie.py requires 're' from Python 1.5 or later"
+
+__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
+           "SmartCookie","Cookie"]
+
+#
+# Define an exception visible to External modules
+#
+class CookieError(Exception):  # raised for invalid attribute names and reserved-key abuse
+    pass
+
+
+# These quoting routines conform to the RFC2109 specification, which in
+# turn references the character definitions from RFC2068.  They provide
+# a two-way quoting algorithm.  Any non-text character is translated
+# into a 4 character sequence: a forward-slash followed by the
+# three-digit octal equivalent of the character.  Any '\' or '"' is
+# quoted with a preceding '\' slash.
+#
+# These are taken from RFC2068 and RFC2109.
+#       _LegalChars       is the list of chars which don't require "'s
+#       _Translator       hash-table for fast quoting
+#
+_LegalChars       = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"  # chars allowed unquoted in a cookie value
+_Translator       = {  # byte -> escape used by _quote(); control/high bytes as octal, '"' and '\' backslashed
+    '\000' : '\\000',  '\001' : '\\001',  '\002' : '\\002',
+    '\003' : '\\003',  '\004' : '\\004',  '\005' : '\\005',
+    '\006' : '\\006',  '\007' : '\\007',  '\010' : '\\010',
+    '\011' : '\\011',  '\012' : '\\012',  '\013' : '\\013',
+    '\014' : '\\014',  '\015' : '\\015',  '\016' : '\\016',
+    '\017' : '\\017',  '\020' : '\\020',  '\021' : '\\021',
+    '\022' : '\\022',  '\023' : '\\023',  '\024' : '\\024',
+    '\025' : '\\025',  '\026' : '\\026',  '\027' : '\\027',
+    '\030' : '\\030',  '\031' : '\\031',  '\032' : '\\032',
+    '\033' : '\\033',  '\034' : '\\034',  '\035' : '\\035',
+    '\036' : '\\036',  '\037' : '\\037',
+
+    '"' : '\\"',       '\\' : '\\\\',
+
+    '\177' : '\\177',  '\200' : '\\200',  '\201' : '\\201',
+    '\202' : '\\202',  '\203' : '\\203',  '\204' : '\\204',
+    '\205' : '\\205',  '\206' : '\\206',  '\207' : '\\207',
+    '\210' : '\\210',  '\211' : '\\211',  '\212' : '\\212',
+    '\213' : '\\213',  '\214' : '\\214',  '\215' : '\\215',
+    '\216' : '\\216',  '\217' : '\\217',  '\220' : '\\220',
+    '\221' : '\\221',  '\222' : '\\222',  '\223' : '\\223',
+    '\224' : '\\224',  '\225' : '\\225',  '\226' : '\\226',
+    '\227' : '\\227',  '\230' : '\\230',  '\231' : '\\231',
+    '\232' : '\\232',  '\233' : '\\233',  '\234' : '\\234',
+    '\235' : '\\235',  '\236' : '\\236',  '\237' : '\\237',
+    '\240' : '\\240',  '\241' : '\\241',  '\242' : '\\242',
+    '\243' : '\\243',  '\244' : '\\244',  '\245' : '\\245',
+    '\246' : '\\246',  '\247' : '\\247',  '\250' : '\\250',
+    '\251' : '\\251',  '\252' : '\\252',  '\253' : '\\253',
+    '\254' : '\\254',  '\255' : '\\255',  '\256' : '\\256',
+    '\257' : '\\257',  '\260' : '\\260',  '\261' : '\\261',
+    '\262' : '\\262',  '\263' : '\\263',  '\264' : '\\264',
+    '\265' : '\\265',  '\266' : '\\266',  '\267' : '\\267',
+    '\270' : '\\270',  '\271' : '\\271',  '\272' : '\\272',
+    '\273' : '\\273',  '\274' : '\\274',  '\275' : '\\275',
+    '\276' : '\\276',  '\277' : '\\277',  '\300' : '\\300',
+    '\301' : '\\301',  '\302' : '\\302',  '\303' : '\\303',
+    '\304' : '\\304',  '\305' : '\\305',  '\306' : '\\306',
+    '\307' : '\\307',  '\310' : '\\310',  '\311' : '\\311',
+    '\312' : '\\312',  '\313' : '\\313',  '\314' : '\\314',
+    '\315' : '\\315',  '\316' : '\\316',  '\317' : '\\317',
+    '\320' : '\\320',  '\321' : '\\321',  '\322' : '\\322',
+    '\323' : '\\323',  '\324' : '\\324',  '\325' : '\\325',
+    '\326' : '\\326',  '\327' : '\\327',  '\330' : '\\330',
+    '\331' : '\\331',  '\332' : '\\332',  '\333' : '\\333',
+    '\334' : '\\334',  '\335' : '\\335',  '\336' : '\\336',
+    '\337' : '\\337',  '\340' : '\\340',  '\341' : '\\341',
+    '\342' : '\\342',  '\343' : '\\343',  '\344' : '\\344',
+    '\345' : '\\345',  '\346' : '\\346',  '\347' : '\\347',
+    '\350' : '\\350',  '\351' : '\\351',  '\352' : '\\352',
+    '\353' : '\\353',  '\354' : '\\354',  '\355' : '\\355',
+    '\356' : '\\356',  '\357' : '\\357',  '\360' : '\\360',
+    '\361' : '\\361',  '\362' : '\\362',  '\363' : '\\363',
+    '\364' : '\\364',  '\365' : '\\365',  '\366' : '\\366',
+    '\367' : '\\367',  '\370' : '\\370',  '\371' : '\\371',
+    '\372' : '\\372',  '\373' : '\\373',  '\374' : '\\374',
+    '\375' : '\\375',  '\376' : '\\376',  '\377' : '\\377'
+    }
+
+def _quote(str, LegalChars=_LegalChars,
+    join=string.join, idmap=string._idmap, translate=string.translate):  # default args bind helpers once (old CPython speed idiom)
+    #
+    # If the string does not need to be double-quoted,
+    # then just return the string.  Otherwise, surround
+    # the string in doublequotes and precede quote (with a \)
+    # special characters.
+    #
+    if "" == translate(str, idmap, LegalChars):  # empty result means every char was legal
+        return str
+    else:
+        return '"' + join( map(_Translator.get, str, str), "" ) + '"'  # map(..., str, str): each char maps to its escape or (2nd arg) itself
+# end _quote
+
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")  # one escaped octal byte, e.g. \012
+_QuotePatt = re.compile(r"[\\].")  # any backslash-escaped character, e.g. \"
+
+def _unquote(str, join=string.join, atoi=string.atoi):  # default args bind helpers once (old CPython speed idiom)
+    # If there aren't any doublequotes,
+    # then there can't be any special characters.  See RFC 2109.
+    if  len(str) < 2:
+        return str
+    if str[0] != '"' or str[-1] != '"':
+        return str  # not a quoted value: return unchanged
+
+    # We have to assume that we must decode this string.
+    # Down to work.
+
+    # Remove the "s
+    str = str[1:-1]
+
+    # Check for special sequences.  Examples:
+    #    \012 --> \n
+    #    \"   --> "
+    #
+    i = 0
+    n = len(str)
+    res = []  # decoded pieces, joined at the end
+    while 0 <= i < n:
+        Omatch = _OctalPatt.search(str, i)
+        Qmatch = _QuotePatt.search(str, i)
+        if not Omatch and not Qmatch:              # Neither matched
+            res.append(str[i:])
+            break
+        # else:
+        j = k = -1  # start offsets of the octal / quoted matches (-1 = no match)
+        if Omatch: j = Omatch.start(0)
+        if Qmatch: k = Qmatch.start(0)
+        if Qmatch and ( not Omatch or k < j ):     # QuotePatt matched
+            res.append(str[i:k])
+            res.append(str[k+1])  # keep the escaped character itself
+            i = k+2
+        else:                                      # OctalPatt matched
+            res.append(str[i:j])
+            res.append( chr( atoi(str[j+1:j+4], 8) ) )  # decode the 3-digit octal escape
+            i = j+4
+    return join(res, "")
+# end _unquote
+
+# The _getdate() routine is used to set the expiration time in
+# the cookie's HTTP header.      By default, _getdate() returns the
+# current time in the appropriate "expires" format for a
+# Set-Cookie header.     The one optional argument is an offset from
+# now, in seconds.      For example, an offset of -3600 means "one hour ago".
+# The offset may be a floating point number.
+#
+
+_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']  # indexed by gmtime()'s weekday field (0 = Monday)
+
+_monthname = [None,  # index 0 unused so month numbers 1-12 map directly
+              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
+    from time import gmtime, time
+    now = time()
+    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)  # `future' is an offset in seconds from now
+    return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
+           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
+
+
+#
+# A class to hold ONE key,value pair.
+# In a cookie, each such pair may have several attributes.
+#       so this class is used to keep the attributes associated
+#       with the appropriate key,value pair.
+# This class also includes a coded_value attribute, which
+#       is used to hold the network representation of the
+#       value.  This is most useful when Python objects are
+#       pickled for network transit.
+#
+
+class Morsel(UserDict):
+    # RFC 2109 lists these attributes as reserved:
+    #   path       comment         domain
+    #   max-age    secure      version
+    #
+    # For historical reasons, these attributes are also reserved:
+    #   expires
+    #
+    # This dictionary provides a mapping from the lowercase
+    # variant on the left to the appropriate traditional
+    # formatting on the right.
+    _reserved = { "expires" : "expires",
+                   "path"        : "Path",
+                   "comment" : "Comment",
+                   "domain"      : "Domain",
+                   "max-age" : "Max-Age",
+                   "secure"      : "secure",
+                   "version" : "Version",
+                   }
+    _reserved_keys = _reserved.keys()
+
+    def __init__(self):
+        # Set defaults
+        self.key = self.value = self.coded_value = None
+        UserDict.__init__(self)
+
+        # Set default attributes
+        for K in self._reserved_keys:
+            UserDict.__setitem__(self, K, "")
+    # end __init__
+
+    def __setitem__(self, K, V):
+        K = string.lower(K)
+        if not K in self._reserved_keys:
+            raise CookieError("Invalid Attribute %s" % K)
+        UserDict.__setitem__(self, K, V)
+    # end __setitem__
+
+    def isReservedKey(self, K):
+        return string.lower(K) in self._reserved_keys
+    # end isReservedKey
+
+    def set(self, key, val, coded_val,
+            LegalChars=_LegalChars,
+            idmap=string._idmap, translate=string.translate ):
+        # First we verify that the key isn't a reserved word
+        # Second we make sure it only contains legal characters
+        if string.lower(key) in self._reserved_keys:
+            raise CookieError("Attempt to set a reserved key: %s" % key)
+        if "" != translate(key, idmap, LegalChars):
+            raise CookieError("Illegal key value: %s" % key)
+
+        # It's a good key, so save it.
+        self.key                 = key
+        self.value               = val
+        self.coded_value         = coded_val
+    # end set
+
+    def output(self, attrs=None, header = "Set-Cookie:"):
+        return "%s %s" % ( header, self.OutputString(attrs) )
+
+    __str__ = output
+
+    def __repr__(self):
+        return '<%s: %s=%s>' % (self.__class__.__name__,
+                                self.key, repr(self.value) )
+
+    def js_output(self, attrs=None):
+        # Print javascript
+        return """
+        <SCRIPT LANGUAGE="JavaScript">
+        <!-- begin hiding
+        document.cookie = \"%s\"
+        // end hiding -->
+        </script>
+        """ % ( self.OutputString(attrs), )
+    # end js_output()
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        RA = result.append
+
+        # First, the key=value pair
+        RA("%s=%s;" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved_keys
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            if V == "": continue
+            if K not in attrs: continue
+            if K == "expires" and type(V) == type(1):
+                RA("%s=%s;" % (self._reserved[K], _getdate(V)))
+            elif K == "max-age" and type(V) == type(1):
+                RA("%s=%d;" % (self._reserved[K], V))
+            elif K == "secure":
+                RA("%s;" % self._reserved[K])
+            else:
+                RA("%s=%s;" % (self._reserved[K], V))
+
+        # Return the result
+        return string.join(result, " ")
+    # end OutputString
+# end Morsel class
+
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications.  I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs.  As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalCharsPatt  = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
+_CookiePattern = re.compile(
+    r"(?x)"                       # This is a Verbose pattern
+    r"(?P<key>"                   # Start of group 'key'
+    ""+ _LegalCharsPatt +"+?"     # Any word of at least one letter, nongreedy
+    r")"                          # End of group 'key'
+    r"\s*=\s*"                    # Equal Sign
+    r"(?P<val>"                   # Start of group 'val'
+    r'"(?:[^\\"]|\\.)*"'            # Any doublequoted string
+    r"|"                            # or
+    ""+ _LegalCharsPatt +"*"        # Any word or empty string
+    r")"                          # End of group 'val'
+    r"\s*;?"                      # Probably ending in a semi-colon
+    )
+
+
+# At long last, here is the cookie class.
+#   Using this class is almost just like using a dictionary.
+# See this module's docstring for example usage.
+#
+class BaseCookie(UserDict):
+    # A container class for a set of Morsels
+    #
+
+    def value_decode(self, val):
+        """real_value, coded_value = value_decode(STRING)
+        Called prior to setting a cookie's value from the network
+        representation.  The VALUE is the value read from HTTP
+        header.
+        Override this function to modify the behavior of cookies.
+        """
+        return val, val
+    # end value_decode
+
+    def value_encode(self, val):
+        """real_value, coded_value = value_encode(VALUE)
+        Called prior to setting a cookie's value from the dictionary
+        representation.  The VALUE is the value being assigned.
+        Override this function to modify the behavior of cookies.
+        """
+        strval = str(val)
+        return strval, strval
+    # end value_encode
+
+    def __init__(self, input=None):
+        UserDict.__init__(self)
+        if input: self.load(input)
+    # end __init__
+
+    def __set(self, key, real_value, coded_value):
+        """Private method for setting a cookie's value"""
+        M = self.get(key, Morsel())
+        M.set(key, real_value, coded_value)
+        UserDict.__setitem__(self, key, M)
+    # end __set
+
+    def __setitem__(self, key, value):
+        """Dictionary style assignment."""
+        rval, cval = self.value_encode(value)
+        self.__set(key, rval, cval)
+    # end __setitem__
+
+    def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
+        """Return a string suitable for HTTP."""
+        result = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            result.append( V.output(attrs, header) )
+        return string.join(result, sep)
+    # end output
+
+    __str__ = output
+
+    def __repr__(self):
+        L = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            L.append( '%s=%s' % (K,repr(V.value) ) )
+        return '<%s: %s>' % (self.__class__.__name__, string.join(L))
+
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        result = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            result.append( V.js_output(attrs) )
+        return string.join(result, "")
+    # end js_output
+
+    def load(self, rawdata):
+        """Load cookies from a string (presumably HTTP_COOKIE) or
+        from a dictionary.  Loading cookies from a dictionary 'd'
+        is equivalent to calling:
+            map(Cookie.__setitem__, d.keys(), d.values())
+        """
+        if type(rawdata) == type(""):
+            self.__ParseString(rawdata)
+        else:
+            self.update(rawdata)
+        return
+    # end load()
+
+    def __ParseString(self, str, patt=_CookiePattern):
+        i = 0            # Our starting point
+        n = len(str)     # Length of string
+        M = None         # current morsel
+
+        while 0 <= i < n:
+            # Start looking for a cookie
+            match = patt.search(str, i)
+            if not match: break          # No more cookies
+
+            K,V = match.group("key"), match.group("val")
+            i = match.end(0)
+
+            # Parse the key, value in case it's metainfo
+            if K[0] == "$":
+                # We ignore attributes which pertain to the cookie
+                # mechanism as a whole.  See RFC 2109.
+                # (Does anyone care?)
+                if M:
+                    M[ K[1:] ] = V
+            elif string.lower(K) in Morsel._reserved_keys:
+                if M:
+                    M[ K ] = _unquote(V)
+            else:
+                rval, cval = self.value_decode(V)
+                self.__set(K, rval, cval)
+                M = self[K]
+    # end __ParseString
+# end BaseCookie class
+
+class SimpleCookie(BaseCookie):
+    """SimpleCookie
+    SimpleCookie supports strings as cookie values.  When setting
+    the value using the dictionary assignment notation, SimpleCookie
+    calls the builtin str() to convert the value to a string.  Values
+    received from HTTP are kept as strings.
+    """
+    def value_decode(self, val):
+        return _unquote( val ), val
+    def value_encode(self, val):
+        strval = str(val)
+        return strval, _quote( strval )
+# end SimpleCookie
+
+class SerialCookie(BaseCookie):
+    """SerialCookie
+    SerialCookie supports arbitrary objects as cookie values. All
+    values are serialized (using cPickle) before being sent to the
+    client.  All incoming values are assumed to be valid Pickle
+    representations.  IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
+    FORMAT, THEN AN EXCEPTION WILL BE RAISED.
+
+    Note: Large cookie values add overhead because they must be
+    retransmitted on every HTTP transaction.
+
+    Note: HTTP has a 2k limit on the size of a cookie.  This class
+    does not check for this limit, so be careful!!!
+    """
+    def value_decode(self, val):
+        # This could raise an exception!
+        return loads( _unquote(val) ), val
+    def value_encode(self, val):
+        return val, _quote( dumps(val) )
+# end SerialCookie
+
+class SmartCookie(BaseCookie):
+    """SmartCookie
+    SmartCookie supports arbitrary objects as cookie values.  If the
+    object is a string, then it is quoted.  If the object is not a
+    string, however, then SmartCookie will use cPickle to serialize
+    the object into a string representation.
+
+    Note: Large cookie values add overhead because they must be
+    retransmitted on every HTTP transaction.
+
+    Note: HTTP has a 2k limit on the size of a cookie.  This class
+    does not check for this limit, so be careful!!!
+    """
+    def value_decode(self, val):
+        strval = _unquote(val)
+        try:
+            return loads(strval), val
+        except:
+            return strval, val
+    def value_encode(self, val):
+        if type(val) == type(""):
+            return val, _quote(val)
+        else:
+            return val, _quote( dumps(val) )
+# end SmartCookie
+
+
+###########################################################
+# Backwards Compatibility:  Don't break any existing code!
+
+# We provide Cookie() as an alias for SmartCookie()
+Cookie = SmartCookie
+
+#
+###########################################################
+
+def _test():
+    import doctest, Cookie
+    return doctest.testmod(Cookie)
+
+if __name__ == "__main__":
+    _test()
+
+
+#Local Variables:
+#tab-width: 4
+#end:
diff --git a/lib-python/2.2/FCNTL.py b/lib-python/2.2/FCNTL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/FCNTL.py
@@ -0,0 +1,14 @@
+"""Backward-compatibility version of FCNTL; export constants exported by
+fcntl, and issue a deprecation warning.
+"""
+
+import warnings
+warnings.warn("the FCNTL module is deprecated; please use fcntl",
+              DeprecationWarning)
+
+
+# Export the constants known to the fcntl module:
+from fcntl import *
+
+# and *only* the constants:
+__all__ = [s for s in dir() if s[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
diff --git a/lib-python/2.2/HTMLParser.py b/lib-python/2.2/HTMLParser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/HTMLParser.py
@@ -0,0 +1,383 @@
+"""A parser for HTML and XHTML."""
+
+# This file is based on sgmllib.py, but the API is slightly different.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).
+
+
+import markupbase
+import re
+
+# Regular expressions used for parsing
+
+interesting_normal = re.compile('[&<]')
+interesting_cdata = re.compile(r'<(/|\Z)')
+incomplete = re.compile('&[a-zA-Z#]')
+
+entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
+charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
+
+starttagopen = re.compile('<[a-zA-Z]')
+piclose = re.compile('>')
+commentclose = re.compile(r'--\s*>')
+tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
+attrfind = re.compile(
+    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
+    r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./:;+*%?!&$\(\)_#=~]*))?')
+
+locatestarttagend = re.compile(r"""
+  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
+  (?:\s+                             # whitespace before attribute name
+    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
+      (?:\s*=\s*                     # value indicator
+        (?:'[^']*'                   # LITA-enclosed value
+          |\"[^\"]*\"                # LIT-enclosed value
+          |[^'\">\s]+                # bare value
+         )
+       )?
+     )
+   )*
+  \s*                                # trailing whitespace
+""", re.VERBOSE)
+endendtag = re.compile('>')
+endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
+class HTMLParseError(Exception):
+    """Exception raised for all parse errors."""
+
+    def __init__(self, msg, position=(None, None)):
+        assert msg
+        self.msg = msg
+        self.lineno = position[0]
+        self.offset = position[1]
+
+    def __str__(self):
+        result = self.msg
+        if self.lineno is not None:
+            result = result + ", at line %d" % self.lineno
+        if self.offset is not None:
+            result = result + ", column %d" % (self.offset + 1)
+        return result
+
+
+class HTMLParser(markupbase.ParserBase):
+    """Find tags and other markup and call handler functions.
+
+    Usage:
+        p = HTMLParser()
+        p.feed(data)
+        ...
+        p.close()
+
+    Start tags are handled by calling self.handle_starttag() or
+    self.handle_startendtag(); end tags by self.handle_endtag().  The
+    data between tags is passed from the parser to the derived class
+    by calling self.handle_data() with the data as argument (the data
+    may be split up in arbitrary chunks).  Entity references are
+    passed by calling self.handle_entityref() with the entity
+    reference as the argument.  Numeric character references are
+    passed to self.handle_charref() with the string containing the
+    reference as the argument.
+    """
+
+    CDATA_CONTENT_ELEMENTS = ("script", "style")
+
+
+    def __init__(self):
+        """Initialize and reset this instance."""
+        self.reset()
+
+    def reset(self):
+        """Reset this instance.  Loses all unprocessed data."""
+        self.rawdata = ''
+        self.lasttag = '???'
+        self.interesting = interesting_normal
+        markupbase.ParserBase.reset(self)
+
+    def feed(self, data):
+        """Feed data to the parser.
+
+        Call this as often as you want, with as little or as much text
+        as you want (may include '\n').
+        """
+        self.rawdata = self.rawdata + data
+        self.goahead(0)
+
+    def close(self):
+        """Handle any buffered data."""
+        self.goahead(1)
+
+    def error(self, message):
+        raise HTMLParseError(message, self.getpos())
+
+    __starttag_text = None
+
+    def get_starttag_text(self):
+        """Return full source of start tag: '<...>'."""
+        return self.__starttag_text
+
+    def set_cdata_mode(self):
+        self.interesting = interesting_cdata
+
+    def clear_cdata_mode(self):
+        self.interesting = interesting_normal
+
+    # Internal -- handle data as far as reasonable.  May leave state
+    # and data to be processed by a subsequent call.  If 'end' is
+    # true, force handling all data as if followed by EOF marker.
+    def goahead(self, end):
+        rawdata = self.rawdata
+        i = 0
+        n = len(rawdata)
+        while i < n:
+            match = self.interesting.search(rawdata, i) # < or &
+            if match:
+                j = match.start()
+            else:
+                j = n
+            if i < j: self.handle_data(rawdata[i:j])
+            i = self.updatepos(i, j)
+            if i == n: break
+            startswith = rawdata.startswith
+            if startswith('<', i):
+                if starttagopen.match(rawdata, i): # < + letter
+                    k = self.parse_starttag(i)
+                elif startswith("</", i):
+                    k = self.parse_endtag(i)
+                    if k >= 0:
+                        self.clear_cdata_mode()
+                elif startswith("<!--", i):
+                    k = self.parse_comment(i)
+                elif startswith("<?", i):
+                    k = self.parse_pi(i)
+                elif startswith("<!", i):
+                    k = self.parse_declaration(i)
+                elif (i + 1) < n:
+                    self.handle_data("<")
+                    k = i + 1
+                else:
+                    break
+                if k < 0:
+                    if end:
+                        self.error("EOF in middle of construct")
+                    break
+                i = self.updatepos(i, k)
+            elif startswith("&#", i):
+                match = charref.match(rawdata, i)
+                if match:
+                    name = match.group()[2:-1]
+                    self.handle_charref(name)
+                    k = match.end()
+                    if not startswith(';', k-1):
+                        k = k - 1
+                    i = self.updatepos(i, k)
+                    continue
+                else:
+                    break
+            elif startswith('&', i):
+                match = entityref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_entityref(name)
+                    k = match.end()
+                    if not startswith(';', k-1):
+                        k = k - 1
+                    i = self.updatepos(i, k)
+                    continue
+                match = incomplete.match(rawdata, i)
+                if match:
+                    # match.group() will contain at least 2 chars
+                    if end and match.group() == rawdata[i:]:
+                        self.error("EOF in middle of entity or char ref")
+                    # incomplete
+                    break
+                elif (i + 1) < n:
+                    # not the end of the buffer, and can't be confused
+                    # with some other construct
+                    self.handle_data("&")
+                    i = self.updatepos(i, i + 1)
+                else:
+                    break
+            else:
+                assert 0, "interesting.search() lied"
+        # end while
+        if end and i < n:
+            self.handle_data(rawdata[i:n])
+            i = self.updatepos(i, n)
+        self.rawdata = rawdata[i:]
+
+    # Internal -- parse comment, return end or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        assert rawdata[i:i+4] == '<!--', 'unexpected call to parse_comment()'
+        match = commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start()
+            self.handle_comment(rawdata[i+4: j])
+        j = match.end()
+        return j
+
+    # Internal -- parse processing instr, return end or -1 if not terminated
+    def parse_pi(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
+        match = piclose.search(rawdata, i+2) # >
+        if not match:
+            return -1
+        j = match.start()
+        self.handle_pi(rawdata[i+2: j])
+        j = match.end()
+        return j
+
+    # Internal -- handle starttag, return end or -1 if not terminated
+    def parse_starttag(self, i):
+        self.__starttag_text = None
+        endpos = self.check_for_whole_start_tag(i)
+        if endpos < 0:
+            return endpos
+        rawdata = self.rawdata
+        self.__starttag_text = rawdata[i:endpos]
+
+        # Now parse the data between i+1 and j into a tag and attrs
+        attrs = []
+        match = tagfind.match(rawdata, i+1)
+        assert match, 'unexpected call to parse_starttag()'
+        k = match.end()
+        self.lasttag = tag = rawdata[i+1:k].lower()
+
+        while k < endpos:
+            m = attrfind.match(rawdata, k)
+            if not m:
+                break
+            attrname, rest, attrvalue = m.group(1, 2, 3)
+            if not rest:
+                attrvalue = None
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+                attrvalue = self.unescape(attrvalue)
+            attrs.append((attrname.lower(), attrvalue))
+            k = m.end()
+
+        end = rawdata[k:endpos].strip()
+        if end not in (">", "/>"):
+            lineno, offset = self.getpos()
+            if "\n" in self.__starttag_text:
+                lineno = lineno + self.__starttag_text.count("\n")
+                offset = len(self.__starttag_text) \
+                         - self.__starttag_text.rfind("\n")
+            else:
+                offset = offset + len(self.__starttag_text)
+            self.error("junk characters in start tag: %s"
+                       % `rawdata[k:endpos][:20]`)
+        if end.endswith('/>'):
+            # XHTML-style empty tag: <span attr="value" />
+            self.handle_startendtag(tag, attrs)
+        else:
+            self.handle_starttag(tag, attrs)
+            if tag in self.CDATA_CONTENT_ELEMENTS:
+                self.set_cdata_mode()
+        return endpos
+
+    # Internal -- check to see if we have a complete starttag; return end
+    # or -1 if incomplete.
+    def check_for_whole_start_tag(self, i):
+        rawdata = self.rawdata
+        m = locatestarttagend.match(rawdata, i)
+        if m:
+            j = m.end()
+            next = rawdata[j:j+1]
+            if next == ">":
+                return j + 1
+            if next == "/":
+                if rawdata.startswith("/>", j):
+                    return j + 2
+                if rawdata.startswith("/", j):
+                    # buffer boundary
+                    return -1
+                # else bogus input
+                self.updatepos(i, j + 1)
+                self.error("malformed empty start tag")
+            if next == "":
+                # end of input
+                return -1
+            if next in ("abcdefghijklmnopqrstuvwxyz=/"
+                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
+                # end of input in or before attribute value, or we have the
+                # '/' from a '/>' ending
+                return -1
+            self.updatepos(i, j)
+            self.error("malformed start tag")
+        raise AssertionError("we should not get here!")
+
+    # Internal -- parse endtag, return end or -1 if incomplete
+    def parse_endtag(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
+        match = endendtag.search(rawdata, i+1) # >
+        if not match:
+            return -1
+        j = match.end()
+        match = endtagfind.match(rawdata, i) # </ + tag + >
+        if not match:
+            self.error("bad end tag: %s" % `rawdata[i:j]`)
+        tag = match.group(1)
+        self.handle_endtag(tag.lower())
+        return j
+
+    # Overridable -- finish processing of start+end tag: <tag.../>
+    def handle_startendtag(self, tag, attrs):
+        self.handle_starttag(tag, attrs)
+        self.handle_endtag(tag)
+
+    # Overridable -- handle start tag
+    def handle_starttag(self, tag, attrs):
+        pass
+
+    # Overridable -- handle end tag
+    def handle_endtag(self, tag):
+        pass
+
+    # Overridable -- handle character reference
+    def handle_charref(self, name):
+        pass
+
+    # Overridable -- handle entity reference
+    def handle_entityref(self, name):
+        pass
+
+    # Overridable -- handle data
+    def handle_data(self, data):
+        pass
+
+    # Overridable -- handle comment
+    def handle_comment(self, data):
+        pass
+
+    # Overridable -- handle declaration
+    def handle_decl(self, decl):
+        pass
+
+    # Overridable -- handle processing instruction
+    def handle_pi(self, data):
+        pass
+
+    def unknown_decl(self, data):
+        self.error("unknown declaration: " + `data`)
+
+    # Internal -- helper to remove special character quoting
+    def unescape(self, s):
+        if '&' not in s:
+            return s
+        s = s.replace("&lt;", "<")
+        s = s.replace("&gt;", ">")
+        s = s.replace("&apos;", "'")
+        s = s.replace("&quot;", '"')
+        s = s.replace("&amp;", "&") # Must be last
+        return s
diff --git a/lib-python/2.2/MimeWriter.py b/lib-python/2.2/MimeWriter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/MimeWriter.py
@@ -0,0 +1,181 @@
+"""Generic MIME writer.
+
+This module defines the class MimeWriter.  The MimeWriter class implements
+a basic formatter for creating MIME multi-part files.  It doesn't seek around
+the output file nor does it use large amounts of buffer space. You must write
+the parts out in the order that they should occur in the final file.
+MimeWriter does buffer the headers you add, allowing you to rearrange their
+order.
+
+"""
+
+
+import mimetools
+
+__all__ = ["MimeWriter"]
+
+class MimeWriter:
+
+    """Generic MIME writer.
+
+    Methods:
+
+    __init__()
+    addheader()
+    flushheaders()
+    startbody()
+    startmultipartbody()
+    nextpart()
+    lastpart()
+
+    A MIME writer is much more primitive than a MIME parser.  It
+    doesn't seek around on the output file, and it doesn't use large
+    amounts of buffer space, so you have to write the parts in the
+    order they should occur on the output file.  It does buffer the
+    headers you add, allowing you to rearrange their order.
+
+    General usage is:
+
+    f = <open the output file>
+    w = MimeWriter(f)
+    ...call w.addheader(key, value) 0 or more times...
+
+    followed by either:
+
+    f = w.startbody(content_type)
+    ...call f.write(data) for body data...
+
+    or:
+
+    w.startmultipartbody(subtype)
+    for each part:
+        subwriter = w.nextpart()
+        ...use the subwriter's methods to create the subpart...
+    w.lastpart()
+
+    The subwriter is another MimeWriter instance, and should be
+    treated in the same way as the toplevel MimeWriter.  This way,
+    writing recursive body parts is easy.
+
+    Warning: don't forget to call lastpart()!
+
+    XXX There should be more state so calls made in the wrong order
+    are detected.
+
+    Some special cases:
+
+    - startbody() just returns the file passed to the constructor;
+      but don't use this knowledge, as it may be changed.
+
+    - startmultipartbody() actually returns a file as well;
+      this can be used to write the initial 'if you can read this your
+      mailer is not MIME-aware' message.
+
+    - If you call flushheaders(), the headers accumulated so far are
+      written out (and forgotten); this is useful if you don't need a
+      body part at all, e.g. for a subpart of type message/rfc822
+      that's (mis)used to store some header-like information.
+
+    - Passing a keyword argument 'prefix=<flag>' to addheader(),
+      start*body() affects where the header is inserted; 0 means
+      append at the end, 1 means insert at the start; default is
+      append for addheader(), but insert for start*body(), which use
+      it to determine where the Content-Type header goes.
+
+    """
+
+    def __init__(self, fp):
+        self._fp = fp
+        self._headers = []
+
+    def addheader(self, key, value, prefix=0):
+        """Add a header line to the MIME message.
+
+        The key is the name of the header, where the value obviously provides
+        the value of the header. The optional argument prefix determines
+        where the header is inserted; 0 means append at the end, 1 means
+        insert at the start. The default is to append.
+
+        """
+        lines = value.split("\n")
+        while lines and not lines[-1]: del lines[-1]
+        while lines and not lines[0]: del lines[0]
+        for i in range(1, len(lines)):
+            lines[i] = "    " + lines[i].strip()
+        value = "\n".join(lines) + "\n"
+        line = key + ": " + value
+        if prefix:
+            self._headers.insert(0, line)
+        else:
+            self._headers.append(line)
+
+    def flushheaders(self):
+        """Writes out and forgets all headers accumulated so far.
+
+        This is useful if you don't need a body part at all; for example,
+        for a subpart of type message/rfc822 that's (mis)used to store some
+        header-like information.
+
+        """
+        self._fp.writelines(self._headers)
+        self._headers = []
+
+    def startbody(self, ctype, plist=[], prefix=1):
+        """Returns a file-like object for writing the body of the message.
+
+        The content-type is set to the provided ctype, and the optional
+        parameter, plist, provides additional parameters for the
+        content-type declaration.  The optional argument prefix determines
+        where the header is inserted; 0 means append at the end, 1 means
+        insert at the start. The default is to insert at the start.
+
+        """
+        for name, value in plist:
+            ctype = ctype + ';\n %s=\"%s\"' % (name, value)
+        self.addheader("Content-Type", ctype, prefix=prefix)
+        self.flushheaders()
+        self._fp.write("\n")
+        return self._fp
+
+    def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
+        """Returns a file-like object for writing the body of the message.
+
+        Additionally, this method initializes the multi-part code, where the
+        subtype parameter provides the multipart subtype, the boundary
+        parameter may provide a user-defined boundary specification, and the
+        plist parameter provides optional parameters for the subtype.  The
+        optional argument, prefix, determines where the header is inserted;
+        0 means append at the end, 1 means insert at the start. The default
+        is to insert at the start.  Subparts should be created using the
+        nextpart() method.
+
+        """
+        self._boundary = boundary or mimetools.choose_boundary()
+        return self.startbody("multipart/" + subtype,
+                              [("boundary", self._boundary)] + plist,
+                              prefix=prefix)
+
+    def nextpart(self):
+        """Returns a new instance of MimeWriter which represents an
+        individual part in a multipart message.
+
+        This may be used to write the part as well as used for creating
+        recursively complex multipart messages. The message must first be
+        initialized with the startmultipartbody() method before using the
+        nextpart() method.
+
+        """
+        self._fp.write("\n--" + self._boundary + "\n")
+        return self.__class__(self._fp)
+
+    def lastpart(self):
+        """This is used to designate the last part of a multipart message.
+
+        It should always be used when writing multipart messages.
+
+        """
+        self._fp.write("\n--" + self._boundary + "--\n")
+
+
+if __name__ == '__main__':
+    import test.test_MimeWriter
diff --git a/lib-python/2.2/Queue.py b/lib-python/2.2/Queue.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Queue.py
@@ -0,0 +1,151 @@
+"""A multi-producer, multi-consumer queue."""
+
+class Empty(Exception):
+    "Exception raised by Queue.get(block=0)/get_nowait()."
+    # Carries no extra state; the exception type alone signals the condition.
+    pass
+
+class Full(Exception):
+    "Exception raised by Queue.put(block=0)/put_nowait()."
+    # Carries no extra state; the exception type alone signals the condition.
+    pass
+
+class Queue:
+    def __init__(self, maxsize=0):
+        """Initialize a queue object with a given maximum size.
+
+        If maxsize is <= 0, the queue size is infinite.
+        """
+        import thread
+        self._init(maxsize)
+        # mutex serializes access to the underlying queue representation.
+        # esema (the "empty" semaphore) is held exactly while the queue is
+        # empty: acquired at construction so the first blocking get() waits;
+        # put() releases it when the queue becomes non-empty.
+        # fsema (the "full" semaphore) is held exactly while the queue is
+        # full; get() releases it when a slot frees up.
+        self.mutex = thread.allocate_lock()
+        self.esema = thread.allocate_lock()
+        self.esema.acquire()
+        self.fsema = thread.allocate_lock()
+
+    def qsize(self):
+        """Return the approximate size of the queue (not reliable!)."""
+        self.mutex.acquire()
+        n = self._qsize()
+        self.mutex.release()
+        return n
+
+    def empty(self):
+        """Return 1 if the queue is empty, 0 otherwise (not reliable!)."""
+        self.mutex.acquire()
+        n = self._empty()
+        self.mutex.release()
+        return n
+
+    def full(self):
+        """Return 1 if the queue is full, 0 otherwise (not reliable!)."""
+        self.mutex.acquire()
+        n = self._full()
+        self.mutex.release()
+        return n
+
+    def put(self, item, block=1):
+        """Put an item into the queue.
+
+        If optional arg 'block' is 1 (the default), block if
+        necessary until a free slot is available.  Otherwise (block
+        is 0), put an item on the queue if a free slot is immediately
+        available, else raise the Full exception.
+        """
+        if block:
+            self.fsema.acquire()
+        elif not self.fsema.acquire(0):
+            raise Full
+        self.mutex.acquire()
+        release_fsema = True
+        try:
+            was_empty = self._empty()
+            self._put(item)
+            # If we fail before here, the empty state has
+            # not changed, so we can skip the release of esema
+            if was_empty:
+                self.esema.release()
+            # If we fail before here, the queue can not be full, so
+            # release_full_sema remains True
+            release_fsema = not self._full()
+        finally:
+            # Catching system level exceptions here (RecursionDepth,
+            # OutOfMemory, etc) - so do as little as possible in terms
+            # of Python calls.
+            if release_fsema:
+                self.fsema.release()
+            self.mutex.release()
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        Only enqueue the item if a free slot is immediately available.
+        Otherwise raise the Full exception.
+        """
+        return self.put(item, 0)
+
+    def get(self, block=1):
+        """Remove and return an item from the queue.
+
+        If optional arg 'block' is 1 (the default), block if
+        necessary until an item is available.  Otherwise (block is 0),
+        return an item if one is immediately available, else raise the
+        Empty exception.
+        """
+        if block:
+            self.esema.acquire()
+        elif not self.esema.acquire(0):
+            raise Empty
+        self.mutex.acquire()
+        release_esema = True
+        try:
+            was_full = self._full()
+            item = self._get()
+            # If we fail before here, the full state has
+            # not changed, so we can skip the release of fsema
+            if was_full:
+                self.fsema.release()
+            # Failure means empty state also unchanged - release_esema
+            # remains True.
+            release_esema = not self._empty()
+        finally:
+            if release_esema:
+                self.esema.release()
+            self.mutex.release()
+        return item
+
+    def get_nowait(self):
+        """Remove and return an item from the queue without blocking.
+
+        Only get an item if one is immediately available.  Otherwise
+        raise the Empty exception.
+        """
+        return self.get(0)
+
+    # Override these methods to implement other queue organizations
+    # (e.g. stack or priority queue).
+    # These will only be called with appropriate locks held
+
+    # Initialize the queue representation
+    def _init(self, maxsize):
+        self.maxsize = maxsize
+        self.queue = []
+
+    def _qsize(self):
+        return len(self.queue)
+
+    # Check whether the queue is empty
+    def _empty(self):
+        return not self.queue
+
+    # Check whether the queue is full
+    def _full(self):
+        return self.maxsize > 0 and len(self.queue) == self.maxsize
+
+    # Put a new item in the queue
+    def _put(self, item):
+        self.queue.append(item)
+
+    # Get an item from the queue
+    def _get(self):
+        # FIFO: remove from the front of the list (an O(n) del on a list).
+        item = self.queue[0]
+        del self.queue[0]
+        return item
diff --git a/lib-python/2.2/SimpleHTTPServer.py b/lib-python/2.2/SimpleHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SimpleHTTPServer.py
@@ -0,0 +1,198 @@
+"""Simple HTTP Server.
+
+This module builds on BaseHTTPServer by implementing the standard GET
+and HEAD requests in a fairly straightforward manner.
+
+"""
+
+
+__version__ = "0.6"
+
+__all__ = ["SimpleHTTPRequestHandler"]
+
+import os
+import posixpath
+import BaseHTTPServer
+import urllib
+import cgi
+import shutil
+import mimetypes
+from StringIO import StringIO
+
+
+class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+    """Simple HTTP request handler with GET and HEAD commands.
+
+    This serves files from the current directory and any of its
+    subdirectories.  It assumes that all files are plain text files
+    unless they have the extension ".html" in which case it assumes
+    they are HTML files.
+
+    The GET and HEAD requests are identical except that the HEAD
+    request omits the actual contents of the file.
+
+    """
+
+    server_version = "SimpleHTTP/" + __version__
+
+    def do_GET(self):
+        """Serve a GET request."""
+        f = self.send_head()
+        if f:
+            self.copyfile(f, self.wfile)
+            f.close()
+
+    def do_HEAD(self):
+        """Serve a HEAD request."""
+        # Headers only: send_head() emits status + headers; the body is
+        # deliberately not copied.
+        f = self.send_head()
+        if f:
+            f.close()
+
+    def send_head(self):
+        """Common code for GET and HEAD commands.
+
+        This sends the response code and MIME headers.
+
+        Return value is either a file object (which has to be copied
+        to the outputfile by the caller unless the command was HEAD,
+        and must be closed by the caller under all circumstances), or
+        None, in which case the caller has nothing further to do.
+
+        """
+        path = self.translate_path(self.path)
+        f = None
+        if os.path.isdir(path):
+            for index in "index.html", "index.htm":
+                index = os.path.join(path, index)
+                if os.path.exists(index):
+                    path = index
+                    break
+            else:
+                # for/else: the loop found no index file, so produce a
+                # directory listing instead.
+                return self.list_directory(path)
+        ctype = self.guess_type(path)
+        if ctype.startswith('text/'):
+            mode = 'r'
+        else:
+            mode = 'rb'
+        try:
+            f = open(path, mode)
+        except IOError:
+            self.send_error(404, "File not found")
+            return None
+        self.send_response(200)
+        self.send_header("Content-type", ctype)
+        self.end_headers()
+        return f
+
+    def list_directory(self, path):
+        """Helper to produce a directory listing (absent index.html).
+
+        Return value is either a file object, or None (indicating an
+        error).  In either case, the headers are sent, making the
+        interface the same as for send_head().
+
+        """
+        try:
+            list = os.listdir(path)
+        except os.error:
+            self.send_error(404, "No permission to list directory")
+            return None
+        # Case-insensitive sort of the directory entries.
+        list.sort(lambda a, b: cmp(a.lower(), b.lower()))
+        f = StringIO()
+        f.write("<title>Directory listing for %s</title>\n" % self.path)
+        f.write("<h2>Directory listing for %s</h2>\n" % self.path)
+        f.write("<hr>\n<ul>\n")
+        for name in list:
+            fullname = os.path.join(path, name)
+            # Escape HTML metacharacters before embedding the name in markup.
+            displayname = linkname = name = cgi.escape(name)
+            # Append / for directories or @ for symbolic links
+            if os.path.isdir(fullname):
+                displayname = name + "/"
+                linkname = name + "/"
+            if os.path.islink(fullname):
+                displayname = name + "@"
+                # Note: a link to a directory displays with @ and links with /
+            f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
+        f.write("</ul>\n<hr>\n")
+        f.seek(0)
+        self.send_response(200)
+        self.send_header("Content-type", "text/html")
+        self.end_headers()
+        return f
+
+    def translate_path(self, path):
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        path = posixpath.normpath(urllib.unquote(path))
+        words = path.split('/')
+        words = filter(None, words)
+        # Rebuild the path underneath os.getcwd(): splitdrive/split discard
+        # drive specs and directory prefixes from each component, and
+        # '.'/'..' components are skipped outright.
+        path = os.getcwd()
+        for word in words:
+            drive, word = os.path.splitdrive(word)
+            head, word = os.path.split(word)
+            if word in (os.curdir, os.pardir): continue
+            path = os.path.join(path, word)
+        return path
+
+    def copyfile(self, source, outputfile):
+        """Copy all data between two file objects.
+
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
+
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that this the default server uses this
+        to copy binary data as well.
+
+        """
+        shutil.copyfileobj(source, outputfile)
+
+    def guess_type(self, path):
+        """Guess the type of a file.
+
+        Argument is a PATH (a filename).
+
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
+
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using text/plain
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
+
+        """
+
+        base, ext = posixpath.splitext(path)
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        # Retry with the lowercased extension, then fall back to the
+        # default ('' key) entry.
+        ext = ext.lower()
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        else:
+            return self.extensions_map['']
+
+    extensions_map = mimetypes.types_map.copy()
+    extensions_map.update({
+        '': 'application/octet-stream', # Default
+        '.py': 'text/plain',
+        '.c': 'text/plain',
+        '.h': 'text/plain',
+        })
+
+
+def test(HandlerClass = SimpleHTTPRequestHandler,
+         ServerClass = BaseHTTPServer.HTTPServer):
+    """Run the BaseHTTPServer test harness using this module's handler."""
+    BaseHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    # When run as a script, serve files from the current directory.
+    test()
diff --git a/lib-python/2.2/SimpleXMLRPCServer.py b/lib-python/2.2/SimpleXMLRPCServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SimpleXMLRPCServer.py
@@ -0,0 +1,274 @@
+"""Simple XML-RPC Server.
+
+This module can be used to create simple XML-RPC servers
+by creating a server and either installing functions, a
+class instance, or by extending the SimpleXMLRPCRequestHandler
+class.
+
+A list of possible usage patterns follows:
+
+1. Install functions:
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_function(pow)
+server.register_function(lambda x,y: x+y, 'add')
+server.serve_forever()
+
+2. Install an instance:
+
+class MyFuncs:
+    def __init__(self):
+        # make all of the string functions available through
+        # string.func_name
+        import string
+        self.string = string
+    def pow(self, x, y): return pow(x, y)
+    def add(self, x, y) : return x + y
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_instance(MyFuncs())
+server.serve_forever()
+
+3. Install an instance with custom dispatch method:
+
+class Math:
+    def _dispatch(self, method, params):
+        if method == 'pow':
+            return apply(pow, params)
+        elif method == 'add':
+            return params[0] + params[1]
+        else:
+            raise 'bad method'
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_instance(Math())
+server.serve_forever()
+
+4. Subclass SimpleXMLRPCRequestHandler:
+
+class MathHandler(SimpleXMLRPCRequestHandler):
+    def _dispatch(self, method, params):
+        try:
+            # We are forcing the 'export_' prefix on methods that are
+            # callable through XML-RPC to prevent potential security
+            # problems
+            func = getattr(self, 'export_' + method)
+        except AttributeError:
+            raise Exception('method "%s" is not supported' % method)
+        else:
+            return apply(func, params)
+
+    def log_message(self, format, *args):
+        pass # maybe do something fancy like write the messages to a file
+
+    def export_add(self, x, y):
+        return x + y
+
+server = SimpleXMLRPCServer(("localhost", 8000), MathHandler)
+server.serve_forever()
+"""
+
+# Written by Brian Quinlan (brian at sweetapp.com).
+# Based on code written by Fredrik Lundh.
+
+import xmlrpclib
+import SocketServer
+import BaseHTTPServer
+import sys
+
+class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """Simple XML-RPC request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+
+    XML-RPC requests are dispatched to the _dispatch method, which
+    may be overridden by subclasses. The default implementation attempts
+    to dispatch XML-RPC calls to the functions or instance installed
+    in the server.
+    """
+
+    def do_POST(self):
+        """Handles the HTTP POST request.
+
+        Attempts to interpret all HTTP POST requests as XML-RPC calls,
+        which are forwarded to the _dispatch method for handling.
+        """
+
+        try:
+            # get arguments
+            data = self.rfile.read(int(self.headers["content-length"]))
+            params, method = xmlrpclib.loads(data)
+
+            # generate response
+            try:
+                response = self._dispatch(method, params)
+                # wrap response in a singleton tuple
+                response = (response,)
+            except:
+                # report exception back to server
+                # NOTE(review): bare except -- any dispatch failure becomes
+                # an XML-RPC Fault sent to the client instead of killing
+                # the handler.
+                response = xmlrpclib.dumps(
+                    xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
+                    )
+            else:
+                response = xmlrpclib.dumps(response, methodresponse=1)
+        except:
+            # internal error, report as HTTP server error
+            # NOTE(review): bare except again -- failures while reading or
+            # decoding the request are reported as a generic HTTP 500.
+            self.send_response(500)
+            self.end_headers()
+        else:
+            # got a valid XML RPC response
+            self.send_response(200)
+            self.send_header("Content-type", "text/xml")
+            self.send_header("Content-length", str(len(response)))
+            self.end_headers()
+            self.wfile.write(response)
+
+            # shut down the connection
+            self.wfile.flush()
+            # shutdown(1) half-closes the write side so the client sees EOF.
+            self.connection.shutdown(1)
+
+    def _dispatch(self, method, params):
+        """Dispatches the XML-RPC method.
+
+        XML-RPC calls are forwarded to a registered function that
+        matches the called XML-RPC method name. If no such function
+        exists then the call is forwarded to the registered instance,
+        if available.
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+        its parameters as a tuple
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called.
+
+        Methods beginning with an '_' are considered private and will
+        not be called by SimpleXMLRPCServer.
+        """
+
+        func = None
+        try:
+            # check to see if a matching function has been registered
+            func = self.server.funcs[method]
+        except KeyError:
+            if self.server.instance is not None:
+                # check for a _dispatch method
+                if hasattr(self.server.instance, '_dispatch'):
+                    return self.server.instance._dispatch(method, params)
+                else:
+                    # call instance method directly
+                    try:
+                        func = _resolve_dotted_attribute(
+                            self.server.instance,
+                            method,
+                            self.allow_dotted_names
+                            )
+                    except AttributeError:
+                        pass
+
+        if func is not None:
+            return apply(func, params)
+        else:
+            raise Exception('method "%s" is not supported' % method)
+
+    def log_request(self, code='-', size='-'):
+        """Selectively log an accepted request."""
+
+        # Delegate to the base logger only when the server enabled logging.
+        if self.server.logRequests:
+            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
+
+
+def _resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
+    """Resolves a dotted attribute name to an object.  Raises
+    an AttributeError if any attribute in the chain starts with a '_'.
+
+    If the optional allow_dotted_names argument is false, dots are not
+    supported and this function operates similar to getattr(obj, attr).
+    """
+
+    if allow_dotted_names:
+        attrs = attr.split('.')
+    else:
+        attrs = [attr]
+
+    # Walk the attribute chain one segment at a time, refusing any
+    # segment that is private by convention (leading underscore).
+    for i in attrs:
+        if i.startswith('_'):
+            raise AttributeError(
+                'attempt to access private attribute "%s"' % i
+                )
+        else:
+            obj = getattr(obj,i)
+    return obj
+
+
+class SimpleXMLRPCServer(SocketServer.TCPServer):
+    """Simple XML-RPC server.
+
+    Simple XML-RPC server that allows functions and a single instance
+    to be installed to handle requests.
+    """
+
+    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
+                 logRequests=1):
+        # funcs maps XML-RPC method name -> callable; instance is the
+        # optional fallback object searched when no function matches.
+        self.funcs = {}
+        self.logRequests = logRequests
+        self.instance = None
+        SocketServer.TCPServer.__init__(self, addr, requestHandler)
+
+    def register_instance(self, instance, allow_dotted_names=False):
+        """Registers an instance to respond to XML-RPC requests.
+
+        Only one instance can be installed at a time.
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+        its parameters as a tuple
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called.
+
+        Methods beginning with an '_' are considered private and will
+        not be called by SimpleXMLRPCServer.
+
+        If a registered function matches a XML-RPC request, then it
+        will be called instead of the registered instance.
+
+        If the optional allow_dotted_names argument is true and the
+        instance does not have a _dispatch method, method names
+        containing dots are supported and resolved, as long as none of
+        the name segments start with an '_'.
+
+            *** SECURITY WARNING: ***
+
+            Enabling the allow_dotted_names options allows intruders
+            to access your module's global variables and may allow
+            intruders to execute arbitrary code on your machine.  Only
+            use this option on a secure, closed network.
+
+        """
+
+        self.instance = instance
+        self.allow_dotted_names = allow_dotted_names
+
+    def register_function(self, function, name = None):
+        """Registers a function to respond to XML-RPC requests.
+
+        The optional name argument can be used to set a Unicode name
+        for the function.
+
+        If an instance is also registered then it will only be called
+        if a matching function is not found.
+        """
+
+        # Default the exposed name to the function's own __name__.
+        if name is None:
+            name = function.__name__
+        self.funcs[name] = function
+
+if __name__ == '__main__':
+    # Demo server matching usage pattern 1 in the module docstring.
+    server = SimpleXMLRPCServer(("localhost", 8000))
+    server.register_function(pow)
+    server.register_function(lambda x,y: x+y, 'add')
+    server.serve_forever()
diff --git a/lib-python/2.2/SocketServer.py b/lib-python/2.2/SocketServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SocketServer.py
@@ -0,0 +1,576 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+        - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+        - AF_UNIX: Unix domain sockets
+        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+        - SOCK_STREAM (reliable stream, e.g. TCP)
+        - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+        (This is actually a hook for any processing that needs to look
+         at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+        - synchronous (one request is handled at a time)
+        - forking (each request is handled by a new process)
+        - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server.  This is bad class design, but
+saves some typing.  (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+        +------------+
+        | BaseServer |
+        +------------+
+              |
+              v
+        +-----------+        +------------------+
+        | TCPServer |------->| UnixStreamServer |
+        +-----------+        +------------------+
+              |
+              v
+        +-----------+        +--------------------+
+        | UDPServer |------->| UnixDatagramServer |
+        +-----------+        +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingServer and ThreadingServer mix-in classes.  For
+instance, a threading UDP server class is created as follows:
+
+        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer!
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method.  You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services.  This can be hidden by using the mix-in request handler
+classes StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child).  In this case,
+you can use a threading server, but you will probably have to use
+locks to avoid two requests that come in nearly simultaneous to apply
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested.  Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data.  This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use select() to
+decide which request to work on next (or whether to handle a new
+incoming request).  This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+  and encryption schemes
+- Standard framework for select-based multiplexing
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+  Copyright (C) 2000  Luke Kenneth Casson Leighton <lkcl at samba.org>
+
+  example: read entries from a SQL database (requires overriding
+  get_request() to return a table entry from the database).
+  entry is processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+# XXX Warning!
+# There is a test suite for this module, but it cannot be run by the
+# standard regression test.
+# To run it manually, run Lib/test/test_socketserver.py.
+
+__version__ = "0.4"
+
+
+import socket
+import sys
+import os
+
+__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
+           "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
+           "StreamRequestHandler","DatagramRequestHandler",
+           "ThreadingMixIn", "ForkingMixIn"]
+if hasattr(socket, "AF_UNIX"):
+    __all__.extend(["UnixStreamServer","UnixDatagramServer",
+                    "ThreadingUnixStreamServer",
+                    "ThreadingUnixDatagramServer"])
+
+class BaseServer:
+
+    """Base class for server classes.
+
+    Methods for the caller:
+
+    - __init__(server_address, RequestHandlerClass)
+    - serve_forever()
+    - handle_request()  # if you do not use serve_forever()
+    - fileno() -> int   # for select()
+
+    Methods that may be overridden:
+
+    - server_bind()
+    - server_activate()
+    - get_request() -> request, client_address
+    - verify_request(request, client_address)
+    - server_close()
+    - process_request(request, client_address)
+    - close_request(request)
+    - handle_error()
+
+    Methods for derived classes:
+
+    - finish_request(request, client_address)
+
+    Class variables that may be overridden by derived classes or
+    instances:
+
+    - address_family
+    - socket_type
+    - reuse_address
+
+    Instance variables:
+
+    - RequestHandlerClass
+    - socket
+
+    """
+
+    def __init__(self, server_address, RequestHandlerClass):
+        """Constructor.  May be extended, do not override."""
+        self.server_address = server_address
+        self.RequestHandlerClass = RequestHandlerClass
+
+    def server_activate(self):
+        """Called by constructor to activate the server.
+
+        May be overridden.
+
+        """
+        pass
+
+    def serve_forever(self):
+        """Handle one request at a time until doomsday."""
+        while 1:
+            self.handle_request()
+
+    # The distinction between handling, getting, processing and
+    # finishing a request is fairly arbitrary.  Remember:
+    #
+    # - handle_request() is the top-level call.  It calls
+    #   get_request(), verify_request() and process_request()
+    # - get_request() is different for stream or datagram sockets
+    # - process_request() is the place that may fork a new process
+    #   or create a new thread to finish the request
+    # - finish_request() instantiates the request handler class;
+    #   this constructor will handle the request all by itself
+
+    def handle_request(self):
+        """Handle one request, possibly blocking."""
+        try:
+            request, client_address = self.get_request()
+        except socket.error:
+            # Could not get a request; give up and let the caller retry.
+            return
+        if self.verify_request(request, client_address):
+            try:
+                self.process_request(request, client_address)
+            except:
+                # NOTE(review): bare except -- any error during processing
+                # is reported via handle_error() and the request is closed,
+                # so the server itself keeps running.
+                self.handle_error(request, client_address)
+                self.close_request(request)
+
+    def verify_request(self, request, client_address):
+        """Verify the request.  May be overridden.
+
+        Return true if we should proceed with this request.
+
+        """
+        return 1
+
+    def process_request(self, request, client_address):
+        """Call finish_request.
+
+        Overridden by ForkingMixIn and ThreadingMixIn.
+
+        """
+        self.finish_request(request, client_address)
+        self.close_request(request)
+
+    def server_close(self):
+        """Called to clean-up the server.
+
+        May be overridden.
+
+        """
+        pass
+
+    def finish_request(self, request, client_address):
+        """Finish one request by instantiating RequestHandlerClass."""
+        self.RequestHandlerClass(request, client_address, self)
+
+    def close_request(self, request):
+        """Called to clean up an individual request."""
+        pass
+
+    def handle_error(self, request, client_address):
+        """Handle an error gracefully.  May be overridden.
+
+        The default is to print a traceback and continue.
+
+        """
+        print '-'*40
+        print 'Exception happened during processing of request from',
+        print client_address
+        import traceback
+        traceback.print_exc() # XXX But this goes to stderr!
+        print '-'*40
+
+
+class TCPServer(BaseServer):
+
+    """Base class for various socket-based server classes.
+
+    Defaults to synchronous IP stream (i.e., TCP).
+
+    Methods for the caller:
+
+    - __init__(server_address, RequestHandlerClass)
+    - serve_forever()
+    - handle_request()  # if you don't use serve_forever()
+    - fileno() -> int   # for select()
+
+    Methods that may be overridden:
+
+    - server_bind()
+    - server_activate()
+    - get_request() -> request, client_address
+    - verify_request(request, client_address)
+    - process_request(request, client_address)
+    - close_request(request)
+    - handle_error()
+
+    Methods for derived classes:
+
+    - finish_request(request, client_address)
+
+    Class variables that may be overridden by derived classes or
+    instances:
+
+    - address_family
+    - socket_type
+    - request_queue_size (only for stream sockets)
+    - reuse_address
+
+    Instance variables:
+
+    - server_address
+    - RequestHandlerClass
+    - socket
+
+    """
+
+    address_family = socket.AF_INET
+
+    socket_type = socket.SOCK_STREAM
+
+    request_queue_size = 5
+
+    allow_reuse_address = 0
+
+    def __init__(self, server_address, RequestHandlerClass):
+        """Constructor.  May be extended, do not override."""
+        BaseServer.__init__(self, server_address, RequestHandlerClass)
+        # The socket is created, bound and activated immediately, so the
+        # server is listening as soon as construction finishes.
+        self.socket = socket.socket(self.address_family,
+                                    self.socket_type)
+        self.server_bind()
+        self.server_activate()
+
+    def server_bind(self):
+        """Called by constructor to bind the socket.
+
+        May be overridden.
+
+        """
+        # SO_REUSEADDR lets the server rebind to an address still in
+        # TIME_WAIT from a previous run.
+        if self.allow_reuse_address:
+            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.socket.bind(self.server_address)
+
+    def server_activate(self):
+        """Called by constructor to activate the server.
+
+        May be overridden.
+
+        """
+        self.socket.listen(self.request_queue_size)
+
+    def server_close(self):
+        """Called to clean-up the server.
+
+        May be overridden.
+
+        """
+        self.socket.close()
+
+    def fileno(self):
+        """Return socket file number.
+
+        Interface required by select().
+
+        """
+        return self.socket.fileno()
+
+    def get_request(self):
+        """Get the request and client address from the socket.
+
+        May be overridden.
+
+        """
+        return self.socket.accept()
+
+    def close_request(self, request):
+        """Called to clean up an individual request."""
+        request.close()
+
+
+class UDPServer(TCPServer):
+
+    """UDP server class."""
+
+    allow_reuse_address = 0
+
+    socket_type = socket.SOCK_DGRAM
+
+    # Largest datagram read per request.
+    max_packet_size = 8192
+
+    def get_request(self):
+        # A datagram "request" is the (data, socket) pair plus the
+        # sender's address; there is no per-client connection object.
+        data, client_addr = self.socket.recvfrom(self.max_packet_size)
+        return (data, self.socket), client_addr
+
+    def server_activate(self):
+        # No need to call listen() for UDP.
+        pass
+
+    def close_request(self, request):
+        # No need to close anything.
+        pass
+
+class ForkingMixIn:
+
+    """Mix-in class to handle each request in a new process."""
+
+    # Pids of live children; created lazily on the first fork.
+    active_children = None
+    max_children = 40
+
+    def collect_children(self):
+        """Internal routine to wait for died children."""
+        while self.active_children:
+            if len(self.active_children) < self.max_children:
+                # Reap opportunistically without blocking.
+                options = os.WNOHANG
+            else:
+                # If the maximum number of children are already
+                # running, block while waiting for a child to exit
+                options = 0
+            try:
+                # pid 0 means "wait for any child of this process".
+                pid, status = os.waitpid(0, options)
+            except os.error:
+                pid = None
+            if not pid: break
+            self.active_children.remove(pid)
+
+    def process_request(self, request, client_address):
+        """Fork a new subprocess to process the request."""
+        self.collect_children()
+        pid = os.fork()
+        if pid:
+            # Parent process
+            if self.active_children is None:
+                self.active_children = []
+            self.active_children.append(pid)
+            # Close the parent's copy; the child owns the request now.
+            self.close_request(request)
+            return
+        else:
+            # Child process.
+            # This must never return, hence os._exit()!
+            try:
+                self.finish_request(request, client_address)
+                os._exit(0)
+            except:
+                try:
+                    self.handle_error(request, client_address)
+                finally:
+                    os._exit(1)
+
+
+class ThreadingMixIn:
+    """Mix-in class to handle each request in a new thread."""
+
+    def process_request_thread(self, request, client_address):
+        """Same as in BaseServer but as a thread.
+
+        In addition, exception handling is done here.
+
+        """
+        try:
+            self.finish_request(request, client_address)
+            self.close_request(request)
+        except:
+            self.handle_error(request, client_address)
+            # The request is closed on the error path as well.
+            self.close_request(request)
+
+    def process_request(self, request, client_address):
+        """Start a new thread to process the request."""
+        import threading
+        # NOTE: the thread is not marked as a daemon, so an in-flight
+        # request can keep the process alive after the server stops.
+        t = threading.Thread(target = self.process_request_thread,
+                             args = (request, client_address))
+        t.start()
+
+
+# Pre-mixed convenience classes.  The mix-in is listed first so that its
+# process_request() overrides the synchronous one from the server class.
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+# Unix-domain variants are only defined where the platform supports AF_UNIX.
+if hasattr(socket, 'AF_UNIX'):
+
+    class UnixStreamServer(TCPServer):
+        address_family = socket.AF_UNIX
+
+    class UnixDatagramServer(UDPServer):
+        address_family = socket.AF_UNIX
+
+    class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+    class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+    """Base class for request handler classes.
+
+    This class is instantiated for each request to be handled.  The
+    constructor sets the instance variables request, client_address
+    and server, and then calls the handle() method.  To implement a
+    specific service, all you need to do is to derive a class which
+    defines a handle() method.
+
+    The handle() method can find the request as self.request, the
+    client address as self.client_address, and the server (in case it
+    needs access to per-server information) as self.server.  Since a
+    separate instance is created for each request, the handle() method
+    can define arbitrary other instance variables.
+
+    """
+
+    def __init__(self, request, client_address, server):
+        self.request = request
+        self.client_address = client_address
+        self.server = server
+        # The constructor runs the entire request lifecycle.
+        try:
+            self.setup()
+            self.handle()
+            self.finish()
+        finally:
+            # Python 2's sys.exc_traceback can pin frames (and thus the
+            # request) in memory; clear it so they can be collected.
+            sys.exc_traceback = None    # Help garbage collection
+
+    def setup(self):
+        pass
+
+    def handle(self):
+        pass
+
+    def finish(self):
+        pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+    """Define self.rfile and self.wfile for stream sockets."""
+
+    # Default buffer sizes for rfile, wfile.
+    # We default rfile to buffered because otherwise it could be
+    # really slow for large data (a getc() call per byte); we make
+    # wfile unbuffered because (a) often after a write() we want to
+    # read and we need to flush the line; (b) big writes to unbuffered
+    # files are typically optimized by stdio even when big reads
+    # aren't.
+    rbufsize = -1
+    wbufsize = 0
+
+    def setup(self):
+        # For a stream server the request *is* the connection.
+        self.connection = self.request
+        self.rfile = self.connection.makefile('rb', self.rbufsize)
+        self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+    def finish(self):
+        # Flush buffered output before closing both file objects; the
+        # underlying connection itself is closed by close_request().
+        self.wfile.flush()
+        self.wfile.close()
+        self.rfile.close()
+
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+    # XXX Regrettably, I cannot get this working on Linux;
+    # s.recvfrom() doesn't return a meaningful client address.
+
+    """Define self.rfile and self.wfile for datagram sockets."""
+
+    def setup(self):
+        import StringIO
+        self.packet, self.socket = self.request
+        # Reads come from the received packet; writes are buffered in
+        # memory and sent back as a single datagram in finish().
+        self.rfile = StringIO.StringIO(self.packet)
+        self.wfile = StringIO.StringIO()
+
+    def finish(self):
+        self.socket.sendto(self.wfile.getvalue(), self.client_address)
diff --git a/lib-python/2.2/StringIO.py b/lib-python/2.2/StringIO.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/StringIO.py
@@ -0,0 +1,239 @@
+"""File-like objects that read from or write to a string buffer.
+
+This implements (nearly) all stdio methods.
+
+f = StringIO()      # ready for writing
+f = StringIO(buf)   # ready for reading
+f.close()           # explicitly release resources held
+flag = f.isatty()   # always false
+pos = f.tell()      # get current position
+f.seek(pos)         # set current position
+f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read()      # read until EOF
+buf = f.read(n)     # read up to n bytes
+buf = f.readline()  # read until end of line ('\n') or EOF
+list = f.readlines()# list of f.readline() results until EOF
+f.truncate([size])  # truncate file to at most size (default: current pos)
+f.write(buf)        # write at current position
+f.writelines(list)  # for line in list: f.write(line)
+f.getvalue()        # return whole file's contents as a string
+
+Notes:
+- Using a real file is often faster (but less convenient).
+- There's also a much faster implementation in C, called cStringIO, but
+  it's not subclassable.
+- fileno() is left unimplemented so that code which uses it triggers
+  an exception early.
+- Seeking far beyond EOF and then writing will insert real null
+  bytes that occupy space in the buffer.
+- There's a simple test set (see end of this file).
+"""
+import types
+try:
+    from errno import EINVAL
+except ImportError:
+    EINVAL = 22
+
+__all__ = ["StringIO"]
+
+class StringIO:
+    """class StringIO([buffer])
+
+    When a StringIO object is created, it can be initialized to an existing
+    string by passing the string to the constructor. If no string is given,
+    the StringIO will start empty.
+
+    The StringIO object can accept either Unicode or 8-bit strings, but
+    mixing the two may take some care. If both are used, 8-bit strings that
+    cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
+    a UnicodeError to be raised when getvalue() is called.
+    """
+    def __init__(self, buf = ''):
+        # Force self.buf to be a string or unicode
+        if type(buf) not in types.StringTypes:
+            buf = str(buf)
+        self.buf = buf
+        self.len = len(buf)
+        # Pending writes accumulate here and are joined into self.buf
+        # lazily (on seek/read/getvalue) to avoid quadratic appending.
+        self.buflist = []
+        self.pos = 0
+        self.closed = 0
+        self.softspace = 0
+
+    def __iter__(self):
+        # Iterate line by line until readline() returns '' at EOF.
+        return iter(self.readline, '')
+
+    def close(self):
+        """Free the memory buffer."""
+        if not self.closed:
+            self.closed = 1
+            del self.buf, self.pos
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return 0
+
+    def seek(self, pos, mode = 0):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        # Merge any pending writes before repositioning.
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        if mode == 1:
+            # Relative to the current position.
+            pos += self.pos
+        elif mode == 2:
+            # Relative to EOF.
+            pos += self.len
+        # A resulting negative offset is clamped to the start of file.
+        self.pos = max(0, pos)
+
+    def tell(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return self.pos
+
+    def read(self, n = -1):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        if n < 0:
+            # Negative (default) count means read to EOF.
+            newpos = self.len
+        else:
+            newpos = min(self.pos+n, self.len)
+        r = self.buf[self.pos:newpos]
+        self.pos = newpos
+        return r
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        i = self.buf.find('\n', self.pos)
+        if i < 0:
+            # No newline: the rest of the buffer is the last line.
+            newpos = self.len
+        else:
+            newpos = i+1
+        # An explicit length caps the line even before the newline.
+        if length is not None:
+            if self.pos + length < newpos:
+                newpos = self.pos + length
+        r = self.buf[self.pos:newpos]
+        self.pos = newpos
+        return r
+
+    def readlines(self, sizehint = 0):
+        # The closed-file check happens inside each readline() call.
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            # Stop once sizehint bytes have been collected (0 = no limit).
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
+
+    def truncate(self, size=None):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if size is None:
+            size = self.pos
+        elif size < 0:
+            raise IOError(EINVAL, "Negative size not allowed")
+        elif size < self.pos:
+            self.pos = size
+        # NOTE(review): self.len is not updated to the truncated size,
+        # so later position arithmetic still uses the old length --
+        # confirm against later CPython, which also sets self.len here.
+        self.buf = self.getvalue()[:size]
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if not s: return
+        # Force s to be a string or unicode
+        if type(s) not in types.StringTypes:
+            s = str(s)
+        if self.pos > self.len:
+            # Writing past EOF pads the gap with real NUL bytes, as the
+            # module docstring warns.
+            self.buflist.append('\0'*(self.pos - self.len))
+            self.len = self.pos
+        newpos = self.pos + len(s)
+        if self.pos < self.len:
+            # Overwrite in the middle: rebuild around the new segment.
+            if self.buflist:
+                self.buf += ''.join(self.buflist)
+                self.buflist = []
+            self.buflist = [self.buf[:self.pos], s, self.buf[newpos:]]
+            self.buf = ''
+            if newpos > self.len:
+                self.len = newpos
+        else:
+            # Plain append at EOF.
+            self.buflist.append(s)
+            self.len = newpos
+        self.pos = newpos
+
+    def writelines(self, list):
+        self.write(''.join(list))
+
+    def flush(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+
+    def getvalue(self):
+        """
+        Retrieve the entire contents of the "file" at any time before
+        the StringIO object's close() method is called.
+
+        The StringIO object can accept either Unicode or 8-bit strings,
+        but mixing the two may take some care. If both are used, 8-bit
+        strings that cannot be interpreted as 7-bit ASCII (that use the
+        8th bit) will cause a UnicodeError to be raised when getvalue()
+        is called.
+        """
+        # Merge pending writes so self.buf holds the complete value.
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        return self.buf
+
+
+# A little test suite
+
+def test():
+    """Self-test: replay a real text file through StringIO and check
+    that writing, seeking and re-reading reproduce it exactly."""
+    import sys
+    if sys.argv[1:]:
+        file = sys.argv[1]
+    else:
+        # Default fixture: any multi-line text file works.
+        file = '/etc/passwd'
+    lines = open(file, 'r').readlines()
+    text = open(file, 'r').read()
+    f = StringIO()
+    for line in lines[:-2]:
+        f.write(line)
+    f.writelines(lines[-2:])
+    if f.getvalue() != text:
+        raise RuntimeError, 'write failed'
+    length = f.tell()
+    print 'File length =', length
+    # Overwrite in the middle, then read back from the start.
+    f.seek(len(lines[0]))
+    f.write(lines[1])
+    f.seek(0)
+    print 'First line =', `f.readline()`
+    here = f.tell()
+    line = f.readline()
+    print 'Second line =', `line`
+    # Relative seek (mode 1) back over the line just read.
+    f.seek(-len(line), 1)
+    line2 = f.read(len(line))
+    if line != line2:
+        raise RuntimeError, 'bad result after seek back'
+    f.seek(len(line2), 1)
+    list = f.readlines()
+    line = list[-1]
+    f.seek(f.tell() - len(line))
+    line2 = f.read()
+    if line != line2:
+        raise RuntimeError, 'bad result after seek back from EOF'
+    print 'Read', len(list), 'more lines'
+    print 'File length =', f.tell()
+    if f.tell() != length:
+        raise RuntimeError, 'bad length'
+    f.close()
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/TERMIOS.py b/lib-python/2.2/TERMIOS.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/TERMIOS.py
@@ -0,0 +1,14 @@
+"""Backward-compatibility version of TERMIOS; export constants exported by
+termios, and issue a deprecation warning.
+"""
+
+import warnings
+warnings.warn("the TERMIOS module is deprecated; please use termios",
+              DeprecationWarning)
+
+
+# Export the constants known to the termios module:
+from termios import *
+
+# and *only* the constants: keep just names starting with an upper-case
+# letter, dropping functions and dunder names brought in by the import.
+__all__ = [s for s in dir() if s[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
diff --git a/lib-python/2.2/UserDict.py b/lib-python/2.2/UserDict.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/UserDict.py
@@ -0,0 +1,60 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
+class UserDict:
+    """Dictionary-like wrapper; the real mapping is kept in self.data."""
+    def __init__(self, dict=None):
+        self.data = {}
+        if dict is not None: self.update(dict)
+    def __repr__(self): return repr(self.data)
+    def __cmp__(self, dict):
+        # Compare underlying data when both sides are UserDicts.
+        if isinstance(dict, UserDict):
+            return cmp(self.data, dict.data)
+        else:
+            return cmp(self.data, dict)
+    def __len__(self): return len(self.data)
+    def __getitem__(self, key): return self.data[key]
+    def __setitem__(self, key, item): self.data[key] = item
+    def __delitem__(self, key): del self.data[key]
+    def clear(self): self.data.clear()
+    def copy(self):
+        if self.__class__ is UserDict:
+            # Plain UserDict: a shallow copy of the data suffices.
+            return UserDict(self.data)
+        import copy
+        data = self.data
+        try:
+            # Detach self.data so copy.copy() does not duplicate the
+            # (possibly large) dictionary; it is re-filled below.
+            self.data = {}
+            c = copy.copy(self)
+        finally:
+            self.data = data
+        c.update(self)
+        return c
+    def keys(self): return self.data.keys()
+    def items(self): return self.data.items()
+    def iteritems(self): return self.data.iteritems()
+    def iterkeys(self): return self.data.iterkeys()
+    def itervalues(self): return self.data.itervalues()
+    def values(self): return self.data.values()
+    def has_key(self, key): return self.data.has_key(key)
+    def update(self, dict):
+        # Three cases: another UserDict, a real dict, or any mapping
+        # that supports items().
+        if isinstance(dict, UserDict):
+            self.data.update(dict.data)
+        elif isinstance(dict, type(self.data)):
+            self.data.update(dict)
+        else:
+            for k, v in dict.items():
+                self[k] = v
+    def get(self, key, failobj=None):
+        if not self.has_key(key):
+            return failobj
+        return self[key]
+    def setdefault(self, key, failobj=None):
+        if not self.has_key(key):
+            self[key] = failobj
+        return self[key]
+    def popitem(self):
+        return self.data.popitem()
+    def __contains__(self, key):
+        return key in self.data
+
+class IterableUserDict(UserDict):
+    """UserDict that also supports iteration over its keys."""
+    def __iter__(self):
+        return iter(self.data)
diff --git a/lib-python/2.2/UserList.py b/lib-python/2.2/UserList.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/UserList.py
@@ -0,0 +1,85 @@
+"""A more or less complete user-defined wrapper around list objects."""
+
+class UserList:
+    """List-like wrapper; the real list is kept in self.data."""
+    def __init__(self, initlist=None):
+        self.data = []
+        if initlist is not None:
+            # XXX should this accept an arbitrary sequence?
+            if type(initlist) == type(self.data):
+                self.data[:] = initlist
+            elif isinstance(initlist, UserList):
+                self.data[:] = initlist.data[:]
+            else:
+                self.data = list(initlist)
+    def __repr__(self): return repr(self.data)
+    def __lt__(self, other): return self.data <  self.__cast(other)
+    def __le__(self, other): return self.data <= self.__cast(other)
+    def __eq__(self, other): return self.data == self.__cast(other)
+    def __ne__(self, other): return self.data != self.__cast(other)
+    def __gt__(self, other): return self.data >  self.__cast(other)
+    def __ge__(self, other): return self.data >= self.__cast(other)
+    def __cast(self, other):
+        # Unwrap another UserList so comparisons work on the raw lists.
+        if isinstance(other, UserList): return other.data
+        else: return other
+    def __cmp__(self, other):
+        return cmp(self.data, self.__cast(other))
+    def __contains__(self, item): return item in self.data
+    def __len__(self): return len(self.data)
+    def __getitem__(self, i): return self.data[i]
+    def __setitem__(self, i, item): self.data[i] = item
+    def __delitem__(self, i): del self.data[i]
+    def __getslice__(self, i, j):
+        # Slice indices arrive pre-adjusted for negatives; clamp at 0
+        # and return an instance of the (sub)class.
+        i = max(i, 0); j = max(j, 0)
+        return self.__class__(self.data[i:j])
+    def __setslice__(self, i, j, other):
+        i = max(i, 0); j = max(j, 0)
+        if isinstance(other, UserList):
+            self.data[i:j] = other.data
+        elif isinstance(other, type(self.data)):
+            self.data[i:j] = other
+        else:
+            self.data[i:j] = list(other)
+    def __delslice__(self, i, j):
+        i = max(i, 0); j = max(j, 0)
+        del self.data[i:j]
+    def __add__(self, other):
+        if isinstance(other, UserList):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, type(self.data)):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + list(other))
+    def __radd__(self, other):
+        if isinstance(other, UserList):
+            return self.__class__(other.data + self.data)
+        elif isinstance(other, type(self.data)):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(list(other) + self.data)
+    def __iadd__(self, other):
+        # In-place add mutates self.data and returns self.
+        if isinstance(other, UserList):
+            self.data += other.data
+        elif isinstance(other, type(self.data)):
+            self.data += other
+        else:
+            self.data += list(other)
+        return self
+    def __mul__(self, n):
+        return self.__class__(self.data*n)
+    __rmul__ = __mul__
+    def __imul__(self, n):
+        self.data *= n
+        return self
+    def append(self, item): self.data.append(item)
+    def insert(self, i, item): self.data.insert(i, item)
+    def pop(self, i=-1): return self.data.pop(i)
+    def remove(self, item): self.data.remove(item)
+    def count(self, item): return self.data.count(item)
+    def index(self, item): return self.data.index(item)
+    def reverse(self): self.data.reverse()
+    # apply() forwards any extra arguments on to list.sort().
+    def sort(self, *args): apply(self.data.sort, args)
+    def extend(self, other):
+        if isinstance(other, UserList):
+            self.data.extend(other.data)
+        else:
+            self.data.extend(other)
diff --git a/lib-python/2.2/UserString.py b/lib-python/2.2/UserString.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/UserString.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+## vim:ts=4:et:nowrap
+"""A user-defined wrapper around string objects
+
+Note: string objects have grown methods in Python 1.6
+This module requires Python 1.6 or later.
+"""
+from types import StringType, UnicodeType
+import sys
+
+__all__ = ["UserString","MutableString"]
+
+class UserString:
+    """String-like wrapper; the wrapped value is kept in self.data."""
+    def __init__(self, seq):
+        if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
+            self.data = seq
+        elif isinstance(seq, UserString):
+            self.data = seq.data[:]
+        else:
+            # Anything else is converted via str().
+            self.data = str(seq)
+    def __str__(self): return str(self.data)
+    def __repr__(self): return repr(self.data)
+    def __int__(self): return int(self.data)
+    def __long__(self): return long(self.data)
+    def __float__(self): return float(self.data)
+    def __complex__(self): return complex(self.data)
+    def __hash__(self): return hash(self.data)
+
+    def __cmp__(self, string):
+        if isinstance(string, UserString):
+            return cmp(self.data, string.data)
+        else:
+            return cmp(self.data, string)
+    def __contains__(self, char):
+        return char in self.data
+
+    def __len__(self): return len(self.data)
+    def __getitem__(self, index): return self.__class__(self.data[index])
+    def __getslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        return self.__class__(self.data[start:end])
+
+    def __add__(self, other):
+        if isinstance(other, UserString):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, StringType) or isinstance(other, UnicodeType):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + str(other))
+    def __radd__(self, other):
+        if isinstance(other, StringType) or isinstance(other, UnicodeType):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(str(other) + self.data)
+    def __iadd__(self, other):
+        if isinstance(other, UserString):
+            self.data += other.data
+        elif isinstance(other, StringType) or isinstance(other, UnicodeType):
+            self.data += other
+        else:
+            self.data += str(other)
+        return self
+    def __mul__(self, n):
+        return self.__class__(self.data*n)
+    __rmul__ = __mul__
+    def __imul__(self, n):
+        self.data *= n
+        return self
+
+    # the following methods are defined in alphabetical order:
+    # transforming methods return self.__class__ instances; predicates
+    # and searches return plain values, mirroring the str interface.
+    def capitalize(self): return self.__class__(self.data.capitalize())
+    def center(self, width): return self.__class__(self.data.center(width))
+    def count(self, sub, start=0, end=sys.maxint):
+        return self.data.count(sub, start, end)
+    def decode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.decode(encoding, errors))
+            else:
+                return self.__class__(self.data.decode(encoding))
+        else:
+            return self.__class__(self.data.decode())
+    def encode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.encode(encoding, errors))
+            else:
+                return self.__class__(self.data.encode(encoding))
+        else:
+            return self.__class__(self.data.encode())
+    def endswith(self, suffix, start=0, end=sys.maxint):
+        return self.data.endswith(suffix, start, end)
+    def expandtabs(self, tabsize=8):
+        return self.__class__(self.data.expandtabs(tabsize))
+    def find(self, sub, start=0, end=sys.maxint):
+        return self.data.find(sub, start, end)
+    def index(self, sub, start=0, end=sys.maxint):
+        return self.data.index(sub, start, end)
+    def isalpha(self): return self.data.isalpha()
+    def isalnum(self): return self.data.isalnum()
+    def isdecimal(self): return self.data.isdecimal()
+    def isdigit(self): return self.data.isdigit()
+    def islower(self): return self.data.islower()
+    def isnumeric(self): return self.data.isnumeric()
+    def isspace(self): return self.data.isspace()
+    def istitle(self): return self.data.istitle()
+    def isupper(self): return self.data.isupper()
+    def join(self, seq): return self.data.join(seq)
+    def ljust(self, width): return self.__class__(self.data.ljust(width))
+    def lower(self): return self.__class__(self.data.lower())
+    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+    def replace(self, old, new, maxsplit=-1):
+        return self.__class__(self.data.replace(old, new, maxsplit))
+    def rfind(self, sub, start=0, end=sys.maxint):
+        return self.data.rfind(sub, start, end)
+    def rindex(self, sub, start=0, end=sys.maxint):
+        return self.data.rindex(sub, start, end)
+    def rjust(self, width): return self.__class__(self.data.rjust(width))
+    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+    def split(self, sep=None, maxsplit=-1):
+        return self.data.split(sep, maxsplit)
+    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+    def startswith(self, prefix, start=0, end=sys.maxint):
+        return self.data.startswith(prefix, start, end)
+    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+    def swapcase(self): return self.__class__(self.data.swapcase())
+    def title(self): return self.__class__(self.data.title())
+    def translate(self, *args):
+        return self.__class__(self.data.translate(*args))
+    def upper(self): return self.__class__(self.data.upper())
+    def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+class MutableString(UserString):
+    """mutable string objects
+
+    Python strings are immutable objects.  This has the advantage, that
+    strings may be used as dictionary keys.  If this property isn't needed
+    and you insist on changing string values in place instead, you may cheat
+    and use MutableString.
+
+    But the purpose of this class is an educational one: to prevent
+    people from inventing their own mutable string class derived
+    from UserString and then forgetting to remove (override) the
+    __hash__ method inherited from UserString.  This would lead to
+    errors that would be very hard to track down.
+
+    A faster and better solution is to rewrite your program using lists."""
+    def __init__(self, string=""):
+        self.data = string
+    def __hash__(self):
+        # Mutable objects must not be usable as dictionary keys.
+        raise TypeError, "unhashable type (it is mutable)"
+    def __setitem__(self, index, sub):
+        if index < 0 or index >= len(self.data): raise IndexError
+        # Rebuild the string around the replaced character.
+        self.data = self.data[:index] + sub + self.data[index+1:]
+    def __delitem__(self, index):
+        if index < 0 or index >= len(self.data): raise IndexError
+        self.data = self.data[:index] + self.data[index+1:]
+    def __setslice__(self, start, end, sub):
+        start = max(start, 0); end = max(end, 0)
+        if isinstance(sub, UserString):
+            self.data = self.data[:start]+sub.data+self.data[end:]
+        elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
+            self.data = self.data[:start]+sub+self.data[end:]
+        else:
+            self.data =  self.data[:start]+str(sub)+self.data[end:]
+    def __delslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        self.data = self.data[:start] + self.data[end:]
+    def immutable(self):
+        # Return a hashable snapshot of the current value.
+        return UserString(self.data)
+
+if __name__ == "__main__":
+    # execute the regression test to stdout, if called as a script:
+    import os
+    called_in_dir, called_as = os.path.split(sys.argv[0])
+    called_in_dir = os.path.abspath(called_in_dir)
+    called_as, py = os.path.splitext(called_as)
+    # The matching test module (test_<scriptname>) lives in the sibling
+    # 'test' directory; make it importable.
+    sys.path.append(os.path.join(called_in_dir, 'test'))
+    if '-q' in sys.argv:
+        import test_support
+        test_support.verbose = 0
+    __import__('test_' + called_as.lower())
diff --git a/lib-python/2.2/__future__.py b/lib-python/2.2/__future__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/__future__.py
@@ -0,0 +1,104 @@
+"""Record of phased-in incompatible language changes.
+
+Each line is of the form:
+
+    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
+                              CompilerFlag ")"
+
+where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
+of the same form as sys.version_info:
+
+    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
+     PY_MINOR_VERSION, # the 1; an int
+     PY_MICRO_VERSION, # the 0; an int
+     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
+     PY_RELEASE_SERIAL # the 3; an int
+    )
+
+OptionalRelease records the first release in which
+
+    from __future__ import FeatureName
+
+was accepted.
+
+In the case of MandatoryReleases that have not yet occurred,
+MandatoryRelease predicts the release in which the feature will become part
+of the language.
+
+Else MandatoryRelease records when the feature became part of the language;
+in releases at or after that, modules no longer need
+
+    from __future__ import FeatureName
+
+to use the feature in question, but may continue to use such imports.
+
+MandatoryRelease may also be None, meaning that a planned feature got
+dropped.
+
+Instances of class _Feature have two corresponding methods,
+.getOptionalRelease() and .getMandatoryRelease().
+
+CompilerFlag is the (bitfield) flag that should be passed in the fourth
+argument to the builtin function compile() to enable the feature in
+dynamically compiled code.  This flag is stored in the .compiler_flag
+attribute on _Future instances.  These values must match the appropriate
+#defines of CO_xxx flags in Include/compile.h.
+
+No feature line is ever to be deleted from this file.
+"""
+
+# Every feature ever defined in this module, in definition order.  Per the
+# module docstring, entries are only appended, never removed.
+all_feature_names = [
+    "nested_scopes",
+    "generators",
+    "division",
+]
+
+__all__ = ["all_feature_names"] + all_feature_names
+
+# The CO_xxx symbols are defined here under the same names used by
+# compile.h, so that an editor search will find them here.  However,
+# they're not exported in __all__, because they don't really belong to
+# this module.
+CO_NESTED            = 0x0010   # nested_scopes
+CO_GENERATOR_ALLOWED = 0x1000   # generators
+CO_FUTURE_DIVISION   = 0x2000   # division
+
+class _Feature:
+    """Description of one phased-in language feature.
+
+    Holds the release in which `from __future__ import <name>` was first
+    accepted, the release in which the feature becomes (or became)
+    mandatory, and the CO_xxx compiler flag that enables it in compile().
+    """
+    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
+        # Both releases are 5-tuples shaped like sys.version_info;
+        # mandatoryRelease may be None for a dropped feature.
+        self.optional = optionalRelease
+        self.mandatory = mandatoryRelease
+        self.compiler_flag = compiler_flag
+
+    def getOptionalRelease(self):
+        """Return first release in which this feature was recognized.
+
+        This is a 5-tuple, of the same form as sys.version_info.
+        """
+
+        return self.optional
+
+    def getMandatoryRelease(self):
+        """Return release in which this feature will become mandatory.
+
+        This is a 5-tuple, of the same form as sys.version_info, or, if
+        the feature was dropped, is None.
+        """
+
+        return self.mandatory
+
+    def __repr__(self):
+        # Render as a constructor-style expression, e.g.
+        # _Feature((2, 1, 0, 'beta', 1), (2, 2, 0, 'alpha', 0), 16)
+        return "_Feature" + repr((self.optional,
+                                  self.mandatory,
+                                  self.compiler_flag))
+
+# Feature records: (optional release, mandatory release, compiler flag).
+nested_scopes = _Feature((2, 1, 0, "beta",  1),
+                         (2, 2, 0, "alpha", 0),
+                         CO_NESTED)
+
+generators = _Feature((2, 2, 0, "alpha", 1),
+                      (2, 3, 0, "final", 0),
+                      CO_GENERATOR_ALLOWED)
+
+# Mandatory in 3.0: int/int keeps truncating until then unless imported.
+division = _Feature((2, 2, 0, "alpha", 2),
+                    (3, 0, 0, "alpha", 0),
+                    CO_FUTURE_DIVISION)
diff --git a/lib-python/2.2/__phello__.foo.py b/lib-python/2.2/__phello__.foo.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/__phello__.foo.py
@@ -0,0 +1,1 @@
+# This file exists as a helper for the test.test_frozen module.
diff --git a/lib-python/2.2/aifc.py b/lib-python/2.2/aifc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/aifc.py
@@ -0,0 +1,961 @@
+"""Stuff to parse AIFF-C and AIFF files.
+
+Unless explicitly stated otherwise, the description below is true
+both for AIFF-C files and AIFF files.
+
+An AIFF-C file has the following structure.
+
+  +-----------------+
+  | FORM            |
+  +-----------------+
+  | <size>          |
+  +----+------------+
+  |    | AIFC       |
+  |    +------------+
+  |    | <chunks>   |
+  |    |    .       |
+  |    |    .       |
+  |    |    .       |
+  +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data.  The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+  FVER
+      <version number of AIFF-C defining document> (AIFF-C only).
+  MARK
+      <# of markers> (2 bytes)
+      list of markers:
+          <marker ID> (2 bytes, must be > 0)
+          <position> (4 bytes)
+          <marker name> ("pstring")
+  COMM
+      <# of channels> (2 bytes)
+      <# of sound frames> (4 bytes)
+      <size of the samples> (2 bytes)
+      <sampling frequency> (10 bytes, IEEE 80-bit extended
+          floating point)
+      in AIFF-C files only:
+      <compression type> (4 bytes)
+      <human-readable version of compression type> ("pstring")
+  SSND
+      <offset> (4 bytes, not used by this program)
+      <blocksize> (4 bytes, not used by this program)
+      <sound data>
+
+A pstring consists of 1 byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+  f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+
+This returns an instance of a class with the following public methods:
+  getnchannels()  -- returns number of audio channels (1 for
+             mono, 2 for stereo)
+  getsampwidth()  -- returns sample width in bytes
+  getframerate()  -- returns sampling frequency
+  getnframes()    -- returns number of audio frames
+  getcomptype()   -- returns compression type ('NONE' for AIFF files)
+  getcompname()   -- returns human-readable version of
+             compression type ('not compressed' for AIFF files)
+  getparams() -- returns a tuple consisting of all of the
+             above in the above order
+  getmarkers()    -- get the list of marks in the audio file or None
+             if there are no marks
+  getmark(id) -- get mark with the specified id (raises an error
+             if the mark does not exist)
+  readframes(n)   -- returns at most n frames of audio
+  rewind()    -- rewind to the beginning of the audio stream
+  setpos(pos) -- seek to the specified position
+  tell()      -- return the current position
+  close()     -- close the instance (make it unusable)
+The position returned by tell(), the position given to setpos() and
+the position of marks are all compatible and have nothing to do with
+the actual position in the file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing AIFF files:
+  f = aifc.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+  aiff()      -- create an AIFF file (AIFF-C default)
+  aifc()      -- create an AIFF-C file
+  setnchannels(n) -- set the number of channels
+  setsampwidth(n) -- set the sample width
+  setframerate(n) -- set the frame rate
+  setnframes(n)   -- set the number of frames
+  setcomptype(type, name)
+          -- set the compression type and the
+             human-readable compression type
+  setparams(tuple)
+          -- set all parameters at once
+  setmark(id, pos, name)
+          -- add specified mark to the list of marks
+  tell()      -- return current position in output file (useful
+             in combination with setmark())
+  writeframesraw(data)
+          -- write audio frames without patching up the
+             file header
+  writeframes(data)
+          -- write audio frames and patch up the file header
+  close()     -- patch up the file header and close the
+             output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, perhaps possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+Marks can be added anytime.  If there are any marks, you must call
+close() after all frames have been written.
+The close() method is called automatically when the class instance
+is destroyed.
+
+When a file is opened with the extension '.aiff', an AIFF file is
+written, otherwise an AIFF-C file is written.  This default can be
+changed by calling aiff() or aifc() before the first writeframes or
+writeframesraw.
+"""
+
+import struct
+import __builtin__
+
+__all__ = ["Error","open","openfp"]
+
+class Error(Exception):
+    """Raised for all aifc-specific error conditions."""
+    pass
+
+_AIFC_version = 0xA2805140      # Version 1 of AIFF-C
+
+# Chunk types that are recognized but deliberately skipped when reading.
+_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
+      'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
+
+def _read_long(file):
+    # Read a big-endian signed 32-bit integer; a short read surfaces as
+    # EOFError so chunk-scanning loops can terminate cleanly.
+    try:
+        return struct.unpack('>l', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_ulong(file):
+    # Read a big-endian unsigned 32-bit integer (EOFError on short read).
+    try:
+        return struct.unpack('>L', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_short(file):
+    # Read a big-endian signed 16-bit integer (EOFError on short read).
+    try:
+        return struct.unpack('>h', file.read(2))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_string(file):
+    # Read a "pstring": 1 length byte, `length` data bytes, plus one pad
+    # byte when the total (length byte + data) would otherwise be odd.
+    # NOTE(review): at EOF file.read(1) returns '' and ord('') raises
+    # TypeError rather than EOFError -- callers relying on EOFError
+    # (e.g. _readmark) will not catch that case; confirm before changing.
+    length = ord(file.read(1))
+    if length == 0:
+        data = ''
+    else:
+        data = file.read(length)
+    if length & 1 == 0:
+        # Even data length -> odd total so far -> consume the pad byte.
+        dummy = file.read(1)
+    return data
+
+_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
+
+def _read_float(f): # 10 bytes
+    # Decode an IEEE 754 80-bit ("extended") float: 1 sign bit, 15 exponent
+    # bits, then a 64-bit mantissa split into two unsigned longs.
+    import math
+    expon = _read_short(f) # 2 bytes
+    sign = 1
+    if expon < 0:
+        # Sign bit was set; clear it from the exponent field.
+        sign = -1
+        expon = expon + 0x8000
+    himant = _read_ulong(f) # 4 bytes
+    lomant = _read_ulong(f) # 4 bytes
+    if expon == himant == lomant == 0:
+        f = 0.0
+    elif expon == 0x7FFF:
+        # Infinity/NaN exponent pattern -> clamp to a huge finite value.
+        f = _HUGE_VAL
+    else:
+        # Remove the extended-format bias (16383) and scale the 64-bit
+        # integer mantissa down by 2**63.
+        expon = expon - 16383
+        f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
+    return sign * f
+
+def _write_short(f, x):
+    # Write a big-endian signed 16-bit integer.
+    f.write(struct.pack('>h', x))
+
+def _write_long(f, x):
+    # Write a big-endian 32-bit integer.  Despite the name this packs
+    # unsigned ('>L'), so full 32-bit mantissa halves from _write_float
+    # are accepted.
+    f.write(struct.pack('>L', x))
+
+def _write_string(f, s):
+    # Write a "pstring": length byte, data, and a zero pad byte when needed
+    # to keep the total length even (counterpart of _read_string above).
+    # NOTE(review): chr(len(s)) limits s to 255 bytes; longer strings raise
+    # ValueError -- confirm callers never pass longer names.
+    f.write(chr(len(s)))
+    f.write(s)
+    if len(s) & 1 == 0:
+        f.write(chr(0))
+
+def _write_float(f, x):
+    # Encode x as an IEEE 754 80-bit extended float (inverse of
+    # _read_float): sign+exponent short, then two 32-bit mantissa words.
+    import math
+    if x < 0:
+        sign = 0x8000
+        x = x * -1
+    else:
+        sign = 0
+    if x == 0:
+        # NOTE(review): -0.0 is not < 0, so a negative zero is written as
+        # +0.0 here; presumably acceptable for sample rates -- confirm.
+        expon = 0
+        himant = 0
+        lomant = 0
+    else:
+        fmant, expon = math.frexp(x)
+        if expon > 16384 or fmant >= 1:     # Infinity or NaN
+            expon = sign|0x7FFF
+            himant = 0
+            lomant = 0
+        else:                   # Finite
+            # Re-bias frexp's exponent for the extended format.
+            expon = expon + 16382
+            if expon < 0:           # denormalized
+                fmant = math.ldexp(fmant, expon)
+                expon = 0
+            expon = expon | sign
+            # Peel off the high and low 32 bits of the 64-bit mantissa.
+            fmant = math.ldexp(fmant, 32)
+            fsmant = math.floor(fmant)
+            himant = long(fsmant)
+            fmant = math.ldexp(fmant - fsmant, 32)
+            fsmant = math.floor(fmant)
+            lomant = long(fsmant)
+    _write_short(f, expon)
+    _write_long(f, himant)
+    _write_long(f, lomant)
+
+from chunk import Chunk
+
+class Aifc_read:
+    """Read-access wrapper for an AIFF / AIFF-C file (see module docstring)."""
+    # Variables used in this class:
+    #
+    # These variables are available to the user though appropriate
+    # methods of this class:
+    # _file -- the open file with methods read(), close(), and seek()
+    #       set through the __init__() method
+    # _nchannels -- the number of audio channels
+    #       available through the getnchannels() method
+    # _nframes -- the number of audio frames
+    #       available through the getnframes() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       available through the getsampwidth() method
+    # _framerate -- the sampling frequency
+    #       available through the getframerate() method
+    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+    #       available through the getcomptype() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       available through the getcomptype() method
+    # _markers -- the marks in the audio file
+    #       available through the getmarkers() and getmark()
+    #       methods
+    # _soundpos -- the position in the audio stream
+    #       available through the tell() method, set through the
+    #       setpos() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _decomp -- the decompressor from builtin module cl
+    # _comm_chunk_read -- 1 iff the COMM chunk has been read
+    # _aifc -- 1 iff reading an AIFF-C file
+    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
+    #       file for readframes()
+    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
+    # _framesize -- size of one frame in the file
+
+    def initfp(self, file):
+        # Parse the FORM container: validate the AIFF/AIFC signature, then
+        # walk the chunks, dispatching on each 4-byte chunk name.
+        self._version = 0
+        self._decomp = None
+        self._convert = None
+        self._markers = []
+        self._soundpos = 0
+        self._file = Chunk(file)
+        if self._file.getname() != 'FORM':
+            raise Error, 'file does not start with FORM id'
+        formdata = self._file.read(4)
+        if formdata == 'AIFF':
+            self._aifc = 0
+        elif formdata == 'AIFC':
+            self._aifc = 1
+        else:
+            raise Error, 'not an AIFF or AIFF-C file'
+        self._comm_chunk_read = 0
+        while 1:
+            self._ssnd_seek_needed = 1
+            try:
+                chunk = Chunk(self._file)
+            except EOFError:
+                # Normal termination: no more chunks in the FORM.
+                break
+            chunkname = chunk.getname()
+            if chunkname == 'COMM':
+                self._read_comm_chunk(chunk)
+                self._comm_chunk_read = 1
+            elif chunkname == 'SSND':
+                self._ssnd_chunk = chunk
+                # Skip the 8-byte <offset>/<blocksize> prefix (unused).
+                dummy = chunk.read(8)
+                self._ssnd_seek_needed = 0
+            elif chunkname == 'FVER':
+                self._version = _read_long(chunk)
+            elif chunkname == 'MARK':
+                self._readmark(chunk)
+            elif chunkname in _skiplist:
+                pass
+            else:
+                raise Error, 'unrecognized chunk type '+chunk.chunkname
+            chunk.skip()
+        # NOTE(review): _ssnd_chunk is never pre-initialized, so a file
+        # with a COMM chunk but no SSND chunk raises AttributeError here
+        # instead of the intended Error -- confirm before relying on it.
+        if not self._comm_chunk_read or not self._ssnd_chunk:
+            raise Error, 'COMM chunk and/or SSND chunk missing'
+        if self._aifc and self._decomp:
+            # A compressed AIFF-C file: configure the SGI cl decompressor
+            # opened by _read_comm_chunk.
+            import cl
+            params = [cl.ORIGINAL_FORMAT, 0,
+                  cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+                  cl.FRAME_RATE, self._framerate]
+            if self._nchannels == 1:
+                params[1] = cl.MONO
+            elif self._nchannels == 2:
+                params[1] = cl.STEREO_INTERLEAVED
+            else:
+                raise Error, 'cannot compress more than 2 channels'
+            self._decomp.SetParams(params)
+
+    def __init__(self, f):
+        # Accept either a filename or an already-open file object.
+        if type(f) == type(''):
+            f = __builtin__.open(f, 'rb')
+        # else, assume it is an open file object already
+        self.initfp(f)
+
+    #
+    # User visible methods.
+    #
+    def getfp(self):
+        # Return the FORM chunk wrapping the underlying file.
+        return self._file
+
+    def rewind(self):
+        # Reset the logical read position; the actual seek is deferred to
+        # the next readframes() call.
+        self._ssnd_seek_needed = 1
+        self._soundpos = 0
+
+    def close(self):
+        if self._decomp:
+            self._decomp.CloseDecompressor()
+            self._decomp = None
+        self._file = None
+
+    def tell(self):
+        # Current position in frames (not bytes).
+        return self._soundpos
+
+    def getnchannels(self):
+        return self._nchannels
+
+    def getnframes(self):
+        return self._nframes
+
+    def getsampwidth(self):
+        return self._sampwidth
+
+    def getframerate(self):
+        return self._framerate
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def getversion(self):
+##      return self._version
+
+    def getparams(self):
+        # Convenience accessor: all parameters as one 6-tuple.
+        return self.getnchannels(), self.getsampwidth(), \
+              self.getframerate(), self.getnframes(), \
+              self.getcomptype(), self.getcompname()
+
+    def getmarkers(self):
+        # Return the (id, position, name) mark list, or None if empty.
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def getmark(self, id):
+        # Linear search by mark id; raises Error for an unknown id.
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error, 'marker ' + `id` + ' does not exist'
+
+    def setpos(self, pos):
+        # Seek to frame `pos`; the file seek itself happens lazily in
+        # readframes().
+        if pos < 0 or pos > self._nframes:
+            raise Error, 'position not in range'
+        self._soundpos = pos
+        self._ssnd_seek_needed = 1
+
+    def readframes(self, nframes):
+        # Return up to `nframes` frames of (possibly decompressed) audio.
+        if self._ssnd_seek_needed:
+            # Reposition within the SSND chunk, past its 8-byte prefix.
+            self._ssnd_chunk.seek(0)
+            dummy = self._ssnd_chunk.read(8)
+            pos = self._soundpos * self._framesize
+            if pos:
+                self._ssnd_chunk.seek(pos + 8)
+            self._ssnd_seek_needed = 0
+        if nframes == 0:
+            return ''
+        data = self._ssnd_chunk.read(nframes * self._framesize)
+        if self._convert and data:
+            data = self._convert(data)
+        # Advance by decoded frames (converters output 2-byte samples).
+        self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
+        return data
+
+    #
+    # Internal methods.
+    #
+
+    def _decomp_data(self, data):
+        # Decompress via the SGI cl library decompressor.
+        import cl
+        dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
+                          len(data) * 2)
+        return self._decomp.Decompress(len(data) / self._nchannels,
+                           data)
+
+    def _ulaw2lin(self, data):
+        # u-law -> 16-bit linear fallback conversion via audioop.
+        import audioop
+        return audioop.ulaw2lin(data, 2)
+
+    def _adpcm2lin(self, data):
+        # G.722 ADPCM -> 16-bit linear; decoder state is carried between
+        # calls in _adpcmstate.
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            # first time
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.adpcm2lin(data, 2,
+                               self._adpcmstate)
+        return data
+
+    def _read_comm_chunk(self, chunk):
+        # Parse the COMM chunk: channel count, frame count, sample width
+        # (bits rounded up to bytes), rate, and -- for AIFF-C -- the
+        # compression type, selecting a matching _convert hook.
+        self._nchannels = _read_short(chunk)
+        self._nframes = _read_long(chunk)
+        self._sampwidth = (_read_short(chunk) + 7) / 8
+        self._framerate = int(_read_float(chunk))
+        self._framesize = self._nchannels * self._sampwidth
+        if self._aifc:
+            #DEBUG: SGI's soundeditor produces a bad size :-(
+            kludge = 0
+            if chunk.chunksize == 18:
+                kludge = 1
+                print 'Warning: bad COMM chunk size'
+                chunk.chunksize = 23
+            #DEBUG end
+            self._comptype = chunk.read(4)
+            #DEBUG start
+            if kludge:
+                # Extend the declared chunk size by the actual pstring
+                # length so the compression name can be read.
+                length = ord(chunk.file.read(1))
+                if length & 1 == 0:
+                    length = length + 1
+                chunk.chunksize = chunk.chunksize + length
+                chunk.file.seek(-1, 1)
+            #DEBUG end
+            self._compname = _read_string(chunk)
+            if self._comptype != 'NONE':
+                if self._comptype == 'G722':
+                    try:
+                        import audioop
+                    except ImportError:
+                        pass
+                    else:
+                        self._convert = self._adpcm2lin
+                        # G.722 packs 4 samples per byte pair: quarter size.
+                        self._framesize = self._framesize / 4
+                        return
+                # for ULAW and ALAW try Compression Library
+                try:
+                    import cl
+                except ImportError:
+                    if self._comptype == 'ULAW':
+                        try:
+                            import audioop
+                            self._convert = self._ulaw2lin
+                            self._framesize = self._framesize / 2
+                            return
+                        except ImportError:
+                            pass
+                    raise Error, 'cannot read compressed AIFF-C files'
+                if self._comptype == 'ULAW':
+                    scheme = cl.G711_ULAW
+                    self._framesize = self._framesize / 2
+                elif self._comptype == 'ALAW':
+                    scheme = cl.G711_ALAW
+                    self._framesize = self._framesize / 2
+                else:
+                    raise Error, 'unsupported compression type'
+                self._decomp = cl.OpenDecompressor(scheme)
+                self._convert = self._decomp_data
+        else:
+            # Plain AIFF is always uncompressed.
+            self._comptype = 'NONE'
+            self._compname = 'not compressed'
+
+    def _readmark(self, chunk):
+        # Parse the MARK chunk into the (id, pos, name) _markers list.
+        nmarkers = _read_short(chunk)
+        # Some files appear to contain invalid counts.
+        # Cope with this by testing for EOF.
+        try:
+            for i in range(nmarkers):
+                id = _read_short(chunk)
+                pos = _read_long(chunk)
+                name = _read_string(chunk)
+                if pos or name:
+                    # some files appear to have
+                    # dummy markers consisting of
+                    # a position 0 and name ''
+                    self._markers.append((id, pos, name))
+        except EOFError:
+            print 'Warning: MARK chunk contains only',
+            print len(self._markers),
+            if len(self._markers) == 1: print 'marker',
+            else: print 'markers',
+            print 'instead of', nmarkers
+
+class Aifc_write:
+    """Write-access wrapper that emits an AIFF / AIFF-C file."""
+    # Variables used in this class:
+    #
+    # These variables are user settable through appropriate methods
+    # of this class:
+    # _file -- the open file with methods write(), close(), tell(), seek()
+    #       set through the __init__() method
+    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+    #       set through the setcomptype() or setparams() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       set through the setcomptype() or setparams() method
+    # _nchannels -- the number of audio channels
+    #       set through the setnchannels() or setparams() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       set through the setsampwidth() or setparams() method
+    # _framerate -- the sampling frequency
+    #       set through the setframerate() or setparams() method
+    # _nframes -- the number of audio frames written to the header
+    #       set through the setnframes() or setparams() method
+    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
+    #       set through the aifc() method, reset through the
+    #       aiff() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _comp -- the compressor from builtin module cl
+    # _nframeswritten -- the number of audio frames actually written
+    # _datalength -- the size of the audio samples written to the header
+    # _datawritten -- the size of the audio samples actually written
+
+    def __init__(self, f):
+        # Accept either a filename or an already-open file object; a
+        # '.aiff' filename selects plain AIFF output, anything else
+        # (including a file object) defaults to AIFF-C.
+        if type(f) == type(''):
+            filename = f
+            f = __builtin__.open(f, 'wb')
+        else:
+            # else, assume it is an open file object already
+            filename = '???'
+        self.initfp(f)
+        if filename[-5:] == '.aiff':
+            self._aifc = 0
+        else:
+            self._aifc = 1
+
+    def initfp(self, file):
+        # Reset all state to "nothing set, nothing written".
+        self._file = file
+        self._version = _AIFC_version
+        self._comptype = 'NONE'
+        self._compname = 'not compressed'
+        self._comp = None
+        self._convert = None
+        self._nchannels = 0
+        self._sampwidth = 0
+        self._framerate = 0
+        self._nframes = 0
+        self._nframeswritten = 0
+        self._datawritten = 0
+        self._datalength = 0
+        self._markers = []
+        self._marklength = 0
+        self._aifc = 1      # AIFF-C is default
+
+    def __del__(self):
+        # Finalize the header if the caller forgot to close().
+        if self._file:
+            self.close()
+
+    #
+    # User visible methods.
+    #
+    # All parameter setters refuse to run once audio data has been
+    # written, since the values are baked into the header.
+    def aiff(self):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._aifc = 0
+
+    def aifc(self):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._aifc = 1
+
+    def setnchannels(self, nchannels):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if nchannels < 1:
+            raise Error, 'bad # of channels'
+        self._nchannels = nchannels
+
+    def getnchannels(self):
+        if not self._nchannels:
+            raise Error, 'number of channels not set'
+        return self._nchannels
+
+    def setsampwidth(self, sampwidth):
+        # Sample width is in bytes, 1 through 4.
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if sampwidth < 1 or sampwidth > 4:
+            raise Error, 'bad sample width'
+        self._sampwidth = sampwidth
+
+    def getsampwidth(self):
+        if not self._sampwidth:
+            raise Error, 'sample width not set'
+        return self._sampwidth
+
+    def setframerate(self, framerate):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if framerate <= 0:
+            raise Error, 'bad frame rate'
+        self._framerate = framerate
+
+    def getframerate(self):
+        if not self._framerate:
+            raise Error, 'frame rate not set'
+        return self._framerate
+
+    def setnframes(self, nframes):
+        # Optional: pre-declaring the frame count lets close() skip the
+        # header patch-up when the declared count matches what was written.
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._nframes = nframes
+
+    def getnframes(self):
+        # Frames actually written so far, not the declared count.
+        return self._nframeswritten
+
+    def setcomptype(self, comptype, compname):
+        # comptype is the 4-char code; compname its human-readable label.
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+            raise Error, 'unsupported compression type'
+        self._comptype = comptype
+        self._compname = compname
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def setversion(self, version):
+##      if self._nframeswritten:
+##          raise Error, 'cannot change parameters after starting to write'
+##      self._version = version
+
+    # NOTE: tuple-unpacking in the parameter list is Python 2-only syntax.
+    def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
+        # Set every parameter from one 6-tuple (the same shape that
+        # getparams() returns).
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+            raise Error, 'unsupported compression type'
+        self.setnchannels(nchannels)
+        self.setsampwidth(sampwidth)
+        self.setframerate(framerate)
+        self.setnframes(nframes)
+        self.setcomptype(comptype, compname)
+
+    def getparams(self):
+        if not self._nchannels or not self._sampwidth or not self._framerate:
+            raise Error, 'not all parameters set'
+        return self._nchannels, self._sampwidth, self._framerate, \
+              self._nframes, self._comptype, self._compname
+
+    def setmark(self, id, pos, name):
+        # Add a mark, or overwrite an existing mark with the same id.
+        if id <= 0:
+            raise Error, 'marker ID must be > 0'
+        if pos < 0:
+            raise Error, 'marker position must be >= 0'
+        if type(name) != type(''):
+            raise Error, 'marker name must be a string'
+        for i in range(len(self._markers)):
+            if id == self._markers[i][0]:
+                self._markers[i] = id, pos, name
+                return
+        self._markers.append((id, pos, name))
+
+    def getmark(self, id):
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error, 'marker ' + `id` + ' does not exist'
+
+    def getmarkers(self):
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def tell(self):
+        # Position in frames written; compatible with setmark() positions.
+        return self._nframeswritten
+
+    def writeframesraw(self, data):
+        # Write frames without touching the header afterwards; the header
+        # itself is emitted lazily on the first write.
+        self._ensure_header_written(len(data))
+        # Frame count is derived from the *uncompressed* input length.
+        nframes = len(data) / (self._sampwidth * self._nchannels)
+        if self._convert:
+            data = self._convert(data)
+        self._file.write(data)
+        self._nframeswritten = self._nframeswritten + nframes
+        self._datawritten = self._datawritten + len(data)
+
+    def writeframes(self, data):
+        # Like writeframesraw(), but also patch the header if the declared
+        # sizes no longer match reality.
+        self.writeframesraw(data)
+        if self._nframeswritten != self._nframes or \
+              self._datalength != self._datawritten:
+            self._patchheader()
+
+    def close(self):
+        # Flush markers, fix up the header if needed, and release the file.
+        self._ensure_header_written(0)
+        if self._datawritten & 1:
+            # quick pad to even size
+            self._file.write(chr(0))
+            self._datawritten = self._datawritten + 1
+        self._writemarkers()
+        if self._nframeswritten != self._nframes or \
+              self._datalength != self._datawritten or \
+              self._marklength:
+            self._patchheader()
+        if self._comp:
+            self._comp.CloseCompressor()
+            self._comp = None
+        self._file.flush()
+        self._file = None
+
+    #
+    # Internal methods.
+    #
+
+    def _comp_data(self, data):
+        # Compress one buffer via the SGI cl library compressor.
+        import cl
+        dum = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
+        dum = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
+        return self._comp.Compress(self._nframes, data)
+
+    def _lin2ulaw(self, data):
+        # 16-bit linear -> u-law fallback conversion via audioop.
+        import audioop
+        return audioop.lin2ulaw(data, 2)
+
+    def _lin2adpcm(self, data):
+        # 16-bit linear -> G.722 ADPCM; encoder state carried in
+        # _adpcmstate between calls.
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.lin2adpcm(data, 2,
+                               self._adpcmstate)
+        return data
+
+    def _ensure_header_written(self, datasize):
+        # Emit the header on the first write, after validating (and, for
+        # compressed output, defaulting) the sample width.
+        if not self._nframeswritten:
+            if self._comptype in ('ULAW', 'ALAW'):
+                if not self._sampwidth:
+                    self._sampwidth = 2
+                if self._sampwidth != 2:
+                    raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
+            if self._comptype == 'G722':
+                if not self._sampwidth:
+                    self._sampwidth = 2
+                if self._sampwidth != 2:
+                    raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
+            if not self._nchannels:
+                raise Error, '# channels not specified'
+            if not self._sampwidth:
+                raise Error, 'sample width not specified'
+            if not self._framerate:
+                raise Error, 'sampling rate not specified'
+            self._write_header(datasize)
+
+    def _init_compression(self):
+        # Install the _convert hook matching _comptype: audioop for G722
+        # (and as a ULAW fallback), the SGI cl library otherwise.
+        if self._comptype == 'G722':
+            import audioop
+            self._convert = self._lin2adpcm
+            return
+        try:
+            import cl
+        except ImportError:
+            if self._comptype == 'ULAW':
+                try:
+                    import audioop
+                    self._convert = self._lin2ulaw
+                    return
+                except ImportError:
+                    pass
+            raise Error, 'cannot write compressed AIFF-C files'
+        if self._comptype == 'ULAW':
+            scheme = cl.G711_ULAW
+        elif self._comptype == 'ALAW':
+            scheme = cl.G711_ALAW
+        else:
+            raise Error, 'unsupported compression type'
+        self._comp = cl.OpenCompressor(scheme)
+        params = [cl.ORIGINAL_FORMAT, 0,
+              cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+              cl.FRAME_RATE, self._framerate,
+              cl.FRAME_BUFFER_SIZE, 100,
+              cl.COMPRESSED_BUFFER_SIZE, 100]
+        if self._nchannels == 1:
+            params[1] = cl.MONO
+        elif self._nchannels == 2:
+            params[1] = cl.STEREO_INTERLEAVED
+        else:
+            raise Error, 'cannot compress more than 2 channels'
+        self._comp.SetParams(params)
+        # the compressor produces a header which we ignore
+        dummy = self._comp.Compress(0, '')
+        self._convert = self._comp_data
+
+    def _write_header(self, initlength):
+        # Emit the FORM container plus the FVER (AIFC only), COMM and
+        # SSND chunk headers.  Records the file offsets of the fields
+        # that _patchheader() may need to rewrite after the data is known.
+        if self._aifc and self._comptype != 'NONE':
+            self._init_compression()
+        self._file.write('FORM')
+        if not self._nframes:
+            self._nframes = initlength / (self._nchannels * self._sampwidth)
+        self._datalength = self._nframes * self._nchannels * self._sampwidth
+        # chunk payloads are padded to an even byte count
+        if self._datalength & 1:
+            self._datalength = self._datalength + 1
+        if self._aifc:
+            # compressed data is smaller: ULAW/ALAW halve it, G722
+            # roughly quarters it (rounded up), each re-padded to even
+            if self._comptype in ('ULAW', 'ALAW'):
+                self._datalength = self._datalength / 2
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+            elif self._comptype == 'G722':
+                self._datalength = (self._datalength + 3) / 4
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+        self._form_length_pos = self._file.tell()
+        commlength = self._write_form_length(self._datalength)
+        if self._aifc:
+            self._file.write('AIFC')
+            self._file.write('FVER')
+            _write_long(self._file, 4)
+            _write_long(self._file, self._version)
+        else:
+            self._file.write('AIFF')
+        self._file.write('COMM')
+        _write_long(self._file, commlength)
+        _write_short(self._file, self._nchannels)
+        # frame count may be patched later, remember its offset
+        self._nframes_pos = self._file.tell()
+        _write_long(self._file, self._nframes)
+        _write_short(self._file, self._sampwidth * 8)
+        _write_float(self._file, self._framerate)
+        if self._aifc:
+            self._file.write(self._comptype)
+            _write_string(self._file, self._compname)
+        self._file.write('SSND')
+        self._ssnd_length_pos = self._file.tell()
+        _write_long(self._file, self._datalength + 8)
+        _write_long(self._file, 0)
+        _write_long(self._file, 0)
+
+    def _write_form_length(self, datalength):
+        if self._aifc:
+            commlength = 18 + 5 + len(self._compname)
+            if commlength & 1:
+                commlength = commlength + 1
+            verslength = 12
+        else:
+            commlength = 18
+            verslength = 0
+        _write_long(self._file, 4 + verslength + self._marklength + \
+                    8 + commlength + 16 + datalength)
+        return commlength
+
+    def _patchheader(self):
+        # Called on close(): pad the sound data to an even length and,
+        # if the actual amounts written differ from what the header
+        # predicted, seek back and rewrite the FORM length, the frame
+        # count and the SSND length.  File position is restored on exit.
+        curpos = self._file.tell()
+        if self._datawritten & 1:
+            datalength = self._datawritten + 1
+            self._file.write(chr(0))
+        else:
+            datalength = self._datawritten
+        if datalength == self._datalength and \
+              self._nframes == self._nframeswritten and \
+              self._marklength == 0:
+            # header already correct, nothing to patch
+            self._file.seek(curpos, 0)
+            return
+        self._file.seek(self._form_length_pos, 0)
+        dummy = self._write_form_length(datalength)
+        self._file.seek(self._nframes_pos, 0)
+        _write_long(self._file, self._nframeswritten)
+        self._file.seek(self._ssnd_length_pos, 0)
+        _write_long(self._file, datalength + 8)
+        self._file.seek(curpos, 0)
+        self._nframes = self._nframeswritten
+        self._datalength = datalength
+
+    def _writemarkers(self):
+        if len(self._markers) == 0:
+            return
+        self._file.write('MARK')
+        length = 2
+        for marker in self._markers:
+            id, pos, name = marker
+            length = length + len(name) + 1 + 6
+            if len(name) & 1 == 0:
+                length = length + 1
+        _write_long(self._file, length)
+        self._marklength = length + 8
+        _write_short(self._file, len(self._markers))
+        for marker in self._markers:
+            id, pos, name = marker
+            _write_short(self._file, id)
+            _write_long(self._file, pos)
+            _write_string(self._file, name)
+
+def open(f, mode=None):
+    """Open an AIFF/AIFF-C file; return an Aifc_read or Aifc_write object.
+
+    'f' may be a file name or an open file object.  When 'mode' is
+    omitted it is taken from f.mode if available, else it defaults to
+    'rb'.  'r'/'rb' open for reading, 'w'/'wb' for writing.
+    """
+    if mode is None:
+        if hasattr(f, 'mode'):
+            mode = f.mode
+        else:
+            mode = 'rb'
+    if mode in ('r', 'rb'):
+        return Aifc_read(f)
+    elif mode in ('w', 'wb'):
+        return Aifc_write(f)
+    else:
+        raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
+
+openfp = open # B/W compatibility: historical alias for open()
+
+if __name__ == '__main__':
+    # Demo / smoke test: print the parameters of the AIFF file named on
+    # the command line (or an SGI sample file by default) and, when a
+    # second name is given, copy it frame by frame to that file.
+    import sys
+    if not sys.argv[1:]:
+        sys.argv.append('/usr/demos/data/audio/bach.aiff')
+    fn = sys.argv[1]
+    f = open(fn, 'r')
+    print "Reading", fn
+    print "nchannels =", f.getnchannels()
+    print "nframes   =", f.getnframes()
+    print "sampwidth =", f.getsampwidth()
+    print "framerate =", f.getframerate()
+    print "comptype  =", f.getcomptype()
+    print "compname  =", f.getcompname()
+    if sys.argv[2:]:
+        gn = sys.argv[2]
+        print "Writing", gn
+        g = open(gn, 'w')
+        g.setparams(f.getparams())
+        while 1:
+            data = f.readframes(1024)
+            if not data:
+                break
+            g.writeframes(data)
+        g.close()
+        f.close()
+        print "Done."
diff --git a/lib-python/2.2/anydbm.py b/lib-python/2.2/anydbm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/anydbm.py
@@ -0,0 +1,86 @@
+"""Generic interface to all dbm clones.
+
+Instead of
+
+        import dbm
+        d = dbm.open(file, 'w', 0666)
+
+use
+
+        import anydbm
+        d = anydbm.open(file, 'w')
+
+The returned object is a dbhash, gdbm, dbm or dumbdbm object,
+dependent on the type of database being opened (determined by whichdb
+module) in the case of an existing dbm. If the dbm does not exist and
+the create or new flag ('c' or 'n') was specified, the dbm type will
+be determined by the availability of the modules (tested in the above
+order).
+
+It has the following interface (key and data are strings):
+
+        d[key] = data   # store data at key (may override data at
+                        # existing key)
+        data = d[key]   # retrieve data at key (raise KeyError if no
+                        # such key)
+        del d[key]      # delete data stored at key (raises KeyError
+                        # if no such key)
+        flag = d.has_key(key)   # true if the key exists
+        list = d.keys() # return a list of all existing keys (slow!)
+
+Future versions may change the order in which implementations are
+tested for existence, add interfaces to other dbm-like
+implementations.
+
+The open function has an optional second argument.  This can be 'r',
+for read-only access, 'w', for read-write access of an existing
+database, 'c' for read-write access to a new or existing database, and
+'n' for read-write access to a new database.  The default is 'r'.
+
+Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
+only if it doesn't exist; and 'n' always creates a new database.
+
+"""
+
+# Module initialization: define the 'error' exception and probe which
+# dbm implementations are importable, remembering the first one found
+# as the default for creating new databases.
+try:
+    # class-based exception; ancient interpreters without class
+    # exceptions fall back to a string exception below
+    class error(Exception):
+        pass
+except (NameError, TypeError):
+    error = "anydbm.error"
+
+_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
+_errors = [error]
+_defaultmod = None
+
+for _name in _names:
+    try:
+        _mod = __import__(_name)
+    except ImportError:
+        continue
+    if not _defaultmod:
+        _defaultmod = _mod
+    _errors.append(_mod.error)
+
+if not _defaultmod:
+    raise ImportError, "no dbm clone found; tried %s" % _names
+
+# 'error' becomes a tuple so callers can catch any backend's error too
+error = tuple(_errors)
+
+def open(file, flag = 'r', mode = 0666):
+    """Open the database 'file' and return a dbm-like object.
+
+    'flag' is 'r' (read-only, default), 'w' (read-write), 'c' (create
+    if missing) or 'n' (always create new).  'mode' is the Unix file
+    mode used when the database has to be created.
+    """
+    # guess the type of an existing database
+    from whichdb import whichdb
+    result=whichdb(file)
+    if result is None:
+        # db doesn't exist
+        if 'c' in flag or 'n' in flag:
+            # file doesn't exist and the new
+            # flag was used so use default type
+            mod = _defaultmod
+        else:
+            raise error, "need 'c' or 'n' flag to open new db"
+    elif result == "":
+        # db type cannot be determined
+        raise error, "db type could not be determined"
+    else:
+        mod = __import__(result)
+    return mod.open(file, flag, mode)
diff --git a/lib-python/2.2/asynchat.py b/lib-python/2.2/asynchat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/asynchat.py
@@ -0,0 +1,293 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+#       Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc..).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+for example:
+Say you build an async nntp client using this class.  At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting.  Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+
+import socket
+import asyncore
+
+class async_chat (asyncore.dispatcher):
+    """This is an abstract class.  You must derive from this class, and add
+    the two methods collect_incoming_data() and found_terminator()"""
+
+    # these are overridable defaults
+
+    ac_in_buffer_size       = 4096
+    ac_out_buffer_size      = 4096
+
+    def __init__ (self, conn=None):
+        # in/out buffers are plain strings; producer_fifo holds producer
+        # objects (plus a None sentinel meaning "close when drained")
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        self.producer_fifo = fifo()
+        asyncore.dispatcher.__init__ (self, conn)
+
+    def set_terminator (self, term):
+        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
+        self.terminator = term
+
+    def get_terminator (self):
+        return self.terminator
+
+    # grab some more data from the socket,
+    # throw it to the collector method,
+    # check for the terminator,
+    # if found, transition to the next state.
+
+    def handle_read (self):
+
+        try:
+            data = self.recv (self.ac_in_buffer_size)
+        except socket.error, why:
+            self.handle_error()
+            return
+
+        self.ac_in_buffer = self.ac_in_buffer + data
+
+        # Continue to search for self.terminator in self.ac_in_buffer,
+        # while calling self.collect_incoming_data.  The while loop
+        # is necessary because we might read several data+terminator
+        # combos with a single recv(1024).
+
+        while self.ac_in_buffer:
+            lb = len(self.ac_in_buffer)
+            terminator = self.get_terminator()
+            if terminator is None or terminator == '':
+                # no terminator, collect it all
+                self.collect_incoming_data (self.ac_in_buffer)
+                self.ac_in_buffer = ''
+            elif type(terminator) == type(0):
+                # numeric terminator: collect exactly that many bytes,
+                # counting down across reads
+                n = terminator
+                if lb < n:
+                    self.collect_incoming_data (self.ac_in_buffer)
+                    self.ac_in_buffer = ''
+                    self.terminator = self.terminator - lb
+                else:
+                    self.collect_incoming_data (self.ac_in_buffer[:n])
+                    self.ac_in_buffer = self.ac_in_buffer[n:]
+                    self.terminator = 0
+                    self.found_terminator()
+            else:
+                # 3 cases:
+                # 1) end of buffer matches terminator exactly:
+                #    collect data, transition
+                # 2) end of buffer matches some prefix:
+                #    collect data to the prefix
+                # 3) end of buffer does not match any prefix:
+                #    collect data
+                terminator_len = len(terminator)
+                index = self.ac_in_buffer.find(terminator)
+                if index != -1:
+                    # we found the terminator
+                    if index > 0:
+                        # don't bother reporting the empty string (source of subtle bugs)
+                        self.collect_incoming_data (self.ac_in_buffer[:index])
+                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+                    # This does the Right Thing if the terminator is changed here.
+                    self.found_terminator()
+                else:
+                    # check for a prefix of the terminator
+                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
+                    if index:
+                        if index != lb:
+                            # we found a prefix, collect up to the prefix
+                            self.collect_incoming_data (self.ac_in_buffer[:-index])
+                            self.ac_in_buffer = self.ac_in_buffer[-index:]
+                        break
+                    else:
+                        # no prefix, collect it all
+                        self.collect_incoming_data (self.ac_in_buffer)
+                        self.ac_in_buffer = ''
+
+    def handle_write (self):
+        # socket is writable: push out whatever is buffered/queued
+        self.initiate_send ()
+
+    def handle_close (self):
+        self.close()
+
+    def push (self, data):
+        # queue a plain string, wrapped in a simple_producer
+        self.producer_fifo.push (simple_producer (data))
+        self.initiate_send()
+
+    def push_with_producer (self, producer):
+        # queue an arbitrary producer object (must supply more())
+        self.producer_fifo.push (producer)
+        self.initiate_send()
+
+    def readable (self):
+        "predicate for inclusion in the readable for select()"
+        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+
+    def writable (self):
+        "predicate for inclusion in the writable for select()"
+        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+        # this is about twice as fast, though not as clear.
+        return not (
+                (self.ac_out_buffer == '') and
+                self.producer_fifo.is_empty() and
+                self.connected
+                )
+
+    def close_when_done (self):
+        "automatically close this channel once the outgoing queue is empty"
+        self.producer_fifo.push (None)
+
+    # refill the outgoing buffer by calling the more() method
+    # of the first producer in the queue
+    def refill_buffer (self):
+        _string_type = type('')
+        while 1:
+            if len(self.producer_fifo):
+                p = self.producer_fifo.first()
+                # a 'None' in the producer fifo is a sentinel,
+                # telling us to close the channel.
+                if p is None:
+                    if not self.ac_out_buffer:
+                        self.producer_fifo.pop()
+                        self.close()
+                    return
+                elif type(p) is _string_type:
+                    # bare strings are allowed in the fifo as a shortcut
+                    self.producer_fifo.pop()
+                    self.ac_out_buffer = self.ac_out_buffer + p
+                    return
+                data = p.more()
+                if data:
+                    self.ac_out_buffer = self.ac_out_buffer + data
+                    return
+                else:
+                    # producer exhausted; drop it and try the next one
+                    self.producer_fifo.pop()
+            else:
+                return
+
+    def initiate_send (self):
+        obs = self.ac_out_buffer_size
+        # try to refill the buffer
+        if (len (self.ac_out_buffer) < obs):
+            self.refill_buffer()
+
+        if self.ac_out_buffer and self.connected:
+            # try to send the buffer
+            try:
+                num_sent = self.send (self.ac_out_buffer[:obs])
+                if num_sent:
+                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+
+            except socket.error, why:
+                self.handle_error()
+                return
+
+    def discard_buffers (self):
+        # Emergencies only!
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        while self.producer_fifo:
+            self.producer_fifo.pop()
+
+
+class simple_producer:
+
+    def __init__ (self, data, buffer_size=512):
+        self.data = data
+        self.buffer_size = buffer_size
+
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+
+class fifo:
+    def __init__ (self, list=None):
+        if not list:
+            self.list = []
+        else:
+            self.list = list
+
+    def __len__ (self):
+        return len(self.list)
+
+    def is_empty (self):
+        return self.list == []
+
+    def first (self):
+        return self.list[0]
+
+    def push (self, data):
+        self.list.append (data)
+
+    def pop (self):
+        if self.list:
+            result = self.list[0]
+            del self.list[0]
+            return (1, result)
+        else:
+            return (0, None)
+
+# Given 'haystack', see if any prefix of 'needle' is at its end.  This
+# assumes an exact match has already been checked.  Return the number of
+# characters matched.
+# for example:
+# f_p_a_e ("qwerty\r", "\r\n") => 1
+# f_p_a_e ("qwerty\r\n", "\r\n") => 2
+# f_p_a_e ("qwertydkjf", "\r\n") => 0
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# python:    18307/s
+# re:        12820/s
+# regex:     14035/s
+
+def find_prefix_at_end (haystack, needle):
+    nl = len(needle)
+    result = 0
+    for i in range (1,nl):
+        if haystack[-(nl-i):] == needle[:(nl-i)]:
+            result = nl-i
+            break
+    return result
diff --git a/lib-python/2.2/asyncore.py b/lib-python/2.2/asyncore.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/asyncore.py
@@ -0,0 +1,556 @@
+# -*- Mode: Python -*-
+#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+#   Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time".  Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. it's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then pre-emptive
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background."  Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+
+import exceptions
+import select
+import socket
+import sys
+import time
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
+     ENOTCONN, ESHUTDOWN, EINTR, EISCONN
+
+# Preserve an existing socket_map across a reload() of this module so
+# live channels are not lost; create it only on first import.
+try:
+    socket_map
+except NameError:
+    socket_map = {}
+
+# Raised by handlers to abort the polling loop immediately; it is
+# re-raised by the poll functions instead of being swallowed.
+class ExitNow (exceptions.Exception):
+    pass
+
+# When true, poll() prints the ready fd lists each pass.
+DEBUG = 0
+
+def poll (timeout=0.0, map=None):
+    # One pass of the event loop using select.select(): build the
+    # readable/writable fd lists from the channel map, wait up to
+    # 'timeout' seconds, then dispatch read/write events.  EINTR is
+    # treated as "nothing happened"; handler exceptions (other than
+    # ExitNow) are routed to the channel's handle_error().
+    if map is None:
+        map = socket_map
+    if map:
+        r = []; w = []; e = []
+        for fd, obj in map.items():
+            if obj.readable():
+                r.append (fd)
+            if obj.writable():
+                w.append (fd)
+        if [] == r == w == e:
+            # nothing to wait on; sleep so callers still get the delay
+            time.sleep(timeout)
+        else:
+            try:
+                r,w,e = select.select (r,w,e, timeout)
+            except select.error, err:
+                if err[0] != EINTR:
+                    raise
+                else:
+                    return
+
+        if DEBUG:
+            print r,w,e
+
+        for fd in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                # channel was closed by an earlier handler this pass
+                continue
+
+            try:
+                obj.handle_read_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+        for fd in w:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def poll2 (timeout=0.0, map=None):
+    # One pass of the event loop using the (old, third-party-era)
+    # 'poll' extension module rather than select().
+    import poll
+    if map is None:
+        map=socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    if map:
+        l = []
+        for fd, obj in map.items():
+            flags = 0
+            if obj.readable():
+                flags = poll.POLLIN
+            if obj.writable():
+                flags = flags | poll.POLLOUT
+            if flags:
+                l.append ((fd, flags))
+        r = poll.poll (l, timeout)
+        for fd, flags in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                if (flags  & poll.POLLIN):
+                    obj.handle_read_event()
+                if (flags & poll.POLLOUT):
+                    obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def poll3 (timeout=0.0, map=None):
+    # Use the poll() support added to the select module in Python 2.0
+    if map is None:
+        map=socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    pollster = select.poll()
+    if map:
+        for fd, obj in map.items():
+            flags = 0
+            if obj.readable():
+                flags = select.POLLIN
+            if obj.writable():
+                flags = flags | select.POLLOUT
+            if flags:
+                pollster.register(fd, flags)
+        try:
+            r = pollster.poll (timeout)
+        except select.error, err:
+            # EINTR: interrupted by a signal, treat as no events
+            if err[0] != EINTR:
+                raise
+            r = []
+        for fd, flags in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                if (flags  & select.POLLIN):
+                    obj.handle_read_event()
+                if (flags & select.POLLOUT):
+                    obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def loop (timeout=30.0, use_poll=0, map=None):
+
+    if map is None:
+        map=socket_map
+
+    if use_poll:
+        if hasattr (select, 'poll'):
+            poll_fun = poll3
+        else:
+            poll_fun = poll2
+    else:
+        poll_fun = poll
+
+    while map:
+        poll_fun (timeout, map)
+
+class dispatcher:
+    debug = 0
+    connected = 0
+    accepting = 0
+    closing = 0
+    addr = None
+
+    def __init__ (self, sock=None, map=None):
+        if sock:
+            self.set_socket (sock, map)
+            # I think it should inherit this anyway
+            self.socket.setblocking (0)
+            self.connected = 1
+            # XXX Does the constructor require that the socket passed
+            # be connected?
+            try:
+                self.addr = sock.getpeername()
+            except socket.error:
+                # The addr isn't crucial
+                pass
+        else:
+            self.socket = None
+
+    def __repr__ (self):
+        status = [self.__class__.__module__+"."+self.__class__.__name__]
+        if self.accepting and self.addr:
+            status.append ('listening')
+        elif self.connected:
+            status.append ('connected')
+        if self.addr is not None:
+            try:
+                status.append ('%s:%d' % self.addr)
+            except TypeError:
+                status.append (repr(self.addr))
+        return '<%s at %#x>' % (' '.join (status), id (self))
+
+    def add_channel (self, map=None):
+        #self.log_info ('adding channel %s' % self)
+        if map is None:
+            map=socket_map
+        map [self._fileno] = self
+
+    def del_channel (self, map=None):
+        fd = self._fileno
+        if map is None:
+            map=socket_map
+        if map.has_key (fd):
+            #self.log_info ('closing channel %d:%s' % (fd, self))
+            del map [fd]
+
+    def create_socket (self, family, type):
+        self.family_and_type = family, type
+        self.socket = socket.socket (family, type)
+        self.socket.setblocking(0)
+        self._fileno = self.socket.fileno()
+        self.add_channel()
+
+    def set_socket (self, sock, map=None):
+        self.socket = sock
+##        self.__dict__['socket'] = sock
+        self._fileno = sock.fileno()
+        self.add_channel (map)
+
+    def set_reuse_addr (self):
+        # try to re-use a server port if possible
+        try:
+            self.socket.setsockopt (
+                socket.SOL_SOCKET, socket.SO_REUSEADDR,
+                self.socket.getsockopt (socket.SOL_SOCKET,
+                                        socket.SO_REUSEADDR) | 1
+                )
+        except socket.error:
+            pass
+
+    # ==================================================
+    # predicates for select()
+    # these are used as filters for the lists of sockets
+    # to pass to select().
+    # ==================================================
+
+    def readable (self):
+        return 1
+
+    if os.name == 'mac':
+        # The macintosh will select a listening socket for
+        # write if you let it.  What might this mean?
+        def writable (self):
+            return not self.accepting
+    else:
+        def writable (self):
+            return 1
+
+    # ==================================================
+    # socket object methods.
+    # ==================================================
+
+    def listen (self, num):
+        self.accepting = 1
+        if os.name == 'nt' and num > 5:
+            num = 1
+        return self.socket.listen (num)
+
+    def bind (self, addr):
+        self.addr = addr
+        return self.socket.bind (addr)
+
+    def connect (self, address):
+        self.connected = 0
+        err = self.socket.connect_ex(address)
+        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
+            return
+        if err in (0, EISCONN):
+            self.addr = address
+            self.connected = 1
+            self.handle_connect()
+        else:
+            raise socket.error, err
+
+    def accept (self):
+        try:
+            conn, addr = self.socket.accept()
+            return conn, addr
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                pass
+            else:
+                raise socket.error, why
+
+    def send (self, data):
+        try:
+            result = self.socket.send (data)
+            return result
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                return 0
+            else:
+                raise socket.error, why
+            return 0
+
+    def recv (self, buffer_size):
+        try:
+            data = self.socket.recv (buffer_size)
+            if not data:
+                # a closed connection is indicated by signaling
+                # a read condition, and having recv() return 0.
+                self.handle_close()
+                return ''
+            else:
+                return data
+        except socket.error, why:
+            # winsock sometimes throws ENOTCONN
+            if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
+                self.handle_close()
+                return ''
+            else:
+                raise socket.error, why
+
+    def close (self):
+        self.del_channel()
+        self.socket.close()
+
+    # cheap inheritance, used to pass all other attribute
+    # references to the underlying socket object.
+    def __getattr__ (self, attr):
+        return getattr (self.socket, attr)
+
+    # log and log_info maybe overriden to provide more sophisticated
+    # logging and warning methods. In general, log is for 'hit' logging
+    # and 'log_info' is for informational, warning and error logging.
+
+    def log (self, message):
+        sys.stderr.write ('log: %s\n' % str(message))
+
+    def log_info (self, message, type='info'):
+        if __debug__ or type != 'info':
+            print '%s: %s' % (type, message)
+
+    def handle_read_event (self):
+        if self.accepting:
+            # for an accepting socket, getting a read implies
+            # that we are connected
+            if not self.connected:
+                self.connected = 1
+            self.handle_accept()
+        elif not self.connected:
+            self.handle_connect()
+            self.connected = 1
+            self.handle_read()
+        else:
+            self.handle_read()
+
+    def handle_write_event (self):
+        # getting a write implies that we are connected
+        if not self.connected:
+            self.handle_connect()
+            self.connected = 1
+        self.handle_write()
+
+    def handle_expt_event (self):
+        self.handle_expt()
+
+    def handle_error (self):
+        nil, t, v, tbinfo = compact_traceback()
+
+        # sometimes a user repr method will crash.
+        try:
+            self_repr = repr (self)
+        except:
+            self_repr = '<__repr__ (self) failed for object at %0x>' % id(self)
+
+        self.log_info (
+            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+                self_repr,
+                t,
+                v,
+                tbinfo
+                ),
+            'error'
+            )
+        self.close()
+
+    def handle_expt (self):
+        self.log_info ('unhandled exception', 'warning')
+
+    def handle_read (self):
+        self.log_info ('unhandled read event', 'warning')
+
+    def handle_write (self):
+        self.log_info ('unhandled write event', 'warning')
+
+    def handle_connect (self):
+        self.log_info ('unhandled connect event', 'warning')
+
+    def handle_accept (self):
+        self.log_info ('unhandled accept event', 'warning')
+
+    def handle_close (self):
+        self.log_info ('unhandled close event', 'warning')
+        self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
+
+class dispatcher_with_send (dispatcher):
+    def __init__ (self, sock=None):
+        dispatcher.__init__ (self, sock)
+        self.out_buffer = ''
+
+    def initiate_send (self):
+        num_sent = 0
+        num_sent = dispatcher.send (self, self.out_buffer[:512])
+        self.out_buffer = self.out_buffer[num_sent:]
+
+    def handle_write (self):
+        self.initiate_send()
+
+    def writable (self):
+        return (not self.connected) or len(self.out_buffer)
+
+    def send (self, data):
+        if self.debug:
+            self.log_info ('sending %s' % repr(data))
+        self.out_buffer = self.out_buffer + data
+        self.initiate_send()
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback ():
+    t,v,tb = sys.exc_info()
+    tbinfo = []
+    while 1:
+        tbinfo.append ((
+            tb.tb_frame.f_code.co_filename,
+            tb.tb_frame.f_code.co_name,
+            str(tb.tb_lineno)
+            ))
+        tb = tb.tb_next
+        if not tb:
+            break
+
+    # just to be safe
+    del tb
+
+    file, function, line = tbinfo[-1]
+    info = '[' + '] ['.join(map(lambda x: '|'.join(x), tbinfo)) + ']'
+    return (file, function, line), t, v, info
+
+def close_all (map=None):
+    if map is None:
+        map=socket_map
+    for x in map.values():
+        x.socket.close()
+    map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead.  So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o?  [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
+
+if os.name == 'posix':
+    import fcntl
+
+    class file_wrapper:
+        # here we override just enough to make a file
+        # look like a socket for the purposes of asyncore.
+        def __init__ (self, fd):
+            self.fd = fd
+
+        def recv (self, *args):
+            return apply (os.read, (self.fd,)+args)
+
+        def send (self, *args):
+            return apply (os.write, (self.fd,)+args)
+
+        read = recv
+        write = send
+
+        def close (self):
+            return os.close (self.fd)
+
+        def fileno (self):
+            return self.fd
+
+    class file_dispatcher (dispatcher):
+        def __init__ (self, fd):
+            dispatcher.__init__ (self)
+            self.connected = 1
+            # set it to non-blocking mode
+            flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0)
+            flags = flags | os.O_NONBLOCK
+            fcntl.fcntl (fd, fcntl.F_SETFL, flags)
+            self.set_file (fd)
+
+        def set_file (self, fd):
+            self._fileno = fd
+            self.socket = file_wrapper (fd)
+            self.add_channel()
diff --git a/lib-python/2.2/atexit.py b/lib-python/2.2/atexit.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/atexit.py
@@ -0,0 +1,50 @@
+"""
+atexit.py - allow programmer to define multiple exit functions to be executed
+upon normal program termination.
+
+One public function, register, is defined.
+"""
+
+__all__ = ["register"]
+
+_exithandlers = []
+def _run_exitfuncs():
+    """run any registered exit functions
+
+    _exithandlers is traversed in reverse order so functions are executed
+    last in, first out.
+    """
+
+    while _exithandlers:
+        func, targs, kargs = _exithandlers.pop()
+        apply(func, targs, kargs)
+
+def register(func, *targs, **kargs):
+    """register a function to be executed upon normal program termination
+
+    func - function to be called at exit
+    targs - optional arguments to pass to func
+    kargs - optional keyword arguments to pass to func
+    """
+    _exithandlers.append((func, targs, kargs))
+
+import sys
+if hasattr(sys, "exitfunc"):
+    # Assume it's another registered exit function - append it to our list
+    register(sys.exitfunc)
+sys.exitfunc = _run_exitfuncs
+
+del sys
+
+if __name__ == "__main__":
+    def x1():
+        print "running x1"
+    def x2(n):
+        print "running x2(%s)" % `n`
+    def x3(n, kwd=None):
+        print "running x3(%s, kwd=%s)" % (`n`, `kwd`)
+
+    register(x1)
+    register(x2, 12)
+    register(x3, 5, "bar")
+    register(x3, "no kwd args")
diff --git a/lib-python/2.2/audiodev.py b/lib-python/2.2/audiodev.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/audiodev.py
@@ -0,0 +1,257 @@
+"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
+
+__all__ = ["error","AudioDev"]
+
+class error(Exception):
+    pass
+
+class Play_Audio_sgi:
+    # Private instance variables
+##      if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
+##                params, config, inited_outrate, inited_width, \
+##                inited_nchannels, port, converter, classinited: private
+
+    classinited = 0
+    frameratelist = nchannelslist = sampwidthlist = None
+
+    def initclass(self):
+        import AL
+        self.frameratelist = [
+                  (48000, AL.RATE_48000),
+                  (44100, AL.RATE_44100),
+                  (32000, AL.RATE_32000),
+                  (22050, AL.RATE_22050),
+                  (16000, AL.RATE_16000),
+                  (11025, AL.RATE_11025),
+                  ( 8000,  AL.RATE_8000),
+                  ]
+        self.nchannelslist = [
+                  (1, AL.MONO),
+                  (2, AL.STEREO),
+                  (4, AL.QUADRO),
+                  ]
+        self.sampwidthlist = [
+                  (1, AL.SAMPLE_8),
+                  (2, AL.SAMPLE_16),
+                  (3, AL.SAMPLE_24),
+                  ]
+        self.classinited = 1
+
+    def __init__(self):
+        import al, AL
+        if not self.classinited:
+            self.initclass()
+        self.oldparams = []
+        self.params = [AL.OUTPUT_RATE, 0]
+        self.config = al.newconfig()
+        self.inited_outrate = 0
+        self.inited_width = 0
+        self.inited_nchannels = 0
+        self.converter = None
+        self.port = None
+        return
+
+    def __del__(self):
+        if self.port:
+            self.stop()
+        if self.oldparams:
+            import al, AL
+            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+            self.oldparams = []
+
+    def wait(self):
+        if not self.port:
+            return
+        import time
+        while self.port.getfilled() > 0:
+            time.sleep(0.1)
+        self.stop()
+
+    def stop(self):
+        if self.port:
+            self.port.closeport()
+            self.port = None
+        if self.oldparams:
+            import al, AL
+            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+            self.oldparams = []
+
+    def setoutrate(self, rate):
+        for (raw, cooked) in self.frameratelist:
+            if rate == raw:
+                self.params[1] = cooked
+                self.inited_outrate = 1
+                break
+        else:
+            raise error, 'bad output rate'
+
+    def setsampwidth(self, width):
+        for (raw, cooked) in self.sampwidthlist:
+            if width == raw:
+                self.config.setwidth(cooked)
+                self.inited_width = 1
+                break
+        else:
+            if width == 0:
+                import AL
+                self.inited_width = 0
+                self.config.setwidth(AL.SAMPLE_16)
+                self.converter = self.ulaw2lin
+            else:
+                raise error, 'bad sample width'
+
+    def setnchannels(self, nchannels):
+        for (raw, cooked) in self.nchannelslist:
+            if nchannels == raw:
+                self.config.setchannels(cooked)
+                self.inited_nchannels = 1
+                break
+        else:
+            raise error, 'bad # of channels'
+
+    def writeframes(self, data):
+        if not (self.inited_outrate and self.inited_nchannels):
+            raise error, 'params not specified'
+        if not self.port:
+            import al, AL
+            self.port = al.openport('Python', 'w', self.config)
+            self.oldparams = self.params[:]
+            al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
+            al.setparams(AL.DEFAULT_DEVICE, self.params)
+        if self.converter:
+            data = self.converter(data)
+        self.port.writesamps(data)
+
+    def getfilled(self):
+        if self.port:
+            return self.port.getfilled()
+        else:
+            return 0
+
+    def getfillable(self):
+        if self.port:
+            return self.port.getfillable()
+        else:
+            return self.config.getqueuesize()
+
+    # private methods
+##      if 0: access *: private
+
+    def ulaw2lin(self, data):
+        import audioop
+        return audioop.ulaw2lin(data, 2)
+
+class Play_Audio_sun:
+##      if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
+##                inited_nchannels, converter: private
+
+    def __init__(self):
+        self.outrate = 0
+        self.sampwidth = 0
+        self.nchannels = 0
+        self.inited_outrate = 0
+        self.inited_width = 0
+        self.inited_nchannels = 0
+        self.converter = None
+        self.port = None
+        return
+
+    def __del__(self):
+        self.stop()
+
+    def setoutrate(self, rate):
+        self.outrate = rate
+        self.inited_outrate = 1
+
+    def setsampwidth(self, width):
+        self.sampwidth = width
+        self.inited_width = 1
+
+    def setnchannels(self, nchannels):
+        self.nchannels = nchannels
+        self.inited_nchannels = 1
+
+    def writeframes(self, data):
+        if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
+            raise error, 'params not specified'
+        if not self.port:
+            import sunaudiodev, SUNAUDIODEV
+            self.port = sunaudiodev.open('w')
+            info = self.port.getinfo()
+            info.o_sample_rate = self.outrate
+            info.o_channels = self.nchannels
+            if self.sampwidth == 0:
+                info.o_precision = 8
+                self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
+                # XXX Hack, hack -- leave defaults
+            else:
+                info.o_precision = 8 * self.sampwidth
+                info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
+                self.port.setinfo(info)
+        if self.converter:
+            data = self.converter(data)
+        self.port.write(data)
+
+    def wait(self):
+        if not self.port:
+            return
+        self.port.drain()
+        self.stop()
+
+    def stop(self):
+        if self.port:
+            self.port.flush()
+            self.port.close()
+            self.port = None
+
+    def getfilled(self):
+        if self.port:
+            return self.port.obufcount()
+        else:
+            return 0
+
+##    # Nobody remembers what this method does, and it's broken. :-(
+##    def getfillable(self):
+##        return BUFFERSIZE - self.getfilled()
+
+def AudioDev():
+    # Dynamically try to import and use a platform specific module.
+    try:
+        import al
+    except ImportError:
+        try:
+            import sunaudiodev
+            return Play_Audio_sun()
+        except ImportError:
+            try:
+                import Audio_mac
+            except ImportError:
+                raise error, 'no audio device'
+            else:
+                return Audio_mac.Play_Audio_mac()
+    else:
+        return Play_Audio_sgi()
+
+def test(fn = None):
+    import sys
+    if sys.argv[1:]:
+        fn = sys.argv[1]
+    else:
+        fn = 'f:just samples:just.aif'
+    import aifc
+    af = aifc.open(fn, 'r')
+    print fn, af.getparams()
+    p = AudioDev()
+    p.setoutrate(af.getframerate())
+    p.setsampwidth(af.getsampwidth())
+    p.setnchannels(af.getnchannels())
+    BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
+    while 1:
+        data = af.readframes(BUFSIZ)
+        if not data: break
+        print len(data)
+        p.writeframes(data)
+    p.wait()
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/base64.py b/lib-python/2.2/base64.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/base64.py
@@ -0,0 +1,77 @@
+#! /usr/bin/env python
+
+"""Conversions to/from base64 transport encoding as per RFC-1521."""
+
+# Modified 04-Oct-95 by Jack to use binascii module
+
+import binascii
+
+__all__ = ["encode","decode","encodestring","decodestring"]
+
+MAXLINESIZE = 76 # Excluding the CRLF
+MAXBINSIZE = (MAXLINESIZE//4)*3
+
+def encode(input, output):
+    """Encode a file."""
+    while 1:
+        s = input.read(MAXBINSIZE)
+        if not s: break
+        while len(s) < MAXBINSIZE:
+            ns = input.read(MAXBINSIZE-len(s))
+            if not ns: break
+            s = s + ns
+        line = binascii.b2a_base64(s)
+        output.write(line)
+
+def decode(input, output):
+    """Decode a file."""
+    while 1:
+        line = input.readline()
+        if not line: break
+        s = binascii.a2b_base64(line)
+        output.write(s)
+
+def encodestring(s):
+    """Encode a string."""
+    pieces = []
+    for i in range(0, len(s), MAXBINSIZE):
+        chunk = s[i : i + MAXBINSIZE]
+        pieces.append(binascii.b2a_base64(chunk))
+    return "".join(pieces)
+
+def decodestring(s):
+    """Decode a string."""
+    return binascii.a2b_base64(s)
+
+def test():
+    """Small test program"""
+    import sys, getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'deut')
+    except getopt.error, msg:
+        sys.stdout = sys.stderr
+        print msg
+        print """usage: %s [-d|-e|-u|-t] [file|-]
+        -d, -u: decode
+        -e: encode (default)
+        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
+        sys.exit(2)
+    func = encode
+    for o, a in opts:
+        if o == '-e': func = encode
+        if o == '-d': func = decode
+        if o == '-u': func = decode
+        if o == '-t': test1(); return
+    if args and args[0] != '-':
+        func(open(args[0], 'rb'), sys.stdout)
+    else:
+        func(sys.stdin, sys.stdout)
+
+def test1():
+    s0 = "Aladdin:open sesame"
+    s1 = encodestring(s0)
+    s2 = decodestring(s1)
+    print s0, `s1`, s2
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/bdb.py b/lib-python/2.2/bdb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/bdb.py
@@ -0,0 +1,563 @@
+"""Debugger basics"""
+
+import sys
+import os
+import types
+
+__all__ = ["BdbQuit","Bdb","Breakpoint"]
+
+BdbQuit = 'bdb.BdbQuit' # Exception to give up completely
+
+
+class Bdb:
+
+    """Generic Python debugger base class.
+
+    This class takes care of details of the trace facility;
+    a derived class should implement user interaction.
+    The standard debugger class (pdb.Pdb) is an example.
+    """
+
+    def __init__(self):
+        self.breaks = {}
+        self.fncache = {}
+
+    def canonic(self, filename):
+        if filename == "<" + filename[1:-1] + ">":
+            return filename
+        canonic = self.fncache.get(filename)
+        if not canonic:
+            canonic = os.path.abspath(filename)
+            canonic = os.path.normcase(canonic)
+            self.fncache[filename] = canonic
+        return canonic
+
+    def reset(self):
+        import linecache
+        linecache.checkcache()
+        self.botframe = None
+        self.stopframe = None
+        self.returnframe = None
+        self.quitting = 0
+
+    def trace_dispatch(self, frame, event, arg):
+        if self.quitting:
+            return # None
+        if event == 'line':
+            return self.dispatch_line(frame)
+        if event == 'call':
+            return self.dispatch_call(frame, arg)
+        if event == 'return':
+            return self.dispatch_return(frame, arg)
+        if event == 'exception':
+            return self.dispatch_exception(frame, arg)
+        print 'bdb.Bdb.dispatch: unknown debugging event:', `event`
+        return self.trace_dispatch
+
+    def dispatch_line(self, frame):
+        if self.stop_here(frame) or self.break_here(frame):
+            self.user_line(frame)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_call(self, frame, arg):
+        # XXX 'arg' is no longer used
+        if self.botframe is None:
+            # First call of dispatch since reset()
+            self.botframe = frame.f_back # (CT) Note that this may also be None!
+            return self.trace_dispatch
+        if not (self.stop_here(frame) or self.break_anywhere(frame)):
+            # No need to trace this function
+            return # None
+        self.user_call(frame, arg)
+        if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_return(self, frame, arg):
+        if self.stop_here(frame) or frame == self.returnframe:
+            self.user_return(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_exception(self, frame, arg):
+        if self.stop_here(frame):
+            self.user_exception(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    # Normally derived classes don't override the following
+    # methods, but they may if they want to redefine the
+    # definition of stopping and breakpoints.
+
+    def stop_here(self, frame):
+        # (CT) stopframe may now also be None, see dispatch_call.
+        # (CT) the former test for None is therefore removed from here.
+        if frame is self.stopframe:
+            return 1
+        while frame is not None and frame is not self.stopframe:
+            if frame is self.botframe:
+                return 1
+            frame = frame.f_back
+        return 0
+
+    def break_here(self, frame):
+        filename = self.canonic(frame.f_code.co_filename)
+        if not self.breaks.has_key(filename):
+            return 0
+        lineno = frame.f_lineno
+        if not lineno in self.breaks[filename]:
+            return 0
+        # flag says ok to delete temp. bp
+        (bp, flag) = effective(filename, lineno, frame)
+        if bp:
+            self.currentbp = bp.number
+            if (flag and bp.temporary):
+                self.do_clear(str(bp.number))
+            return 1
+        else:
+            return 0
+
+    def do_clear(self, arg):
+        raise NotImplementedError, "subclass of bdb must implement do_clear()"
+
+    def break_anywhere(self, frame):
+        return self.breaks.has_key(
+            self.canonic(frame.f_code.co_filename))
+
+    # Derived classes should override the user_* methods
+    # to gain control.
+
+    def user_call(self, frame, argument_list):
+        """This method is called when there is the remote possibility
+        that we ever need to stop in this function."""
+        pass
+
+    def user_line(self, frame):
+        """This method is called when we stop or break at this line."""
+        pass
+
+    def user_return(self, frame, return_value):
+        """This method is called when a return trap is set here."""
+        pass
+
+    def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
+        """This method is called if an exception occurs,
+        but only if we are to stop at or just below this level."""
+        pass
+
+    # Derived classes and clients can call the following methods
+    # to affect the stepping state.
+
+    def set_step(self):
+        """Stop after one line of code."""
+        self.stopframe = None
+        self.returnframe = None
+        self.quitting = 0
+
+    def set_next(self, frame):
+        """Stop on the next line in or below the given frame."""
+        self.stopframe = frame
+        self.returnframe = None
+        self.quitting = 0
+
+    def set_return(self, frame):
+        """Stop when returning from the given frame."""
+        self.stopframe = frame.f_back
+        self.returnframe = frame
+        self.quitting = 0
+
+    def set_trace(self):
+        """Start debugging from here."""
+        frame = sys._getframe().f_back
+        self.reset()
+        while frame:
+            frame.f_trace = self.trace_dispatch
+            self.botframe = frame
+            frame = frame.f_back
+        self.set_step()
+        sys.settrace(self.trace_dispatch)
+
+    def set_continue(self):
+        # Don't stop except at breakpoints or when finished
+        self.stopframe = self.botframe
+        self.returnframe = None
+        self.quitting = 0
+        if not self.breaks:
+            # no breakpoints; run without debugger overhead
+            sys.settrace(None)
+            frame = sys._getframe().f_back
+            while frame and frame is not self.botframe:
+                del frame.f_trace
+                frame = frame.f_back
+
+    def set_quit(self):
+        self.stopframe = self.botframe
+        self.returnframe = None
+        self.quitting = 1
+        sys.settrace(None)
+
+    # Derived classes and clients can call the following methods
+    # to manipulate breakpoints.  These methods return an
+    # error message is something went wrong, None if all is well.
+    # Set_break prints out the breakpoint line and file:lineno.
+    # Call self.get_*break*() to see the breakpoints or better
+    # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
+
+    def set_break(self, filename, lineno, temporary=0, cond = None):
+        filename = self.canonic(filename)
+        import linecache # Import as late as possible
+        line = linecache.getline(filename, lineno)
+        if not line:
+            return 'Line %s:%d does not exist' % (filename,
+                                   lineno)
+        if not self.breaks.has_key(filename):
+            self.breaks[filename] = []
+        list = self.breaks[filename]
+        if not lineno in list:
+            list.append(lineno)
+        bp = Breakpoint(filename, lineno, temporary, cond)
+
+    def clear_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        if not self.breaks.has_key(filename):
+            return 'There are no breakpoints in %s' % filename
+        if lineno not in self.breaks[filename]:
+            return 'There is no breakpoint at %s:%d' % (filename,
+                                    lineno)
+        # If there's only one bp in the list for that file,line
+        # pair, then remove the breaks entry
+        for bp in Breakpoint.bplist[filename, lineno][:]:
+            bp.deleteMe()
+        if not Breakpoint.bplist.has_key((filename, lineno)):
+            self.breaks[filename].remove(lineno)
+        if not self.breaks[filename]:
+            del self.breaks[filename]
+
+    def clear_bpbynumber(self, arg):
+        try:
+            number = int(arg)
+        except:
+            return 'Non-numeric breakpoint number (%s)' % arg
+        try:
+            bp = Breakpoint.bpbynumber[number]
+        except IndexError:
+            return 'Breakpoint number (%d) out of range' % number
+        if not bp:
+            return 'Breakpoint (%d) already deleted' % number
+        self.clear_break(bp.file, bp.line)
+
+    def clear_all_file_breaks(self, filename):
+        filename = self.canonic(filename)
+        if not self.breaks.has_key(filename):
+            return 'There are no breakpoints in %s' % filename
+        for line in self.breaks[filename]:
+            blist = Breakpoint.bplist[filename, line]
+            for bp in blist:
+                bp.deleteMe()
+        del self.breaks[filename]
+
+    def clear_all_breaks(self):
+        if not self.breaks:
+            return 'There are no breakpoints'
+        for bp in Breakpoint.bpbynumber:
+            if bp:
+                bp.deleteMe()
+        self.breaks = {}
+
+    def get_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        return self.breaks.has_key(filename) and \
+            lineno in self.breaks[filename]
+
+    def get_breaks(self, filename, lineno):
+        filename = self.canonic(filename)
+        return self.breaks.has_key(filename) and \
+            lineno in self.breaks[filename] and \
+            Breakpoint.bplist[filename, lineno] or []
+
+    def get_file_breaks(self, filename):
+        filename = self.canonic(filename)
+        if self.breaks.has_key(filename):
+            return self.breaks[filename]
+        else:
+            return []
+
+    def get_all_breaks(self):
+        return self.breaks
+
+    # Derived classes and clients can call the following method
+    # to get a data structure representing a stack trace.
+
+    def get_stack(self, f, t):
+        stack = []
+        if t and t.tb_frame is f:
+            t = t.tb_next
+        while f is not None:
+            stack.append((f, f.f_lineno))
+            if f is self.botframe:
+                break
+            f = f.f_back
+        stack.reverse()
+        i = max(0, len(stack) - 1)
+        while t is not None:
+            stack.append((t.tb_frame, t.tb_lineno))
+            t = t.tb_next
+        return stack, i
+
+    #
+
+    def format_stack_entry(self, frame_lineno, lprefix=': '):
+        import linecache, repr
+        frame, lineno = frame_lineno
+        filename = self.canonic(frame.f_code.co_filename)
+        s = filename + '(' + `lineno` + ')'
+        if frame.f_code.co_name:
+            s = s + frame.f_code.co_name
+        else:
+            s = s + "<lambda>"
+        if frame.f_locals.has_key('__args__'):
+            args = frame.f_locals['__args__']
+        else:
+            args = None
+        if args:
+            s = s + repr.repr(args)
+        else:
+            s = s + '()'
+        if frame.f_locals.has_key('__return__'):
+            rv = frame.f_locals['__return__']
+            s = s + '->'
+            s = s + repr.repr(rv)
+        line = linecache.getline(filename, lineno)
+        if line: s = s + lprefix + line.strip()
+        return s
+
+    # The following two methods can be called by clients to use
+    # a debugger to debug a statement, given as a string.
+
+    def run(self, cmd, globals=None, locals=None):
+        if globals is None:
+            import __main__
+            globals = __main__.__dict__
+        if locals is None:
+            locals = globals
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        if not isinstance(cmd, types.CodeType):
+            cmd = cmd+'\n'
+        try:
+            try:
+                exec cmd in globals, locals
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+
+    def runeval(self, expr, globals=None, locals=None):
+        if globals is None:
+            import __main__
+            globals = __main__.__dict__
+        if locals is None:
+            locals = globals
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        if not isinstance(expr, types.CodeType):
+            expr = expr+'\n'
+        try:
+            try:
+                return eval(expr, globals, locals)
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+
+    def runctx(self, cmd, globals, locals):
+        # B/W compatibility
+        self.run(cmd, globals, locals)
+
+    # This method is more useful to debug a single function call.
+
+    def runcall(self, func, *args):
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        res = None
+        try:
+            try:
+                res = apply(func, args)
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+        return res
+
+
+def set_trace():
+    Bdb().set_trace()
+
+
+class Breakpoint:
+
+    """Breakpoint class
+
+    Implements temporary breakpoints, ignore counts, disabling and
+    (re)-enabling, and conditionals.
+
+    Breakpoints are indexed by number through bpbynumber and by
+    the file,line tuple using bplist.  The former points to a
+    single instance of class Breakpoint.  The latter points to a
+    list of such instances since there may be more than one
+    breakpoint per line.
+
+    """
+
+    # XXX Keeping state in the class is a mistake -- this means
+    # you cannot have more than one active Bdb instance.
+
+    next = 1        # Next bp to be assigned
+    bplist = {}     # indexed by (file, lineno) tuple
+    bpbynumber = [None] # Each entry is None or an instance of Bpt
+                # index 0 is unused, except for marking an
+                # effective break .... see effective()
+
+    def __init__(self, file, line, temporary=0, cond = None):
+        self.file = file    # This better be in canonical form!
+        self.line = line
+        self.temporary = temporary
+        self.cond = cond
+        self.enabled = 1
+        self.ignore = 0
+        self.hits = 0
+        self.number = Breakpoint.next
+        Breakpoint.next = Breakpoint.next + 1
+        # Build the two lists
+        self.bpbynumber.append(self)
+        if self.bplist.has_key((file, line)):
+            self.bplist[file, line].append(self)
+        else:
+            self.bplist[file, line] = [self]
+
+
+    def deleteMe(self):
+        index = (self.file, self.line)
+        self.bpbynumber[self.number] = None   # No longer in list
+        self.bplist[index].remove(self)
+        if not self.bplist[index]:
+            # No more bp for this f:l combo
+            del self.bplist[index]
+
+    def enable(self):
+        self.enabled = 1
+
+    def disable(self):
+        self.enabled = 0
+
+    def bpprint(self):
+        if self.temporary:
+            disp = 'del  '
+        else:
+            disp = 'keep '
+        if self.enabled:
+            disp = disp + 'yes'
+        else:
+            disp = disp + 'no '
+        print '%-4dbreakpoint    %s at %s:%d' % (self.number, disp,
+                             self.file, self.line)
+        if self.cond:
+            print '\tstop only if %s' % (self.cond,)
+        if self.ignore:
+            print '\tignore next %d hits' % (self.ignore)
+        if (self.hits):
+            if (self.hits > 1): ss = 's'
+            else: ss = ''
+            print ('\tbreakpoint already hit %d time%s' %
+                   (self.hits, ss))
+
+# -----------end of Breakpoint class----------
+
+# Determines if there is an effective (active) breakpoint at this
+# line of code.  Returns breakpoint number or 0 if none
+def effective(file, line, frame):
+    """Determine which breakpoint for this file:line is to be acted upon.
+
+    Called only if we know there is a bpt at this
+    location.  Returns breakpoint that was triggered and a flag
+    that indicates if it is ok to delete a temporary bp.
+
+    """
+    possibles = Breakpoint.bplist[file,line]
+    for i in range(0, len(possibles)):
+        b = possibles[i]
+        if b.enabled == 0:
+            continue
+        # Count every hit when bp is enabled
+        b.hits = b.hits + 1
+        if not b.cond:
+            # If unconditional, and ignoring,
+            # go on to next, else break
+            if b.ignore > 0:
+                b.ignore = b.ignore -1
+                continue
+            else:
+                # breakpoint and marker that's ok
+                # to delete if temporary
+                return (b,1)
+        else:
+            # Conditional bp.
+            # Ignore count applies only to those bpt hits where the
+            # condition evaluates to true.
+            try:
+                val = eval(b.cond, frame.f_globals,
+                       frame.f_locals)
+                if val:
+                    if b.ignore > 0:
+                        b.ignore = b.ignore -1
+                        # continue
+                    else:
+                        return (b,1)
+                # else:
+                #   continue
+            except:
+                # if eval fails, most conservative
+                # thing is to stop on breakpoint
+                # regardless of ignore count.
+                # Don't delete temporary,
+                # as another hint to user.
+                return (b,0)
+    return (None, None)
+
+# -------------------- testing --------------------
+
+class Tdb(Bdb):
+    def user_call(self, frame, args):
+        name = frame.f_code.co_name
+        if not name: name = '???'
+        print '+++ call', name, args
+    def user_line(self, frame):
+        import linecache
+        name = frame.f_code.co_name
+        if not name: name = '???'
+        fn = self.canonic(frame.f_code.co_filename)
+        line = linecache.getline(fn, frame.f_lineno)
+        print '+++', fn, frame.f_lineno, name, ':', line.strip()
+    def user_return(self, frame, retval):
+        print '+++ return', retval
+    def user_exception(self, frame, exc_stuff):
+        print '+++ exception', exc_stuff
+        self.set_continue()
+
+def foo(n):
+    print 'foo(', n, ')'
+    x = bar(n*10)
+    print 'bar returned', x
+
+def bar(a):
+    print 'bar(', a, ')'
+    return a/2
+
+def test():
+    t = Tdb()
+    t.run('import bdb; bdb.foo(10)')
+
+# end
diff --git a/lib-python/2.2/binhex.py b/lib-python/2.2/binhex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/binhex.py
@@ -0,0 +1,531 @@
+"""Macintosh binhex compression/decompression.
+
+easy interface:
+binhex(inputfilename, outputfilename)
+hexbin(inputfilename, outputfilename)
+"""
+
+#
+# Jack Jansen, CWI, August 1995.
+#
+# The module is supposed to be as compatible as possible. Especially the
+# easy interface should work "as expected" on any platform.
+# XXXX Note: currently, textfiles appear in mac-form on all platforms.
+# We seem to lack a simple character-translate in python.
+# (we should probably use ISO-Latin-1 on all but the mac platform).
+# XXXX The simple routines are too simple: they expect to hold the complete
+# files in-core. Should be fixed.
+# XXXX It would be nice to handle AppleDouble format on unix
+# (for servers serving macs).
+# XXXX I don't understand what happens when you get 0x90 times the same byte on
+# input. The resulting code (xx 90 90) would appear to be interpreted as an
+# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
+#
+import sys
+import os
+import struct
+import binascii
+
+__all__ = ["binhex","hexbin","Error"]
+
+class Error(Exception):
+    pass
+
+# States (what have we written)
+[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
+
+# Various constants
+REASONABLY_LARGE=32768  # Minimal amount we pass the rle-coder
+LINELEN=64
+RUNCHAR=chr(0x90)   # run-length introducer
+
+#
+# This code is no longer byte-order dependent
+
+#
+# Workarounds for non-mac machines.
+if os.name == 'mac':
+    import macfs
+    import MacOS
+    try:
+        openrf = MacOS.openrf
+    except AttributeError:
+        # Backward compatibility
+        openrf = open
+
+    def FInfo():
+        return macfs.FInfo()
+
+    def getfileinfo(name):
+        finfo = macfs.FSSpec(name).GetFInfo()
+        dir, file = os.path.split(name)
+        # XXXX Get resource/data sizes
+        fp = open(name, 'rb')
+        fp.seek(0, 2)
+        dlen = fp.tell()
+        fp = openrf(name, '*rb')
+        fp.seek(0, 2)
+        rlen = fp.tell()
+        return file, finfo, dlen, rlen
+
+    def openrsrc(name, *mode):
+        if not mode:
+            mode = '*rb'
+        else:
+            mode = '*' + mode[0]
+        return openrf(name, mode)
+
+else:
+    #
+    # Glue code for non-macintosh usage
+    #
+
+    class FInfo:
+        def __init__(self):
+            self.Type = '????'
+            self.Creator = '????'
+            self.Flags = 0
+
+    def getfileinfo(name):
+        finfo = FInfo()
+        # Quick check for textfile
+        fp = open(name)
+        data = open(name).read(256)
+        for c in data:
+            if not c.isspace() and (c<' ' or ord(c) > 0x7f):
+                break
+        else:
+            finfo.Type = 'TEXT'
+        fp.seek(0, 2)
+        dsize = fp.tell()
+        fp.close()
+        dir, file = os.path.split(name)
+        file = file.replace(':', '-', 1)
+        return file, finfo, dsize, 0
+
+    class openrsrc:
+        def __init__(self, *args):
+            pass
+
+        def read(self, *args):
+            return ''
+
+        def write(self, *args):
+            pass
+
+        def close(self):
+            pass
+
+class _Hqxcoderengine:
+    """Write data to the coder in 3-byte chunks"""
+
+    def __init__(self, ofp):
+        self.ofp = ofp
+        self.data = ''
+        self.hqxdata = ''
+        self.linelen = LINELEN-1
+
+    def write(self, data):
+        self.data = self.data + data
+        datalen = len(self.data)
+        todo = (datalen//3)*3
+        data = self.data[:todo]
+        self.data = self.data[todo:]
+        if not data:
+            return
+        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
+        self._flush(0)
+
+    def _flush(self, force):
+        first = 0
+        while first <= len(self.hqxdata)-self.linelen:
+            last = first + self.linelen
+            self.ofp.write(self.hqxdata[first:last]+'\n')
+            self.linelen = LINELEN
+            first = last
+        self.hqxdata = self.hqxdata[first:]
+        if force:
+            self.ofp.write(self.hqxdata + ':\n')
+
+    def close(self):
+        if self.data:
+            self.hqxdata = \
+                 self.hqxdata + binascii.b2a_hqx(self.data)
+        self._flush(1)
+        self.ofp.close()
+        del self.ofp
+
+class _Rlecoderengine:
+    """Write data to the RLE-coder in suitably large chunks"""
+
+    def __init__(self, ofp):
+        self.ofp = ofp
+        self.data = ''
+
+    def write(self, data):
+        self.data = self.data + data
+        if len(self.data) < REASONABLY_LARGE:
+            return
+        rledata = binascii.rlecode_hqx(self.data)
+        self.ofp.write(rledata)
+        self.data = ''
+
+    def close(self):
+        if self.data:
+            rledata = binascii.rlecode_hqx(self.data)
+            self.ofp.write(rledata)
+        self.ofp.close()
+        del self.ofp
+
+class BinHex:
+    def __init__(self, (name, finfo, dlen, rlen), ofp):
+        if type(ofp) == type(''):
+            ofname = ofp
+            ofp = open(ofname, 'w')
+            if os.name == 'mac':
+                fss = macfs.FSSpec(ofname)
+                fss.SetCreatorType('BnHq', 'TEXT')
+        ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
+        hqxer = _Hqxcoderengine(ofp)
+        self.ofp = _Rlecoderengine(hqxer)
+        self.crc = 0
+        if finfo is None:
+            finfo = FInfo()
+        self.dlen = dlen
+        self.rlen = rlen
+        self._writeinfo(name, finfo)
+        self.state = _DID_HEADER
+
+    def _writeinfo(self, name, finfo):
+        name = name
+        nl = len(name)
+        if nl > 63:
+            raise Error, 'Filename too long'
+        d = chr(nl) + name + '\0'
+        d2 = finfo.Type + finfo.Creator
+
+        # Force all structs to be packed with big-endian
+        d3 = struct.pack('>h', finfo.Flags)
+        d4 = struct.pack('>ii', self.dlen, self.rlen)
+        info = d + d2 + d3 + d4
+        self._write(info)
+        self._writecrc()
+
+    def _write(self, data):
+        self.crc = binascii.crc_hqx(data, self.crc)
+        self.ofp.write(data)
+
+    def _writecrc(self):
+        # XXXX Should this be here??
+        # self.crc = binascii.crc_hqx('\0\0', self.crc)
+        self.ofp.write(struct.pack('>h', self.crc))
+        self.crc = 0
+
+    def write(self, data):
+        if self.state != _DID_HEADER:
+            raise Error, 'Writing data at the wrong time'
+        self.dlen = self.dlen - len(data)
+        self._write(data)
+
+    def close_data(self):
+        if self.dlen != 0:
+            raise Error, 'Incorrect data size, diff='+`self.rlen`
+        self._writecrc()
+        self.state = _DID_DATA
+
+    def write_rsrc(self, data):
+        if self.state < _DID_DATA:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Writing resource data at the wrong time'
+        self.rlen = self.rlen - len(data)
+        self._write(data)
+
+    def close(self):
+        if self.state < _DID_DATA:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Close at the wrong time'
+        if self.rlen != 0:
+            raise Error, \
+                  "Incorrect resource-datasize, diff="+`self.rlen`
+        self._writecrc()
+        self.ofp.close()
+        self.state = None
+        del self.ofp
+
+def binhex(inp, out):
+    """(infilename, outfilename) - Create binhex-encoded copy of a file"""
+    finfo = getfileinfo(inp)
+    ofp = BinHex(finfo, out)
+
+    ifp = open(inp, 'rb')
+    # XXXX Do textfile translation on non-mac systems
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write(d)
+    ofp.close_data()
+    ifp.close()
+
+    ifp = openrsrc(inp, 'rb')
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write_rsrc(d)
+    ofp.close()
+    ifp.close()
+
+class _Hqxdecoderengine:
+    """Read data via the decoder in 4-byte chunks"""
+
+    def __init__(self, ifp):
+        self.ifp = ifp
+        self.eof = 0
+
+    def read(self, totalwtd):
+        """Read at least wtd bytes (or until EOF)"""
+        decdata = ''
+        wtd = totalwtd
+        #
+        # The loop here is convoluted, since we don't really now how
+        # much to decode: there may be newlines in the incoming data.
+        while wtd > 0:
+            if self.eof: return decdata
+            wtd = ((wtd+2)//3)*4
+            data = self.ifp.read(wtd)
+            #
+            # Next problem: there may not be a complete number of
+            # bytes in what we pass to a2b. Solve by yet another
+            # loop.
+            #
+            while 1:
+                try:
+                    decdatacur, self.eof = \
+                            binascii.a2b_hqx(data)
+                    break
+                except binascii.Incomplete:
+                    pass
+                newdata = self.ifp.read(1)
+                if not newdata:
+                    raise Error, \
+                          'Premature EOF on binhex file'
+                data = data + newdata
+            decdata = decdata + decdatacur
+            wtd = totalwtd - len(decdata)
+            if not decdata and not self.eof:
+                raise Error, 'Premature EOF on binhex file'
+        return decdata
+
+    def close(self):
+        self.ifp.close()
+
+class _Rledecoderengine:
+    """Read data via the RLE-coder"""
+
+    def __init__(self, ifp):
+        self.ifp = ifp
+        self.pre_buffer = ''
+        self.post_buffer = ''
+        self.eof = 0
+
+    def read(self, wtd):
+        if wtd > len(self.post_buffer):
+            self._fill(wtd-len(self.post_buffer))
+        rv = self.post_buffer[:wtd]
+        self.post_buffer = self.post_buffer[wtd:]
+        return rv
+
+    def _fill(self, wtd):
+        self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
+        if self.ifp.eof:
+            self.post_buffer = self.post_buffer + \
+                binascii.rledecode_hqx(self.pre_buffer)
+            self.pre_buffer = ''
+            return
+
+        #
+        # Obfuscated code ahead. We have to take care that we don't
+        # end up with an orphaned RUNCHAR later on. So, we keep a couple
+        # of bytes in the buffer, depending on what the end of
+        # the buffer looks like:
+        # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
+        # '?\220' - Keep 2 bytes: repeated something-else
+        # '\220\0' - Escaped \220: Keep 2 bytes.
+        # '?\220?' - Complete repeat sequence: decode all
+        # otherwise: keep 1 byte.
+        #
+        mark = len(self.pre_buffer)
+        if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
+            mark = mark - 3
+        elif self.pre_buffer[-1] == RUNCHAR:
+            mark = mark - 2
+        elif self.pre_buffer[-2:] == RUNCHAR + '\0':
+            mark = mark - 2
+        elif self.pre_buffer[-2] == RUNCHAR:
+            pass # Decode all
+        else:
+            mark = mark - 1
+
+        self.post_buffer = self.post_buffer + \
+            binascii.rledecode_hqx(self.pre_buffer[:mark])
+        self.pre_buffer = self.pre_buffer[mark:]
+
+    def close(self):
+        self.ifp.close()
+
+class HexBin:
+    def __init__(self, ifp):
+        if type(ifp) == type(''):
+            ifp = open(ifp)
+        #
+        # Find initial colon.
+        #
+        while 1:
+            ch = ifp.read(1)
+            if not ch:
+                raise Error, "No binhex data found"
+            # Cater for \r\n terminated lines (which show up as \n\r, hence
+            # all lines start with \r)
+            if ch == '\r':
+                continue
+            if ch == ':':
+                break
+            if ch != '\n':
+                dummy = ifp.readline()
+
+        hqxifp = _Hqxdecoderengine(ifp)
+        self.ifp = _Rledecoderengine(hqxifp)
+        self.crc = 0
+        self._readheader()
+
+    def _read(self, len):
+        data = self.ifp.read(len)
+        self.crc = binascii.crc_hqx(data, self.crc)
+        return data
+
+    def _checkcrc(self):
+        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
+        #self.crc = binascii.crc_hqx('\0\0', self.crc)
+        # XXXX Is this needed??
+        self.crc = self.crc & 0xffff
+        if filecrc != self.crc:
+            raise Error, 'CRC error, computed %x, read %x' \
+                  %(self.crc, filecrc)
+        self.crc = 0
+
+    def _readheader(self):
+        len = self._read(1)
+        fname = self._read(ord(len))
+        rest = self._read(1+4+4+2+4+4)
+        self._checkcrc()
+
+        type = rest[1:5]
+        creator = rest[5:9]
+        flags = struct.unpack('>h', rest[9:11])[0]
+        self.dlen = struct.unpack('>l', rest[11:15])[0]
+        self.rlen = struct.unpack('>l', rest[15:19])[0]
+
+        self.FName = fname
+        self.FInfo = FInfo()
+        self.FInfo.Creator = creator
+        self.FInfo.Type = type
+        self.FInfo.Flags = flags
+
+        self.state = _DID_HEADER
+
+    def read(self, *n):
+        if self.state != _DID_HEADER:
+            raise Error, 'Read data at wrong time'
+        if n:
+            n = n[0]
+            n = min(n, self.dlen)
+        else:
+            n = self.dlen
+        rv = ''
+        while len(rv) < n:
+            rv = rv + self._read(n-len(rv))
+        self.dlen = self.dlen - n
+        return rv
+
+    def close_data(self):
+        if self.state != _DID_HEADER:
+            raise Error, 'close_data at wrong time'
+        if self.dlen:
+            dummy = self._read(self.dlen)
+        self._checkcrc()
+        self.state = _DID_DATA
+
+    def read_rsrc(self, *n):
+        if self.state == _DID_HEADER:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Read resource data at wrong time'
+        if n:
+            n = n[0]
+            n = min(n, self.rlen)
+        else:
+            n = self.rlen
+        self.rlen = self.rlen - n
+        return self._read(n)
+
+    def close(self):
+        if self.rlen:
+            dummy = self.read_rsrc(self.rlen)
+        self._checkcrc()
+        self.state = _DID_RSRC
+        self.ifp.close()
+
+def hexbin(inp, out):
+    """(infilename, outfilename) - Decode binhexed file"""
+    ifp = HexBin(inp)
+    finfo = ifp.FInfo
+    if not out:
+        out = ifp.FName
+    if os.name == 'mac':
+        ofss = macfs.FSSpec(out)
+        out = ofss.as_pathname()
+
+    ofp = open(out, 'wb')
+    # XXXX Do translation on non-mac systems
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write(d)
+    ofp.close()
+    ifp.close_data()
+
+    d = ifp.read_rsrc(128000)
+    if d:
+        ofp = openrsrc(out, 'wb')
+        ofp.write(d)
+        while 1:
+            d = ifp.read_rsrc(128000)
+            if not d: break
+            ofp.write(d)
+        ofp.close()
+
+    if os.name == 'mac':
+        nfinfo = ofss.GetFInfo()
+        nfinfo.Creator = finfo.Creator
+        nfinfo.Type = finfo.Type
+        nfinfo.Flags = finfo.Flags
+        ofss.SetFInfo(nfinfo)
+
+    ifp.close()
+
+def _test():
+    if os.name == 'mac':
+        fss, ok = macfs.PromptGetFile('File to convert:')
+        if not ok:
+            sys.exit(0)
+        fname = fss.as_pathname()
+    else:
+        fname = sys.argv[1]
+    binhex(fname, fname+'.hqx')
+    hexbin(fname+'.hqx', fname+'.viahqx')
+    #hexbin(fname, fname+'.unpacked')
+    sys.exit(1)
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/bisect.py b/lib-python/2.2/bisect.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/bisect.py
@@ -0,0 +1,78 @@
+"""Bisection algorithms."""
+
+def insort_right(a, x, lo=0, hi=None):
+    """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+    If x is already in a, insert it to the right of the rightmost x.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if x < a[mid]: hi = mid
+        else: lo = mid+1
+    a.insert(lo, x)
+
+insort = insort_right   # backward compatibility
+
+def bisect_right(a, x, lo=0, hi=None):
+    """Return the index where to insert item x in list a, assuming a is sorted.
+
+    The return value i is such that all e in a[:i] have e <= x, and all e in
+    a[i:] have e > x.  So if x already appears in the list, i points just
+    beyond the rightmost x already there.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if x < a[mid]: hi = mid
+        else: lo = mid+1
+    return lo
+
+bisect = bisect_right   # backward compatibility
+
+def insort_left(a, x, lo=0, hi=None):
+    """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+    If x is already in a, insert it to the left of the leftmost x.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if a[mid] < x: lo = mid+1
+        else: hi = mid
+    a.insert(lo, x)
+
+
+def bisect_left(a, x, lo=0, hi=None):
+    """Return the index where to insert item x in list a, assuming a is sorted.
+
+    The return value i is such that all e in a[:i] have e < x, and all e in
+    a[i:] have e >= x.  So if x already appears in the list, i points just
+    before the leftmost x already there.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if a[mid] < x: lo = mid+1
+        else: hi = mid
+    return lo
diff --git a/lib-python/2.2/calendar.py b/lib-python/2.2/calendar.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/calendar.py
@@ -0,0 +1,246 @@
+"""Calendar printing functions
+
+Note when comparing these calendars to the ones printed by cal(1): By
+default, these calendars have Monday as the first day of the week, and
+Sunday as the last (the European convention). Use setfirstweekday() to
+set the first day of the week (0=Monday, 6=Sunday)."""
+
+# Revision 2: uses functions from built-in time module
+
+# Import functions and variables from time module
+from time import localtime, mktime, strftime
+from types import SliceType
+
+__all__ = ["error","setfirstweekday","firstweekday","isleap",
+           "leapdays","weekday","monthrange","monthcalendar",
+           "prmonth","month","prcal","calendar","timegm",
+           "month_name", "month_abbr", "day_name", "day_abbr"]
+
+# Exception raised for bad input (with string parameter for details)
+error = ValueError
+
+# Constants for months referenced later
+January = 1
+February = 2
+
+# Number of days per month (except for February in leap years)
+mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+# This module used to have hard-coded lists of day and month names, as
+# English strings.  The classes following emulate a read-only version of
+# that, but supply localized names.  Note that the values are computed
+# fresh on each call, in case the user changes locale between calls.
+
+class _indexer:
+    def __getitem__(self, i):
+        if isinstance(i, SliceType):
+            return self.data[i.start : i.stop]
+        else:
+            # May raise an appropriate exception.
+            return self.data[i]
+
+class _localized_month(_indexer):
+    def __init__(self, format):
+        self.format = format
+
+    def __getitem__(self, i):
+        self.data = [strftime(self.format, (2001, j, 1, 12, 0, 0, 1, 1, 0))
+                     for j in range(1, 13)]
+        self.data.insert(0, "")
+        return _indexer.__getitem__(self, i)
+
+    def __len__(self):
+        return 13
+
+class _localized_day(_indexer):
+    def __init__(self, format):
+        self.format = format
+
+    def __getitem__(self, i):
+        # January 1, 2001, was a Monday.
+        self.data = [strftime(self.format, (2001, 1, j+1, 12, 0, 0, j, j+1, 0))
+                     for j in range(7)]
+        return _indexer.__getitem__(self, i)
+
+    def __len__(self_):
+        return 7
+
+# Full and abbreviated names of weekdays
+day_name = _localized_day('%A')
+day_abbr = _localized_day('%a')
+
+# Full and abbreviated names of months (1-based arrays!!!)
+month_name = _localized_month('%B')
+month_abbr = _localized_month('%b')
+
+# Constants for weekdays
+(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
+
+_firstweekday = 0                       # 0 = Monday, 6 = Sunday
+
+def firstweekday():
+    return _firstweekday
+
+def setfirstweekday(weekday):
+    """Set weekday (Monday=0, Sunday=6) to start each week."""
+    global _firstweekday
+    if not MONDAY <= weekday <= SUNDAY:
+        raise ValueError, \
+              'bad weekday number; must be 0 (Monday) to 6 (Sunday)'
+    _firstweekday = weekday
+
+def isleap(year):
+    """Return 1 for leap years, 0 for non-leap years."""
+    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
+def leapdays(y1, y2):
+    """Return number of leap years in range [y1, y2).
+       Assume y1 <= y2."""
+    y1 -= 1
+    y2 -= 1
+    return (y2/4 - y1/4) - (y2/100 - y1/100) + (y2/400 - y1/400)
+
+def weekday(year, month, day):
+    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
+       day (1-31)."""
+    secs = mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+    tuple = localtime(secs)
+    return tuple[6]
+
+def monthrange(year, month):
+    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
+       year, month."""
+    if not 1 <= month <= 12:
+        raise ValueError, 'bad month number'
+    day1 = weekday(year, month, 1)
+    ndays = mdays[month] + (month == February and isleap(year))
+    return day1, ndays
+
+def monthcalendar(year, month):
+    """Return a matrix representing a month's calendar.
+       Each row represents a week; days outside this month are zero."""
+    day1, ndays = monthrange(year, month)
+    rows = []
+    r7 = range(7)
+    day = (_firstweekday - day1 + 6) % 7 - 5   # for leading 0's in first week
+    while day <= ndays:
+        row = [0, 0, 0, 0, 0, 0, 0]
+        for i in r7:
+            if 1 <= day <= ndays: row[i] = day
+            day = day + 1
+        rows.append(row)
+    return rows
+
+def _center(str, width):
+    """Center a string in a field."""
+    n = width - len(str)
+    if n <= 0:
+        return str
+    return ' '*((n+1)/2) + str + ' '*((n)/2)
+
+def prweek(theweek, width):
+    """Print a single week (no newline)."""
+    print week(theweek, width),
+
+def week(theweek, width):
+    """Returns a single week in a string (no newline)."""
+    days = []
+    for day in theweek:
+        if day == 0:
+            s = ''
+        else:
+            s = '%2i' % day             # right-align single-digit days
+        days.append(_center(s, width))
+    return ' '.join(days)
+
+def weekheader(width):
+    """Return a header for a week."""
+    if width >= 9:
+        names = day_name
+    else:
+        names = day_abbr
+    days = []
+    for i in range(_firstweekday, _firstweekday + 7):
+        days.append(_center(names[i%7][:width], width))
+    return ' '.join(days)
+
+def prmonth(theyear, themonth, w=0, l=0):
+    """Print a month's calendar."""
+    print month(theyear, themonth, w, l),
+
+def month(theyear, themonth, w=0, l=0):
+    """Return a month's calendar string (multi-line)."""
+    w = max(2, w)
+    l = max(1, l)
+    s = (_center(month_name[themonth] + ' ' + `theyear`,
+                 7 * (w + 1) - 1).rstrip() +
+         '\n' * l + weekheader(w).rstrip() + '\n' * l)
+    for aweek in monthcalendar(theyear, themonth):
+        s = s + week(aweek, w).rstrip() + '\n' * l
+    return s[:-l] + '\n'
+
+# Spacing of month columns for 3-column year calendar
+_colwidth = 7*3 - 1         # Amount printed by prweek()
+_spacing = 6                # Number of spaces between columns
+
+def format3c(a, b, c, colwidth=_colwidth, spacing=_spacing):
+    """Prints 3-column formatting for year calendars"""
+    print format3cstring(a, b, c, colwidth, spacing)
+
+def format3cstring(a, b, c, colwidth=_colwidth, spacing=_spacing):
+    """Returns a string formatted from 3 strings, centered within 3 columns."""
+    return (_center(a, colwidth) + ' ' * spacing + _center(b, colwidth) +
+            ' ' * spacing + _center(c, colwidth))
+
+def prcal(year, w=0, l=0, c=_spacing):
+    """Print a year's calendar."""
+    print calendar(year, w, l, c),
+
+def calendar(year, w=0, l=0, c=_spacing):
+    """Returns a year's calendar as a multi-line string."""
+    w = max(2, w)
+    l = max(1, l)
+    c = max(2, c)
+    colwidth = (w + 1) * 7 - 1
+    s = _center(`year`, colwidth * 3 + c * 2).rstrip() + '\n' * l
+    header = weekheader(w)
+    header = format3cstring(header, header, header, colwidth, c).rstrip()
+    for q in range(January, January+12, 3):
+        s = (s + '\n' * l +
+             format3cstring(month_name[q], month_name[q+1], month_name[q+2],
+                            colwidth, c).rstrip() +
+             '\n' * l + header + '\n' * l)
+        data = []
+        height = 0
+        for amonth in range(q, q + 3):
+            cal = monthcalendar(year, amonth)
+            if len(cal) > height:
+                height = len(cal)
+            data.append(cal)
+        for i in range(height):
+            weeks = []
+            for cal in data:
+                if i >= len(cal):
+                    weeks.append('')
+                else:
+                    weeks.append(week(cal[i], w))
+            s = s + format3cstring(weeks[0], weeks[1], weeks[2],
+                                   colwidth, c).rstrip() + '\n' * l
+    return s[:-l] + '\n'
+
+EPOCH = 1970
+def timegm(tuple):
+    """Unrelated but handy function to calculate Unix timestamp from GMT."""
+    year, month, day, hour, minute, second = tuple[:6]
+    assert year >= EPOCH
+    assert 1 <= month <= 12
+    days = 365*(year-EPOCH) + leapdays(EPOCH, year)
+    for i in range(1, month):
+        days = days + mdays[i]
+    if month > 2 and isleap(year):
+        days = days + 1
+    days = days + day - 1
+    hours = days*24 + hour
+    minutes = hours*60 + minute
+    seconds = minutes*60 + second
+    return seconds
diff --git a/lib-python/2.2/cgi.py b/lib-python/2.2/cgi.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/cgi.py
@@ -0,0 +1,1040 @@
+#! /usr/local/bin/python
+
+# NOTE: the above "/usr/local/bin/python" is NOT a mistake.  It is
+# intentionally NOT "/usr/bin/env python".  On many systems
+# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
+# scripts, and /usr/local/bin is the default directory where Python is
+# installed, so /usr/bin/env would be unable to find python.  Granted,
+# binary installations by Linux vendors often install Python in
+# /usr/bin.  So let those vendors patch cgi.py to match their choice
+# of installation.
+
+"""Support module for CGI (Common Gateway Interface) scripts.
+
+This module defines a number of utilities for use by CGI scripts
+written in Python.
+"""
+
+# XXX Perhaps there should be a slimmed version that doesn't contain
+# all those backwards compatible and debugging classes and functions?
+
+# History
+# -------
+#
+# Michael McLay started this module.  Steve Majewski changed the
+# interface to SvFormContentDict and FormContentDict.  The multipart
+# parsing was inspired by code submitted by Andreas Paepcke.  Guido van
+# Rossum rewrote, reformatted and documented the module and is currently
+# responsible for its maintenance.
+#
+
+__version__ = "2.6"     # version of the upstream CPython cgi.py this tracks
+
+
+# Imports
+# =======
+
+import sys
+import os
+import urllib
+import mimetools
+import rfc822
+import UserDict
+from StringIO import StringIO
+
+__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
+           "SvFormContentDict", "InterpFormContentDict", "FormContent",
+           "parse", "parse_qs", "parse_qsl", "parse_multipart",
+           "parse_header", "print_exception", "print_environ",
+           "print_form", "print_directory", "print_arguments",
+           "print_environ_usage", "escape"]
+
+# Logging support
+# ===============
+
+# See initlog() below: these two globals decide whether log() becomes
+# dolog (writing to logfp) or nolog (a no-op).
+logfile = ""            # Filename to log to, if not empty
+logfp = None            # File object to log to, if not None
+
def initlog(*allargs):
    """Write a log message, if there is a log file.

    Even though this function is called initlog(), you should always
    use log(); log is a variable that is set either to initlog
    (initially), to dolog (once the log file has been opened), or to
    nolog (when logging is disabled).

    The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.
        log("%s: %s", "a", "b")
    will write "a: b" to the log file, followed by a newline.

    If the global logfp is not None, it should be a file object to
    which log data is written.

    If the global logfp is None, the global logfile may be a string
    giving a filename to open, in append mode.  This file should be
    world writable!!!  If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).

    """
    global logfp, log
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except IOError:
            # Can't open the log file: fall through and disable logging.
            pass
    if not logfp:
        log = nolog
    else:
        log = dolog
    # Forward this first call to whichever implementation was just chosen.
    # Extended call syntax replaces the deprecated apply() builtin.
    log(*allargs)
+
def dolog(fmt, *args):
    """Write a log message to the log file.  See initlog() for docs."""
    message = fmt % args
    logfp.write(message + "\n")
+
def nolog(*allargs):
    """Sink assigned to log() when logging is disabled: ignore everything."""
    return None

log = initlog           # The current logging function; rebound by initlog()
+
+
+# Parsing functions
+# =================
+
+# Maximum input we will accept when REQUEST_METHOD is POST
+# 0 ==> unlimited input
+# (Enforced by parse(), parse_multipart() and FieldStorage.__init__.)
+maxlen = 0
+
+def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+    """Parse a query in the environment or from a file (default stdin)
+
+        Arguments, all optional:
+
+        fp              : file pointer; default: sys.stdin
+
+        environ         : environment dictionary; default: os.environ
+
+        keep_blank_values: flag indicating whether blank values in
+            URL encoded forms should be treated as blank strings.
+            A true value indicates that blanks should be retained as
+            blank strings.  The default false value indicates that
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+    """
+    if not fp:
+        fp = sys.stdin
+    if not environ.has_key('REQUEST_METHOD'):
+        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
+    if environ['REQUEST_METHOD'] == 'POST':
+        # NOTE(review): a POST without a CONTENT_TYPE header raises
+        # KeyError here -- matches the upstream cgi.py of this era.
+        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
+        if ctype == 'multipart/form-data':
+            return parse_multipart(fp, pdict)
+        elif ctype == 'application/x-www-form-urlencoded':
+            clength = int(environ['CONTENT_LENGTH'])
+            if maxlen and clength > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+            qs = fp.read(clength)
+        else:
+            qs = ''                     # Unknown content-type
+        # Any query string on the URL is appended after the POST body.
+        if environ.has_key('QUERY_STRING'):
+            if qs: qs = qs + '&'
+            qs = qs + environ['QUERY_STRING']
+        elif sys.argv[1:]:
+            # Stand-alone testing: first command line argument acts as
+            # (extra) query string.
+            if qs: qs = qs + '&'
+            qs = qs + sys.argv[1]
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+    elif environ.has_key('QUERY_STRING'):
+        qs = environ['QUERY_STRING']
+    else:
+        if sys.argv[1:]:
+            qs = sys.argv[1]
+        else:
+            qs = ""
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+    return parse_qs(qs, keep_blank_values, strict_parsing)
+
+
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query string; return a dict mapping names to value lists.

        Arguments:

        qs: URL-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            URL encoded queries should be kept as empty strings.
            The default false value drops blank values as if they
            were not present at all.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
    """
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        result.setdefault(name, []).append(value)
    return result
+
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
+    """Parse a query given as a string argument.
+
+    Arguments:
+
+    qs: URL-encoded query string to be parsed
+
+    keep_blank_values: flag indicating whether blank values in
+        URL encoded queries should be treated as blank strings.  A
+        true value indicates that blanks should be retained as blank
+        strings.  The default false value indicates that blank values
+        are to be ignored and treated as if they were  not included.
+
+    strict_parsing: flag indicating what to do with parsing errors. If
+        false (the default), errors are silently ignored. If true,
+        errors raise a ValueError exception.
+
+    Returns a list, as G-d intended.
+    """
+    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+    r = []
+    for name_value in pairs:
+        nv = name_value.split('=', 1)
+        if len(nv) != 2:
+            if strict_parsing:
+                raise ValueError, "bad query field: %s" % `name_value`
+            continue
+        if len(nv[1]) or keep_blank_values:
+            name = urllib.unquote(nv[0].replace('+', ' '))
+            value = urllib.unquote(nv[1].replace('+', ' '))
+            r.append((name, value))
+
+    return r
+
+
+def parse_multipart(fp, pdict):
+    """Parse multipart input.
+
+    Arguments:
+    fp   : input file
+    pdict: dictionary containing other parameters of content-type header
+
+    Returns a dictionary just like parse_qs(): keys are the field names, each
+    value is a list of values for that field.  This is easy to use but not
+    much good if you are expecting megabytes to be uploaded -- in that case,
+    use the FieldStorage class instead which is much more flexible.  Note
+    that content-type is the raw, unparsed contents of the content-type
+    header.
+
+    XXX This does not parse nested multipart parts -- use FieldStorage for
+    that.
+
+    XXX This should really be subsumed by FieldStorage altogether -- no
+    point in having two implementations of the same parsing algorithm.
+
+    """
+    boundary = ""
+    if pdict.has_key('boundary'):
+        boundary = pdict['boundary']
+    if not valid_boundary(boundary):
+        raise ValueError,  ('Invalid boundary in multipart form: %s'
+                            % `boundary`)
+
+    nextpart = "--" + boundary
+    lastpart = "--" + boundary + "--"
+    partdict = {}
+    terminator = ""
+
+    # Each iteration consumes one part; terminator records which boundary
+    # line ended the previous part ("" means we are before the first one).
+    while terminator != lastpart:
+        bytes = -1
+        data = None
+        if terminator:
+            # At start of next part.  Read headers first.
+            headers = mimetools.Message(fp)
+            clength = headers.getheader('content-length')
+            if clength:
+                try:
+                    bytes = int(clength)
+                except ValueError:
+                    # Unparseable content-length: fall back to line scanning.
+                    pass
+            if bytes > 0:
+                if maxlen and bytes > maxlen:
+                    raise ValueError, 'Maximum content length exceeded'
+                data = fp.read(bytes)
+            else:
+                data = ""
+        # Read lines until end of part.
+        lines = []
+        while 1:
+            line = fp.readline()
+            if not line:
+                terminator = lastpart # End outer loop
+                break
+            if line[:2] == "--":
+                terminator = line.strip()
+                if terminator in (nextpart, lastpart):
+                    break
+            lines.append(line)
+        # Done with part.
+        if data is None:
+            # Preamble before the first boundary: nothing to record.
+            continue
+        if bytes < 0:
+            if lines:
+                # Strip final line terminator
+                line = lines[-1]
+                if line[-2:] == "\r\n":
+                    line = line[:-2]
+                elif line[-1:] == "\n":
+                    line = line[:-1]
+                lines[-1] = line
+                data = "".join(lines)
+        line = headers['content-disposition']
+        if not line:
+            continue
+        key, params = parse_header(line)
+        if key != 'form-data':
+            continue
+        if params.has_key('name'):
+            name = params['name']
+        else:
+            continue
+        if partdict.has_key(name):
+            partdict[name].append(data)
+        else:
+            partdict[name] = [data]
+
+    return partdict
+
+
def parse_header(line):
    """Parse a Content-type like header.

    Return the main content-type (lowercased) and a dictionary of
    options.  Option values surrounded by double quotes have the
    quotes stripped.

    """
    # A list comprehension replaces the old map(lambda ...) + del plist[0]
    # dance: clearer, and indexable regardless of map() semantics.
    parts = [piece.strip() for piece in line.split(';')]
    key = parts[0].lower()
    pdict = {}
    for p in parts[1:]:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            # Strip surrounding double quotes, if both are present.
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
            pdict[name] = value
    return key, pdict
+
+
+# Classes for field storage
+# =========================
+
class MiniFieldStorage:

    """Like FieldStorage, for use when no file uploads are possible."""

    # Dummy attributes mirroring FieldStorage's interface with inert values.
    filename = None
    list = None
    type = None
    file = None
    type_options = {}
    disposition = None
    disposition_options = {}
    headers = {}

    def __init__(self, name, value):
        """Store the field name and its string value."""
        self.name = name
        self.value = value

    def __repr__(self):
        """Return printable representation."""
        return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
+
+
+class FieldStorage:
+
+    """Store a sequence of fields, reading multipart/form-data.
+
+    This class provides naming, typing, files stored on disk, and
+    more.  At the top level, it is accessible like a dictionary, whose
+    keys are the field names.  (Note: None can occur as a field name.)
+    The items are either a Python list (if there's multiple values) or
+    another FieldStorage or MiniFieldStorage object.  If it's a single
+    object, it has the following attributes:
+
+    name: the field name, if specified; otherwise None
+
+    filename: the filename, if specified; otherwise None; this is the
+        client side filename, *not* the file name on which it is
+        stored (that's a temporary file you don't deal with)
+
+    value: the value as a *string*; for file uploads, this
+        transparently reads the file every time you request the value
+
+    file: the file(-like) object from which you can read the data;
+        None if the data is stored a simple string
+
+    type: the content-type, or None if not specified
+
+    type_options: dictionary of options specified on the content-type
+        line
+
+    disposition: content-disposition, or None if not specified
+
+    disposition_options: dictionary of corresponding options
+
+    headers: a dictionary(-like) object (sometimes rfc822.Message or a
+        subclass thereof) containing *all* headers
+
+    The class is subclassable, mostly for the purpose of overriding
+    the make_file() method, which is called internally to come up with
+    a file open for reading and writing.  This makes it possible to
+    override the default choice of storing all files in a temporary
+    directory and unlinking them as soon as they have been opened.
+
+    """
+
+    def __init__(self, fp=None, headers=None, outerboundary="",
+                 environ=os.environ, keep_blank_values=0, strict_parsing=0):
+        """Constructor.  Read multipart/* until last part.
+
+        Arguments, all optional:
+
+        fp              : file pointer; default: sys.stdin
+            (not used when the request method is GET)
+
+        headers         : header dictionary-like object; default:
+            taken from environ as per CGI spec
+
+        outerboundary   : terminating multipart boundary
+            (for internal use only)
+
+        environ         : environment dictionary; default: os.environ
+
+        keep_blank_values: flag indicating whether blank values in
+            URL encoded forms should be treated as blank strings.
+            A true value indicates that blanks should be retained as
+            blank strings.  The default false value indicates that
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+
+        """
+        method = 'GET'
+        self.keep_blank_values = keep_blank_values
+        self.strict_parsing = strict_parsing
+        if environ.has_key('REQUEST_METHOD'):
+            method = environ['REQUEST_METHOD'].upper()
+        if method == 'GET' or method == 'HEAD':
+            # GET/HEAD: synthesize an urlencoded "body" from the query string.
+            if environ.has_key('QUERY_STRING'):
+                qs = environ['QUERY_STRING']
+            elif sys.argv[1:]:
+                qs = sys.argv[1]
+            else:
+                qs = ""
+            fp = StringIO(qs)
+            if headers is None:
+                headers = {'content-type':
+                           "application/x-www-form-urlencoded"}
+        if headers is None:
+            headers = {}
+            if method == 'POST':
+                # Set default content-type for POST to what's traditional
+                headers['content-type'] = "application/x-www-form-urlencoded"
+            if environ.has_key('CONTENT_TYPE'):
+                headers['content-type'] = environ['CONTENT_TYPE']
+            if environ.has_key('CONTENT_LENGTH'):
+                headers['content-length'] = environ['CONTENT_LENGTH']
+        self.fp = fp or sys.stdin
+        self.headers = headers
+        self.outerboundary = outerboundary
+
+        # Process content-disposition header
+        cdisp, pdict = "", {}
+        if self.headers.has_key('content-disposition'):
+            cdisp, pdict = parse_header(self.headers['content-disposition'])
+        self.disposition = cdisp
+        self.disposition_options = pdict
+        self.name = None
+        if pdict.has_key('name'):
+            self.name = pdict['name']
+        self.filename = None
+        if pdict.has_key('filename'):
+            self.filename = pdict['filename']
+
+        # Process content-type header
+        #
+        # Honor any existing content-type header.  But if there is no
+        # content-type header, use some sensible defaults.  Assume
+        # outerboundary is "" at the outer level, but something non-false
+        # inside a multi-part.  The default for an inner part is text/plain,
+        # but for an outer part it should be urlencoded.  This should catch
+        # bogus clients which erroneously forget to include a content-type
+        # header.
+        #
+        # See below for what we do if there does exist a content-type header,
+        # but it happens to be something we don't understand.
+        if self.headers.has_key('content-type'):
+            ctype, pdict = parse_header(self.headers['content-type'])
+        elif self.outerboundary or method != 'POST':
+            ctype, pdict = "text/plain", {}
+        else:
+            ctype, pdict = 'application/x-www-form-urlencoded', {}
+        self.type = ctype
+        self.type_options = pdict
+        self.innerboundary = ""
+        if pdict.has_key('boundary'):
+            self.innerboundary = pdict['boundary']
+        clen = -1
+        if self.headers.has_key('content-length'):
+            try:
+                clen = int(self.headers['content-length'])
+            except:
+                # NOTE(review): bare except -- any bad content-length is
+                # ignored and clen stays -1 (read until EOF/boundary).
+                pass
+            if maxlen and clen > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+        self.length = clen
+
+        self.list = self.file = None
+        self.done = 0
+        # Dispatch on content-type: urlencoded, nested multipart, or a
+        # single opaque part.
+        if ctype == 'application/x-www-form-urlencoded':
+            self.read_urlencoded()
+        elif ctype[:10] == 'multipart/':
+            self.read_multi(environ, keep_blank_values, strict_parsing)
+        else:
+            self.read_single()
+
+    def __repr__(self):
+        """Return a printable representation."""
+        return "FieldStorage(%s, %s, %s)" % (
+                `self.name`, `self.filename`, `self.value`)
+
+    def __getattr__(self, name):
+        # Lazily computed 'value' attribute: the file's contents if a file
+        # was used, the parts list for multipart, else None.
+        if name != 'value':
+            raise AttributeError, name
+        if self.file:
+            self.file.seek(0)
+            value = self.file.read()
+            self.file.seek(0)
+        elif self.list is not None:
+            value = self.list
+        else:
+            value = None
+        return value
+
+    def __getitem__(self, key):
+        """Dictionary style indexing."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        found = []
+        for item in self.list:
+            if item.name == key: found.append(item)
+        if not found:
+            raise KeyError, key
+        if len(found) == 1:
+            return found[0]
+        else:
+            # Multiple fields with the same name: return them all as a list.
+            return found
+
+    def getvalue(self, key, default=None):
+        """Dictionary style get() method, including 'value' lookup."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return map(lambda v: v.value, value)
+            else:
+                return value.value
+        else:
+            return default
+
+    def getfirst(self, key, default=None):
+        """ Return the first value received."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return value[0].value
+            else:
+                return value.value
+        else:
+            return default
+
+    def getlist(self, key):
+        """ Return list of received values."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return map(lambda v: v.value, value)
+            else:
+                return [value.value]
+        else:
+            return []
+
+    def keys(self):
+        """Dictionary style keys() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        keys = []
+        for item in self.list:
+            if item.name not in keys: keys.append(item.name)
+        return keys
+
+    def has_key(self, key):
+        """Dictionary style has_key() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        for item in self.list:
+            if item.name == key: return 1
+        return 0
+
+    def __len__(self):
+        """Dictionary style len(x) support."""
+        return len(self.keys())
+
+    def read_urlencoded(self):
+        """Internal: read data in query string format."""
+        qs = self.fp.read(self.length)
+        self.list = list = []
+        for key, value in parse_qsl(qs, self.keep_blank_values,
+                                    self.strict_parsing):
+            list.append(MiniFieldStorage(key, value))
+        self.skip_lines()
+
+    # Overridable: class used for nested parts; None means use our own class.
+    FieldStorageClass = None
+
+    def read_multi(self, environ, keep_blank_values, strict_parsing):
+        """Internal: read a part that is itself multipart."""
+        ib = self.innerboundary
+        if not valid_boundary(ib):
+            raise ValueError, ('Invalid boundary in multipart form: %s'
+                               % `ib`)
+        self.list = []
+        klass = self.FieldStorageClass or self.__class__
+        part = klass(self.fp, {}, ib,
+                     environ, keep_blank_values, strict_parsing)
+        # Throw first part away
+        while not part.done:
+            headers = rfc822.Message(self.fp)
+            part = klass(self.fp, headers, ib,
+                         environ, keep_blank_values, strict_parsing)
+            self.list.append(part)
+        self.skip_lines()
+
+    def read_single(self):
+        """Internal: read an atomic part."""
+        if self.length >= 0:
+            self.read_binary()
+            self.skip_lines()
+        else:
+            # Unknown length: scan line by line up to EOF or the boundary.
+            self.read_lines()
+        self.file.seek(0)
+
+    bufsize = 8*1024            # I/O buffering size for copy to file
+
+    def read_binary(self):
+        """Internal: read binary data."""
+        self.file = self.make_file('b')
+        todo = self.length
+        if todo >= 0:
+            while todo > 0:
+                data = self.fp.read(min(todo, self.bufsize))
+                if not data:
+                    # Premature EOF: record it and stop.
+                    self.done = -1
+                    break
+                self.file.write(data)
+                todo = todo - len(data)
+
+    def read_lines(self):
+        """Internal: read lines until EOF or outerboundary."""
+        self.file = self.__file = StringIO()
+        if self.outerboundary:
+            self.read_lines_to_outerboundary()
+        else:
+            self.read_lines_to_eof()
+
+    def __write(self, line):
+        # Spool to a real (temporary) file once more than 1000 bytes have
+        # accumulated in the in-memory StringIO buffer.
+        if self.__file is not None:
+            if self.__file.tell() + len(line) > 1000:
+                self.file = self.make_file('')
+                self.file.write(self.__file.getvalue())
+                self.__file = None
+        self.file.write(line)
+
+    def read_lines_to_eof(self):
+        """Internal: read lines until EOF."""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            self.__write(line)
+
+    def read_lines_to_outerboundary(self):
+        """Internal: read lines until outerboundary."""
+        next = "--" + self.outerboundary
+        last = next + "--"
+        delim = ""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            if line[:2] == "--":
+                strippedline = line.strip()
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
+            # Hold back each line's terminator and emit it before the NEXT
+            # line, so the newline preceding the boundary is not included.
+            odelim = delim
+            if line[-2:] == "\r\n":
+                delim = "\r\n"
+                line = line[:-2]
+            elif line[-1] == "\n":
+                delim = "\n"
+                line = line[:-1]
+            else:
+                delim = ""
+            self.__write(odelim + line)
+
+    def skip_lines(self):
+        """Internal: skip lines until outer boundary if defined."""
+        if not self.outerboundary or self.done:
+            return
+        next = "--" + self.outerboundary
+        last = next + "--"
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            if line[:2] == "--":
+                strippedline = line.strip()
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
+
+    def make_file(self, binary=None):
+        """Overridable: return a readable & writable file.
+
+        The file will be used as follows:
+        - data is written to it
+        - seek(0)
+        - data is read from it
+
+        The 'binary' argument is unused -- the file is always opened
+        in binary mode.
+
+        This version opens a temporary file for reading and writing,
+        and immediately deletes (unlinks) it.  The trick (on Unix!) is
+        that the file can still be used, but it can't be opened by
+        another process, and it will automatically be deleted when it
+        is closed or when the current process terminates.
+
+        If you want a more permanent file, you derive a class which
+        overrides this method.  If you want a visible temporary file
+        that is nevertheless automatically deleted when the script
+        terminates, try defining a __del__ method in a derived class
+        which unlinks the temporary files you have created.
+
+        """
+        import tempfile
+        return tempfile.TemporaryFile("w+b")
+
+
+
+# Backwards Compatibility Classes
+# ===============================
+
class FormContentDict(UserDict.UserDict):
    """Form content as dictionary with a list of values per field.

    form = FormContentDict()

    form[key] -> [value, value, ...]
    form.has_key(key) -> Boolean
    form.keys() -> [key, key, ...]
    form.values() -> [[val, val, ...], [val, val, ...], ...]
    form.items() ->  [(key, [val, val, ...]), (key, [val, val, ...]), ...]
    form.dict == {key: [val, val, ...], ...}

    """
    def __init__(self, environ=os.environ):
        # Parse once; expose the result both as the UserDict storage
        # (.data) and under the historical .dict alias.
        parsed = parse(environ=environ)
        self.dict = parsed
        self.data = parsed
        self.query_string = environ['QUERY_STRING']
+
+
+class SvFormContentDict(FormContentDict):
+    """Form content as dictionary expecting a single value per field.
+
+    If you only expect a single value for each field, then form[key]
+    will return that single value.  It will raise an IndexError if
+    that expectation is not true.  If you expect a field to have
+    possible multiple values, than you can use form.getlist(key) to
+    get all of the values.  values() and items() are a compromise:
+    they return single strings where there is a single value, and
+    lists of strings otherwise.
+
+    """
+    def __getitem__(self, key):
+        if len(self.dict[key]) > 1:
+            raise IndexError, 'expecting a single value'
+        return self.dict[key][0]
+    def getlist(self, key):
+        return self.dict[key]
+    def values(self):
+        result = []
+        for value in self.dict.values():
+            if len(value) == 1:
+                result.append(value[0])
+            else: result.append(value)
+        return result
+    def items(self):
+        result = []
+        for key, value in self.dict.items():
+            if len(value) == 1:
+                result.append((key, value[0]))
+            else: result.append((key, value))
+        return result
+
+
+class InterpFormContentDict(SvFormContentDict):
+    """This class is present for backwards compatibility only."""
+    def __getitem__(self, key):
+        # Numeric-looking values are converted: int first, then float;
+        # anything else is returned stripped of surrounding whitespace.
+        v = SvFormContentDict.__getitem__(self, key)
+        if v[0] in '0123456789+-.':
+            try: return int(v)
+            except ValueError:
+                try: return float(v)
+                except ValueError: pass
+        return v.strip()
+    def values(self):
+        result = []
+        for key in self.keys():
+            try:
+                result.append(self[key])
+            except IndexError:
+                # Multi-valued field: fall back to the raw list.
+                result.append(self.dict[key])
+        return result
+    def items(self):
+        result = []
+        for key in self.keys():
+            try:
+                result.append((key, self[key]))
+            except IndexError:
+                # Multi-valued field: fall back to the raw list.
+                result.append((key, self.dict[key]))
+        return result
+
+
class FormContent(FormContentDict):
    """This class is present for backwards compatibility only."""
    def values(self, key):
        if key in self.dict:
            return self.dict[key]
        return None
    def indexed_value(self, key, location):
        if key in self.dict and len(self.dict[key]) > location:
            return self.dict[key][location]
        return None
    def value(self, key):
        if key in self.dict:
            return self.dict[key][0]
        return None
    def length(self, key):
        return len(self.dict[key])
    def stripped(self, key):
        if key in self.dict:
            return self.dict[key][0].strip()
        return None
    def pars(self):
        return self.dict
+
+
+# Test/debug code
+# ===============
+
+def test(environ=os.environ):
+    """Robust test CGI script, usable as main program.
+
+    Write minimal HTTP headers and dump all information provided to
+    the script in HTML form.
+
+    """
+    import traceback
+    print "Content-type: text/html"
+    print
+    # Redirect errors into the page so tracebacks reach the browser
+    # instead of the server log.
+    sys.stderr = sys.stdout
+    try:
+        form = FieldStorage()   # Replace with other classes to test those
+        print_directory()
+        print_arguments()
+        print_form(form)
+        print_environ(environ)
+        print_environ_usage()
+        # f() deliberately raises a SyntaxError (via exec of a non-statement)
+        # to demonstrate print_exception() with a two-frame traceback.
+        def f():
+            exec "testing print_exception() -- <I>italics?</I>"
+        def g(f=f):
+            f()
+        print "<H3>What follows is a test, not an actual exception:</H3>"
+        g()
+    except:
+        print_exception()
+
+    print "<H1>Second try with a small maxlen...</H1>"
+
+    global maxlen
+    maxlen = 50
+    try:
+        form = FieldStorage()   # Replace with other classes to test those
+        print_directory()
+        print_arguments()
+        print_form(form)
+        print_environ(environ)
+    except:
+        print_exception()
+
+def print_exception(type=None, value=None, tb=None, limit=None):
+    if type is None:
+        type, value, tb = sys.exc_info()
+    import traceback
+    print
+    print "<H3>Traceback (most recent call last):</H3>"
+    list = traceback.format_tb(tb, limit) + \
+           traceback.format_exception_only(type, value)
+    print "<PRE>%s<B>%s</B></PRE>" % (
+        escape("".join(list[:-1])),
+        escape(list[-1]),
+        )
+    del tb
+
+def print_environ(environ=os.environ):
+    """Dump the shell environment as HTML."""
+    keys = environ.keys()
+    keys.sort()
+    print
+    print "<H3>Shell Environment:</H3>"
+    print "<DL>"
+    for key in keys:
+        print "<DT>", escape(key), "<DD>", escape(environ[key])
+    print "</DL>"
+    print
+
+def print_form(form):
+    """Dump the contents of a form as HTML."""
+    keys = form.keys()
+    keys.sort()
+    print
+    print "<H3>Form Contents:</H3>"
+    if not keys:
+        print "<P>No form fields."
+    print "<DL>"
+    for key in keys:
+        print "<DT>" + escape(key) + ":",
+        value = form[key]
+        print "<i>" + escape(`type(value)`) + "</i>"
+        print "<DD>" + escape(`value`)
+    print "</DL>"
+    print
+
+def print_directory():
+    """Dump the current directory as HTML."""
+    print
+    print "<H3>Current Working Directory:</H3>"
+    try:
+        pwd = os.getcwd()
+    except os.error, msg:
+        print "os.error:", escape(str(msg))
+    else:
+        print escape(pwd)
+    print
+
+def print_arguments():
+    """Dump the command line arguments as HTML."""
+    print
+    print "<H3>Command Line Arguments:</H3>"
+    print
+    print sys.argv
+    print
+
+def print_environ_usage():
+    """Dump a list of environment variables used by CGI as HTML."""
+    # The multi-line literal below is emitted verbatim to stdout.
+    print """
+<H3>These environment variables could have been set:</H3>
+<UL>
+<LI>AUTH_TYPE
+<LI>CONTENT_LENGTH
+<LI>CONTENT_TYPE
+<LI>DATE_GMT
+<LI>DATE_LOCAL
+<LI>DOCUMENT_NAME
+<LI>DOCUMENT_ROOT
+<LI>DOCUMENT_URI
+<LI>GATEWAY_INTERFACE
+<LI>LAST_MODIFIED
+<LI>PATH
+<LI>PATH_INFO
+<LI>PATH_TRANSLATED
+<LI>QUERY_STRING
+<LI>REMOTE_ADDR
+<LI>REMOTE_HOST
+<LI>REMOTE_IDENT
+<LI>REMOTE_USER
+<LI>REQUEST_METHOD
+<LI>SCRIPT_NAME
+<LI>SERVER_NAME
+<LI>SERVER_PORT
+<LI>SERVER_PROTOCOL
+<LI>SERVER_ROOT
+<LI>SERVER_SOFTWARE
+</UL>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well.  Here are some common variable names:
+<UL>
+<LI>HTTP_ACCEPT
+<LI>HTTP_CONNECTION
+<LI>HTTP_HOST
+<LI>HTTP_PRAGMA
+<LI>HTTP_REFERER
+<LI>HTTP_USER_AGENT
+</UL>
+"""
+
+
+# Utilities
+# =========
+
def escape(s, quote=None):
    """Replace special characters '&', '<' and '>' by SGML entities.

    If quote is true, double quotes are also replaced by &quot;.
    """
    # '&' must come first so already-produced entities are not re-escaped.
    for ch, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        s = s.replace(ch, entity)
    if quote:
        s = s.replace('"', "&quot;")
    return s
+
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
    """Return a true value iff s is an acceptable multipart boundary."""
    import re
    match = re.match(_vb_pattern, s)
    return match
+
+# Invoke mainline
+# ===============
+
+# Call test() when this file is run as a script (not imported as a module).
+# test() writes an HTML diagnostic dump to stdout.
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/cgitb.py b/lib-python/2.2/cgitb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/cgitb.py
@@ -0,0 +1,205 @@
+"""Handle exceptions in CGI scripts by formatting tracebacks into nice HTML.
+
+To enable this module, do:
+
+    import cgitb; cgitb.enable()
+
+at the top of your CGI script.  The optional arguments to enable() are:
+
+    display     - if true, tracebacks are displayed in the web browser
+    logdir      - if set, tracebacks are written to files in this directory
+    context     - number of lines of source code to show for each stack frame
+
+By default, tracebacks are displayed but not saved, and context is 5.
+
+Alternatively, if you have caught an exception and want cgitb to display it
+for you, call cgitb.handler().  The optional argument to handler() is a 3-item
+tuple (etype, evalue, etb) just like the value of sys.exc_info()."""
+
+__author__ = 'Ka-Ping Yee'
+__version__ = '$Revision$'
+
+import sys
+
def reset():
    """Return a string that resets the CGI and browser to a known state."""
    # A fixed mix of comment markers and closing tags, presumably meant
    # to unwind any markup left open by earlier script output -- the
    # exact text is significant and must not be reformatted.
    markup = '''<!--: spam
Content-Type: text/html

<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
</font> </font> </font> </script> </object> </blockquote> </pre>
</table> </table> </table> </table> </table> </font> </font> </font>'''
    return markup
+
+__UNDEF__ = []                          # a special sentinel object
def small(text):
    """Wrap text in HTML <small> tags."""
    return ''.join(['<small>', text, '</small>'])
def strong(text):
    """Wrap text in HTML <strong> tags."""
    return ''.join(['<strong>', text, '</strong>'])
def grey(text):
    """Wrap text in a grey HTML <font> element."""
    return ''.join(['<font color="#909090">', text, '</font>'])
+
def lookup(name, frame, locals):
    """Find the value for a given name in the given environment.

    Returns a (scope, value) pair; scope is 'local', 'global' or None,
    and value is the module sentinel __UNDEF__ when the name is absent
    from both namespaces.
    """
    # Locals shadow globals, so they are consulted first; the frame's
    # globals are only touched when the local lookup misses.
    if name in locals:
        return 'local', locals[name]
    frame_globals = frame.f_globals
    if name in frame_globals:
        return 'global', frame_globals[name]
    return None, __UNDEF__
+
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used.

    reader is a readline-style callable yielding successive source lines
    (fed to tokenize).  Returns a list of (name, where, value) triples;
    where is 'local', 'global', None, or the dotted prefix string for
    attribute accesses, and value is __UNDEF__ when unresolvable.
    """
    import tokenize, keyword
    vars, lasttoken, parent, prefix = [], None, None, ''
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        # Stop after the first logical line.
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                # Attribute access: resolve the token against the parent
                # object fetched on the previous iteration.
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            # NOTE(review): 'value' is unbound here if a '.' is seen
            # before any NAME token has been looked up (e.g. following a
            # number) -- confirm whether such input can reach this branch.
            parent = value
        else:
            # Any other token resets the attribute-chain state.
            parent, prefix = None, ''
        lasttoken = token
    return vars
+
+def html((etype, evalue, etb), context=5):
+    """Return a nice HTML document describing a given traceback."""
+    import os, types, time, traceback, linecache, inspect, pydoc
+
+    if type(etype) is types.ClassType:
+        etype = etype.__name__
+    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+    date = time.ctime(time.time())
+    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+        '<big><big><strong>%s</strong></big></big>' % str(etype),
+        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.'''
+
+    indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
+    frames = []
+    records = inspect.getinnerframes(etb, context)
+    for frame, file, lnum, func, lines, index in records:
+        file = file and os.path.abspath(file) or '?'
+        link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        args, varargs, varkw, locals = inspect.getargvalues(frame)
+        call = ''
+        if func != '?':
+            call = 'in ' + strong(func) + \
+                inspect.formatargvalues(args, varargs, varkw, locals,
+                    formatvalue=lambda value: '=' + pydoc.html.repr(value))
+
+        highlight = {}
+        def reader(lnum=[lnum]):
+            highlight[lnum[0]] = 1
+            try: return linecache.getline(file, lnum[0])
+            finally: lnum[0] += 1
+        vars = scanvars(reader, frame, locals)
+
+        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+                ('<big>&nbsp;</big>', link, call)]
+        if index is not None:
+            i = lnum - index
+            for line in lines:
+                num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+                line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
+                if i in highlight:
+                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+                else:
+                    rows.append('<tr><td>%s</td></tr>' % grey(line))
+                i += 1
+
+        done, dump = {}, []
+        for name, where, value in vars:
+            if name in done: continue
+            done[name] = 1
+            if value is not __UNDEF__:
+                if where == 'global': name = '<em>global</em> ' + strong(name)
+                elif where == 'local': name = strong(name)
+                else: name = where + strong(name.split('.')[-1])
+                dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
+            else:
+                dump.append(name + ' <em>undefined</em>')
+
+        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+        frames.append('''<p>
+<table width="100%%" cellspacing=0 cellpadding=0 border=0>
+%s</table>''' % '\n'.join(rows))
+
+    exception = ['<p>%s: %s' % (strong(str(etype)), str(evalue))]
+    if type(evalue) is types.InstanceType:
+        for name in dir(evalue):
+            value = pydoc.html.repr(getattr(evalue, name))
+            exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
+
+    import traceback
+    return head + ''.join(frames) + ''.join(exception) + '''
+
+
+<!-- The above is a description of an error in a Python program, formatted
+     for a Web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a Web browser, here is the original traceback:
+
+%s
+-->
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
class Hook:
    """A hook to replace sys.excepthook that shows tracebacks in HTML."""

    def __init__(self, display=1, logdir=None, context=5, file=None):
        self.display = display          # send tracebacks to browser if true
        self.logdir = logdir            # log tracebacks to files if not None
        self.context = context          # number of source code lines per frame
        self.file = file or sys.stdout  # place to send the output

    def __call__(self, etype, evalue, etb):
        """Make the instance usable directly as sys.excepthook."""
        self.handle((etype, evalue, etb))

    def handle(self, info=None):
        """Format and emit the traceback described by info.

        info is an (etype, evalue, etb) triple; it defaults to the
        exception currently being handled (sys.exc_info()).
        """
        info = info or sys.exc_info()
        self.file.write(reset())

        try:
            text, doc = 0, html(info, self.context)
        except:                         # just in case something goes wrong
            # Deliberate bare except: fall back to a plain-text traceback
            # rather than losing the error report entirely.
            import traceback
            text, doc = 1, ''.join(traceback.format_exception(*info))

        if self.display:
            if text:
                # Plain text must be escaped before being embedded in HTML.
                doc = doc.replace('&', '&amp;').replace('<', '&lt;')
                self.file.write('<pre>' + doc + '</pre>\n')
            else:
                self.file.write(doc + '\n')
        else:
            self.file.write('<p>A problem occurred in a Python script.\n')

        if self.logdir is not None:
            import os, tempfile
            # NOTE(review): tempfile.mktemp only generates a name and is
            # race-prone (the name may be reused before the file is
            # opened); tempfile.mkstemp would be safer if available.
            name = tempfile.mktemp(['.html', '.txt'][text])
            path = os.path.join(self.logdir, os.path.basename(name))
            try:
                file = open(path, 'w')
                file.write(doc)
                file.close()
                msg = '<p> %s contains the description of this error.' % path
            except:
                # Logging is best-effort; report the failure in the output.
                msg = '<p> Tried to save traceback to %s, but failed.' % path
            self.file.write(msg + '\n')
        # Flushing may fail on odd output objects; ignore that too.
        try:
            self.file.flush()
        except: pass
+
+handler = Hook().handle
def enable(display=1, logdir=None, context=5):
    """Install an exception handler that formats tracebacks as HTML.

    The optional argument 'display' can be set to 0 to suppress sending the
    traceback to the browser, and 'logdir' can be set to a directory to cause
    tracebacks to be written to files there."""
    hook = Hook(display, logdir, context)
    sys.excepthook = hook
diff --git a/lib-python/2.2/chunk.py b/lib-python/2.2/chunk.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/chunk.py
@@ -0,0 +1,167 @@
+"""Simple class to read IFF chunks.
+
+An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
+Format)) has the following structure:
+
++----------------+
+| ID (4 bytes)   |
++----------------+
+| size (4 bytes) |
++----------------+
+| data           |
+| ...            |
++----------------+
+
+The ID is a 4-byte string which identifies the type of chunk.
+
+The size field (a 32-bit value, encoded using big-endian byte order)
+gives the size of the whole chunk, including the 8-byte header.
+
+Usually an IFF-type file consists of one or more chunks.  The proposed
+usage of the Chunk class defined here is to instantiate an instance at
+the start of each chunk and read from the instance until it reaches
+the end, after which a new instance can be instantiated.  At the end
+of the file, creating a new instance will fail with an EOFError
+exception.
+
+Usage:
+while 1:
+    try:
+        chunk = Chunk(file)
+    except EOFError:
+        break
+    chunktype = chunk.getname()
+    while 1:
+        data = chunk.read(nbytes)
+        if not data:
+            pass
+        # do something with data
+
+The interface is file-like.  The implemented methods are:
+read, close, seek, tell, isatty.
+Extra methods are: skip() (called by close, skips to the end of the chunk),
+getname() (returns the name (ID) of the chunk)
+
+The __init__ method has one required argument, a file-like object
+(including a chunk instance), and one optional argument, a flag which
+specifies whether or not chunks are aligned on 2-byte boundaries.  The
+default is 1, i.e. aligned.
+"""
+
class Chunk:
    """File-like reader for a single IFF chunk (see the module docstring).

    Reads the 4-byte ID and 4-byte size on construction, then exposes
    read/seek/tell/skip/close over the chunk's data bytes only.
    """
    def __init__(self, file, align = 1, bigendian = 1, inclheader = 0):
        """file: source file-like object (may itself be a Chunk);
        align: pad chunks to 2-byte boundaries; bigendian: byte order of
        the size field; inclheader: size field counts the 8-byte header.

        Raises EOFError when a complete header cannot be read."""
        import struct
        self.closed = 0
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:
            strflag = '<'
        self.file = file
        self.chunkname = file.read(4)
        if len(self.chunkname) < 4:
            # Not enough bytes for an ID: treat as end of file.
            raise EOFError
        try:
            # NOTE(review): the size is unpacked as a *signed* 32-bit
            # integer; a chunk over 2 GiB would come out negative --
            # confirm whether 'L' was intended.
            self.chunksize = struct.unpack(strflag+'l', file.read(4))[0]
        except struct.error:
            raise EOFError
        if inclheader:
            self.chunksize = self.chunksize - 8 # subtract header
        self.size_read = 0      # number of data bytes consumed so far
        try:
            self.offset = self.file.tell()
        except (AttributeError, IOError):
            # Unseekable source (e.g. a pipe): fall back to sequential reads.
            self.seekable = 0
        else:
            self.seekable = 1

    def getname(self):
        """Return the name (ID) of the current chunk."""
        return self.chunkname

    def getsize(self):
        """Return the size of the current chunk."""
        return self.chunksize

    def close(self):
        """Skip to the end of the chunk and mark this object closed."""
        if not self.closed:
            self.skip()
            self.closed = 1

    def isatty(self):
        """Return 0: a chunk is never an interactive terminal."""
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return 0

    def seek(self, pos, whence = 0):
        """Seek to specified position into the chunk.
        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if not self.seekable:
            raise IOError, "cannot seek"
        # whence follows the file convention: 0=start, 1=current, 2=end,
        # but all positions are relative to the chunk's data.
        if whence == 1:
            pos = pos + self.size_read
        elif whence == 2:
            pos = pos + self.chunksize
        if pos < 0 or pos > self.chunksize:
            # Seeking outside the chunk is not allowed.
            raise RuntimeError
        self.file.seek(self.offset + pos, 0)
        self.size_read = pos

    def tell(self):
        """Return the current position within the chunk's data."""
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return self.size_read

    def read(self, size = -1):
        """Read at most size bytes from the chunk.
        If size is omitted or negative, read until the end
        of the chunk.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if self.size_read >= self.chunksize:
            return ''
        # Clamp the request to the bytes remaining in this chunk.
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        # When the chunk is exhausted and has an odd size, consume the
        # pad byte so the underlying file lands on the next boundary.
        if self.size_read == self.chunksize and \
           self.align and \
           (self.chunksize & 1):
            dummy = self.file.read(1)
            self.size_read = self.size_read + len(dummy)
        return data

    def skip(self):
        """Skip the rest of the chunk.
        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if self.seekable:
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except IOError:
                # Seek failed after all; fall through to reading.
                pass
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError
diff --git a/lib-python/2.2/cmd.py b/lib-python/2.2/cmd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/cmd.py
@@ -0,0 +1,336 @@
+"""A generic class to build line-oriented command interpreters.
+
+Interpreters constructed with this class obey the following conventions:
+
+1. End of file on input is processed as the command 'EOF'.
+2. A command is parsed out of each line by collecting the prefix composed
+   of characters in the identchars member.
+3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
+   is passed a single argument consisting of the remainder of the line.
+4. Typing an empty line repeats the last command.  (Actually, it calls the
+   method `emptyline', which may be overridden in a subclass.)
+5. There is a predefined `help' method.  Given an argument `topic', it
+   calls the command `help_topic'.  With no arguments, it lists all topics
+   with defined help_ functions, broken into up to three topics: documented
+   commands, miscellaneous help topics, and undocumented commands.
+6. The command '?' is a synonym for `help'.  The command '!' is a synonym
+   for `shell', if a do_shell method exists.
+7. If completion is enabled, completing commands will be done automatically,
+   and completing of commands args is done by calling complete_foo() with
+   arguments text, line, begidx, endidx.  text is string we are matching
+   against, all returned matches must begin with it.  line is the current
+   input line (lstripped), begidx and endidx are the beginning and end
+   indexes of the text being matched, which could be used to provide
+   different completion depending upon which position the argument is in.
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup.  This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+
+These interpreters use raw_input; thus, if the readline module is loaded,
+they automatically support Emacs-like command history and editing features.
+"""
+
+import string, sys
+
+__all__ = ["Cmd"]
+
+PROMPT = '(Cmd) '
+IDENTCHARS = string.ascii_letters + string.digits + '_'
+
class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    prompt = PROMPT             # string issued to solicit input
    identchars = IDENTCHARS     # characters that may make up a command name
    ruler = '='                 # character used to draw help section rulers
    lastcmd = ''                # last nonempty command; replayed by emptyline()
    intro = None                # banner printed by cmdloop() if nonempty
    doc_leader = ""             # text printed before the help listings
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"    # format used when no help text is found
    use_rawinput = 1            # if false, read lines from sys.stdin instead

    def __init__(self, completekey='tab'):
        """Instantiate a line-oriented interpreter framework.

        The optional argument is the readline name of a completion key;
        it defaults to the Tab key. If completekey is not None and the
        readline module is available, command completion is done
        automatically.

        """
        self.cmdqueue = []              # pending input lines, consumed by cmdloop()
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        """

        self.preloop()
        if intro is not None:
            self.intro = intro
        if self.intro:
            print self.intro
        stop = None
        while not stop:
            if self.cmdqueue:
                # Queued lines take precedence over interactive input.
                line = self.cmdqueue[0]
                del self.cmdqueue[0]
            else:
                if self.use_rawinput:
                    try:
                        line = raw_input(self.prompt)
                    except EOFError:
                        # End of input is dispatched as the command 'EOF'.
                        line = 'EOF'
                else:
                    sys.stdout.write(self.prompt)
                    sys.stdout.flush()
                    line = sys.stdin.readline()
                    if not len(line):
                        line = 'EOF'
                    else:
                        line = line[:-1] # chop \n
            line = self.precmd(line)
            stop = self.onecmd(line)
            stop = self.postcmd(stop, line)
        self.postloop()

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.

        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        if self.completekey:
            try:
                # Save the previous completer so postloop() can restore it.
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                # readline unavailable: completion is silently disabled.
                pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.

        """
        if self.completekey:
            try:
                import readline
                readline.set_completer(self.old_completer)
            except ImportError:
                pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple (command, args, line); command
        and args are None when the line cannot be parsed as a command.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for 'help'.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for 'shell', if do_shell is defined.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.

        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        # Remember the full line so emptyline() can repeat it.
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.

        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.

        """
        print '*** Unknown syntax:', line

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.

        """
        return []

    def completenames(self, text, *ignored):
        """Return command names (without the 'do_' prefix) starting with text."""
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # First call for this text: compute and cache all matches.
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                # Completing a command argument: delegate to complete_<cmd>.
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        """Return attribute names from this class and all its base
        classes; the result is unsorted and may contain duplicates."""
        # Inheritance says we have to look in class and
        # base classes; order is not important.
        names = []
        classes = [self.__class__]
        while classes:
            aclass = classes[0]
            if aclass.__bases__:
                classes = classes + list(aclass.__bases__)
            names = names + dir(aclass)
            del classes[0]
        return names

    def complete_help(self, *args):
        # Help topics complete the same way command names do.
        return self.completenames(*args)

    # Note: do_* docstrings are user-visible help text (printed below),
    # so adding a docstring here would change the output of 'help help'
    # and move 'help' into the documented-commands listing.
    def do_help(self, arg):
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except:
                # No help_<arg> method: fall back to the do_<arg> docstring.
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        print doc
                        return
                except:
                    pass
                print self.nohelp % (arg,)
                return
            func()
        else:
            # No argument: list all commands, split into documented,
            # miscellaneous help topics, and undocumented.
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if help.has_key(cmd):
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            print self.doc_leader
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        """Print header, an optional ruler line, and the topics in cmds
        laid out in columns cmdlen characters wide within maxcol columns."""
        if cmds:
            print header
            if self.ruler:
                print self.ruler * len(header)
            (cmds_per_line,junk)=divmod(maxcol,cmdlen)
            col=cmds_per_line
            for cmd in cmds:
                if col==0: print
                print (("%-"+`cmdlen`+"s") % cmd),
                col = (col+1) % cmds_per_line
            print "\n"
diff --git a/lib-python/2.2/code.py b/lib-python/2.2/code.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/code.py
@@ -0,0 +1,311 @@
+"""Utilities needed to emulate Python's interactive interpreter.
+
+"""
+
+# Inspired by similar code by Jeff Epler and Fredrik Lundh.
+
+
+import sys
+import traceback
+from codeop import CommandCompiler, compile_command
+
+__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
+           "compile_command"]
+
+def softspace(file, newvalue):
+    """Set file.softspace to newvalue and return the previous value.
+
+    Returns 0 if the attribute was unset.  Failures to read or write the
+    attribute (attribute-less or read-only file objects) are silently
+    ignored so this works on any file-like object.
+    """
+    oldvalue = 0
+    try:
+        oldvalue = file.softspace
+    except AttributeError:
+        pass
+    try:
+        file.softspace = newvalue
+    except (AttributeError, TypeError):
+        # "attribute-less object" or "read-only attributes"
+        pass
+    return oldvalue
+
+class InteractiveInterpreter:
+    """Base class for InteractiveConsole.
+
+    This class deals with parsing and interpreter state (the user's
+    namespace); it doesn't deal with input buffering or prompting or
+    input file naming (the filename is always passed in explicitly).
+
+    """
+
+    def __init__(self, locals=None):
+        """Constructor.
+
+        The optional 'locals' argument specifies the dictionary in
+        which code will be executed; it defaults to a newly created
+        dictionary with key "__name__" set to "__console__" and key
+        "__doc__" set to None.
+
+        """
+        if locals is None:
+            locals = {"__name__": "__console__", "__doc__": None}
+        self.locals = locals
+        self.compile = CommandCompiler()
+
+    def runsource(self, source, filename="<input>", symbol="single"):
+        """Compile and run some source in the interpreter.
+
+        Arguments are as for compile_command().
+
+        One of several things can happen:
+
+        1) The input is incorrect; compile_command() raised an
+        exception (SyntaxError or OverflowError).  A syntax traceback
+        will be printed by calling the showsyntaxerror() method.
+
+        2) The input is incomplete, and more input is required;
+        compile_command() returned None.  Nothing happens.
+
+        3) The input is complete; compile_command() returned a code
+        object.  The code is executed by calling self.runcode() (which
+        also handles run-time exceptions, except for SystemExit).
+
+        The return value is 1 in case 2, 0 in the other cases (unless
+        an exception is raised).  The return value can be used to
+        decide whether to use sys.ps1 or sys.ps2 to prompt the next
+        line.
+
+        """
+        try:
+            code = self.compile(source, filename, symbol)
+        except (OverflowError, SyntaxError, ValueError):
+            # Case 1
+            self.showsyntaxerror(filename)
+            return 0
+
+        if code is None:
+            # Case 2
+            return 1
+
+        # Case 3
+        self.runcode(code)
+        return 0
+
+    def runcode(self, code):
+        """Execute a code object.
+
+        When an exception occurs, self.showtraceback() is called to
+        display a traceback.  All exceptions are caught except
+        SystemExit, which is reraised.
+
+        A note about KeyboardInterrupt: this exception may occur
+        elsewhere in this code, and may not always be caught.  The
+        caller should be prepared to deal with it.
+
+        """
+        try:
+            # User code runs in the interpreter's namespace dictionary.
+            exec code in self.locals
+        except SystemExit:
+            raise
+        except:
+            self.showtraceback()
+        else:
+            # Emulate the top-level interpreter: terminate a pending
+            # softspace (trailing-comma print) with a newline.
+            if softspace(sys.stdout, 0):
+                print
+
+    def showsyntaxerror(self, filename=None):
+        """Display the syntax error that just occurred.
+
+        This doesn't display a stack trace because there isn't one.
+
+        If a filename is given, it is stuffed in the exception instead
+        of what was there before (because Python's parser always uses
+        "<string>" when reading from a string).
+
+        The output is written by self.write(), below.
+
+        """
+        # Mirror the real interpreter: record the error in sys.last_*.
+        type, value, sys.last_traceback = sys.exc_info()
+        sys.last_type = type
+        sys.last_value = value
+        if filename and type is SyntaxError:
+            # Work hard to stuff the correct filename in the exception
+            try:
+                msg, (dummy_filename, lineno, offset, line) = value
+            except:
+                # Not the format we expect; leave it alone
+                pass
+            else:
+                # Stuff in the right filename
+                try:
+                    # Assume SyntaxError is a class exception
+                    value = SyntaxError(msg, (filename, lineno, offset, line))
+                except:
+                    # If that failed, assume SyntaxError is a string
+                    value = msg, (filename, lineno, offset, line)
+                sys.last_value = value
+        list = traceback.format_exception_only(type, value)
+        map(self.write, list)
+
+    def showtraceback(self):
+        """Display the exception that just occurred.
+
+        We remove the first stack item because it is our own code.
+
+        The output is written by self.write(), below.
+
+        """
+        try:
+            type, value, tb = sys.exc_info()
+            sys.last_type = type
+            sys.last_value = value
+            sys.last_traceback = tb
+            tblist = traceback.extract_tb(tb)
+            # Drop the first frame (the runcode() exec above).
+            del tblist[:1]
+            list = traceback.format_list(tblist)
+            if list:
+                list.insert(0, "Traceback (most recent call last):\n")
+            list[len(list):] = traceback.format_exception_only(type, value)
+        finally:
+            # Break the reference to the traceback to avoid keeping
+            # frames (and their locals) alive via a reference cycle.
+            tblist = tb = None
+        map(self.write, list)
+
+    def write(self, data):
+        """Write a string.
+
+        The base implementation writes to sys.stderr; a subclass may
+        replace this with a different implementation.
+
+        """
+        sys.stderr.write(data)
+
+
+class InteractiveConsole(InteractiveInterpreter):
+    """Closely emulate the behavior of the interactive Python interpreter.
+
+    This class builds on InteractiveInterpreter and adds prompting
+    using the familiar sys.ps1 and sys.ps2, and input buffering.
+
+    """
+
+    def __init__(self, locals=None, filename="<console>"):
+        """Constructor.
+
+        The optional locals argument will be passed to the
+        InteractiveInterpreter base class.
+
+        The optional filename argument should specify the (file)name
+        of the input stream; it will show up in tracebacks.
+
+        """
+        InteractiveInterpreter.__init__(self, locals)
+        self.filename = filename
+        self.resetbuffer()
+
+    def resetbuffer(self):
+        """Reset the input buffer."""
+        self.buffer = []
+
+    def interact(self, banner=None):
+        """Closely emulate the interactive Python console.
+
+        The optional banner argument specify the banner to print
+        before the first interaction; by default it prints a banner
+        similar to the one printed by the real Python interpreter,
+        followed by the current class name in parentheses (so as not
+        to confuse this with the real interpreter -- since it's so
+        close!).
+
+        """
+        # Provide the standard prompts only if the embedding
+        # application hasn't already set them.
+        try:
+            sys.ps1
+        except AttributeError:
+            sys.ps1 = ">>> "
+        try:
+            sys.ps2
+        except AttributeError:
+            sys.ps2 = "... "
+        cprt = 'Type "copyright", "credits" or "license" for more information.'
+        if banner is None:
+            self.write("Python %s on %s\n%s\n(%s)\n" %
+                       (sys.version, sys.platform, cprt,
+                        self.__class__.__name__))
+        else:
+            self.write("%s\n" % str(banner))
+        # 'more' is true while a multi-line statement is incomplete;
+        # it selects the continuation prompt sys.ps2.
+        more = 0
+        while 1:
+            try:
+                if more:
+                    prompt = sys.ps2
+                else:
+                    prompt = sys.ps1
+                try:
+                    line = self.raw_input(prompt)
+                except EOFError:
+                    # End of input (e.g. Ctrl-D): leave the loop.
+                    self.write("\n")
+                    break
+                else:
+                    more = self.push(line)
+            except KeyboardInterrupt:
+                # Ctrl-C discards any partially entered statement.
+                self.write("\nKeyboardInterrupt\n")
+                self.resetbuffer()
+                more = 0
+
+    def push(self, line):
+        """Push a line to the interpreter.
+
+        The line should not have a trailing newline; it may have
+        internal newlines.  The line is appended to a buffer and the
+        interpreter's runsource() method is called with the
+        concatenated contents of the buffer as source.  If this
+        indicates that the command was executed or invalid, the buffer
+        is reset; otherwise, the command is incomplete, and the buffer
+        is left as it was after the line was appended.  The return
+        value is 1 if more input is required, 0 if the line was dealt
+        with in some way (this is the same as runsource()).
+
+        """
+        self.buffer.append(line)
+        source = "\n".join(self.buffer)
+        more = self.runsource(source, self.filename)
+        if not more:
+            self.resetbuffer()
+        return more
+
+    def raw_input(self, prompt=""):
+        """Write a prompt and read a line.
+
+        The returned line does not include the trailing newline.
+        When the user enters the EOF key sequence, EOFError is raised.
+
+        The base implementation uses the built-in function
+        raw_input(); a subclass may replace this with a different
+        implementation.
+
+        """
+        return raw_input(prompt)
+
+
+def interact(banner=None, readfunc=None, local=None):
+    """Closely emulate the interactive Python interpreter.
+
+    This is a backwards compatible interface to the InteractiveConsole
+    class.  When readfunc is not specified, it attempts to import the
+    readline module to enable GNU readline if it is available.
+
+    Arguments (all optional, all default to None):
+
+    banner -- passed to InteractiveConsole.interact()
+    readfunc -- if not None, replaces InteractiveConsole.raw_input()
+    local -- passed to InteractiveInterpreter.__init__()
+
+    """
+    console = InteractiveConsole(local)
+    if readfunc is not None:
+        console.raw_input = readfunc
+    else:
+        try:
+            # Imported purely for its side effect: hooks GNU readline
+            # line editing into raw_input() where available.
+            import readline
+        except:
+            pass
+    console.interact(banner)
+
+
+# Allow running this module as a script to get an interactive console.
+if __name__ == '__main__':
+    interact()
diff --git a/lib-python/2.2/codecs.py b/lib-python/2.2/codecs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/codecs.py
@@ -0,0 +1,636 @@
+""" codecs -- Python Codec Registry, API and helpers.
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import struct, __builtin__
+
+### Registry and builtin stateless codec functions
+
+# The C extension supplies register(), lookup() and the builtin
+# stateless encode/decode functions; this module cannot work without it.
+try:
+    from _codecs import *
+except ImportError, why:
+    raise SystemError,\
+          'Failed to load the builtin codecs: %s' % why
+
+__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
+           "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE"]
+
+### Constants
+
+#
+# Byte Order Mark (BOM) and its possible values (BOM_BE, BOM_LE)
+#
+# '=H' packs U+FEFF in native byte order, so BOM equals either BOM_BE
+# or BOM_LE depending on the platform.
+BOM = struct.pack('=H', 0xFEFF)
+#
+BOM_BE = BOM32_BE = '\376\377'
+#       corresponds to Unicode U+FEFF in UTF-16 on big endian
+#       platforms == ZERO WIDTH NO-BREAK SPACE
+BOM_LE = BOM32_LE = '\377\376'
+#       corresponds to Unicode U+FFFE in UTF-16 on little endian
+#       platforms == defined as being an illegal Unicode character
+
+#
+# 64-bit Byte Order Marks
+#
+BOM64_BE = '\000\000\376\377'
+#       corresponds to Unicode U+0000FEFF in UCS-4
+BOM64_LE = '\377\376\000\000'
+#       corresponds to Unicode U+0000FFFE in UCS-4
+
+
+### Codec base classes (defining the API)
+
+class Codec:
+
+    """ Defines the interface for stateless encoders/decoders.
+
+        The .encode()/.decode() methods may implement different error
+        handling schemes by providing the errors argument. These
+        string values are defined:
+
+         'strict' - raise a ValueError error (or a subclass)
+         'ignore' - ignore the character and continue with the next
+         'replace' - replace with a suitable replacement character;
+                    Python will use the official U+FFFD REPLACEMENT
+                    CHARACTER for the builtin Unicode codecs.
+
+    """
+    def encode(self, input, errors='strict'):
+
+        """ Encodes the object input and returns a tuple (output
+            object, length consumed).
+
+            errors defines the error handling to apply. It defaults to
+            'strict' handling.
+
+            The method may not store state in the Codec instance. Use
+            StreamCodec for codecs which have to keep state in order to
+            make encoding/decoding efficient.
+
+            The encoder must be able to handle zero length input and
+            return an empty object of the output object type in this
+            situation.
+
+        """
+        # Abstract method: concrete codec implementations must override.
+        raise NotImplementedError
+
+    def decode(self, input, errors='strict'):
+
+        """ Decodes the object input and returns a tuple (output
+            object, length consumed).
+
+            input must be an object which provides the bf_getreadbuf
+            buffer slot. Python strings, buffer objects and memory
+            mapped files are examples of objects providing this slot.
+
+            errors defines the error handling to apply. It defaults to
+            'strict' handling.
+
+            The method may not store state in the Codec instance. Use
+            StreamCodec for codecs which have to keep state in order to
+            make encoding/decoding efficient.
+
+            The decoder must be able to handle zero length input and
+            return an empty object of the output object type in this
+            situation.
+
+        """
+        # Abstract method: concrete codec implementations must override.
+        raise NotImplementedError
+
+#
+# The StreamWriter and StreamReader class provide generic working
+# interfaces which can be used to implement new encoding submodules
+# very easily. See encodings/utf_8.py for an example on how this is
+# done.
+#
+
+class StreamWriter(Codec):
+
+    def __init__(self, stream, errors='strict'):
+
+        """ Creates a StreamWriter instance.
+
+            stream must be a file-like object open for writing
+            (binary) data.
+
+            The StreamWriter may implement different error handling
+            schemes by providing the errors keyword argument. These
+            parameters are defined:
+
+             'strict' - raise a ValueError (or a subclass)
+             'ignore' - ignore the character and continue with the next
+             'replace'- replace with a suitable replacement character
+
+        """
+        self.stream = stream
+        self.errors = errors
+
+    def write(self, object):
+
+        """ Writes the object's contents encoded to self.stream.
+        """
+        # The "length consumed" part of the encode() result is ignored;
+        # only the encoded data reaches the stream.
+        data, consumed = self.encode(object, self.errors)
+        self.stream.write(data)
+
+    def writelines(self, list):
+
+        """ Writes the concatenated list of strings to the stream
+            using .write().
+        """
+        # Join first so the data is encoded (and written) in one pass.
+        self.write(''.join(list))
+
+    def reset(self):
+
+        """ Flushes and resets the codec buffers used for keeping state.
+
+            Calling this method should ensure that the data on the
+            output is put into a clean state, that allows appending
+            of new fresh data without having to rescan the whole
+            stream to recover state.
+
+        """
+        pass
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamReader(Codec):
+
+    def __init__(self, stream, errors='strict'):
+
+        """ Creates a StreamReader instance.
+
+            stream must be a file-like object open for reading
+            (binary) data.
+
+            The StreamReader may implement different error handling
+            schemes by providing the errors keyword argument. These
+            parameters are defined:
+
+             'strict' - raise a ValueError (or a subclass)
+             'ignore' - ignore the character and continue with the next
+             'replace'- replace with a suitable replacement character;
+
+        """
+        self.stream = stream
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        """ Decodes data from the stream self.stream and returns the
+            resulting object.
+
+            size indicates the approximate maximum number of bytes to
+            read from the stream for decoding purposes. The decoder
+            can modify this setting as appropriate. The default value
+            -1 indicates to read and decode as much as possible.  size
+            is intended to prevent having to decode huge files in one
+            step.
+
+            The method should use a greedy read strategy meaning that
+            it should read as much data as is allowed within the
+            definition of the encoding and the given size, e.g.  if
+            optional encoding endings or state markers are available
+            on the stream, these should be read too.
+
+        """
+        # Unsliced reading:
+        if size < 0:
+            return self.decode(self.stream.read(), self.errors)[0]
+
+        # Sliced reading:
+        read = self.stream.read
+        decode = self.decode
+        data = read(size)
+        i = 0
+        while 1:
+            try:
+                object, decodedbytes = decode(data, self.errors)
+            except ValueError, why:
+                # This method is slow but should work under pretty much
+                # all conditions; at most 10 tries are made
+                # (a decode error here is assumed to mean the read
+                # stopped mid-sequence, so one more byte is fetched
+                # and the decode retried).
+                i = i + 1
+                newdata = read(1)
+                if not newdata or i > 10:
+                    raise
+                data = data + newdata
+            else:
+                return object
+
+    def readline(self, size=None):
+
+        """ Read one line from the input stream and return the
+            decoded data.
+
+            Note: Unlike the .readlines() method, this method inherits
+            the line breaking knowledge from the underlying stream's
+            .readline() method -- there is currently no support for
+            line breaking using the codec decoder due to lack of line
+            buffering. Subclasses should however, if possible, try to
+            implement this method using their own knowledge of line
+            breaking.
+
+            size, if given, is passed as size argument to the stream's
+            .readline() method.
+
+        """
+        if size is None:
+            line = self.stream.readline()
+        else:
+            line = self.stream.readline(size)
+        return self.decode(line, self.errors)[0]
+
+
+    def readlines(self, sizehint=None):
+
+        """ Read all lines available on the input stream
+            and return them as list of lines.
+
+            Line breaks are implemented using the codec's decoder
+            method and are included in the list entries.
+
+            sizehint, if given, is passed as size argument to the
+            stream's .read() method.
+
+        """
+        if sizehint is None:
+            data = self.stream.read()
+        else:
+            data = self.stream.read(sizehint)
+        # splitlines(1) keeps the line-break characters in each entry.
+        return self.decode(data, self.errors)[0].splitlines(1)
+
+    def reset(self):
+
+        """ Resets the codec buffers used for keeping state.
+
+            Note that no stream repositioning should take place.
+            This method is primarily intended to be able to recover
+            from decoding errors.
+
+        """
+        pass
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamReaderWriter:
+
+    """ StreamReaderWriter instances allow wrapping streams which
+        work in both read and write modes.
+
+        The design is such that one can use the factory functions
+        returned by the codec.lookup() function to construct the
+        instance.
+
+    """
+    # Optional attributes set by the file wrappers below
+    encoding = 'unknown'
+
+    def __init__(self, stream, Reader, Writer, errors='strict'):
+
+        """ Creates a StreamReaderWriter instance.
+
+            stream must be a Stream-like object.
+
+            Reader, Writer must be factory functions or classes
+            providing the StreamReader, StreamWriter interface resp.
+
+            Error handling is done in the same way as defined for the
+            StreamWriter/Readers.
+
+        """
+        # Both the reader and the writer wrap the *same* underlying
+        # stream; read and write calls are simply delegated.
+        self.stream = stream
+        self.reader = Reader(stream, errors)
+        self.writer = Writer(stream, errors)
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        return self.reader.read(size)
+
+    def readline(self, size=None):
+
+        return self.reader.readline(size)
+
+    def readlines(self, sizehint=None):
+
+        return self.reader.readlines(sizehint)
+
+    def write(self, data):
+
+        return self.writer.write(data)
+
+    def writelines(self, list):
+
+        return self.writer.writelines(list)
+
+    def reset(self):
+
+        self.reader.reset()
+        self.writer.reset()
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamRecoder:
+
+    """ StreamRecoder instances provide a frontend - backend
+        view of encoding data.
+
+        They use the complete set of APIs returned by the
+        codecs.lookup() function to implement their task.
+
+        Data written to the stream is first decoded into an
+        intermediate format (which is dependent on the given codec
+        combination) and then written to the stream using an instance
+        of the provided Writer class.
+
+        In the other direction, data is read from the stream using a
+        Reader instance and then return encoded data to the caller.
+
+    """
+    # Optional attributes set by the file wrappers below
+    data_encoding = 'unknown'
+    file_encoding = 'unknown'
+
+    def __init__(self, stream, encode, decode, Reader, Writer,
+                 errors='strict'):
+
+        """ Creates a StreamRecoder instance which implements a two-way
+            conversion: encode and decode work on the frontend (the
+            input to .read() and output of .write()) while
+            Reader and Writer work on the backend (reading and
+            writing to the stream).
+
+            You can use these objects to do transparent direct
+            recodings from e.g. latin-1 to utf-8 and back.
+
+            stream must be a file-like object.
+
+            encode, decode must adhere to the Codec interface, Reader,
+            Writer must be factory functions or classes providing the
+            StreamReader, StreamWriter interface resp.
+
+            encode and decode are needed for the frontend translation,
+            Reader and Writer for the backend translation. Unicode is
+            used as intermediate encoding.
+
+            Error handling is done in the same way as defined for the
+            StreamWriter/Readers.
+
+        """
+        self.stream = stream
+        self.encode = encode
+        self.decode = decode
+        self.reader = Reader(stream, errors)
+        self.writer = Writer(stream, errors)
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        # stream --(Reader decodes)--> intermediate --(encode)--> caller
+        data = self.reader.read(size)
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
+
+    def readline(self, size=None):
+
+        if size is None:
+            data = self.reader.readline()
+        else:
+            data = self.reader.readline(size)
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
+
+    def readlines(self, sizehint=None):
+
+        if sizehint is None:
+            data = self.reader.read()
+        else:
+            data = self.reader.read(sizehint)
+        data, bytesencoded = self.encode(data, self.errors)
+        # splitlines(1) keeps the line-break characters in each entry.
+        return data.splitlines(1)
+
+    def write(self, data):
+
+        # caller --(decode)--> intermediate --(Writer encodes)--> stream
+        data, bytesdecoded = self.decode(data, self.errors)
+        return self.writer.write(data)
+
+    def writelines(self, list):
+
+        data = ''.join(list)
+        data, bytesdecoded = self.decode(data, self.errors)
+        return self.writer.write(data)
+
+    def reset(self):
+
+        self.reader.reset()
+        self.writer.reset()
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+### Shortcuts
+
+def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
+
+    """ Open an encoded file using the given mode and return
+        a wrapped version providing transparent encoding/decoding.
+
+        Note: The wrapped version will only accept the object format
+        defined by the codecs, i.e. Unicode objects for most builtin
+        codecs. Output is also codec dependent and will usually by
+        Unicode as well.
+
+        Files are always opened in binary mode, even if no binary mode
+        was specified. This is done to avoid data loss due to encodings
+        using 8-bit values. The default file mode is 'rb' meaning to
+        open the file in binary read mode.
+
+        encoding specifies the encoding which is to be used for
+        the file.
+
+        errors may be given to define the error handling. It defaults
+        to 'strict' which causes ValueErrors to be raised in case an
+        encoding error occurs.
+
+        buffering has the same meaning as for the builtin open() API.
+        It defaults to line buffered.
+
+        The returned wrapped file object provides an extra attribute
+        .encoding which allows querying the used encoding. This
+        attribute is only available if an encoding was specified as
+        parameter.
+
+    """
+    if encoding is not None and \
+       'b' not in mode:
+        # Force opening of the file in binary mode
+        mode = mode + 'b'
+    file = __builtin__.open(filename, mode, buffering)
+    if encoding is None:
+        # No codec requested: behave exactly like the builtin open().
+        return file
+    # lookup() returns (encoder, decoder, StreamReader, StreamWriter).
+    (e, d, sr, sw) = lookup(encoding)
+    srw = StreamReaderWriter(file, sr, sw, errors)
+    # Add attributes to simplify introspection
+    srw.encoding = encoding
+    return srw
+
+def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
+
+    """ Return a wrapped version of file which provides transparent
+        encoding translation.
+
+        Strings written to the wrapped file are interpreted according
+        to the given data_encoding and then written to the original
+        file as string using file_encoding. The intermediate encoding
+        will usually be Unicode but depends on the specified codecs.
+
+        Strings are read from the file using file_encoding and then
+        passed back to the caller as string using data_encoding.
+
+        If file_encoding is not given, it defaults to data_encoding.
+
+        errors may be given to define the error handling. It defaults
+        to 'strict' which causes ValueErrors to be raised in case an
+        encoding error occurs.
+
+        The returned wrapped file object provides two extra attributes
+        .data_encoding and .file_encoding which reflect the given
+        parameters of the same name. The attributes can be used for
+        introspection by Python programs.
+
+    """
+    if file_encoding is None:
+        file_encoding = data_encoding
+    # lookup() returns (encoder, decoder, StreamReader, StreamWriter);
+    # the frontend codec supplies the stateless functions, the backend
+    # codec supplies the stream factories.
+    encode, decode = lookup(data_encoding)[:2]
+    Reader, Writer = lookup(file_encoding)[2:]
+    sr = StreamRecoder(file,
+                       encode, decode, Reader, Writer,
+                       errors)
+    # Add attributes to simplify introspection
+    sr.data_encoding = data_encoding
+    sr.file_encoding = file_encoding
+    return sr
+
+### Helpers for codec lookup
+
+def getencoder(encoding):
+
+    """ Look up the codec for the given encoding and return
+        its encoder function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[0]
+
+def getdecoder(encoding):
+
+    """ Look up the codec for the given encoding and return
+        its decoder function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[1]
+
+def getreader(encoding):
+
+    """ Look up the codec for the given encoding and return
+        its StreamReader class or factory function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[2]
+
+def getwriter(encoding):
+
+    """ Look up the codec for the given encoding and return
+        its StreamWriter class or factory function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[3]
+
+### Helpers for charmap-based codecs
+
+def make_identity_dict(rng):
+
+    """ make_identity_dict(rng) -> dict
+
+        Return a dictionary where elements of the rng sequence are
+        mapped to themselves.
+
+    """
+    res = {}
+    for i in rng:
+        # Identity mapping: each element is its own value.
+        res[i]=i
+    return res
+
+def make_encoding_map(decoding_map):
+
+    """ Creates an encoding map from a decoding map.
+
+        If a target mapping in the decoding map occurs multiple
+        times, then that target is mapped to None (undefined mapping),
+        causing an exception when encountered by the charmap codec
+        during translation.
+
+        One example where this happens is cp875.py which decodes
+        multiple characters to \u001a.
+
+    """
+    m = {}
+    for k,v in decoding_map.items():
+        if not m.has_key(v):
+            m[v] = k
+        else:
+            # Ambiguous reverse mapping: mark as undefined.
+            m[v] = None
+    return m
+
+# Tell modulefinder that using codecs probably needs the encodings
+# package
+# (The import is inside a never-taken branch so it costs nothing at
+# runtime but is still visible to static import scanners.)
+_false = 0
+if _false:
+    import encodings
+
+### Tests
+
+# Running the module as a script demonstrates EncodedFile by recoding
+# stdin/stdout between Latin-1 and UTF-8.
+if __name__ == '__main__':
+
+    import sys
+
+    # Make stdout translate Latin-1 output into UTF-8 output
+    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
+
+    # Have stdin translate Latin-1 input into UTF-8 input
+    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
diff --git a/lib-python/2.2/codeop.py b/lib-python/2.2/codeop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/codeop.py
@@ -0,0 +1,171 @@
+r"""Utilities to compile possibly incomplete Python source code.
+
+This module provides two interfaces, broadly similar to the builtin
+function compile(), that take program text, a filename and a 'mode'
+and:
+
+- Return a code object if the command is complete and valid
+- Return None if the command is incomplete
+- Raise SyntaxError, ValueError or OverflowError if the command is a
+  syntax error (OverflowError and ValueError can be produced by
+  malformed literals).
+
+Approach:
+
+First, check if the source consists entirely of blank lines and
+comments; if so, replace it with 'pass', because the built-in
+parser doesn't always do the right thing for these.
+
+Compile three times: as is, with \n, and with \n\n appended.  If it
+compiles as is, it's complete.  If it compiles with one \n appended,
+we expect more.  If it doesn't compile either way, we compare the
+error we get when compiling with \n or \n\n appended.  If the errors
+are the same, the code is broken.  But if the errors are different, we
+expect more.  Not intuitive; not even guaranteed to hold in future
+releases; but this matches the compiler's behavior from Python 1.4
+through 2.2, at least.
+
+Caveat:
+
+It is possible (but not likely) that the parser stops parsing with a
+successful outcome before reaching the end of the source; in this
+case, trailing symbols may be ignored instead of causing an error.
+For example, a backslash followed by two newlines may be followed by
+arbitrary garbage.  This will be fixed once the API for the parser is
+better.
+
+The two interfaces are:
+
+compile_command(source, filename, symbol):
+
+    Compiles a single command in the manner described above.
+
+CommandCompiler():
+
+    Instances of this class have __call__ methods identical in
+    signature to compile_command; the difference is that if the
+    instance compiles program text containing a __future__ statement,
+    the instance 'remembers' and compiles all subsequent program texts
+    with the statement in force.
+
+The module also provides another class:
+
+Compile():
+
+    Instances of this class act like the built-in function compile,
+    but with 'memory' in the sense described above.
+"""
+
+import __future__
+
# All feature instances declared by the __future__ module; used by
# Compile.__call__ to detect future statements in compiled code.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

__all__ = ["compile_command", "Compile", "CommandCompiler"]
+
def _maybe_compile(compiler, source, filename, symbol):
    """Compile source with compiler(); return a code object if complete,
    None if incomplete, and re-raise the syntax error if broken.

    Implements the strategy described in the module docstring: compile
    the source as is, with one newline appended, and with two newlines
    appended, then compare the resulting errors.
    """
    # Check for source consisting of only blank lines and comments
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break               # Leave it alone
    else:
        if symbol != "eval":
            source = "pass"     # Replace it with a 'pass' statement

    err = err1 = err2 = None
    code = code1 = code2 = None

    # (Python 2 syntax: "except SyntaxError, err" binds the exception.)
    try:
        code = compiler(source, filename, symbol)
    except SyntaxError, err:
        pass

    try:
        code1 = compiler(source + "\n", filename, symbol)
    except SyntaxError, err1:
        pass

    try:
        code2 = compiler(source + "\n\n", filename, symbol)
    except SyntaxError, err2:
        pass

    if code:
        return code
    # Compare errors via their attribute dicts when available, so two
    # distinct exception instances describing the same error compare equal.
    try:
        e1 = err1.__dict__
    except AttributeError:
        e1 = err1
    try:
        e2 = err2.__dict__
    except AttributeError:
        e2 = err2
    # Same error with one and two newlines appended -> genuinely broken.
    if not code1 and e1 == e2:
        raise SyntaxError, err1
+
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a single interactive command.

    Arguments:

    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Returns a code object when the command is complete and valid, and
    None when it is incomplete.  Raises SyntaxError, ValueError or
    OverflowError when the command is a syntax error (OverflowError
    and ValueError can be produced by malformed literals).
    """
    return _maybe_compile(compile, source, filename, symbol)
+
class Compile:
    """A stateful stand-in for the built-in compile() function.

    Once a compiled text contains a future statement, the corresponding
    compiler flag is remembered and applied to every later compilation.
    """
    def __init__(self):
        self.flags = 0

    def __call__(self, source, filename, symbol):
        code_object = compile(source, filename, symbol, self.flags, 1)
        # Accumulate any future-statement flags the new code turned on.
        for feature in _features:
            flag = feature.compiler_flag
            if code_object.co_flags & flag:
                self.flags = self.flags | flag
        return code_object
+
class CommandCompiler:
    """Callable with the same signature as compile_command(), except
    that __future__ statements seen in earlier calls are remembered and
    stay in force for all subsequent compilations."""

    def __init__(self):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command, tracking __future__ statements.

        Arguments:

        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Returns a code object when the command is complete and valid,
        and None when it is incomplete.  Raises SyntaxError, ValueError
        or OverflowError when the command is a syntax error
        (OverflowError and ValueError can be produced by malformed
        literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
diff --git a/lib-python/2.2/colorsys.py b/lib-python/2.2/colorsys.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/colorsys.py
@@ -0,0 +1,123 @@
+"""Conversion functions between RGB and other color systems.
+
+This module provides two functions for each color system ABC:
+
+  rgb_to_abc(r, g, b) --> a, b, c
+  abc_to_rgb(a, b, c) --> r, g, b
+
+All inputs and outputs are triples of floats in the range [0.0...1.0].
+Inputs outside this range may cause exceptions or invalid outputs.
+
+Supported color systems:
+RGB: Red, Green, Blue components
+YIQ: used by composite video signals
+HLS: Hue, Luminance, Saturation
+HSV: Hue, Saturation, Value
+"""
+# References:
+# XXX Where's the literature?
+
+__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
+           "rgb_to_hsv","hsv_to_rgb"]
+
+# Some floating point constants
+
# Fractions of a full hue circle, used by hls_to_rgb() and _v() below.
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
+
+
+# YIQ: used by composite video signals (linear combinations of RGB)
+# Y: perceived grey level (0.0 == black, 1.0 == white)
+# I, Q: color components
+
def rgb_to_yiq(r, g, b):
    """Convert an (r, g, b) triple to (y, i, q); floats in [0.0, 1.0]."""
    luma = 0.30*r + 0.59*g + 0.11*b
    in_phase = 0.60*r - 0.28*g - 0.32*b
    quadrature = 0.21*r - 0.52*g + 0.31*b
    return (luma, in_phase, quadrature)
+
def yiq_to_rgb(y, i, q):
    """Convert a (y, i, q) triple to (r, g, b), clamped into [0.0, 1.0]."""
    r = y + 0.948262*i + 0.624013*q
    g = y - 0.276066*i - 0.639810*q
    b = y - 1.105450*i + 1.729860*q
    # Clamp every channel into the legal range.
    r = min(1.0, max(0.0, r))
    g = min(1.0, max(0.0, g))
    b = min(1.0, max(0.0, b))
    return (r, g, b)
+
+
+# HLS: Hue, Luminance, S???
+# H: position in the spectrum
+# L: ???
+# S: ???
+
def rgb_to_hls(r, g, b):
    """Convert an (r, g, b) triple to (h, l, s); floats in [0.0, 1.0]."""
    maxc = max(r, g, b)
    minc = min(r, g, b)
    l = (minc+maxc)/2.0
    if minc == maxc:
        # Grey: hue and saturation are both zero.
        return 0.0, l, 0.0
    delta = maxc - minc
    if l <= 0.5:
        s = delta / (maxc+minc)
    else:
        s = delta / (2.0-maxc-minc)
    rc = (maxc-r) / delta
    gc = (maxc-g) / delta
    bc = (maxc-b) / delta
    if r == maxc:
        h = bc-gc
    elif g == maxc:
        h = 2.0+rc-bc
    else:
        h = 4.0+gc-rc
    h = (h/6.0) % 1.0
    return h, l, s
+
def hls_to_rgb(h, l, s):
    """Convert an (h, l, s) triple to (r, g, b); floats in [0.0, 1.0]."""
    if s == 0.0:
        # Achromatic: every channel equals the luminance.
        return l, l, l
    if l <= 0.5:
        m2 = l * (1.0+s)
    else:
        m2 = l+s-(l*s)
    m1 = 2.0*l - m2
    r = _v(m1, m2, h+ONE_THIRD)
    g = _v(m1, m2, h)
    b = _v(m1, m2, h-ONE_THIRD)
    return (r, g, b)
+
def _v(m1, m2, hue):
    """Helper for hls_to_rgb(): one channel value for the given hue."""
    hue = hue % 1.0
    if hue < ONE_SIXTH:
        return m1 + (m2-m1)*hue*6.0
    elif hue < 0.5:
        return m2
    elif hue < TWO_THIRD:
        return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
    else:
        return m1
+
+
+# HSV: Hue, Saturation, Value(?)
+# H: position in the spectrum
+# S: ???
+# V: ???
+
def rgb_to_hsv(r, g, b):
    """Convert an (r, g, b) triple to (h, s, v); floats in [0.0, 1.0]."""
    maxc = max(r, g, b)
    minc = min(r, g, b)
    v = maxc
    if minc == maxc:
        # Grey: hue and saturation are both zero.
        return 0.0, 0.0, v
    delta = maxc - minc
    s = delta / maxc
    rc = (maxc-r) / delta
    gc = (maxc-g) / delta
    bc = (maxc-b) / delta
    if r == maxc:
        h = bc-gc
    elif g == maxc:
        h = 2.0+rc-bc
    else:
        h = 4.0+gc-rc
    h = (h/6.0) % 1.0
    return h, s, v
+
def hsv_to_rgb(h, s, v):
    """Convert an (h, s, v) triple to (r, g, b); floats in [0.0, 1.0]."""
    if s == 0.0:
        return v, v, v
    i = int(h*6.0) # sector index; int() truncation assumed, as before
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    if i%6 == 0:
        return v, t, p
    elif i == 1:
        return q, v, p
    elif i == 2:
        return p, v, t
    elif i == 3:
        return p, q, v
    elif i == 4:
        return t, p, v
    elif i == 5:
        return v, p, q
    # Unreachable for h in [0.0, 1.0]; out-of-range hues fall through
    # (returning None), matching the original behavior.
diff --git a/lib-python/2.2/commands.py b/lib-python/2.2/commands.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/commands.py
@@ -0,0 +1,84 @@
+"""Execute shell commands via os.popen() and return status, output.
+
+Interface summary:
+
+       import commands
+
+       outtext = commands.getoutput(cmd)
+       (exitstatus, outtext) = commands.getstatusoutput(cmd)
+       outtext = commands.getstatus(file)  # returns output of "ls -ld file"
+
+A trailing newline is removed from the output string.
+
+Encapsulates the basic operation:
+
+      pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+      text = pipe.read()
+      sts = pipe.close()
+
+ [Note:  it would be nice to add functions to interpret the exit status.]
+"""
+
+__all__ = ["getstatusoutput","getoutput","getstatus"]
+
+# Module 'commands'
+#
+# Various tools for executing commands and looking at their output and status.
+#
+# NB This only works (and is only relevant) for UNIX.
+
+
+# Get 'ls -l' status for an object into a string
+#
def getstatus(file):
    """Return output of "ls -ld <file>" in a string."""
    # Relies on the module-level mkarg() below for shell quoting
    # (mkarg prefixes its result with a space).  Unix only.
    return getoutput('ls -ld' + mkarg(file))
+
+
+# Get the output from a shell command into a string.
+# The exit status is ignored; a trailing newline is stripped.
+# Assume the command will work with '{ ... ; } 2>&1' around it..
+#
def getoutput(cmd):
    """Return the text (stdout and stderr combined) of executing cmd
    in a shell; the exit status is discarded."""
    status, text = getstatusoutput(cmd)
    return text
+
+
+# Ditto but preserving the exit status.
+# Returns a pair (sts, output)
+#
def getstatusoutput(cmd):
    """Return (status, output) of executing cmd in a shell."""
    import os
    # Wrap the command in '{ ...; } 2>&1' (Bourne-shell syntax) so that
    # stderr is merged into stdout.  Unix only; cmd is NOT quoted.
    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
    text = pipe.read()
    sts = pipe.close()
    # pipe.close() returns None on success; normalize to 0.
    if sts is None: sts = 0
    # Strip at most one trailing newline.
    if text[-1:] == '\n': text = text[:-1]
    return sts, text
+
+
+# Make command argument from directory and pathname (prefix space, add quotes).
+#
def mk2arg(head, x):
    """Quote (via mkarg) the pathname obtained by joining head and x."""
    import os
    joined = os.path.join(head, x)
    return mkarg(joined)
+
+
+# Make a shell command argument from a string.
+# Return a string beginning with a space followed by a shell-quoted
+# version of the argument.
+# Two strategies: enclose in single quotes if it contains none;
+# otherwise, enclose in double quotes and prefix quotable characters
+# with backslash.
+#
+def mkarg(x):
+    if '\'' not in x:
+        return ' \'' + x + '\''
+    s = ' "'
+    for c in x:
+        if c in '\\$"`':
+            s = s + '\\'
+        s = s + c
+    s = s + '"'
+    return s
diff --git a/lib-python/2.2/compileall.py b/lib-python/2.2/compileall.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compileall.py
@@ -0,0 +1,148 @@
+"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
+
+When called as a script with arguments, this compiles the directories
+given as arguments recursively; the -l option prevents it from
+recursing into directories.
+
+Without arguments, it compiles all modules on sys.path, without
+recursing into subdirectories.  (Even though it should do so for
+packages -- for now, you'll have to deal with packages separately.)
+
+See module py_compile for details of the actual byte-compilation.
+
+"""
+
+import os
+import stat
+import sys
+import py_compile
+
+__all__ = ["compile_dir","compile_path"]
+
def compile_dir(dir, maxlevels=10, ddir=None, force=0, rx=None):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      if given, purported directory name (this is the
               directory name that will show up in error messages)
    force:     if 1, force compilation, even if timestamps are up-to-date
    rx:        if given, a compiled regex object; files whose full path
               it matches are skipped

    Returns 1 if every module compiled, 0 if any failed.
    """
    print 'Listing', dir, '...'
    try:
        names = os.listdir(dir)
    except os.error:
        print "Can't list", dir
        names = []
    names.sort()
    success = 1
    for name in names:
        fullname = os.path.join(dir, name)
        if ddir:
            dfile = os.path.join(ddir, name)
        else:
            dfile = None
        if rx:
            mo = rx.search(fullname)
            if mo:
                continue
        if os.path.isfile(fullname):
            head, tail = name[:-3], name[-3:]
            if tail == '.py':
                # Target is .pyc normally, .pyo when running under -O.
                cfile = fullname + (__debug__ and 'c' or 'o')
                ftime = os.stat(fullname)[stat.ST_MTIME]
                try: ctime = os.stat(cfile)[stat.ST_MTIME]
                except os.error: ctime = 0
                # Skip when the compiled file is already newer.
                if (ctime > ftime) and not force: continue
                print 'Compiling', fullname, '...'
                try:
                    ok = py_compile.compile(fullname, None, dfile)
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    # XXX py_compile catches SyntaxErrors
                    if type(sys.exc_type) == type(''):
                        exc_type_name = sys.exc_type
                    else: exc_type_name = sys.exc_type.__name__
                    print 'Sorry:', exc_type_name + ':',
                    print sys.exc_value
                    success = 0
                else:
                    if ok == 0:
                        success = 0
        elif maxlevels > 0 and \
             name != os.curdir and name != os.pardir and \
             os.path.isdir(fullname) and \
             not os.path.islink(fullname):
            # Recurse into real subdirectories (not symlinks).
            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx):
                success = 0
    return success
+
def compile_path(skip_curdir=1, maxlevels=0, force=0):
    """Byte-compile all modules on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default true)
    maxlevels:   max recursion level (default 0)
    force: as for compile_dir() (default 0)

    Returns 1 on success, 0 if any directory failed.
    """
    success = 1
    for dir in sys.path:
        # An empty entry and os.curdir both denote the current directory.
        if (not dir or dir == os.curdir) and skip_curdir:
            print 'Skipping current directory'
        else:
            success = success and compile_dir(dir, maxlevels, None, force)
    return success
+
+def main():
+    """Script main program."""
+    import getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'lfd:x:')
+    except getopt.error, msg:
+        print msg
+        print "usage: python compileall.py [-l] [-f] [-d destdir] " \
+              "[-s regexp] [directory ...]"
+        print "-l: don't recurse down"
+        print "-f: force rebuild even if timestamps are up-to-date"
+        print "-d destdir: purported directory name for error messages"
+        print "   if no directory arguments, -l sys.path is assumed"
+        print "-x regexp: skip files matching the regular expression regexp"
+        print "   the regexp is search for in the full path of the file"
+        sys.exit(2)
+    maxlevels = 10
+    ddir = None
+    force = 0
+    rx = None
+    for o, a in opts:
+        if o == '-l': maxlevels = 0
+        if o == '-d': ddir = a
+        if o == '-f': force = 1
+        if o == '-x':
+            import re
+            rx = re.compile(a)
+    if ddir:
+        if len(args) != 1:
+            print "-d destdir require exactly one directory argument"
+            sys.exit(2)
+    success = 1
+    try:
+        if args:
+            for dir in args:
+                if not compile_dir(dir, maxlevels, ddir, force, rx):
+                    success = 0
+        else:
+            success = compile_path()
+    except KeyboardInterrupt:
+        print "\n[interrupt]"
+        success = 0
+    return success
+
if __name__ == '__main__':
    # main() returns truth for success; invert it so success exits 0.
    exit_status = not main()
    sys.exit(exit_status)
diff --git a/lib-python/2.2/compiler/__init__.py b/lib-python/2.2/compiler/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/__init__.py
@@ -0,0 +1,26 @@
+"""Package for parsing and compiling Python source code
+
+There are several functions defined at the top level that are imported
+from modules contained in the package.
+
+parse(buf, mode="exec") -> AST
+    Converts a string containing Python source code to an abstract
+    syntax tree (AST).  The AST is defined in compiler.ast.
+
+parseFile(path) -> AST
+    The same as parse(open(path))
+
+walk(ast, visitor, verbose=None)
+    Does a pre-order walk over the ast using the visitor instance.
+    See compiler.visitor for details.
+
+compile(source, filename, mode, flags=None, dont_inherit=None)
+    Returns a code object.  A replacement for the builtin compile() function.
+
+compileFile(filename)
+    Generates a .pyc file by compiling filename.
+"""
+
+from transformer import parse, parseFile
+from visitor import walk
+from pycodegen import compile, compileFile
diff --git a/lib-python/2.2/compiler/ast.py b/lib-python/2.2/compiler/ast.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/ast.py
@@ -0,0 +1,1241 @@
+"""Python abstract syntax node definitions
+
+This file is automatically generated.
+"""
+from types import TupleType, ListType
+from consts import CO_VARARGS, CO_VARKEYWORDS
+
def flatten(list):
    """Return a new list with all nested tuples/lists in *list* expanded
    into a flat sequence of leaf elements."""
    flat = []
    for elt in list:
        kind = type(elt)
        if kind is TupleType or kind is ListType:
            flat.extend(flatten(elt))
        else:
            flat.append(elt)
    return flat
+
def flatten_nodes(list):
    """Flatten *list* and keep only the Node instances."""
    result = []
    for candidate in flatten(list):
        if isinstance(candidate, Node):
            result.append(candidate)
    return result
+
def asList(nodes):
    """Recursively convert *nodes* into plain values: objects with an
    asList() method contribute its result, nested tuples/lists become
    tuples, and anything else passes through unchanged."""
    result = []
    for item in nodes:
        if hasattr(item, "asList"):
            result.append(item.asList())
            continue
        kind = type(item)
        if kind is TupleType or kind is ListType:
            result.append(tuple(asList(item)))
        else:
            result.append(item)
    return result
+
+nodes = {}
+
class Node: # an abstract base class
    """Abstract base class for all AST node classes in this module."""
    lineno = None # provide a lineno for nodes that don't have one
    def getType(self):
        pass # implemented by subclass
    def getChildren(self):
        pass # implemented by subclasses
    def asList(self):
        # Delegates to the module-level asList() helper.
        return tuple(asList(self.getChildren()))
    def getChildNodes(self):
        pass # implemented by subclasses
+
class EmptyNode(Node):
    """Node with no content of its own; inherits everything from Node."""
    pass
+
class Slice(Node):
    """Slice expression node: expr, flags, lower, upper (lower/upper may be None)."""
    nodes["slice"] = "Slice"
    def __init__(self, expr, flags, lower, upper):
        self.expr = expr
        self.flags = flags
        self.lower = lower
        self.upper = upper

    def getChildren(self):
        children = []
        children.append(self.expr)
        children.append(self.flags)
        children.append(self.lower)
        children.append(self.upper)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.append(self.expr)
        if self.lower is not None:            nodes.append(self.lower)
        if self.upper is not None:            nodes.append(self.upper)
        return tuple(nodes)

    def __repr__(self):
        return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
+
class Const(Node):
    """Constant literal node wrapping a plain Python value (no child nodes)."""
    nodes["const"] = "Const"
    def __init__(self, value):
        self.value = value

    def getChildren(self):
        return self.value,

    def getChildNodes(self):
        return ()

    def __repr__(self):
        return "Const(%s)" % (repr(self.value),)
+
class Raise(Node):
    """raise statement node: expr1, expr2, expr3 (each may be None)."""
    nodes["raise"] = "Raise"
    def __init__(self, expr1, expr2, expr3):
        self.expr1 = expr1
        self.expr2 = expr2
        self.expr3 = expr3

    def getChildren(self):
        children = []
        children.append(self.expr1)
        children.append(self.expr2)
        children.append(self.expr3)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        if self.expr1 is not None:            nodes.append(self.expr1)
        if self.expr2 is not None:            nodes.append(self.expr2)
        if self.expr3 is not None:            nodes.append(self.expr3)
        return tuple(nodes)

    def __repr__(self):
        return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
+
class For(Node):
    """for loop node: assign, list, body, else_ (else_ may be None)."""
    nodes["for"] = "For"
    def __init__(self, assign, list, body, else_):
        self.assign = assign
        self.list = list
        self.body = body
        self.else_ = else_

    def getChildren(self):
        children = []
        children.append(self.assign)
        children.append(self.list)
        children.append(self.body)
        children.append(self.else_)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.append(self.assign)
        nodes.append(self.list)
        nodes.append(self.body)
        if self.else_ is not None:            nodes.append(self.else_)
        return tuple(nodes)

    def __repr__(self):
        return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
+
class AssTuple(Node):
    """Tuple assignment-target node; self.nodes holds the child nodes."""
    nodes["asstuple"] = "AssTuple"
    def __init__(self, nodes):
        self.nodes = nodes

    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.nodes))
        return tuple(nodes)

    def __repr__(self):
        return "AssTuple(%s)" % (repr(self.nodes),)
+
class Mul(Node):
    """Multiplication node with left and right operand nodes."""
    nodes["mul"] = "Mul"
    # NOTE: the (left, right) tuple parameter is Python 2-only syntax.
    def __init__(self, (left, right)):
        self.left = left
        self.right = right

    def getChildren(self):
        return self.left, self.right

    def getChildNodes(self):
        return self.left, self.right

    def __repr__(self):
        return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
+
class Invert(Node):
    """Unary inversion node wrapping a single expr."""
    nodes["invert"] = "Invert"
    def __init__(self, expr):
        self.expr = expr

    def getChildren(self):
        return self.expr,

    def getChildNodes(self):
        return self.expr,

    def __repr__(self):
        return "Invert(%s)" % (repr(self.expr),)
+
class RightShift(Node):
    """Right-shift node with left and right operand nodes."""
    nodes["rightshift"] = "RightShift"
    # NOTE: the (left, right) tuple parameter is Python 2-only syntax.
    def __init__(self, (left, right)):
        self.left = left
        self.right = right

    def getChildren(self):
        return self.left, self.right

    def getChildNodes(self):
        return self.left, self.right

    def __repr__(self):
        return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
+
class AssList(Node):
    """List assignment-target node; self.nodes holds the child nodes."""
    nodes["asslist"] = "AssList"
    def __init__(self, nodes):
        self.nodes = nodes

    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.nodes))
        return tuple(nodes)

    def __repr__(self):
        return "AssList(%s)" % (repr(self.nodes),)
+
class From(Node):
    """from-import node: modname and names (no child nodes)."""
    nodes["from"] = "From"
    def __init__(self, modname, names):
        self.modname = modname
        self.names = names

    def getChildren(self):
        return self.modname, self.names

    def getChildNodes(self):
        return ()

    def __repr__(self):
        return "From(%s, %s)" % (repr(self.modname), repr(self.names))
+
class Getattr(Node):
    """Attribute access node: expr and attrname (only expr is a child node)."""
    nodes["getattr"] = "Getattr"
    def __init__(self, expr, attrname):
        self.expr = expr
        self.attrname = attrname

    def getChildren(self):
        return self.expr, self.attrname

    def getChildNodes(self):
        return self.expr,

    def __repr__(self):
        return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
+
class Dict(Node):
    """Dictionary display node; self.items holds the item nodes."""
    nodes["dict"] = "Dict"
    def __init__(self, items):
        self.items = items

    def getChildren(self):
        children = []
        children.extend(flatten(self.items))
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.items))
        return tuple(nodes)

    def __repr__(self):
        return "Dict(%s)" % (repr(self.items),)
+
class Module(Node):
    """Module root node: doc (the docstring value) and node (the body)."""
    nodes["module"] = "Module"
    def __init__(self, doc, node):
        self.doc = doc
        self.node = node

    def getChildren(self):
        return self.doc, self.node

    def getChildNodes(self):
        return self.node,

    def __repr__(self):
        return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
+
class Expression(Node):
    """Root node used for "eval"-mode compilation; wraps a single node."""
    # Expression is an artificial node class to support "eval"
    nodes["expression"] = "Expression"
    def __init__(self, node):
        self.node = node

    def getChildren(self):
        return self.node,

    def getChildNodes(self):
        return self.node,

    def __repr__(self):
        return "Expression(%s)" % (repr(self.node))
+
class UnaryAdd(Node):
    """Unary plus node wrapping a single expr."""
    nodes["unaryadd"] = "UnaryAdd"
    def __init__(self, expr):
        self.expr = expr

    def getChildren(self):
        return self.expr,

    def getChildNodes(self):
        return self.expr,

    def __repr__(self):
        return "UnaryAdd(%s)" % (repr(self.expr),)
+
class Ellipsis(Node):
    """Ellipsis node; carries no data and has no children."""
    nodes["ellipsis"] = "Ellipsis"
    def __init__(self, ):
        pass

    def getChildren(self):
        return ()

    def getChildNodes(self):
        return ()

    def __repr__(self):
        return "Ellipsis()"
+
class Print(Node):
    """print statement node: printed nodes and dest (dest may be None)."""
    nodes["print"] = "Print"
    def __init__(self, nodes, dest):
        self.nodes = nodes
        self.dest = dest

    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        children.append(self.dest)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.nodes))
        if self.dest is not None:            nodes.append(self.dest)
        return tuple(nodes)

    def __repr__(self):
        return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
class Import(Node):
    """import statement node; self.names holds the names (no child nodes)."""
    nodes["import"] = "Import"
    def __init__(self, names):
        self.names = names

    def getChildren(self):
        return self.names,

    def getChildNodes(self):
        return ()

    def __repr__(self):
        return "Import(%s)" % (repr(self.names),)
+
class Subscript(Node):
    """Subscript expression node: expr, flags, and the subs list."""
    nodes["subscript"] = "Subscript"
    def __init__(self, expr, flags, subs):
        self.expr = expr
        self.flags = flags
        self.subs = subs

    def getChildren(self):
        children = []
        children.append(self.expr)
        children.append(self.flags)
        children.extend(flatten(self.subs))
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.append(self.expr)
        nodes.extend(flatten_nodes(self.subs))
        return tuple(nodes)

    def __repr__(self):
        return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
+
class TryExcept(Node):
    """try/except statement node: body, handlers, else_ (else_ may be None)."""
    nodes["tryexcept"] = "TryExcept"
    def __init__(self, body, handlers, else_):
        self.body = body
        self.handlers = handlers
        self.else_ = else_

    def getChildren(self):
        children = []
        children.append(self.body)
        children.extend(flatten(self.handlers))
        children.append(self.else_)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.append(self.body)
        nodes.extend(flatten_nodes(self.handlers))
        if self.else_ is not None:            nodes.append(self.else_)
        return tuple(nodes)

    def __repr__(self):
        return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
+
class Or(Node):
    """Boolean 'or' node over a list of operand nodes in self.nodes."""
    nodes["or"] = "Or"
    def __init__(self, nodes):
        self.nodes = nodes

    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.nodes))
        return tuple(nodes)

    def __repr__(self):
        return "Or(%s)" % (repr(self.nodes),)
+
class Name(Node):
    """Name reference node; holds the identifier (no child nodes)."""
    nodes["name"] = "Name"
    def __init__(self, name):
        self.name = name

    def getChildren(self):
        return self.name,

    def getChildNodes(self):
        return ()

    def __repr__(self):
        return "Name(%s)" % (repr(self.name),)
+
class Function(Node):
    """Function definition node: name, argnames, defaults, flags, doc, code.

    varargs/kwargs are set to 1 when the corresponding CO_ flag is set
    in flags, otherwise left as None."""
    nodes["function"] = "Function"
    def __init__(self, name, argnames, defaults, flags, doc, code):
        self.name = name
        self.argnames = argnames
        self.defaults = defaults
        self.flags = flags
        self.doc = doc
        self.code = code
        self.varargs = self.kwargs = None
        # Mirror the CO_ flags as simple attributes for convenience.
        if flags & CO_VARARGS:
            self.varargs = 1
        if flags & CO_VARKEYWORDS:
            self.kwargs = 1



    def getChildren(self):
        children = []
        children.append(self.name)
        children.append(self.argnames)
        children.extend(flatten(self.defaults))
        children.append(self.flags)
        children.append(self.doc)
        children.append(self.code)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.extend(flatten_nodes(self.defaults))
        nodes.append(self.code)
        return tuple(nodes)

    def __repr__(self):
        return "Function(%s, %s, %s, %s, %s, %s)" % (repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
+
class Assert(Node):
    """assert statement node: test and fail (fail may be None)."""
    nodes["assert"] = "Assert"
    def __init__(self, test, fail):
        self.test = test
        self.fail = fail

    def getChildren(self):
        children = []
        children.append(self.test)
        children.append(self.fail)
        return tuple(children)

    def getChildNodes(self):
        nodes = []
        nodes.append(self.test)
        if self.fail is not None:            nodes.append(self.fail)
        return tuple(nodes)

    def __repr__(self):
        return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
+
+class Return(Node):
+    """AST node for a 'return value' statement."""
+    nodes["return"] = "Return"
+    def __init__(self, value):
+        self.value = value
+
+    def getChildren(self):
+        return self.value,
+
+    def getChildNodes(self):
+        return self.value,
+
+    def __repr__(self):
+        return "Return(%s)" % (repr(self.value),)
+
+class Power(Node):
+    """Binary '**' expression; constructed from a (left, right) pair."""
+    nodes["power"] = "Power"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Power((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Exec(Node):
+    """AST node for an 'exec' statement; locals/globals may be None."""
+    nodes["exec"] = "Exec"
+    def __init__(self, expr, locals, globals):
+        self.expr = expr
+        self.locals = locals
+        self.globals = globals
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.append(self.locals)
+        children.append(self.globals)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        if self.locals is not None:            nodes.append(self.locals)
+        if self.globals is not None:            nodes.append(self.globals)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
+
+class Stmt(Node):
+    """A suite: an ordered sequence of statement nodes."""
+    nodes["stmt"] = "Stmt"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Stmt(%s)" % (repr(self.nodes),)
+
+class Sliceobj(Node):
+    """Holds the component expressions of a slice object."""
+    nodes["sliceobj"] = "Sliceobj"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Sliceobj(%s)" % (repr(self.nodes),)
+
+class Break(Node):
+    """AST leaf node for a 'break' statement (no children)."""
+    nodes["break"] = "Break"
+    def __init__(self, ):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Break()"
+
+class Bitand(Node):
+    """N-ary '&' expression over self.nodes."""
+    nodes["bitand"] = "Bitand"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitand(%s)" % (repr(self.nodes),)
+
+class FloorDiv(Node):
+    """Binary '//' expression; constructed from a (left, right) pair."""
+    nodes["floordiv"] = "FloorDiv"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
+
+class TryFinally(Node):
+    """AST node for a 'try/finally' statement (body, final)."""
+    nodes["tryfinally"] = "TryFinally"
+    def __init__(self, body, final):
+        self.body = body
+        self.final = final
+
+    def getChildren(self):
+        return self.body, self.final
+
+    def getChildNodes(self):
+        return self.body, self.final
+
+    def __repr__(self):
+        return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
+
+class Not(Node):
+    """Unary 'not expr' expression."""
+    nodes["not"] = "Not"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Not(%s)" % (repr(self.expr),)
+
+class Class(Node):
+    """AST node for a 'class' statement: name, base exprs, docstring, body."""
+    nodes["class"] = "Class"
+    def __init__(self, name, bases, doc, code):
+        self.name = name
+        self.bases = bases
+        self.doc = doc
+        self.code = code
+
+    def getChildren(self):
+        children = []
+        children.append(self.name)
+        children.extend(flatten(self.bases))
+        children.append(self.doc)
+        children.append(self.code)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.bases))
+        nodes.append(self.code)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Class(%s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code))
+
+class Mod(Node):
+    """Binary '%' expression; constructed from a (left, right) pair."""
+    nodes["mod"] = "Mod"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Printnl(Node):
+    """'print' statement ending with a newline; dest is optional (may be None)."""
+    nodes["printnl"] = "Printnl"
+    def __init__(self, nodes, dest):
+        self.nodes = nodes
+        self.dest = dest
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        children.append(self.dest)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        if self.dest is not None:            nodes.append(self.dest)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
+class Tuple(Node):
+    """Tuple-display node over self.nodes."""
+    nodes["tuple"] = "Tuple"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Tuple(%s)" % (repr(self.nodes),)
+
+class AssAttr(Node):
+    """Assignment target of the form expr.attrname; attrname/flags are values, not nodes."""
+    nodes["assattr"] = "AssAttr"
+    def __init__(self, expr, attrname, flags):
+        self.expr = expr
+        self.attrname = attrname
+        self.flags = flags
+
+    def getChildren(self):
+        return self.expr, self.attrname, self.flags
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
+
+class Keyword(Node):
+    """(name, expr) pair node; the name string is not a child node."""
+    nodes["keyword"] = "Keyword"
+    def __init__(self, name, expr):
+        self.name = name
+        self.expr = expr
+
+    def getChildren(self):
+        return self.name, self.expr
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
+
+class AugAssign(Node):
+    """Augmented assignment 'node op= expr'; op is an operator string."""
+    nodes["augassign"] = "AugAssign"
+    def __init__(self, node, op, expr):
+        self.node = node
+        self.op = op
+        self.expr = expr
+
+    def getChildren(self):
+        return self.node, self.op, self.expr
+
+    def getChildNodes(self):
+        return self.node, self.expr
+
+    def __repr__(self):
+        return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
+
+class List(Node):
+    """List-display node over self.nodes."""
+    nodes["list"] = "List"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "List(%s)" % (repr(self.nodes),)
+
+class Yield(Node):
+    """AST node for a 'yield value' statement."""
+    nodes["yield"] = "Yield"
+    def __init__(self, value):
+        self.value = value
+
+    def getChildren(self):
+        return self.value,
+
+    def getChildNodes(self):
+        return self.value,
+
+    def __repr__(self):
+        return "Yield(%s)" % (repr(self.value),)
+
+class LeftShift(Node):
+    """Binary '<<' expression; constructed from a (left, right) pair."""
+    nodes["leftshift"] = "LeftShift"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
+
+class AssName(Node):
+    """Name assignment target (leaf node); name/flags are values, not nodes."""
+    nodes["assname"] = "AssName"
+    def __init__(self, name, flags):
+        self.name = name
+        self.flags = flags
+
+    def getChildren(self):
+        return self.name, self.flags
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
+
+class While(Node):
+    """AST node for a 'while' loop; else_ may be None."""
+    nodes["while"] = "While"
+    def __init__(self, test, body, else_):
+        self.test = test
+        self.body = body
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.append(self.test)
+        children.append(self.body)
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.test)
+        nodes.append(self.body)
+        if self.else_ is not None:            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
+
+class Continue(Node):
+    """AST leaf node for a 'continue' statement (no children)."""
+    nodes["continue"] = "Continue"
+    def __init__(self, ):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Continue()"
+
+class Backquote(Node):
+    """Backquote (`expr`) expression node."""
+    nodes["backquote"] = "Backquote"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Backquote(%s)" % (repr(self.expr),)
+
+class Discard(Node):
+    """Expression statement whose value is discarded."""
+    nodes["discard"] = "Discard"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Discard(%s)" % (repr(self.expr),)
+
+class Div(Node):
+    """Binary '/' expression; constructed from a (left, right) pair."""
+    nodes["div"] = "Div"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Div((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Assign(Node):
+    """Assignment statement: target node(s) in self.nodes, value in self.expr."""
+    nodes["assign"] = "Assign"
+    def __init__(self, nodes, expr):
+        self.nodes = nodes
+        self.expr = expr
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        children.append(self.expr)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        nodes.append(self.expr)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
+
+class Lambda(Node):
+    """'lambda' expression; flags carry CO_VARARGS/CO_VARKEYWORDS bits."""
+    nodes["lambda"] = "Lambda"
+    def __init__(self, argnames, defaults, flags, code):
+        self.argnames = argnames
+        self.defaults = defaults
+        self.flags = flags
+        self.code = code
+        # convenience flags decoded from the code-object flag bits
+        self.varargs = self.kwargs = None
+        if flags & CO_VARARGS:
+            self.varargs = 1
+        if flags & CO_VARKEYWORDS:
+            self.kwargs = 1
+
+
+    def getChildren(self):
+        children = []
+        children.append(self.argnames)
+        children.extend(flatten(self.defaults))
+        children.append(self.flags)
+        children.append(self.code)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.defaults))
+        nodes.append(self.code)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
+
+class And(Node):
+    """N-ary 'and' boolean expression over self.nodes."""
+    nodes["and"] = "And"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "And(%s)" % (repr(self.nodes),)
+
+class Compare(Node):
+    """Comparison chain: leftmost expr followed by the clauses in self.ops."""
+    nodes["compare"] = "Compare"
+    def __init__(self, expr, ops):
+        self.expr = expr
+        self.ops = ops
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.extend(flatten(self.ops))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        nodes.extend(flatten_nodes(self.ops))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
+
+class Bitor(Node):
+    """N-ary '|' expression over self.nodes."""
+    nodes["bitor"] = "Bitor"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitor(%s)" % (repr(self.nodes),)
+
+class Bitxor(Node):
+    """N-ary '^' expression over self.nodes."""
+    nodes["bitxor"] = "Bitxor"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitxor(%s)" % (repr(self.nodes),)
+
+class CallFunc(Node):
+    """Function call; star_args/dstar_args are the optional */** arguments (may be None)."""
+    nodes["callfunc"] = "CallFunc"
+    def __init__(self, node, args, star_args = None, dstar_args = None):
+        self.node = node
+        self.args = args
+        self.star_args = star_args
+        self.dstar_args = dstar_args
+
+    def getChildren(self):
+        children = []
+        children.append(self.node)
+        children.extend(flatten(self.args))
+        children.append(self.star_args)
+        children.append(self.dstar_args)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.node)
+        nodes.extend(flatten_nodes(self.args))
+        if self.star_args is not None:            nodes.append(self.star_args)
+        if self.dstar_args is not None:            nodes.append(self.dstar_args)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
+
+class Global(Node):
+    """'global' declaration; the names are values, not child nodes."""
+    nodes["global"] = "Global"
+    def __init__(self, names):
+        self.names = names
+
+    def getChildren(self):
+        return self.names,
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Global(%s)" % (repr(self.names),)
+
+class Add(Node):
+    """Binary '+' expression; constructed from a (left, right) pair."""
+    nodes["add"] = "Add"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Add((%s, %s))" % (repr(self.left), repr(self.right))
+
+class ListCompIf(Node):
+    """An 'if test' qualifier inside a list comprehension."""
+    nodes["listcompif"] = "ListCompIf"
+    def __init__(self, test):
+        self.test = test
+
+    def getChildren(self):
+        return self.test,
+
+    def getChildNodes(self):
+        return self.test,
+
+    def __repr__(self):
+        return "ListCompIf(%s)" % (repr(self.test),)
+
+class Sub(Node):
+    """Binary '-' expression; constructed from a (left, right) pair."""
+    nodes["sub"] = "Sub"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Pass(Node):
+    """AST leaf node for a 'pass' statement (no children)."""
+    nodes["pass"] = "Pass"
+    def __init__(self, ):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Pass()"
+
+class UnarySub(Node):
+    """Unary '-' expression."""
+    nodes["unarysub"] = "UnarySub"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "UnarySub(%s)" % (repr(self.expr),)
+
+class If(Node):
+    """'if/elif/else' statement; tests holds the test clauses, else_ may be None."""
+    nodes["if"] = "If"
+    def __init__(self, tests, else_):
+        self.tests = tests
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.tests))
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.tests))
+        if self.else_ is not None:            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
+
+class ListComp(Node):
+    """List comprehension: result expression plus qualifiers in self.quals."""
+    nodes["listcomp"] = "ListComp"
+    def __init__(self, expr, quals):
+        self.expr = expr
+        self.quals = quals
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.extend(flatten(self.quals))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        nodes.extend(flatten_nodes(self.quals))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
+
+class ListCompFor(Node):
+    """A 'for assign in list' comprehension qualifier with its 'if' clauses."""
+    nodes["listcompfor"] = "ListCompFor"
+    def __init__(self, assign, list, ifs):
+        self.assign = assign
+        self.list = list
+        self.ifs = ifs
+
+    def getChildren(self):
+        children = []
+        children.append(self.assign)
+        children.append(self.list)
+        children.extend(flatten(self.ifs))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.assign)
+        nodes.append(self.list)
+        nodes.extend(flatten_nodes(self.ifs))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
+
+# Resolve the registry built by the class bodies above: replace each
+# class-name string in 'nodes' with the class object itself, so that
+# 'nodes' maps node tag -> node class.
+klasses = globals()
+for k in nodes.keys():
+    nodes[k] = klasses[nodes[k]]
diff --git a/lib-python/2.2/compiler/consts.py b/lib-python/2.2/compiler/consts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/consts.py
@@ -0,0 +1,19 @@
+# operation flags
+OP_ASSIGN = 'OP_ASSIGN'
+OP_DELETE = 'OP_DELETE'
+OP_APPLY = 'OP_APPLY'
+
+# symbol scope classifications
+# NOTE(review): presumably consumed by the compiler's symbol-table pass —
+# confirm against compiler/symbols.py
+SC_LOCAL = 1
+SC_GLOBAL = 2
+SC_FREE = 3
+SC_CELL = 4
+SC_UNKNOWN = 5
+
+# code-object flag bits
+# NOTE(review): values appear to mirror CPython's co_flags constants
+# (compile.h) — confirm before changing
+CO_OPTIMIZED = 0x0001
+CO_NEWLOCALS = 0x0002
+CO_VARARGS = 0x0004
+CO_VARKEYWORDS = 0x0008
+CO_NESTED = 0x0010
+CO_GENERATOR = 0x0020
+CO_GENERATOR_ALLOWED = 0x1000
+CO_FUTURE_DIVISION = 0x2000
diff --git a/lib-python/2.2/compiler/future.py b/lib-python/2.2/compiler/future.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/future.py
@@ -0,0 +1,72 @@
+"""Parser for future statements
+
+"""
+
+from compiler import ast, walk
+
+def is_future(stmt):
+    """Return true if statement is a well-formed future statement"""
+    # Only checks that stmt is a From node importing from __future__;
+    # the individual feature names are validated in FutureParser.check_stmt.
+    if not isinstance(stmt, ast.From):
+        return 0
+    if stmt.modname == "__future__":
+        return 1
+    else:
+        return 0
+
+class FutureParser:
+    """Visitor that collects __future__ features from the top of a module."""
+
+    # feature names this compiler recognizes
+    features = ("nested_scopes", "generators", "division")
+
+    def __init__(self):
+        self.found = {} # set
+
+    def visitModule(self, node):
+        # Scan the module's statements in order; stop at the first
+        # statement that is not a future import.
+        stmt = node.node
+        for s in stmt.nodes:
+            if not self.check_stmt(s):
+                break
+
+    def check_stmt(self, stmt):
+        # Record each recognized feature; reject unknown ones.  Marks the
+        # statement with valid_future so BadFutureParser will skip it.
+        if is_future(stmt):
+            for name, asname in stmt.names:
+                if name in self.features:
+                    self.found[name] = 1
+                else:
+                    raise SyntaxError, \
+                          "future feature %s is not defined" % name
+            stmt.valid_future = 1
+            return 1
+        return 0
+
+    def get_features(self):
+        """Return list of features enabled by future statements"""
+        return self.found.keys()
+
+class BadFutureParser:
+    """Check for invalid future statements"""
+
+    def visitFrom(self, node):
+        # A __future__ import not marked valid_future by FutureParser
+        # (presumably because it did not appear at the top of the module)
+        # is a syntax error.
+        if hasattr(node, 'valid_future'):
+            return
+        if node.modname != "__future__":
+            return
+        raise SyntaxError, "invalid future statement"
+
+def find_futures(node):
+    """Return the future features enabled in the tree rooted at node.
+
+    Walks once to collect valid leading future statements and once more
+    to reject misplaced ones (raises SyntaxError).
+    """
+    p1 = FutureParser()
+    p2 = BadFutureParser()
+    walk(node, p1)
+    walk(node, p2)
+    return p1.get_features()
+
+if __name__ == "__main__":
+    # Debug driver: parse each file named on the command line and print
+    # the future features found at its top.
+    import sys
+    from compiler import parseFile, walk
+
+    for file in sys.argv[1:]:
+        print file
+        tree = parseFile(file)
+        v = FutureParser()
+        walk(tree, v)
+        print v.found
+        print
diff --git a/lib-python/2.2/compiler/misc.py b/lib-python/2.2/compiler/misc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/misc.py
@@ -0,0 +1,74 @@
+import types
+
+def flatten(tup):
+    """Return a flat list of the elements of tup, recursing into nested
+    tuples only (lists and other sequences are kept as single elements)."""
+    elts = []
+    for elt in tup:
+        if type(elt) == types.TupleType:
+            elts = elts + flatten(elt)
+        else:
+            elts.append(elt)
+    return elts
+
+class Set:
+    """Minimal set implemented on a dict (predates the built-in set type).
+    Elements must be hashable; iteration order is the dict's order."""
+    def __init__(self):
+        self.elts = {}
+    def __len__(self):
+        return len(self.elts)
+    def __contains__(self, elt):
+        return self.elts.has_key(elt)
+    def add(self, elt):
+        self.elts[elt] = elt
+    def elements(self):
+        # return the members as a list
+        return self.elts.keys()
+    def has_elt(self, elt):
+        return self.elts.has_key(elt)
+    def remove(self, elt):
+        del self.elts[elt]
+    def copy(self):
+        # shallow copy: shares the element objects, not the dict
+        c = Set()
+        c.elts.update(self.elts)
+        return c
+
+class Stack:
+    """Simple LIFO stack over a list; pop is bound directly to list.pop."""
+    def __init__(self):
+        self.stack = []
+        self.pop = self.stack.pop
+    def __len__(self):
+        return len(self.stack)
+    def push(self, elt):
+        self.stack.append(elt)
+    def top(self):
+        # top of stack without removing it
+        return self.stack[-1]
+    def __getitem__(self, index): # needed by visitContinue()
+        return self.stack[index]
+
+MANGLE_LEN = 256 # magic constant from compile.c
+
+def mangle(name, klass):
+    """Apply class-private name mangling: __name inside class Klass
+    becomes _Klass__name.  Returns name unchanged unless it starts with
+    '__', does not end with '__', and the result would fit in MANGLE_LEN."""
+    if not name.startswith('__'):
+        return name
+    if len(name) + 2 >= MANGLE_LEN:
+        return name
+    if name.endswith('__'):
+        return name
+    try:
+        # strip leading underscores from the class name; a class name of
+        # all underscores means no mangling (IndexError path)
+        i = 0
+        while klass[i] == '_':
+            i = i + 1
+    except IndexError:
+        return name
+    klass = klass[i:]
+
+    # truncate the class part if the combined name would be too long
+    tlen = len(klass) + len(name)
+    if tlen > MANGLE_LEN:
+        klass = klass[:MANGLE_LEN-tlen]
+
+    return "_%s%s" % (klass, name)
+
+def set_filename(filename, tree):
+    """Set the filename attribute to filename on every node in tree"""
+    # breadth-first walk via getChildNodes()
+    worklist = [tree]
+    while worklist:
+        node = worklist.pop(0)
+        node.filename = filename
+        worklist.extend(node.getChildNodes())
diff --git a/lib-python/2.2/compiler/pyassem.py b/lib-python/2.2/compiler/pyassem.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/pyassem.py
@@ -0,0 +1,824 @@
+"""A flow graph representation for Python bytecode"""
+
+import dis
+import new
+import string
+import sys
+import types
+
+from compiler import misc
+from compiler.consts import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, \
+     CO_VARKEYWORDS
+
+def xxx_sort(l):
+    """Return a copy of block list l sorted by block id (bid)."""
+    l = l[:]
+    def sorter(a, b):
+        return cmp(a.bid, b.bid)
+    l.sort(sorter)
+    return l
+
+class FlowGraph:
+    """Control-flow graph of Block objects with distinguished entry and
+    exit blocks; instructions are emitted into self.current."""
+    def __init__(self):
+        self.current = self.entry = Block()
+        self.exit = Block("exit")
+        self.blocks = misc.Set()
+        self.blocks.add(self.entry)
+        self.blocks.add(self.exit)
+
+    def startBlock(self, block):
+        """Make block the current target for emit()."""
+        if self._debug:
+            if self.current:
+                print "end", repr(self.current)
+                print "    next", self.current.next
+                print "   ", self.current.get_children()
+            print repr(block)
+        self.current = block
+
+    def nextBlock(self, block=None):
+        # XXX think we need to specify when there is implicit transfer
+        # from one block to the next.  might be better to represent this
+        # with explicit JUMP_ABSOLUTE instructions that are optimized
+        # out when they are unnecessary.
+        #
+        # I think this strategy works: each block has a child
+        # designated as "next" which is returned as the last of the
+        # children.  because the nodes in a graph are emitted in
+        # reverse post order, the "next" block will always be emitted
+        # immediately after its parent.
+        # Worry: maintaining this invariant could be tricky
+        if block is None:
+            block = self.newBlock()
+
+        # Note: If the current block ends with an unconditional
+        # control transfer, then it is incorrect to add an implicit
+        # transfer to the block graph.  The current code requires
+        # these edges to get the blocks emitted in the right order,
+        # however. :-(  If a client needs to remove these edges, call
+        # pruneEdges().
+
+        self.current.addNext(block)
+        self.startBlock(block)
+
+    def newBlock(self):
+        """Create a fresh Block, register it in the graph, and return it."""
+        b = Block()
+        self.blocks.add(b)
+        return b
+
+    def startExitBlock(self):
+        self.startBlock(self.exit)
+
+    _debug = 0
+
+    def _enable_debug(self):
+        self._debug = 1
+
+    def _disable_debug(self):
+        self._debug = 0
+
+    def emit(self, *inst):
+        """Append instruction tuple inst to the current block, recording
+        outgoing edges for RETURN_VALUE and for Block-valued operands."""
+        if self._debug:
+            print "\t", inst
+        if inst[0] == 'RETURN_VALUE':
+            self.current.addOutEdge(self.exit)
+        if len(inst) == 2 and isinstance(inst[1], Block):
+            self.current.addOutEdge(inst[1])
+        self.current.emit(inst)
+
+    def getBlocksInOrder(self):
+        """Return the blocks in reverse postorder
+
+        i.e. each node appears before all of its successors
+        """
+        # XXX make sure every node that doesn't have an explicit next
+        # is set so that next points to exit
+        for b in self.blocks.elements():
+            if b is self.exit:
+                continue
+            if not b.next:
+                b.addNext(self.exit)
+        order = dfs_postorder(self.entry, {})
+        order.reverse()
+        self.fixupOrder(order, self.exit)
+        # hack alert
+        if not self.exit in order:
+            order.append(self.exit)
+
+        return order
+
+    def fixupOrder(self, blocks, default_next):
+        """Fixup bad order introduced by DFS."""
+
+        # XXX This is a total mess.  There must be a better way to get
+        # the code blocks in the right order.
+
+        self.fixupOrderHonorNext(blocks, default_next)
+        self.fixupOrderForward(blocks, default_next)
+
+    def fixupOrderHonorNext(self, blocks, default_next):
+        """Fix one problem with DFS.
+
+        The DFS uses child block, but doesn't know about the special
+        "next" block.  As a result, the DFS can order blocks so that a
+        block isn't next to the right block for implicit control
+        transfers.
+        """
+        index = {}
+        for i in range(len(blocks)):
+            index[blocks[i]] = i
+
+        for i in range(0, len(blocks) - 1):
+            b = blocks[i]
+            n = blocks[i + 1]
+            if not b.next or b.next[0] == default_next or b.next[0] == n:
+                continue
+            # The blocks are in the wrong order.  Find the chain of
+            # blocks to insert where they belong.
+            cur = b
+            chain = []
+            elt = cur
+            while elt.next and elt.next[0] != default_next:
+                chain.append(elt.next[0])
+                elt = elt.next[0]
+            # Now remove the blocks in the chain from the current
+            # block list, so that they can be re-inserted.
+            l = []
+            for b in chain:
+                assert index[b] > i
+                l.append((index[b], b))
+            l.sort()
+            l.reverse()
+            for j, b in l:
+                del blocks[index[b]]
+            # Insert the chain in the proper location
+            blocks[i:i + 1] = [cur] + chain
+            # Finally, re-compute the block indexes
+            for i in range(len(blocks)):
+                index[blocks[i]] = i
+
+    def fixupOrderForward(self, blocks, default_next):
+        """Make sure all JUMP_FORWARDs jump forward"""
+        # Partition blocks into chains ending at a default_next edge,
+        # then repeatedly move a chain earlier whenever a JUMP_FORWARD
+        # in a later chain targets it.
+        index = {}
+        chains = []
+        cur = []
+        for b in blocks:
+            index[b] = len(chains)
+            cur.append(b)
+            if b.next and b.next[0] == default_next:
+                chains.append(cur)
+                cur = []
+        chains.append(cur)
+
+        while 1:
+            constraints = []
+
+            for i in range(len(chains)):
+                l = chains[i]
+                for b in l:
+                    for c in b.get_children():
+                        if index[c] < i:
+                            forward_p = 0
+                            for inst in b.insts:
+                                if inst[0] == 'JUMP_FORWARD':
+                                    if inst[1] == c:
+                                        forward_p = 1
+                            if not forward_p:
+                                continue
+                            constraints.append((index[c], i))
+
+            if not constraints:
+                break
+
+            # XXX just do one for now
+            # do swaps to get things in the right order
+            goes_before, a_chain = constraints[0]
+            assert a_chain > goes_before
+            c = chains[a_chain]
+            chains.remove(c)
+            chains.insert(goes_before, c)
+
+        del blocks[:]
+        for c in chains:
+            for b in c:
+                blocks.append(b)
+
+    def getBlocks(self):
+        return self.blocks.elements()
+
+    def getRoot(self):
+        """Return nodes appropriate for use with dominator"""
+        return self.entry
+
+    def getContainedGraphs(self):
+        """Collect the flow graphs contained in this graph's blocks."""
+        l = []
+        for b in self.getBlocks():
+            l.extend(b.getContainedGraphs())
+        return l
+
+def dfs_postorder(b, seen):
+    """Depth-first search of tree rooted at b, return in postorder"""
+    # seen is the shared visited-dict, mutated across recursive calls
+    order = []
+    seen[b] = b
+    for c in b.get_children():
+        if seen.has_key(c):
+            continue
+        order = order + dfs_postorder(c, seen)
+    order.append(b)
+    return order
+
+class Block:
+    """A basic block: a list of instruction tuples plus in/out edges and
+    an (at most one) implicit-fallthrough 'next' block.
+    NOTE(review): this class continues beyond this chunk of the patch."""
+    _count = 0  # class-wide counter used to assign unique bids
+
+    def __init__(self, label=''):
+        self.insts = []
+        self.inEdges = misc.Set()
+        self.outEdges = misc.Set()
+        self.label = label
+        self.bid = Block._count
+        self.next = []
+        Block._count = Block._count + 1
+
+    def __repr__(self):
+        if self.label:
+            return "<block %s id=%d>" % (self.label, self.bid)
+        else:
+            return "<block id=%d>" % (self.bid)
+
+    def __str__(self):
+        insts = map(str, self.insts)
+        return "<block %s %d:\n%s>" % (self.label, self.bid,
+                                       string.join(insts, '\n'))
+
+    def emit(self, inst):
+        # any JUMP* instruction's operand is a target block: record the edge
+        op = inst[0]
+        if op[:4] == 'JUMP':
+            self.outEdges.add(inst[1])
+        self.insts.append(inst)
+
+    def getInstructions(self):
+        return self.insts
+
+    def addInEdge(self, block):
+        self.inEdges.add(block)
+
+    def addOutEdge(self, block):
+        self.outEdges.add(block)
+
+    def addNext(self, block):
+        # at most one fallthrough successor is allowed
+        self.next.append(block)
+        assert len(self.next) == 1, map(str, self.next)
+
+    # opcodes after which control never falls through to 'next'
+    _uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS',
+                        'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
+
+    def pruneNext(self):
+        """Remove bogus edge for unconditional transfers
+
+        Each block has a next edge that accounts for implicit control
+        transfers, e.g. from a JUMP_IF_FALSE to the block that will be
+        executed if the test is true.
+
+        These edges must remain for the current assembler code to
+        work. If they are removed, the dfs_postorder gets things in
+        weird orders.  However, they shouldn't be there for other
+        purposes, e.g. conversion to SSA form.  This method will
+        remove the next edge when it follows an unconditional control
+        transfer.
+        """
+        try:
+            op, arg = self.insts[-1]
+        except (IndexError, ValueError):
+            return
+        if op in self._uncond_transfer:
+            self.next = []
+
+    def get_children(self):
+        # successors: explicit out-edges plus the fallthrough block last,
+        # de-duplicating the fallthrough if it also appears as an out-edge
+        if self.next and self.next[0] in self.outEdges:
+            self.outEdges.remove(self.next[0])
+        return self.outEdges.elements() + self.next
+
+    def getContainedGraphs(self):
+        """Return all graphs contained within this block.
+
+        For example, a MAKE_FUNCTION block will contain a reference to
+        the graph for the function body.
+        """
+        contained = []
+        for inst in self.insts:
+            if len(inst) == 1:
+                continue
+            op = inst[1]
+            if hasattr(op, 'graph'):
+                contained.append(op.graph)
+        return contained
+
+# flags for code objects
+
+# the FlowGraph is transformed in place; it exists in one of these states
+# (see PyFlowGraph.getCode for the RAW -> FLAT -> CONV -> DONE pipeline)
+RAW = "RAW"
+FLAT = "FLAT"
+CONV = "CONV"
+DONE = "DONE"
+
+class PyFlowGraph(FlowGraph):
+    """A flow graph that can assemble itself into a Python code object.
+
+    The graph is transformed in place through the states
+    RAW -> FLAT -> CONV -> DONE; getCode() drives the pipeline.
+    """
+    super_init = FlowGraph.__init__
+
+    def __init__(self, name, filename, args=(), optimized=0, klass=None):
+        self.super_init()
+        self.name = name
+        self.filename = filename
+        self.docstring = None
+        self.args = args # XXX
+        self.argcount = getArgCount(args)
+        self.klass = klass
+        if optimized:
+            self.flags = CO_OPTIMIZED | CO_NEWLOCALS
+        else:
+            self.flags = 0
+        self.consts = []
+        self.names = []
+        # Free variables found by the symbol table scan, including
+        # variables used only in nested scopes, are included here.
+        self.freevars = []
+        self.cellvars = []
+        # The closure list is used to track the order of cell
+        # variables and free variables in the resulting code object.
+        # The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
+        # kinds of variables.
+        self.closure = []
+        # NOTE(review): 'or []' is redundant -- list(args) is always a list.
+        self.varnames = list(args) or []
+        # Replace TupleArg placeholders with their ".N" internal names.
+        for i in range(len(self.varnames)):
+            var = self.varnames[i]
+            if isinstance(var, TupleArg):
+                self.varnames[i] = var.getName()
+        self.stage = RAW
+
+    def setDocstring(self, doc):
+        self.docstring = doc
+
+    def setFlag(self, flag):
+        self.flags = self.flags | flag
+        # The *args slot is not counted in co_argcount.
+        if flag == CO_VARARGS:
+            self.argcount = self.argcount - 1
+
+    def checkFlag(self, flag):
+        # Returns 1 if the flag is set, None (falls off the end) otherwise.
+        if self.flags & flag:
+            return 1
+
+    def setFreeVars(self, names):
+        self.freevars = list(names)
+
+    def setCellVars(self, names):
+        self.cellvars = names
+
+    def getCode(self):
+        """Get a Python code object"""
+        # Advance the graph through each remaining assembly stage.
+        if self.stage == RAW:
+            self.computeStackDepth()
+            self.flattenGraph()
+        if self.stage == FLAT:
+            self.convertArgs()
+        if self.stage == CONV:
+            self.makeByteCode()
+        if self.stage == DONE:
+            return self.newCodeObject()
+        raise RuntimeError, "inconsistent PyFlowGraph state"
+
+    def dump(self, io=None):
+        """Print a disassembly-style listing of the flattened instructions,
+        optionally redirecting output to the file-like object 'io'."""
+        if io:
+            save = sys.stdout
+            sys.stdout = io
+        pc = 0
+        for t in self.insts:
+            opname = t[0]
+            if opname == "SET_LINENO":
+                print
+            if len(t) == 1:
+                print "\t", "%3d" % pc, opname
+                pc = pc + 1
+            else:
+                print "\t", "%3d" % pc, opname, t[1]
+                pc = pc + 3
+        if io:
+            sys.stdout = save
+
+    def computeStackDepth(self):
+        """Compute the max stack depth.
+
+        Approach is to compute the stack effect of each basic block.
+        Then find the path through the code with the largest total
+        effect.
+        """
+        depth = {}
+        # NOTE(review): this local 'exit' is never used; max_depth below
+        # reads self.exit instead.
+        exit = None
+        for b in self.getBlocks():
+            depth[b] = findDepth(b.getInstructions())
+
+        seen = {}
+
+        def max_depth(b, d):
+            if seen.has_key(b):
+                return d
+            seen[b] = 1
+            d = d + depth[b]
+            children = b.get_children()
+            if children:
+                return max([max_depth(c, d) for c in children])
+            else:
+                if not b.label == "exit":
+                    return max_depth(self.exit, d)
+                else:
+                    return d
+
+        self.stacksize = max_depth(self.entry, 0)
+
+    def flattenGraph(self):
+        """Arrange the blocks in order and resolve jumps"""
+        assert self.stage == RAW
+        self.insts = insts = []
+        pc = 0
+        begin = {}
+        end = {}
+        # First pass: lay out instructions and record each block's
+        # start/end byte offsets.
+        for b in self.getBlocksInOrder():
+            begin[b] = pc
+            for inst in b.getInstructions():
+                insts.append(inst)
+                if len(inst) == 1:
+                    pc = pc + 1
+                else:
+                    # arg takes 2 bytes
+                    pc = pc + 3
+            end[b] = pc
+        # Second pass: replace symbolic jump targets (block objects) with
+        # numeric offsets.  pc is advanced before the offset is computed
+        # because relative jumps are measured from the *following*
+        # instruction.
+        pc = 0
+        for i in range(len(insts)):
+            inst = insts[i]
+            if len(inst) == 1:
+                pc = pc + 1
+            else:
+                pc = pc + 3
+            opname = inst[0]
+            if self.hasjrel.has_elt(opname):
+                oparg = inst[1]
+                offset = begin[oparg] - pc
+                insts[i] = opname, offset
+            elif self.hasjabs.has_elt(opname):
+                insts[i] = opname, begin[inst[1]]
+        self.stage = FLAT
+
+    # Class-body code: build the sets of relative/absolute jump opcode
+    # names once, from the dis module's tables.
+    hasjrel = misc.Set()
+    for i in dis.hasjrel:
+        hasjrel.add(dis.opname[i])
+    hasjabs = misc.Set()
+    for i in dis.hasjabs:
+        hasjabs.add(dis.opname[i])
+
+    def convertArgs(self):
+        """Convert arguments from symbolic to concrete form"""
+        assert self.stage == FLAT
+        # The docstring is always const slot 0.
+        self.consts.insert(0, self.docstring)
+        self.sort_cellvars()
+        for i in range(len(self.insts)):
+            t = self.insts[i]
+            if len(t) == 2:
+                opname, oparg = t
+                conv = self._converters.get(opname, None)
+                if conv:
+                    self.insts[i] = opname, conv(self, oparg)
+        self.stage = CONV
+
+    def sort_cellvars(self):
+        """Sort cellvars in the order of varnames and prune from freevars.
+        """
+        cells = {}
+        for name in self.cellvars:
+            cells[name] = 1
+        # Cellvars that are also arguments come first, in varnames order.
+        self.cellvars = [name for name in self.varnames
+                         if cells.has_key(name)]
+        for name in self.cellvars:
+            del cells[name]
+        self.cellvars = self.cellvars + cells.keys()
+        self.closure = self.cellvars + self.freevars
+
+    def _lookupName(self, name, list):
+        """Return index of name in list, appending if necessary
+
+        This routine uses a list instead of a dictionary, because a
+        dictionary can't store two different keys if the keys have the
+        same value but different types, e.g. 2 and 2L.  The compiler
+        must treat these two separately, so it does an explicit type
+        comparison before comparing the values.
+        """
+        t = type(name)
+        for i in range(len(list)):
+            if t == type(list[i]) and list[i] == name:
+                return i
+        end = len(list)
+        list.append(name)
+        return end
+
+    # Maps opcode name -> converter method; populated by the class-body
+    # loop below from every _convert_XXX method defined here.
+    _converters = {}
+    def _convert_LOAD_CONST(self, arg):
+        if hasattr(arg, 'getCode'):
+            arg = arg.getCode()
+        return self._lookupName(arg, self.consts)
+
+    def _convert_LOAD_FAST(self, arg):
+        self._lookupName(arg, self.names)
+        return self._lookupName(arg, self.varnames)
+    _convert_STORE_FAST = _convert_LOAD_FAST
+    _convert_DELETE_FAST = _convert_LOAD_FAST
+
+    def _convert_LOAD_NAME(self, arg):
+        if self.klass is None:
+            self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.names)
+
+    def _convert_NAME(self, arg):
+        if self.klass is None:
+            self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.names)
+    _convert_STORE_NAME = _convert_NAME
+    _convert_DELETE_NAME = _convert_NAME
+    _convert_IMPORT_NAME = _convert_NAME
+    _convert_IMPORT_FROM = _convert_NAME
+    _convert_STORE_ATTR = _convert_NAME
+    _convert_LOAD_ATTR = _convert_NAME
+    _convert_DELETE_ATTR = _convert_NAME
+    _convert_LOAD_GLOBAL = _convert_NAME
+    _convert_STORE_GLOBAL = _convert_NAME
+    _convert_DELETE_GLOBAL = _convert_NAME
+
+    def _convert_DEREF(self, arg):
+        self._lookupName(arg, self.names)
+        self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.closure)
+    _convert_LOAD_DEREF = _convert_DEREF
+    _convert_STORE_DEREF = _convert_DEREF
+
+    def _convert_LOAD_CLOSURE(self, arg):
+        self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.closure)
+
+    _cmp = list(dis.cmp_op)
+    def _convert_COMPARE_OP(self, arg):
+        return self._cmp.index(arg)
+
+    # similarly for other opcodes...
+
+    # Register every _convert_XXX method under its opcode name "XXX".
+    for name, obj in locals().items():
+        if name[:9] == "_convert_":
+            opname = name[9:]
+            _converters[opname] = obj
+    del name, obj, opname
+
+    def makeByteCode(self):
+        """Emit the final byte stream (and lnotab) from the converted
+        instruction list."""
+        assert self.stage == CONV
+        self.lnotab = lnotab = LineAddrTable()
+        for t in self.insts:
+            opname = t[0]
+            if len(t) == 1:
+                lnotab.addCode(self.opnum[opname])
+            else:
+                oparg = t[1]
+                if opname == "SET_LINENO":
+                    lnotab.nextLine(oparg)
+                hi, lo = twobyte(oparg)
+                try:
+                    lnotab.addCode(self.opnum[opname], lo, hi)
+                except ValueError:
+                    print opname, oparg
+                    print self.opnum[opname], lo, hi
+                    raise
+        self.stage = DONE
+
+    # Class-body code: map opcode name -> opcode number.
+    opnum = {}
+    for num in range(len(dis.opname)):
+        opnum[dis.opname[num]] = num
+    del num
+
+    def newCodeObject(self):
+        """Build and return the final code object (requires stage DONE)."""
+        assert self.stage == DONE
+        if (self.flags & CO_NEWLOCALS) == 0:
+            nlocals = 0
+        else:
+            nlocals = len(self.varnames)
+        argcount = self.argcount
+        # The **kwargs slot is not counted in co_argcount either.
+        if self.flags & CO_VARKEYWORDS:
+            argcount = argcount - 1
+        return new.code(argcount, nlocals, self.stacksize, self.flags,
+                        self.lnotab.getCode(), self.getConsts(),
+                        tuple(self.names), tuple(self.varnames),
+                        self.filename, self.name, self.lnotab.firstline,
+                        self.lnotab.getTable(), tuple(self.freevars),
+                        tuple(self.cellvars))
+
+    def getConsts(self):
+        """Return a tuple for the const slot of the code object
+
+        Must convert references to code (MAKE_FUNCTION) to code
+        objects recursively.
+        """
+        l = []
+        for elt in self.consts:
+            if isinstance(elt, PyFlowGraph):
+                elt = elt.getCode()
+            l.append(elt)
+        return tuple(l)
+
+def isJump(opname):
+    """Return 1 if opname is any jump opcode; implicitly None otherwise."""
+    if opname[:4] == 'JUMP':
+        return 1
+
+class TupleArg:
+    """Helper for marking func defs with nested tuples in arglist"""
+    def __init__(self, count, names):
+        self.count = count   # position index used to form the ".N" name
+        self.names = names   # the (possibly nested) tuple of real names
+    def __repr__(self):
+        return "TupleArg(%s, %s)" % (self.count, self.names)
+    def getName(self):
+        # Internal anonymous argument name, as CPython uses for tuple args.
+        return ".%d" % self.count
+
+def getArgCount(args):
+    """Return the argument count with each TupleArg counted once.
+
+    A TupleArg stands for one real argument slot, so the names nested
+    inside it must not inflate the count.
+    """
+    argcount = len(args)
+    if args:
+        for arg in args:
+            if isinstance(arg, TupleArg):
+                numNames = len(misc.flatten(arg.names))
+                argcount = argcount - numNames
+    return argcount
+
+def twobyte(val):
+    """Convert an int argument into high and low bytes"""
+    assert type(val) == types.IntType
+    # divmod(val, 256) -> (high byte, low byte)
+    return divmod(val, 256)
+
+class LineAddrTable:
+    """lnotab
+
+    This class builds the lnotab, which is documented in compile.c.
+    Here's a brief recap:
+
+    For each SET_LINENO instruction after the first one, two bytes are
+    added to lnotab.  (In some cases, multiple two-byte entries are
+    added.)  The first byte is the distance in bytes between the
+    instruction for the last SET_LINENO and the current SET_LINENO.
+    The second byte is offset in line numbers.  If either offset is
+    greater than 255, multiple two-byte entries are added -- see
+    compile.c for the delicate details.
+    """
+
+    def __init__(self):
+        self.code = []        # bytecode, one chr() per byte
+        self.codeOffset = 0   # current bytecode length in bytes
+        self.firstline = 0    # line number of the first SET_LINENO seen
+        self.lastline = 0     # line of the last recorded lnotab entry
+        self.lastoff = 0      # code offset of the last recorded entry
+        self.lnotab = []      # flat list of (addr delta, line delta) bytes
+
+    def addCode(self, *args):
+        """Append raw bytecode bytes (given as ints)."""
+        for arg in args:
+            self.code.append(chr(arg))
+        self.codeOffset = self.codeOffset + len(args)
+
+    def nextLine(self, lineno):
+        """Record a SET_LINENO at the current code offset."""
+        if self.firstline == 0:
+            self.firstline = lineno
+            self.lastline = lineno
+        else:
+            # compute deltas
+            addr = self.codeOffset - self.lastoff
+            line = lineno - self.lastline
+            # Python assumes that lineno always increases with
+            # increasing bytecode address (lnotab is unsigned char).
+            # Depending on when SET_LINENO instructions are emitted
+            # this is not always true.  Consider the code:
+            #     a = (1,
+            #          b)
+            # In the bytecode stream, the assignment to "a" occurs
+            # after the loading of "b".  This works with the C Python
+            # compiler because it only generates a SET_LINENO instruction
+            # for the assignment.
+            # Line deltas <= 0 are silently skipped (see comment above);
+            # lastline/lastoff are only advanced for recorded entries.
+            if line > 0:
+                push = self.lnotab.append
+                # Deltas > 255 are split into multiple two-byte entries.
+                while addr > 255:
+                    push(255); push(0)
+                    addr -= 255
+                while line > 255:
+                    push(addr); push(255)
+                    line -= 255
+                    addr = 0
+                if addr > 0 or line > 0:
+                    push(addr); push(line)
+                self.lastline = lineno
+                self.lastoff = self.codeOffset
+
+    def getCode(self):
+        """Return the accumulated bytecode as a single string."""
+        return string.join(self.code, '')
+
+    def getTable(self):
+        """Return the lnotab as a byte string."""
+        return string.join(map(chr, self.lnotab), '')
+
+class StackDepthTracker:
+    """Estimate the maximum stack depth of an instruction sequence.
+
+    Resolution order per opcode: exact entry in 'effect', then prefix
+    match in 'patterns', then a same-named method taking the oparg.
+    """
+    # XXX 1. need to keep track of stack depth on jumps
+    # XXX 2. at least partly as a result, this code is broken
+
+    def findDepth(self, insts, debug=0):
+        depth = 0
+        maxDepth = 0
+        for i in insts:
+            opname = i[0]
+            if debug:
+                print i,
+            delta = self.effect.get(opname, None)
+            if delta is not None:
+                depth = depth + delta
+            else:
+                # now check patterns
+                for pat, pat_delta in self.patterns:
+                    if opname[:len(pat)] == pat:
+                        delta = pat_delta
+                        depth = depth + delta
+                        break
+                # if we still haven't found a match
+                if delta is None:
+                    meth = getattr(self, opname, None)
+                    if meth is not None:
+                        depth = depth + meth(i[1])
+            if depth > maxDepth:
+                maxDepth = depth
+            if debug:
+                print depth, maxDepth
+        return maxDepth
+
+    # Fixed stack effects per opcode.
+    effect = {
+        'POP_TOP': -1,
+        'DUP_TOP': 1,
+        'SLICE+1': -1,
+        'SLICE+2': -1,
+        'SLICE+3': -2,
+        'STORE_SLICE+0': -1,
+        'STORE_SLICE+1': -2,
+        'STORE_SLICE+2': -2,
+        'STORE_SLICE+3': -3,
+        'DELETE_SLICE+0': -1,
+        'DELETE_SLICE+1': -2,
+        'DELETE_SLICE+2': -2,
+        'DELETE_SLICE+3': -3,
+        'STORE_SUBSCR': -3,
+        'DELETE_SUBSCR': -2,
+        # PRINT_EXPR?
+        'PRINT_ITEM': -1,
+        'RETURN_VALUE': -1,
+        'EXEC_STMT': -3,
+        'BUILD_CLASS': -2,
+        'STORE_NAME': -1,
+        'STORE_ATTR': -2,
+        'DELETE_ATTR': -1,
+        'STORE_GLOBAL': -1,
+        'BUILD_MAP': 1,
+        'COMPARE_OP': -1,
+        'STORE_FAST': -1,
+        'IMPORT_STAR': -1,
+        'IMPORT_NAME': 0,
+        'IMPORT_FROM': 1,
+        'LOAD_ATTR': 0, # unlike other loads
+        # close enough...
+        'SETUP_EXCEPT': 3,
+        'SETUP_FINALLY': 3,
+        'FOR_ITER': 1,
+        }
+    # use pattern match
+    patterns = [
+        ('BINARY_', -1),
+        ('LOAD_', 1),
+        ]
+
+    # The methods below compute oparg-dependent stack effects.
+    def UNPACK_SEQUENCE(self, count):
+        return count-1
+    def BUILD_TUPLE(self, count):
+        return -count+1
+    def BUILD_LIST(self, count):
+        return -count+1
+    def CALL_FUNCTION(self, argc):
+        # low byte = positional args, high byte = keyword args (2 slots each)
+        hi, lo = divmod(argc, 256)
+        return -(lo + hi * 2)
+    def CALL_FUNCTION_VAR(self, argc):
+        return self.CALL_FUNCTION(argc)-1
+    def CALL_FUNCTION_KW(self, argc):
+        return self.CALL_FUNCTION(argc)-1
+    def CALL_FUNCTION_VAR_KW(self, argc):
+        return self.CALL_FUNCTION(argc)-2
+    def MAKE_FUNCTION(self, argc):
+        return -argc
+    def MAKE_CLOSURE(self, argc):
+        # XXX need to account for free variables too!
+        return -argc
+    def BUILD_SLICE(self, argc):
+        if argc == 2:
+            return -1
+        elif argc == 3:
+            return -2
+    def DUP_TOPX(self, argc):
+        return argc
+
+# Module-level convenience binding used by PyFlowGraph.computeStackDepth.
+findDepth = StackDepthTracker().findDepth
diff --git a/lib-python/2.2/compiler/pycodegen.py b/lib-python/2.2/compiler/pycodegen.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/pycodegen.py
@@ -0,0 +1,1388 @@
+import imp
+import os
+import marshal
+import stat
+import string
+import struct
+import sys
+import types
+from cStringIO import StringIO
+
+from compiler import ast, parse, walk, syntax
+from compiler import pyassem, misc, future, symbols
+from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL
+from compiler.consts import CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,\
+     CO_NESTED, CO_GENERATOR, CO_GENERATOR_ALLOWED, CO_FUTURE_DIVISION
+from compiler.pyassem import TupleArg
+
+# Do we have Python 1.x or Python 2.x?
+# sys.version_info was added in Python 2.0, so its absence means 1.x.
+try:
+    VERSION = sys.version_info[0]
+except AttributeError:
+    VERSION = 1
+
+callfunc_opcode_info = {
+    # (Have *args, Have **args) : opcode
+    (0,0) : "CALL_FUNCTION",
+    (1,0) : "CALL_FUNCTION_VAR",
+    (0,1) : "CALL_FUNCTION_KW",
+    (1,1) : "CALL_FUNCTION_VAR_KW",
+}
+
+# Tags pushed on CodeGenerator.setups to identify enclosing constructs.
+LOOP = 1
+EXCEPT = 2
+TRY_FINALLY = 3
+END_FINALLY = 4
+
+# XXX this doesn't seem to be used
+class BlockStack(misc.Stack):
+    __super_init = misc.Stack.__init__
+
+    def __init__(self):
+        # NOTE(review): attribute access already binds self, so passing
+        # self explicitly looks wrong -- presumably never exercised since
+        # the class is unused (see XXX above); confirm before relying on it.
+        self.__super_init(self)
+        self.loop = None
+
+def compileFile(filename, display=0):
+    """Compile a Python source file and write the .pyc next to it.
+
+    The output file is simply filename + "c"; 'display' is forwarded to
+    Module.compile (pretty-prints the parse tree when true).
+    """
+    f = open(filename)
+    buf = f.read()
+    f.close()
+    mod = Module(buf, filename)
+    try:
+        mod.compile(display)
+    except SyntaxError, err:
+        # NOTE(review): bare re-raise; the except clause is effectively a
+        # no-op placeholder and 'err' is unused.
+        raise
+    else:
+        f = open(filename + "c", "wb")
+        mod.dump(f)
+        f.close()
+
+def compile(source, filename, mode, flags=None, dont_inherit=None):
+    """Replacement for builtin compile() function"""
+    # The flags/dont_inherit parameters exist for signature compatibility
+    # with the builtin but are not implemented here.
+    if flags is not None or dont_inherit is not None:
+        raise RuntimeError, "not implemented yet"
+
+    if mode == "single":
+        gen = Interactive(source, filename)
+    elif mode == "exec":
+        gen = Module(source, filename)
+    elif mode == "eval":
+        gen = Expression(source, filename)
+    else:
+        raise ValueError("compile() 3rd arg must be 'exec' or "
+                         "'eval' or 'single'")
+    gen.compile()
+    return gen.code
+
+class AbstractCompileMode:
+    """Base class for the three compile modes (exec/eval/single).
+
+    Subclasses set 'mode' and implement compile() to fill in self.code.
+    """
+
+    mode = None # defined by subclass
+
+    def __init__(self, source, filename):
+        self.source = source
+        self.filename = filename
+        self.code = None
+
+    def _get_tree(self):
+        # Parse, attach the filename to every node, and syntax-check.
+        tree = parse(self.source, self.mode)
+        misc.set_filename(self.filename, tree)
+        syntax.check(tree)
+        return tree
+
+    def compile(self):
+        pass # implemented by subclass
+
+    def getCode(self):
+        return self.code
+
+class Expression(AbstractCompileMode):
+    """Compile mode for a single expression (builtin compile 'eval')."""
+
+    mode = "eval"
+
+    def compile(self):
+        tree = self._get_tree()
+        gen = ExpressionCodeGenerator(tree)
+        self.code = gen.getCode()
+
+class Interactive(AbstractCompileMode):
+    """Compile mode for an interactive statement (builtin compile 'single')."""
+
+    mode = "single"
+
+    def compile(self):
+        tree = self._get_tree()
+        gen = InteractiveCodeGenerator(tree)
+        self.code = gen.getCode()
+
+class Module(AbstractCompileMode):
+    """Compile mode for a whole module (builtin compile 'exec');
+    also knows how to dump itself as a .pyc stream."""
+
+    mode = "exec"
+
+    def compile(self, display=0):
+        tree = self._get_tree()
+        gen = ModuleCodeGenerator(tree)
+        if display:
+            import pprint
+            # NOTE(review): pprint.pprint returns None, so this prints the
+            # tree followed by a stray "None".
+            print pprint.pprint(tree)
+        self.code = gen.getCode()
+
+    def dump(self, f):
+        """Write a .pyc image (magic + mtime header, then marshalled code)."""
+        f.write(self.getPycHeader())
+        marshal.dump(self.code, f)
+
+    MAGIC = imp.get_magic()
+
+    def getPycHeader(self):
+        # compile.c uses marshal to write a long directly, with
+        # calling the interface that would also generate a 1-byte code
+        # to indicate the type of the value.  simplest way to get the
+        # same effect is to call marshal and then skip the code.
+        # '<i' packs the mtime as a 4-byte little-endian int.
+        mtime = os.stat(self.filename)[stat.ST_MTIME]
+        mtime = struct.pack('<i', mtime)
+        return self.MAGIC + mtime
+
+class LocalNameFinder:
+    """Find local names in scope"""
+    def __init__(self, names=()):
+        self.names = misc.Set()     # candidate local bindings
+        self.globals = misc.Set()   # names declared global
+        for name in names:
+            self.names.add(name)
+
+    # XXX list comprehensions and for loops
+
+    def getLocals(self):
+        # Names declared global are subtracted from the local set.
+        for elt in self.globals.elements():
+            if self.names.has_elt(elt):
+                self.names.remove(elt)
+        return self.names
+
+    def visitDict(self, node):
+        # Empty visitor: presumably prunes traversal into dict displays so
+        # their contents are not treated as bindings.
+        pass
+
+    def visitGlobal(self, node):
+        for name in node.names:
+            self.globals.add(name)
+
+    def visitFunction(self, node):
+        # The function's own name is a binding; its body is a new scope.
+        self.names.add(node.name)
+
+    def visitLambda(self, node):
+        pass
+
+    def visitImport(self, node):
+        for name, alias in node.names:
+            self.names.add(alias or name)
+
+    def visitFrom(self, node):
+        for name, alias in node.names:
+            self.names.add(alias or name)
+
+    def visitClass(self, node):
+        self.names.add(node.name)
+
+    def visitAssName(self, node):
+        self.names.add(node.name)
+
+def is_constant_false(node):
+    """Return 1 if node is a Const with a false value, else 0."""
+    if isinstance(node, ast.Const):
+        if not node.value:
+            return 1
+    return 0
+
+class CodeGenerator:
+    """Defines basic code generator for Python bytecode
+
+    This class is an abstract base class.  Concrete subclasses must
+    define an __init__() that defines self.graph and then calls the
+    __init__() defined in this class.
+
+    The concrete class must also define the class attributes
+    NameFinder, FunctionGen, and ClassGen.  These attributes can be
+    defined in the initClass() method, which is a hook for
+    initializing these methods after all the classes have been
+    defined.
+    """
+
+    optimized = 0 # is namespace access optimized?
+    __initialized = None
+    class_name = None # provide default for instance variable
+
+    def __init__(self):
+        # One-time class initialization hook (see initClass); the flag is
+        # stored on the class so it runs once per class, not per instance.
+        if self.__initialized is None:
+            self.initClass()
+            self.__class__.__initialized = 1
+        self.checkClass()
+        self.locals = misc.Stack()
+        self.setups = misc.Stack()   # stack of enclosing LOOP/EXCEPT/... tags
+        self.curStack = 0
+        self.maxStack = 0
+        self.last_lineno = None
+        self._setupGraphDelegation()
+        self._div_op = "BINARY_DIVIDE"
+
+        # XXX set flags based on future features
+        futures = self.get_module().futures
+        for feature in futures:
+            if feature == "division":
+                self.graph.setFlag(CO_FUTURE_DIVISION)
+                self._div_op = "BINARY_TRUE_DIVIDE"
+            elif feature == "generators":
+                self.graph.setFlag(CO_GENERATOR_ALLOWED)
+
+    def initClass(self):
+        """This method is called once for each class"""
+
+    def checkClass(self):
+        """Verify that class is constructed correctly"""
+        try:
+            assert hasattr(self, 'graph')
+            assert getattr(self, 'NameFinder')
+            assert getattr(self, 'FunctionGen')
+            assert getattr(self, 'ClassGen')
+        except AssertionError, msg:
+            # NOTE(review): 'msg' is captured but discarded; only the class
+            # name makes it into the re-raised error.
+            intro = "Bad class construction for %s" % self.__class__.__name__
+            raise AssertionError, intro
+
+    def _setupGraphDelegation(self):
+        # Expose the common graph operations directly on self so callers
+        # can write self.emit(...) instead of self.graph.emit(...).
+        self.emit = self.graph.emit
+        self.newBlock = self.graph.newBlock
+        self.startBlock = self.graph.startBlock
+        self.nextBlock = self.graph.nextBlock
+        self.setDocstring = self.graph.setDocstring
+
+    def getCode(self):
+        """Return a code object"""
+        return self.graph.getCode()
+
+    def mangle(self, name):
+        # Apply private-name mangling only inside a class body.
+        if self.class_name is not None:
+            return misc.mangle(name, self.class_name)
+        else:
+            return name
+
+    def parseSymbols(self, tree):
+        """Run the symbol-table pass; return the node -> scope mapping."""
+        s = symbols.SymbolVisitor()
+        walk(tree, s)
+        return s.scopes
+
+    def get_module(self):
+        # Abstract: concrete generators return their module code generator.
+        raise RuntimeError, "should be implemented by subclasses"
+
+    # Next five methods handle name access
+
+    def isLocalName(self, name):
+        return self.locals.top().has_elt(name)
+
+    def storeName(self, name):
+        self._nameOp('STORE', name)
+
+    def loadName(self, name):
+        self._nameOp('LOAD', name)
+
+    def delName(self, name):
+        self._nameOp('DELETE', name)
+
+    def _nameOp(self, prefix, name):
+        """Emit the right LOAD/STORE/DELETE variant for 'name' based on
+        its scope (local/global/free/cell) and whether namespace access
+        is optimized (function body vs. module/class body)."""
+        name = self.mangle(name)
+        scope = self.scope.check_name(name)
+        if scope == SC_LOCAL:
+            if not self.optimized:
+                self.emit(prefix + '_NAME', name)
+            else:
+                self.emit(prefix + '_FAST', name)
+        elif scope == SC_GLOBAL:
+            if not self.optimized:
+                self.emit(prefix + '_NAME', name)
+            else:
+                self.emit(prefix + '_GLOBAL', name)
+        elif scope == SC_FREE or scope == SC_CELL:
+            self.emit(prefix + '_DEREF', name)
+        else:
+            raise RuntimeError, "unsupported scope for var %s: %d" % \
+                  (name, scope)
+
+    def _implicitNameOp(self, prefix, name):
+        """Emit name ops for names generated implicitly by for loops
+
+        The interpreter generates names that start with a period or
+        dollar sign.  The symbol table ignores these names because
+        they aren't present in the program text.
+        """
+        if self.optimized:
+            self.emit(prefix + '_FAST', name)
+        else:
+            self.emit(prefix + '_NAME', name)
+
+    def set_lineno(self, node, force=0):
+        """Emit SET_LINENO if node has lineno attribute and it is
+        different than the last lineno emitted.
+
+        Returns true if SET_LINENO was emitted.
+
+        There are no rules for when an AST node should have a lineno
+        attribute.  The transformer and AST code need to be reviewed
+        and a consistent policy implemented and documented.  Until
+        then, this method works around missing line numbers.
+        """
+        lineno = getattr(node, 'lineno', None)
+        # 'force' emits even when the line number is unchanged (used e.g.
+        # at the top of loop bodies).
+        if lineno is not None and (lineno != self.last_lineno
+                                   or force):
+            self.emit('SET_LINENO', lineno)
+            self.last_lineno = lineno
+            return 1
+        return 0
+
+    # The first few visitor methods handle nodes that generate new
+    # code objects.  They use class attributes to determine what
+    # specialized code generators to use.
+
+    NameFinder = LocalNameFinder
+    FunctionGen = None
+    ClassGen = None
+
+    def visitModule(self, node):
+        """Generate code for a whole module; always ends by returning None."""
+        self.scopes = self.parseSymbols(node)
+        self.scope = self.scopes[node]
+        self.emit('SET_LINENO', 0)
+        if node.doc:
+            # The module docstring is stored under __doc__.
+            self.emit('LOAD_CONST', node.doc)
+            self.storeName('__doc__')
+        lnf = walk(node.node, self.NameFinder(), verbose=0)
+        self.locals.push(lnf.getLocals())
+        self.visit(node.node)
+        self.emit('LOAD_CONST', None)
+        self.emit('RETURN_VALUE')
+
+    def visitExpression(self, node):
+        """Generate code for an eval-mode expression; its value is returned."""
+        self.set_lineno(node)
+        self.scopes = self.parseSymbols(node)
+        self.scope = self.scopes[node]
+        self.visit(node.node)
+        self.emit('RETURN_VALUE')
+
+    def visitFunction(self, node):
+        # A def statement: build the function object, then bind its name.
+        self._visitFuncOrLambda(node, isLambda=0)
+        if node.doc:
+            self.setDocstring(node.doc)
+        self.storeName(node.name)
+
+    def visitLambda(self, node):
+        # A lambda: the function object is left on the stack, unnamed.
+        self._visitFuncOrLambda(node, isLambda=1)
+
+    def _visitFuncOrLambda(self, node, isLambda=0):
+        """Compile the body with a nested generator, then emit the code to
+        build the function object (MAKE_CLOSURE when it has free vars,
+        MAKE_FUNCTION otherwise), with defaults pushed first."""
+        gen = self.FunctionGen(node, self.scopes, isLambda,
+                               self.class_name, self.get_module())
+        walk(node.code, gen)
+        gen.finish()
+        self.set_lineno(node)
+        for default in node.defaults:
+            self.visit(default)
+        frees = gen.scope.get_free_vars()
+        if frees:
+            for name in frees:
+                self.emit('LOAD_CLOSURE', name)
+            self.emit('LOAD_CONST', gen)
+            self.emit('MAKE_CLOSURE', len(node.defaults))
+        else:
+            self.emit('LOAD_CONST', gen)
+            self.emit('MAKE_FUNCTION', len(node.defaults))
+
+    def visitClass(self, node):
+        """Compile a class statement: name + bases tuple + called class
+        body function, combined by BUILD_CLASS, then bound to the name."""
+        gen = self.ClassGen(node, self.scopes,
+                            self.get_module())
+        walk(node.code, gen)
+        gen.finish()
+        self.set_lineno(node)
+        self.emit('LOAD_CONST', node.name)
+        for base in node.bases:
+            self.visit(base)
+        self.emit('BUILD_TUPLE', len(node.bases))
+        frees = gen.scope.get_free_vars()
+        for name in frees:
+            self.emit('LOAD_CLOSURE', name)
+        self.emit('LOAD_CONST', gen)
+        if frees:
+            self.emit('MAKE_CLOSURE', 0)
+        else:
+            self.emit('MAKE_FUNCTION', 0)
+        self.emit('CALL_FUNCTION', 0)
+        self.emit('BUILD_CLASS')
+        self.storeName(node.name)
+
+    # The rest are standard visitor methods
+
+    # The next few implement control-flow statements
+
+    def visitIf(self, node):
+        end = self.newBlock()
+        numtests = len(node.tests)
+        for i in range(numtests):
+            test, suite = node.tests[i]
+            if is_constant_false(test):
+                # XXX will need to check generator stuff here
+                continue
+            self.set_lineno(test)
+            self.visit(test)
+            nextTest = self.newBlock()
+            self.emit('JUMP_IF_FALSE', nextTest)
+            self.nextBlock()
+            self.emit('POP_TOP')
+            self.visit(suite)
+            self.emit('JUMP_FORWARD', end)
+            self.startBlock(nextTest)
+            self.emit('POP_TOP')
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(end)
+
+    def visitWhile(self, node):
+        self.set_lineno(node)
+
+        loop = self.newBlock()
+        else_ = self.newBlock()
+
+        after = self.newBlock()
+        self.emit('SETUP_LOOP', after)
+
+        self.nextBlock(loop)
+        self.setups.push((LOOP, loop))
+
+        self.set_lineno(node, force=1)
+        self.visit(node.test)
+        self.emit('JUMP_IF_FALSE', else_ or after)
+
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.visit(node.body)
+        self.emit('JUMP_ABSOLUTE', loop)
+
+        self.startBlock(else_) # or just the POPs if not else clause
+        self.emit('POP_TOP')
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(after)
+
+    def visitFor(self, node):
+        start = self.newBlock()
+        anchor = self.newBlock()
+        after = self.newBlock()
+        self.setups.push((LOOP, start))
+
+        self.set_lineno(node)
+        self.emit('SETUP_LOOP', after)
+        self.visit(node.list)
+        self.emit('GET_ITER')
+
+        self.nextBlock(start)
+        self.set_lineno(node, force=1)
+        self.emit('FOR_ITER', anchor)
+        self.visit(node.assign)
+        self.visit(node.body)
+        self.emit('JUMP_ABSOLUTE', start)
+        self.nextBlock(anchor)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(after)
+
+    def visitBreak(self, node):
+        if not self.setups:
+            raise SyntaxError, "'break' outside loop (%s, %d)" % \
+                  (node.filename, node.lineno)
+        self.set_lineno(node)
+        self.emit('BREAK_LOOP')
+
+    def visitContinue(self, node):
+        if not self.setups:
+            raise SyntaxError, "'continue' outside loop (%s, %d)" % \
+                  (node.filename, node.lineno)
+        kind, block = self.setups.top()
+        if kind == LOOP:
+            self.set_lineno(node)
+            self.emit('JUMP_ABSOLUTE', block)
+            self.nextBlock()
+        elif kind == EXCEPT or kind == TRY_FINALLY:
+            self.set_lineno(node)
+            # find the block that starts the loop
+            top = len(self.setups)
+            while top > 0:
+                top = top - 1
+                kind, loop_block = self.setups[top]
+                if kind == LOOP:
+                    break
+            if kind != LOOP:
+                raise SyntaxError, "'continue' outside loop (%s, %d)" % \
+                      (node.filename, node.lineno)
+            self.emit('CONTINUE_LOOP', loop_block)
+            self.nextBlock()
+        elif kind == END_FINALLY:
+            msg = "'continue' not allowed inside 'finally' clause (%s, %d)"
+            raise SyntaxError, msg % (node.filename, node.lineno)
+
+    def visitTest(self, node, jump):
+        end = self.newBlock()
+        for child in node.nodes[:-1]:
+            self.visit(child)
+            self.emit(jump, end)
+            self.nextBlock()
+            self.emit('POP_TOP')
+        self.visit(node.nodes[-1])
+        self.nextBlock(end)
+
+    def visitAnd(self, node):
+        self.visitTest(node, 'JUMP_IF_FALSE')
+
+    def visitOr(self, node):
+        self.visitTest(node, 'JUMP_IF_TRUE')
+
+    def visitCompare(self, node):
+        self.visit(node.expr)
+        cleanup = self.newBlock()
+        for op, code in node.ops[:-1]:
+            self.visit(code)
+            self.emit('DUP_TOP')
+            self.emit('ROT_THREE')
+            self.emit('COMPARE_OP', op)
+            self.emit('JUMP_IF_FALSE', cleanup)
+            self.nextBlock()
+            self.emit('POP_TOP')
+        # now do the last comparison
+        if node.ops:
+            op, code = node.ops[-1]
+            self.visit(code)
+            self.emit('COMPARE_OP', op)
+        if len(node.ops) > 1:
+            end = self.newBlock()
+            self.emit('JUMP_FORWARD', end)
+            self.startBlock(cleanup)
+            self.emit('ROT_TWO')
+            self.emit('POP_TOP')
+            self.nextBlock(end)
+
+    # list comprehensions
+    __list_count = 0
+
+    def visitListComp(self, node):
+        self.set_lineno(node)
+        # setup list
+        append = "$append%d" % self.__list_count
+        self.__list_count = self.__list_count + 1
+        self.emit('BUILD_LIST', 0)
+        self.emit('DUP_TOP')
+        self.emit('LOAD_ATTR', 'append')
+        self._implicitNameOp('STORE', append)
+
+        stack = []
+        for i, for_ in zip(range(len(node.quals)), node.quals):
+            start, anchor = self.visit(for_)
+            cont = None
+            for if_ in for_.ifs:
+                if cont is None:
+                    cont = self.newBlock()
+                self.visit(if_, cont)
+            stack.insert(0, (start, cont, anchor))
+
+        self._implicitNameOp('LOAD', append)
+        self.visit(node.expr)
+        self.emit('CALL_FUNCTION', 1)
+        self.emit('POP_TOP')
+
+        for start, cont, anchor in stack:
+            if cont:
+                skip_one = self.newBlock()
+                self.emit('JUMP_FORWARD', skip_one)
+                self.startBlock(cont)
+                self.emit('POP_TOP')
+                self.nextBlock(skip_one)
+            self.emit('JUMP_ABSOLUTE', start)
+            self.startBlock(anchor)
+        self._implicitNameOp('DELETE', append)
+
+        self.__list_count = self.__list_count - 1
+
+    def visitListCompFor(self, node):
+        start = self.newBlock()
+        anchor = self.newBlock()
+
+        self.visit(node.list)
+        self.emit('GET_ITER')
+        self.nextBlock(start)
+        self.emit('SET_LINENO', node.lineno)
+        self.emit('FOR_ITER', anchor)
+        self.nextBlock()
+        self.visit(node.assign)
+        return start, anchor
+
+    def visitListCompIf(self, node, branch):
+        self.set_lineno(node, force=1)
+        self.visit(node.test)
+        self.emit('JUMP_IF_FALSE', branch)
+        self.newBlock()
+        self.emit('POP_TOP')
+
+    # exception related
+
+    def visitAssert(self, node):
+        # XXX would be interesting to implement this via a
+        # transformation of the AST before this stage
+        end = self.newBlock()
+        self.set_lineno(node)
+        # XXX __debug__ and AssertionError appear to be special cases
+        # -- they are always loaded as globals even if there are local
+        # names.  I guess this is a sort of renaming op.
+        self.emit('LOAD_GLOBAL', '__debug__')
+        self.emit('JUMP_IF_FALSE', end)
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.visit(node.test)
+        self.emit('JUMP_IF_TRUE', end)
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.emit('LOAD_GLOBAL', 'AssertionError')
+        if node.fail:
+            self.visit(node.fail)
+            self.emit('RAISE_VARARGS', 2)
+        else:
+            self.emit('RAISE_VARARGS', 1)
+        self.nextBlock(end)
+        self.emit('POP_TOP')
+
+    def visitRaise(self, node):
+        self.set_lineno(node)
+        n = 0
+        if node.expr1:
+            self.visit(node.expr1)
+            n = n + 1
+        if node.expr2:
+            self.visit(node.expr2)
+            n = n + 1
+        if node.expr3:
+            self.visit(node.expr3)
+            n = n + 1
+        self.emit('RAISE_VARARGS', n)
+
+    def visitTryExcept(self, node):
+        body = self.newBlock()
+        handlers = self.newBlock()
+        end = self.newBlock()
+        if node.else_:
+            lElse = self.newBlock()
+        else:
+            lElse = end
+        self.set_lineno(node)
+        self.emit('SETUP_EXCEPT', handlers)
+        self.nextBlock(body)
+        self.setups.push((EXCEPT, body))
+        self.visit(node.body)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        self.emit('JUMP_FORWARD', lElse)
+        self.startBlock(handlers)
+
+        last = len(node.handlers) - 1
+        for i in range(len(node.handlers)):
+            expr, target, body = node.handlers[i]
+            self.set_lineno(expr)
+            if expr:
+                self.emit('DUP_TOP')
+                self.visit(expr)
+                self.emit('COMPARE_OP', 'exception match')
+                next = self.newBlock()
+                self.emit('JUMP_IF_FALSE', next)
+                self.nextBlock()
+                self.emit('POP_TOP')
+            self.emit('POP_TOP')
+            if target:
+                self.visit(target)
+            else:
+                self.emit('POP_TOP')
+            self.emit('POP_TOP')
+            self.visit(body)
+            self.emit('JUMP_FORWARD', end)
+            if expr:
+                self.nextBlock(next)
+            else:
+                self.nextBlock()
+            if expr: # XXX
+                self.emit('POP_TOP')
+        self.emit('END_FINALLY')
+        if node.else_:
+            self.nextBlock(lElse)
+            self.visit(node.else_)
+        self.nextBlock(end)
+
+    def visitTryFinally(self, node):
+        body = self.newBlock()
+        final = self.newBlock()
+        self.set_lineno(node)
+        self.emit('SETUP_FINALLY', final)
+        self.nextBlock(body)
+        self.setups.push((TRY_FINALLY, body))
+        self.visit(node.body)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        self.emit('LOAD_CONST', None)
+        self.nextBlock(final)
+        self.setups.push((END_FINALLY, final))
+        self.visit(node.final)
+        self.emit('END_FINALLY')
+        self.setups.pop()
+
+    # misc
+
+    def visitDiscard(self, node):
+        self.set_lineno(node)
+        self.visit(node.expr)
+        self.emit('POP_TOP')
+
+    def visitConst(self, node):
+        self.emit('LOAD_CONST', node.value)
+
+    def visitKeyword(self, node):
+        self.emit('LOAD_CONST', node.name)
+        self.visit(node.expr)
+
+    def visitGlobal(self, node):
+        # no code to generate
+        pass
+
+    def visitName(self, node):
+        self.set_lineno(node)
+        self.loadName(node.name)
+
+    def visitPass(self, node):
+        self.set_lineno(node)
+
+    def visitImport(self, node):
+        self.set_lineno(node)
+        for name, alias in node.names:
+            if VERSION > 1:
+                self.emit('LOAD_CONST', None)
+            self.emit('IMPORT_NAME', name)
+            mod = string.split(name, ".")[0]
+            self.storeName(alias or mod)
+
+    def visitFrom(self, node):
+        self.set_lineno(node)
+        fromlist = map(lambda (name, alias): name, node.names)
+        if VERSION > 1:
+            self.emit('LOAD_CONST', tuple(fromlist))
+        self.emit('IMPORT_NAME', node.modname)
+        for name, alias in node.names:
+            if VERSION > 1:
+                if name == '*':
+                    self.namespace = 0
+                    self.emit('IMPORT_STAR')
+                    # There can only be one name w/ from ... import *
+                    assert len(node.names) == 1
+                    return
+                else:
+                    self.emit('IMPORT_FROM', name)
+                    self._resolveDots(name)
+                    self.storeName(alias or name)
+            else:
+                self.emit('IMPORT_FROM', name)
+        self.emit('POP_TOP')
+
+    def _resolveDots(self, name):
+        elts = string.split(name, ".")
+        if len(elts) == 1:
+            return
+        for elt in elts[1:]:
+            self.emit('LOAD_ATTR', elt)
+
+    def visitGetattr(self, node):
+        self.visit(node.expr)
+        self.emit('LOAD_ATTR', self.mangle(node.attrname))
+
+    # next five implement assignments
+
+    def visitAssign(self, node):
+        self.set_lineno(node)
+        self.visit(node.expr)
+        dups = len(node.nodes) - 1
+        for i in range(len(node.nodes)):
+            elt = node.nodes[i]
+            if i < dups:
+                self.emit('DUP_TOP')
+            if isinstance(elt, ast.Node):
+                self.visit(elt)
+
+    def visitAssName(self, node):
+        if node.flags == 'OP_ASSIGN':
+            self.storeName(node.name)
+        elif node.flags == 'OP_DELETE':
+            self.set_lineno(node)
+            self.delName(node.name)
+        else:
+            print "oops", node.flags
+
+    def visitAssAttr(self, node):
+        self.visit(node.expr)
+        if node.flags == 'OP_ASSIGN':
+            self.emit('STORE_ATTR', self.mangle(node.attrname))
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_ATTR', self.mangle(node.attrname))
+        else:
+            print "warning: unexpected flags:", node.flags
+            print node
+
+    def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'):
+        if findOp(node) != 'OP_DELETE':
+            self.emit(op, len(node.nodes))
+        for child in node.nodes:
+            self.visit(child)
+
+    if VERSION > 1:
+        visitAssTuple = _visitAssSequence
+        visitAssList = _visitAssSequence
+    else:
+        def visitAssTuple(self, node):
+            self._visitAssSequence(node, 'UNPACK_TUPLE')
+
+        def visitAssList(self, node):
+            self._visitAssSequence(node, 'UNPACK_LIST')
+
+    # augmented assignment
+
+    def visitAugAssign(self, node):
+        self.set_lineno(node)
+        aug_node = wrap_aug(node.node)
+        self.visit(aug_node, "load")
+        self.visit(node.expr)
+        self.emit(self._augmented_opcode[node.op])
+        self.visit(aug_node, "store")
+
+    _augmented_opcode = {
+        '+=' : 'INPLACE_ADD',
+        '-=' : 'INPLACE_SUBTRACT',
+        '*=' : 'INPLACE_MULTIPLY',
+        '/=' : 'INPLACE_DIVIDE',
+        '//=': 'INPLACE_FLOOR_DIVIDE',
+        '%=' : 'INPLACE_MODULO',
+        '**=': 'INPLACE_POWER',
+        '>>=': 'INPLACE_RSHIFT',
+        '<<=': 'INPLACE_LSHIFT',
+        '&=' : 'INPLACE_AND',
+        '^=' : 'INPLACE_XOR',
+        '|=' : 'INPLACE_OR',
+        }
+
+    def visitAugName(self, node, mode):
+        if mode == "load":
+            self.loadName(node.name)
+        elif mode == "store":
+            self.storeName(node.name)
+
+    def visitAugGetattr(self, node, mode):
+        if mode == "load":
+            self.visit(node.expr)
+            self.emit('DUP_TOP')
+            self.emit('LOAD_ATTR', self.mangle(node.attrname))
+        elif mode == "store":
+            self.emit('ROT_TWO')
+            self.emit('STORE_ATTR', self.mangle(node.attrname))
+
+    def visitAugSlice(self, node, mode):
+        if mode == "load":
+            self.visitSlice(node, 1)
+        elif mode == "store":
+            slice = 0
+            if node.lower:
+                slice = slice | 1
+            if node.upper:
+                slice = slice | 2
+            if slice == 0:
+                self.emit('ROT_TWO')
+            elif slice == 3:
+                self.emit('ROT_FOUR')
+            else:
+                self.emit('ROT_THREE')
+            self.emit('STORE_SLICE+%d' % slice)
+
+    def visitAugSubscript(self, node, mode):
+        if len(node.subs) > 1:
+            raise SyntaxError, "augmented assignment to tuple is not possible"
+        if mode == "load":
+            self.visitSubscript(node, 1)
+        elif mode == "store":
+            self.emit('ROT_THREE')
+            self.emit('STORE_SUBSCR')
+
+    def visitExec(self, node):
+        self.visit(node.expr)
+        if node.locals is None:
+            self.emit('LOAD_CONST', None)
+        else:
+            self.visit(node.locals)
+        if node.globals is None:
+            self.emit('DUP_TOP')
+        else:
+            self.visit(node.globals)
+        self.emit('EXEC_STMT')
+
+    def visitCallFunc(self, node):
+        pos = 0
+        kw = 0
+        self.set_lineno(node)
+        self.visit(node.node)
+        for arg in node.args:
+            self.visit(arg)
+            if isinstance(arg, ast.Keyword):
+                kw = kw + 1
+            else:
+                pos = pos + 1
+        if node.star_args is not None:
+            self.visit(node.star_args)
+        if node.dstar_args is not None:
+            self.visit(node.dstar_args)
+        have_star = node.star_args is not None
+        have_dstar = node.dstar_args is not None
+        opcode = callfunc_opcode_info[have_star, have_dstar]
+        self.emit(opcode, kw << 8 | pos)
+
+    def visitPrint(self, node, newline=0):
+        self.set_lineno(node)
+        if node.dest:
+            self.visit(node.dest)
+        for child in node.nodes:
+            if node.dest:
+                self.emit('DUP_TOP')
+            self.visit(child)
+            if node.dest:
+                self.emit('ROT_TWO')
+                self.emit('PRINT_ITEM_TO')
+            else:
+                self.emit('PRINT_ITEM')
+        if node.dest and not newline:
+            self.emit('POP_TOP')
+
+    def visitPrintnl(self, node):
+        self.visitPrint(node, newline=1)
+        if node.dest:
+            self.emit('PRINT_NEWLINE_TO')
+        else:
+            self.emit('PRINT_NEWLINE')
+
+    def visitReturn(self, node):
+        self.set_lineno(node)
+        self.visit(node.value)
+        self.emit('RETURN_VALUE')
+
+    def visitYield(self, node):
+        self.set_lineno(node)
+        self.visit(node.value)
+        self.emit('YIELD_STMT')
+
+    # slice and subscript stuff
+
+    def visitSlice(self, node, aug_flag=None):
+        # aug_flag is used by visitAugSlice
+        self.visit(node.expr)
+        slice = 0
+        if node.lower:
+            self.visit(node.lower)
+            slice = slice | 1
+        if node.upper:
+            self.visit(node.upper)
+            slice = slice | 2
+        if aug_flag:
+            if slice == 0:
+                self.emit('DUP_TOP')
+            elif slice == 3:
+                self.emit('DUP_TOPX', 3)
+            else:
+                self.emit('DUP_TOPX', 2)
+        if node.flags == 'OP_APPLY':
+            self.emit('SLICE+%d' % slice)
+        elif node.flags == 'OP_ASSIGN':
+            self.emit('STORE_SLICE+%d' % slice)
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_SLICE+%d' % slice)
+        else:
+            print "weird slice", node.flags
+            raise
+
+    def visitSubscript(self, node, aug_flag=None):
+        self.visit(node.expr)
+        for sub in node.subs:
+            self.visit(sub)
+        if aug_flag:
+            self.emit('DUP_TOPX', 2)
+        if len(node.subs) > 1:
+            self.emit('BUILD_TUPLE', len(node.subs))
+        if node.flags == 'OP_APPLY':
+            self.emit('BINARY_SUBSCR')
+        elif node.flags == 'OP_ASSIGN':
+            self.emit('STORE_SUBSCR')
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_SUBSCR')
+
+    # binary ops
+
+    def binaryOp(self, node, op):
+        self.visit(node.left)
+        self.visit(node.right)
+        self.emit(op)
+
+    def visitAdd(self, node):
+        return self.binaryOp(node, 'BINARY_ADD')
+
+    def visitSub(self, node):
+        return self.binaryOp(node, 'BINARY_SUBTRACT')
+
+    def visitMul(self, node):
+        return self.binaryOp(node, 'BINARY_MULTIPLY')
+
+    def visitDiv(self, node):
+        return self.binaryOp(node, self._div_op)
+
+    def visitFloorDiv(self, node):
+        return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE')
+
+    def visitMod(self, node):
+        return self.binaryOp(node, 'BINARY_MODULO')
+
+    def visitPower(self, node):
+        return self.binaryOp(node, 'BINARY_POWER')
+
+    def visitLeftShift(self, node):
+        return self.binaryOp(node, 'BINARY_LSHIFT')
+
+    def visitRightShift(self, node):
+        return self.binaryOp(node, 'BINARY_RSHIFT')
+
+    # unary ops
+
+    def unaryOp(self, node, op):
+        self.visit(node.expr)
+        self.emit(op)
+
+    def visitInvert(self, node):
+        return self.unaryOp(node, 'UNARY_INVERT')
+
+    def visitUnarySub(self, node):
+        return self.unaryOp(node, 'UNARY_NEGATIVE')
+
+    def visitUnaryAdd(self, node):
+        return self.unaryOp(node, 'UNARY_POSITIVE')
+
+    def visitUnaryInvert(self, node):
+        return self.unaryOp(node, 'UNARY_INVERT')
+
+    def visitNot(self, node):
+        return self.unaryOp(node, 'UNARY_NOT')
+
+    def visitBackquote(self, node):
+        return self.unaryOp(node, 'UNARY_CONVERT')
+
+    # bit ops
+
+    def bitOp(self, nodes, op):
+        self.visit(nodes[0])
+        for node in nodes[1:]:
+            self.visit(node)
+            self.emit(op)
+
+    def visitBitand(self, node):
+        return self.bitOp(node.nodes, 'BINARY_AND')
+
+    def visitBitor(self, node):
+        return self.bitOp(node.nodes, 'BINARY_OR')
+
+    def visitBitxor(self, node):
+        return self.bitOp(node.nodes, 'BINARY_XOR')
+
+    # object constructors
+
+    def visitEllipsis(self, node):
+        self.emit('LOAD_CONST', Ellipsis)
+
+    def visitTuple(self, node):
+        self.set_lineno(node)
+        for elt in node.nodes:
+            self.visit(elt)
+        self.emit('BUILD_TUPLE', len(node.nodes))
+
+    def visitList(self, node):
+        self.set_lineno(node)
+        for elt in node.nodes:
+            self.visit(elt)
+        self.emit('BUILD_LIST', len(node.nodes))
+
+    def visitSliceobj(self, node):
+        for child in node.nodes:
+            self.visit(child)
+        self.emit('BUILD_SLICE', len(node.nodes))
+
+    def visitDict(self, node):
+        lineno = getattr(node, 'lineno', None)
+        if lineno:
+            self.emit('SET_LINENO', lineno)
+        self.emit('BUILD_MAP', 0)
+        for k, v in node.items:
+            lineno2 = getattr(node, 'lineno', None)
+            if lineno2 is not None and lineno != lineno2:
+                self.emit('SET_LINENO', lineno2)
+                lineno = lineno2
+            self.emit('DUP_TOP')
+            self.visit(v)
+            self.emit('ROT_TWO')
+            self.visit(k)
+            self.emit('STORE_SUBSCR')
+
+class NestedScopeMixin:
+    """Defines initClass() for nested scoping (Python 2.2-compatible)"""
+    def initClass(self):
+        self.__class__.NameFinder = LocalNameFinder
+        self.__class__.FunctionGen = FunctionCodeGenerator
+        self.__class__.ClassGen = ClassCodeGenerator
+
+class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator):
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<module>", tree.filename)
+        self.futures = future.find_futures(tree)
+        self.__super_init()
+        walk(tree, self)
+
+    def get_module(self):
+        return self
+
+class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator):
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+    futures = ()
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<expression>", tree.filename)
+        self.__super_init()
+        walk(tree, self)
+
+    def get_module(self):
+        return self
+
+class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator):
+
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+    futures = ()
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<interactive>", tree.filename)
+        self.__super_init()
+        self.set_lineno(tree)
+        walk(tree, self)
+        self.emit('RETURN_VALUE')
+
+    def get_module(self):
+        return self
+    
+    def visitDiscard(self, node):
+        # XXX Discard means it's an expression.  Perhaps this is a bad
+        # name.
+        self.visit(node.expr)
+        self.emit('PRINT_EXPR')
+
+class AbstractFunctionCode:
+    optimized = 1
+    lambdaCount = 0
+
+    def __init__(self, func, scopes, isLambda, class_name, mod):
+        self.class_name = class_name
+        self.module = mod
+        if isLambda:
+            klass = FunctionCodeGenerator
+            name = "<lambda.%d>" % klass.lambdaCount
+            klass.lambdaCount = klass.lambdaCount + 1
+        else:
+            name = func.name
+        args, hasTupleArg = generateArgList(func.argnames)
+        self.graph = pyassem.PyFlowGraph(name, func.filename, args,
+                                         optimized=1)
+        self.isLambda = isLambda
+        self.super_init()
+
+        if not isLambda and func.doc:
+            self.setDocstring(func.doc)
+
+        lnf = walk(func.code, self.NameFinder(args), verbose=0)
+        self.locals.push(lnf.getLocals())
+        if func.varargs:
+            self.graph.setFlag(CO_VARARGS)
+        if func.kwargs:
+            self.graph.setFlag(CO_VARKEYWORDS)
+        self.set_lineno(func)
+        if hasTupleArg:
+            self.generateArgUnpack(func.argnames)
+
+    def get_module(self):
+        return self.module
+
+    def finish(self):
+        self.graph.startExitBlock()
+        if not self.isLambda:
+            self.emit('LOAD_CONST', None)
+        self.emit('RETURN_VALUE')
+
+    def generateArgUnpack(self, args):
+        for i in range(len(args)):
+            arg = args[i]
+            if type(arg) == types.TupleType:
+                self.emit('LOAD_FAST', '.%d' % (i * 2))
+                self.unpackSequence(arg)
+
+    def unpackSequence(self, tup):
+        if VERSION > 1:
+            self.emit('UNPACK_SEQUENCE', len(tup))
+        else:
+            self.emit('UNPACK_TUPLE', len(tup))
+        for elt in tup:
+            if type(elt) == types.TupleType:
+                self.unpackSequence(elt)
+            else:
+                self._nameOp('STORE', elt)
+
+    unpackTuple = unpackSequence
+
+class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
+                            CodeGenerator):
+    super_init = CodeGenerator.__init__ # called by other init
+    scopes = None
+
+    __super_init = AbstractFunctionCode.__init__
+
+    def __init__(self, func, scopes, isLambda, class_name, mod):
+        self.scopes = scopes
+        self.scope = scopes[func]
+        self.__super_init(func, scopes, isLambda, class_name, mod)
+        self.graph.setFreeVars(self.scope.get_free_vars())
+        self.graph.setCellVars(self.scope.get_cell_vars())
+        if self.graph.checkFlag(CO_GENERATOR_ALLOWED):
+            if self.scope.generator is not None:
+                self.graph.setFlag(CO_GENERATOR)
+
+class AbstractClassCode:
+
+    def __init__(self, klass, scopes, module):
+        self.class_name = klass.name
+        self.module = module
+        self.graph = pyassem.PyFlowGraph(klass.name, klass.filename,
+                                           optimized=0, klass=1)
+        self.super_init()
+        lnf = walk(klass.code, self.NameFinder(), verbose=0)
+        self.locals.push(lnf.getLocals())
+        self.graph.setFlag(CO_NEWLOCALS)
+        if klass.doc:
+            self.setDocstring(klass.doc)
+
+    def get_module(self):
+        return self.module
+
+    def finish(self):
+        self.graph.startExitBlock()
+        self.emit('LOAD_LOCALS')
+        self.emit('RETURN_VALUE')
+
+class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator):
+    super_init = CodeGenerator.__init__
+    scopes = None
+
+    __super_init = AbstractClassCode.__init__
+
+    def __init__(self, klass, scopes, module):
+        self.scopes = scopes
+        self.scope = scopes[klass]
+        self.__super_init(klass, scopes, module)
+        self.graph.setFreeVars(self.scope.get_free_vars())
+        self.graph.setCellVars(self.scope.get_cell_vars())
+        self.set_lineno(klass)
+        if klass.doc:
+            self.emit("LOAD_CONST", klass.doc)
+            self.storeName("__doc__")
+
+def generateArgList(arglist):
+    """Generate an arg list marking TupleArgs"""
+    args = []
+    extra = []
+    count = 0
+    for i in range(len(arglist)):
+        elt = arglist[i]
+        if type(elt) == types.StringType:
+            args.append(elt)
+        elif type(elt) == types.TupleType:
+            args.append(TupleArg(i * 2, elt))
+            extra.extend(misc.flatten(elt))
+            count = count + 1
+        else:
+            raise ValueError, "unexpect argument type:", elt
+    return args + extra, count
+
+def findOp(node):
+    """Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
+    v = OpFinder()
+    walk(node, v, verbose=0)
+    return v.op
+
+class OpFinder:
+    def __init__(self):
+        self.op = None
+    def visitAssName(self, node):
+        if self.op is None:
+            self.op = node.flags
+        elif self.op != node.flags:
+            raise ValueError, "mixed ops in stmt"
+    visitAssAttr = visitAssName
+    visitSubscript = visitAssName
+
+class Delegator:
+    """Base class to support delegation for augmented assignment nodes
+
+    To generate code for augmented assignments, we use the following
+    wrapper classes.  In visitAugAssign, the left-hand expression node
+    is visited twice.  The first time the visit uses the normal method
+    for that node.  The second time the visit uses a different method
+    that generates the appropriate code to perform the assignment.
+    These delegator classes wrap the original AST nodes in order to
+    support the variant visit methods.
+    """
+    def __init__(self, obj):
+        self.obj = obj
+
+    def __getattr__(self, attr):
+        return getattr(self.obj, attr)
+
+class AugGetattr(Delegator):
+    pass
+
+class AugName(Delegator):
+    pass
+
+class AugSlice(Delegator):
+    pass
+
+class AugSubscript(Delegator):
+    pass
+
+wrapper = {
+    ast.Getattr: AugGetattr,
+    ast.Name: AugName,
+    ast.Slice: AugSlice,
+    ast.Subscript: AugSubscript,
+    }
+
+def wrap_aug(node):
+    return wrapper[node.__class__](node)
+
+if __name__ == "__main__":
+    import sys
+
+    for file in sys.argv[1:]:
+        compileFile(file)
diff --git a/lib-python/2.2/compiler/symbols.py b/lib-python/2.2/compiler/symbols.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/symbols.py
@@ -0,0 +1,419 @@
+"""Module symbol-table generator"""
+
+from compiler import ast
+from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
+from compiler.misc import mangle
+import types
+
+
+import sys
+
+MANGLE_LEN = 256
+
+class Scope:
+    """Symbol-table record for one scope: names defined, used, global,
+    free, and cell, plus links to nested child scopes."""
+    # XXX how much information do I need about each name?
+    def __init__(self, name, module, klass=None):
+        self.name = name
+        self.module = module
+        # The dicts below are used as sets: name -> 1.
+        self.defs = {}
+        self.uses = {}
+        self.globals = {}
+        self.params = {}
+        self.frees = {}
+        self.cells = {}
+        self.children = []
+        # nested is true if the class could contain free variables,
+        # i.e. if it is nested within another function.
+        self.nested = None
+        self.generator = None
+        self.klass = None
+        if klass is not None:
+            # Strip leading underscores from the class name, matching
+            # CPython's private-name mangling.
+            for i in range(len(klass)):
+                if klass[i] != '_':
+                    self.klass = klass[i:]
+                    break
+
+    def __repr__(self):
+        return "<%s: %s>" % (self.__class__.__name__, self.name)
+
+    def mangle(self, name):
+        # Private-name mangling applies only inside a class body.
+        if self.klass is None:
+            return name
+        return mangle(name, self.klass)
+
+    def add_def(self, name):
+        self.defs[self.mangle(name)] = 1
+
+    def add_use(self, name):
+        self.uses[self.mangle(name)] = 1
+
+    def add_global(self, name):
+        name = self.mangle(name)
+        if self.uses.has_key(name) or self.defs.has_key(name):
+            pass # XXX warn about global following def/use
+        if self.params.has_key(name):
+            raise SyntaxError, "%s in %s is global and parameter" % \
+                  (name, self.name)
+        self.globals[name] = 1
+        # A global declaration also registers the name at module level.
+        self.module.add_def(name)
+
+    def add_param(self, name):
+        name = self.mangle(name)
+        self.defs[name] = 1
+        self.params[name] = 1
+
+    def get_names(self):
+        # Union of defs, uses, and globals.
+        d = {}
+        d.update(self.defs)
+        d.update(self.uses)
+        d.update(self.globals)
+        return d.keys()
+
+    def add_child(self, child):
+        self.children.append(child)
+
+    def get_children(self):
+        return self.children
+
+    def DEBUG(self):
+        # Dump this scope's name sets to stderr (debugging aid).
+        print >> sys.stderr, self.name, self.nested and "nested" or ""
+        print >> sys.stderr, "\tglobals: ", self.globals
+        print >> sys.stderr, "\tcells: ", self.cells
+        print >> sys.stderr, "\tdefs: ", self.defs
+        print >> sys.stderr, "\tuses: ", self.uses
+        print >> sys.stderr, "\tfrees:", self.frees
+
+    def check_name(self, name):
+        """Return scope of name.
+
+        The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
+        """
+        if self.globals.has_key(name):
+            return SC_GLOBAL
+        if self.cells.has_key(name):
+            return SC_CELL
+        if self.defs.has_key(name):
+            return SC_LOCAL
+        if self.nested and (self.frees.has_key(name) or
+                            self.uses.has_key(name)):
+            return SC_FREE
+        if self.nested:
+            return SC_UNKNOWN
+        else:
+            # At top level an unknown name must be a global or builtin.
+            return SC_GLOBAL
+
+    def get_free_vars(self):
+        if not self.nested:
+            return ()
+        free = {}
+        free.update(self.frees)
+        # A name used here but neither defined nor declared global is free.
+        for name in self.uses.keys():
+            if not (self.defs.has_key(name) or
+                    self.globals.has_key(name)):
+                free[name] = 1
+        return free.keys()
+
+    def handle_children(self):
+        # Resolve each child's free variables against this scope.
+        for child in self.children:
+            frees = child.get_free_vars()
+            globals = self.add_frees(frees)
+            for name in globals:
+                child.force_global(name)
+
+    def force_global(self, name):
+        """Force name to be global in scope.
+
+        Some child of the current node had a free reference to name.
+        When the child was processed, it was labelled a free
+        variable.  Now that all its enclosing scopes have been
+        processed, the name is known to be a global or builtin.  So
+        walk back down the child chain and set the name to be global
+        rather than free.
+
+        Be careful to stop if a child does not think the name is
+        free.
+        """
+        self.globals[name] = 1
+        if self.frees.has_key(name):
+            del self.frees[name]
+        for child in self.children:
+            if child.check_name(name) == SC_FREE:
+                child.force_global(name)
+
+    def add_frees(self, names):
+        """Process list of free vars from nested scope.
+
+        Returns a list of names that are either 1) declared global in the
+        parent or 2) undefined in a top-level parent.  In either case,
+        the nested scope should treat them as globals.
+        """
+        child_globals = []
+        for name in names:
+            sc = self.check_name(name)
+            if self.nested:
+                if sc == SC_UNKNOWN or sc == SC_FREE \
+                   or isinstance(self, ClassScope):
+                    self.frees[name] = 1
+                elif sc == SC_GLOBAL:
+                    child_globals.append(name)
+                elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
+                    # A local consumed by a nested scope becomes a cell.
+                    self.cells[name] = 1
+                elif sc != SC_CELL:
+                    child_globals.append(name)
+            else:
+                if sc == SC_LOCAL:
+                    self.cells[name] = 1
+                elif sc != SC_CELL:
+                    child_globals.append(name)
+        return child_globals
+
+    def get_cell_vars(self):
+        return self.cells.keys()
+
+class ModuleScope(Scope):
+    __super_init = Scope.__init__
+
+    def __init__(self):
+        # The module scope acts as its own "module" attribute.
+        self.__super_init("global", self)
+
+class FunctionScope(Scope):
+    pass
+
+class LambdaScope(FunctionScope):
+    __super_init = Scope.__init__
+
+    __counter = 1
+
+    def __init__(self, module, klass=None):
+        # NOTE(review): "self.__counter += 1" rebinds an *instance*
+        # attribute, so the class-level counter never advances and every
+        # lambda scope is named "lambda.1" -- historical CPython behavior;
+        # confirm before relying on distinct names.
+        i = self.__counter
+        self.__counter += 1
+        self.__super_init("lambda.%d" % i, module, klass)
+
+class ClassScope(Scope):
+    __super_init = Scope.__init__
+
+    def __init__(self, name, module):
+        # The class name is also passed as the mangling context.
+        self.__super_init(name, module, name)
+
+class SymbolVisitor:
+    """AST walker that builds a Scope object for each scoping node."""
+    def __init__(self):
+        # Maps scope-introducing AST node -> its Scope object.
+        self.scopes = {}
+        self.klass = None
+
+    # nodes that define new scopes
+
+    def visitModule(self, node):
+        scope = self.module = self.scopes[node] = ModuleScope()
+        self.visit(node.node, scope)
+
+    visitExpression = visitModule
+
+    def visitFunction(self, node, parent):
+        parent.add_def(node.name)
+        # Default expressions are evaluated in the enclosing scope.
+        for n in node.defaults:
+            self.visit(n, parent)
+        scope = FunctionScope(node.name, self.module, self.klass)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        self.scopes[node] = scope
+        self._do_args(scope, node.argnames)
+        self.visit(node.code, scope)
+        self.handle_free_vars(scope, parent)
+
+    def visitLambda(self, node, parent):
+        for n in node.defaults:
+            self.visit(n, parent)
+        scope = LambdaScope(self.module, self.klass)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        self.scopes[node] = scope
+        self._do_args(scope, node.argnames)
+        self.visit(node.code, scope)
+        self.handle_free_vars(scope, parent)
+
+    def _do_args(self, scope, args):
+        # Tuple parameters are flattened recursively.
+        for name in args:
+            if type(name) == types.TupleType:
+                self._do_args(scope, name)
+            else:
+                scope.add_param(name)
+
+    def handle_free_vars(self, scope, parent):
+        parent.add_child(scope)
+        scope.handle_children()
+
+    def visitClass(self, node, parent):
+        parent.add_def(node.name)
+        # Base-class expressions are evaluated in the enclosing scope.
+        for n in node.bases:
+            self.visit(n, parent)
+        scope = ClassScope(node.name, self.module)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        if node.doc is not None:
+            scope.add_def('__doc__')
+        self.scopes[node] = scope
+        prev = self.klass
+        self.klass = node.name
+        self.visit(node.code, scope)
+        self.klass = prev
+        self.handle_free_vars(scope, parent)
+
+    # name can be a def or a use
+
+    # XXX a few calls and nodes expect a third "assign" arg that is
+    # true if the name is being used as an assignment.  only
+    # expressions contained within statements may have the assign arg.
+
+    def visitName(self, node, scope, assign=0):
+        if assign:
+            scope.add_def(node.name)
+        else:
+            scope.add_use(node.name)
+
+    # operations that bind new names
+
+    def visitFor(self, node, scope):
+        self.visit(node.assign, scope, 1)
+        self.visit(node.list, scope)
+        self.visit(node.body, scope)
+        if node.else_:
+            self.visit(node.else_, scope)
+
+    def visitFrom(self, node, scope):
+        for name, asname in node.names:
+            if name == "*":
+                # Star imports bind an unknowable set of names; skip.
+                continue
+            scope.add_def(asname or name)
+
+    def visitImport(self, node, scope):
+        for name, asname in node.names:
+            # "import a.b" binds only the top-level package name "a".
+            i = name.find(".")
+            if i > -1:
+                name = name[:i]
+            scope.add_def(asname or name)
+
+    def visitGlobal(self, node, scope):
+        for name in node.names:
+            scope.add_global(name)
+
+    def visitAssign(self, node, scope):
+        """Propagate assignment flag down to child nodes.
+
+        The Assign node doesn't itself contain the variables being
+        assigned to.  Instead, the children in node.nodes are visited
+        with the assign flag set to true.  When the names occur in
+        those nodes, they are marked as defs.
+
+        Some names that occur in an assignment target are not bound by
+        the assignment, e.g. a name occurring inside a slice.  The
+        visitor handles these nodes specially; they do not propagate
+        the assign flag to their children.
+        """
+        for n in node.nodes:
+            self.visit(n, scope, 1)
+        self.visit(node.expr, scope)
+
+    def visitAssName(self, node, scope, assign=1):
+        scope.add_def(node.name)
+
+    def visitAssAttr(self, node, scope, assign=0):
+        # Only the base expression is visited; attribute stores bind nothing.
+        self.visit(node.expr, scope, 0)
+
+    def visitSubscript(self, node, scope, assign=0):
+        self.visit(node.expr, scope, 0)
+        for n in node.subs:
+            self.visit(n, scope, 0)
+
+    def visitSlice(self, node, scope, assign=0):
+        self.visit(node.expr, scope, 0)
+        if node.lower:
+            self.visit(node.lower, scope, 0)
+        if node.upper:
+            self.visit(node.upper, scope, 0)
+
+    def visitAugAssign(self, node, scope):
+        # If the LHS is a name, then this counts as assignment.
+        # Otherwise, it's just use.
+        self.visit(node.node, scope)
+        if isinstance(node.node, ast.Name):
+            self.visit(node.node, scope, 1) # XXX worry about this
+        self.visit(node.expr, scope)
+
+    # prune if statements if tests are false
+
+    _const_types = types.StringType, types.IntType, types.FloatType
+
+    def visitIf(self, node, scope):
+        for test, body in node.tests:
+            if isinstance(test, ast.Const):
+                if type(test.value) in self._const_types:
+                    if not test.value:
+                        # Constant-false branch: its names are ignored.
+                        continue
+            self.visit(test, scope)
+            self.visit(body, scope)
+        if node.else_:
+            self.visit(node.else_, scope)
+
+    # a yield statement signals a generator
+
+    def visitYield(self, node, scope):
+        scope.generator = 1
+        self.visit(node.value, scope)
+
+def sort(l):
+    """Return a sorted copy of l, leaving l unchanged."""
+    l = l[:]
+    l.sort()
+    return l
+
+def list_eq(l1, l2):
+    # Order-insensitive list comparison.
+    return sort(l1) == sort(l2)
+
+if __name__ == "__main__":
+    import sys
+    from compiler import parseFile, walk
+    import symtable
+
+    def get_names(syms):
+        # Drop compiler-generated names like "_[1]" and ".0".
+        return [s for s in [s.get_name() for s in syms.get_symbols()]
+                if not (s.startswith('_[') or s.startswith('.'))]
+
+    # Self-test: compare this module's scope analysis against the
+    # C symtable module for each file named on the command line.
+    for file in sys.argv[1:]:
+        print file
+        f = open(file)
+        buf = f.read()
+        f.close()
+        syms = symtable.symtable(buf, file, "exec")
+        mod_names = get_names(syms)
+        tree = parseFile(file)
+        s = SymbolVisitor()
+        walk(tree, s)
+
+        # compare module-level symbols
+        names2 = s.scopes[tree].get_names()
+
+        if not list_eq(mod_names, names2):
+            print
+            print "oops", file
+            print sort(mod_names)
+            print sort(names2)
+            sys.exit(-1)
+
+        d = {}
+        d.update(s.scopes)
+        del d[tree]
+        scopes = d.values()
+        del d
+
+        # NOTE: this loop rebinds s (previously the SymbolVisitor).
+        for s in syms.get_symbols():
+            if s.is_namespace():
+                l = [sc for sc in scopes
+                     if sc.name == s.get_name()]
+                if len(l) > 1:
+                    # Ambiguous: several scopes share this name.
+                    print "skipping", s.get_name()
+                else:
+                    if not list_eq(get_names(s.get_namespace()),
+                                   l[0].get_names()):
+                        print s.get_name()
+                        print sort(get_names(s.get_namespace()))
+                        print sort(l[0].get_names())
+                        sys.exit(-1)
diff --git a/lib-python/2.2/compiler/syntax.py b/lib-python/2.2/compiler/syntax.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/syntax.py
@@ -0,0 +1,46 @@
+"""Check for errs in the AST.
+
+The Python parser does not catch all syntax errors.  Others, like
+assignments with invalid targets, are caught in the code generation
+phase.
+
+The compiler package catches some errors in the transformer module.
+But it seems clearer to write checkers that use the AST to detect
+errors.
+"""
+
+from compiler import ast, walk
+
+def check(tree, multi=None):
+    """Walk tree with a SyntaxErrorChecker; return the error count."""
+    v = SyntaxErrorChecker(multi)
+    walk(tree, v)
+    return v.errors
+
+class SyntaxErrorChecker:
+    """A visitor to find syntax errors in the AST."""
+
+    def __init__(self, multi=None):
+        """Create new visitor object.
+
+        If optional argument multi is not None, then print messages
+        for each error rather than raising a SyntaxError for the
+        first.
+        """
+        self.multi = multi
+        # Running count of errors found.
+        self.errors = 0
+
+    def error(self, node, msg):
+        # Either report-and-continue (multi mode) or raise immediately.
+        self.errors = self.errors + 1
+        if self.multi is not None:
+            print "%s:%s: %s" % (node.filename, node.lineno, msg)
+        else:
+            raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
+
+    def visitAssign(self, node):
+        # the transformer module handles many of these
+        for target in node.nodes:
+            pass
+##            if isinstance(target, ast.AssList):
+##                if target.lineno is None:
+##                    target.lineno = node.lineno
+##                self.error(target, "can't assign to list comprehension")
diff --git a/lib-python/2.2/compiler/transformer.py b/lib-python/2.2/compiler/transformer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/transformer.py
@@ -0,0 +1,1359 @@
+"""Parse tree transformation module.
+
+Transforms Python source code into an abstract syntax tree (AST)
+defined in the ast module.
+
+The simplest ways to invoke this module are via parse and parseFile.
+parse(buf) -> AST
+parseFile(path) -> AST
+"""
+
+# Original version written by Greg Stein (gstein at lyra.org)
+#                         and Bill Tutt (rassilon at lima.mudlib.org)
+# February 1997.
+#
+# Modifications and improvements for Python 2.0 by Jeremy Hylton and
+# Mark Hammond
+
+# Portions of this file are:
+# Copyright (C) 1997-1998 Greg Stein. All Rights Reserved.
+#
+# This module is provided under a BSD-ish license. See
+#   http://www.opensource.org/licenses/bsd-license.html
+# and replace OWNER, ORGANIZATION, and YEAR as appropriate.
+
+from ast import *
+import parser
+# Care must be taken to use only symbols and tokens defined in Python
+# 1.5.2 for code branches executed in 1.5.2
+import symbol
+import token
+import string
+import sys
+
+# 1.5.2-era string exception raised for internal transformer errors.
+error = 'walker.error'
+
+from consts import CO_VARARGS, CO_VARKEYWORDS
+from consts import OP_ASSIGN, OP_DELETE, OP_APPLY
+
+def parseFile(path):
+    """Parse the Python source file at path and return its AST."""
+    f = open(path)
+    src = f.read()
+    f.close()
+    return parse(src)
+
+def parse(buf, mode="exec"):
+    """Parse buf and return an AST; mode mirrors compile()'s mode arg."""
+    if mode == "exec" or mode == "single":
+        return Transformer().parsesuite(buf)
+    elif mode == "eval":
+        return Transformer().parseexpr(buf)
+    else:
+        raise ValueError("compile() arg 3 must be"
+                         " 'exec' or 'eval' or 'single'")
+
+def asList(nodes):
+    """Recursively convert AST nodes (and nested tuples/lists) to lists."""
+    l = []
+    for item in nodes:
+        if hasattr(item, "asList"):
+            l.append(item.asList())
+        else:
+            if type(item) is type( (None, None) ):
+                l.append(tuple(asList(item)))
+            elif type(item) is type( [] ):
+                l.append(asList(item))
+            else:
+                l.append(item)
+    return l
+
+def Node(*args):
+    """Instantiate the ast node class named by args[0] with args[1:]."""
+    kind = args[0]
+    if nodes.has_key(kind):
+        try:
+            return apply(nodes[kind], args[1:])
+        except TypeError:
+            # Wrong argument count for the constructor: show it, re-raise.
+            print nodes[kind], len(args), args
+            raise
+    else:
+        raise error, "Can't find appropriate Node type: %s" % str(args)
+        #return apply(ast.Node, args)
+
+class Transformer:
+    """Utility object for transforming Python parse trees.
+
+    Exposes the following methods:
+        tree = transform(ast_tree)
+        tree = parsesuite(text)
+        tree = parseexpr(text)
+        tree = parsefile(fileob | filename)
+    """
+
+    def __init__(self):
+        # Map grammar symbol number -> handler method, for every grammar
+        # production this class defines a method for.
+        self._dispatch = {}
+        for value, name in symbol.sym_name.items():
+            if hasattr(self, name):
+                self._dispatch[value] = getattr(self, name)
+        self._dispatch[token.NEWLINE] = self.com_NEWLINE
+        # Atoms dispatch on their first token instead of a symbol number.
+        self._atom_dispatch = {token.LPAR: self.atom_lpar,
+                               token.LSQB: self.atom_lsqb,
+                               token.LBRACE: self.atom_lbrace,
+                               token.BACKQUOTE: self.atom_backquote,
+                               token.NUMBER: self.atom_number,
+                               token.STRING: self.atom_string,
+                               token.NAME: self.atom_name,
+                               }
+
+    def transform(self, tree):
+        """Transform an AST into a modified parse tree."""
+        if type(tree) != type(()) and type(tree) != type([]):
+            # Accept a parser AST object; flatten to nested tuples first.
+            tree = parser.ast2tuple(tree, line_info=1)
+        return self.compile_node(tree)
+
+    def parsesuite(self, text):
+        """Return a modified parse tree for the given suite text."""
+        # Hack for handling non-native line endings on non-DOS like OSs.
+        text = string.replace(text, '\x0d', '')
+        return self.transform(parser.suite(text))
+
+    def parseexpr(self, text):
+        """Return a modified parse tree for the given expression text."""
+        return self.transform(parser.expr(text))
+
+    def parsefile(self, file):
+        """Return a modified parse tree for the contents of the given file."""
+        # file may be an open file object or a path string.
+        if type(file) == type(''):
+            file = open(file)
+        return self.parsesuite(file.read())
+
+    # --------------------------------------------------------------
+    #
+    # PRIVATE METHODS
+    #
+
+    def compile_node(self, node):
+        ### emit a line-number node?
+        # Dispatch on the grammar start symbol at the root of the tree.
+        n = node[0]
+        if n == symbol.single_input:
+            return self.single_input(node[1:])
+        if n == symbol.file_input:
+            return self.file_input(node[1:])
+        if n == symbol.eval_input:
+            return self.eval_input(node[1:])
+        if n == symbol.lambdef:
+            return self.lambdef(node[1:])
+        if n == symbol.funcdef:
+            return self.funcdef(node[1:])
+        if n == symbol.classdef:
+            return self.classdef(node[1:])
+
+        raise error, ('unexpected node type', n)
+
+    def single_input(self, node):
+        ### do we want to do anything about being "interactive" ?
+
+        # NEWLINE | simple_stmt | compound_stmt NEWLINE
+        n = node[0][0]
+        if n != token.NEWLINE:
+            return self.com_stmt(node[0])
+
+        # A blank interactive line becomes an empty statement.
+        return Pass()
+
+    def file_input(self, nodelist):
+        doc = self.get_docstring(nodelist, symbol.file_input)
+        if doc is not None:
+            # Skip the docstring statement; it is stored on the Module node.
+            i = 1
+        else:
+            i = 0
+        stmts = []
+        for node in nodelist[i:]:
+            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
+                self.com_append_stmt(stmts, node)
+        return Module(doc, Stmt(stmts))
+
+    def eval_input(self, nodelist):
+        # from the built-in function input()
+        ### is this sufficient?
+        return Expression(self.com_node(nodelist[0]))
+
+    def funcdef(self, nodelist):
+        # funcdef: 'def' NAME parameters ':' suite
+        # parameters: '(' [varargslist] ')'
+
+        lineno = nodelist[1][2]
+        name = nodelist[1][1]
+        args = nodelist[2][2]
+
+        if args[0] == symbol.varargslist:
+            names, defaults, flags = self.com_arglist(args[1:])
+        else:
+            names = defaults = ()
+            flags = 0
+        doc = self.get_docstring(nodelist[4])
+
+        # code for function
+        code = self.com_node(nodelist[4])
+
+        if doc is not None:
+            # The docstring appears as the first Discard statement; remove
+            # it from the body since it is carried on the Function node.
+            assert isinstance(code, Stmt)
+            assert isinstance(code.nodes[0], Discard)
+            del code.nodes[0]
+        n = Function(name, names, defaults, flags, doc, code)
+        n.lineno = lineno
+        return n
+
+    def lambdef(self, nodelist):
+        # lambdef: 'lambda' [varargslist] ':' test
+        if nodelist[2][0] == symbol.varargslist:
+            names, defaults, flags = self.com_arglist(nodelist[2][1:])
+        else:
+            names = defaults = ()
+            flags = 0
+
+        # code for lambda
+        code = self.com_node(nodelist[-1])
+
+        n = Lambda(names, defaults, flags, code)
+        n.lineno = nodelist[1][2]
+        return n
+
+    def classdef(self, nodelist):
+        # classdef: 'class' NAME ['(' testlist ')'] ':' suite
+
+        name = nodelist[1][1]
+        doc = self.get_docstring(nodelist[-1])
+        if nodelist[2][0] == token.COLON:
+            bases = []
+        else:
+            bases = self.com_bases(nodelist[3])
+
+        # code for class
+        code = self.com_node(nodelist[-1])
+
+        if doc is not None:
+            # As in funcdef: strip the docstring Discard from the body.
+            assert isinstance(code, Stmt)
+            assert isinstance(code.nodes[0], Discard)
+            del code.nodes[0]
+
+        n = Class(name, bases, doc, code)
+        n.lineno = nodelist[1][2]
+        return n
+
+    def stmt(self, nodelist):
+        return self.com_stmt(nodelist[0])
+
+    small_stmt = stmt
+    flow_stmt = stmt
+    compound_stmt = stmt
+
+    def simple_stmt(self, nodelist):
+        # small_stmt (';' small_stmt)* [';'] NEWLINE
+        stmts = []
+        for i in range(0, len(nodelist), 2):
+            self.com_append_stmt(stmts, nodelist[i])
+        return Stmt(stmts)
+
+    # The productions below are consumed inline by the com_* helpers and
+    # should never reach the dispatcher; hitting one is an internal error.
+    def parameters(self, nodelist):
+        raise error
+
+    def varargslist(self, nodelist):
+        raise error
+
+    def fpdef(self, nodelist):
+        raise error
+
+    def fplist(self, nodelist):
+        raise error
+
+    def dotted_name(self, nodelist):
+        raise error
+
+    def comp_op(self, nodelist):
+        raise error
+
+    def trailer(self, nodelist):
+        raise error
+
+    def sliceop(self, nodelist):
+        raise error
+
+    def argument(self, nodelist):
+        raise error
+
+    # --------------------------------------------------------------
+    #
+    # STATEMENT NODES  (invoked by com_node())
+    #
+
+    def expr_stmt(self, nodelist):
+        # augassign testlist | testlist ('=' testlist)*
+        en = nodelist[-1]
+        exprNode = self.lookup_node(en)(en[1:])
+        if len(nodelist) == 1:
+            # Bare expression statement.
+            n = Discard(exprNode)
+            n.lineno = exprNode.lineno
+            return n
+        if nodelist[1][0] == token.EQUAL:
+            # Chained assignment: every testlist but the last is a target.
+            nodes = []
+            for i in range(0, len(nodelist) - 2, 2):
+                nodes.append(self.com_assign(nodelist[i], OP_ASSIGN))
+            n = Assign(nodes, exprNode)
+            n.lineno = nodelist[1][2]
+        else:
+            lval = self.com_augassign(nodelist[0])
+            op = self.com_augassign_op(nodelist[1])
+            n = AugAssign(lval, op[1], exprNode)
+            n.lineno = op[2]
+        return n
+
+    def print_stmt(self, nodelist):
+        # print ([ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ])
+        items = []
+        if len(nodelist) == 1:
+            start = 1
+            dest = None
+        elif nodelist[1][0] == token.RIGHTSHIFT:
+            # "print >> dest" form.
+            assert len(nodelist) == 3 \
+                   or nodelist[3][0] == token.COMMA
+            dest = self.com_node(nodelist[2])
+            start = 4
+        else:
+            dest = None
+            start = 1
+        for i in range(start, len(nodelist), 2):
+            items.append(self.com_node(nodelist[i]))
+        if nodelist[-1][0] == token.COMMA:
+            # Trailing comma suppresses the newline: Print, not Printnl.
+            n = Print(items, dest)
+            n.lineno = nodelist[0][2]
+            return n
+        n = Printnl(items, dest)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def del_stmt(self, nodelist):
+        # 'del' exprlist -- target handling is shared with assignment.
+        return self.com_assign(nodelist[1], OP_DELETE)
+
+    def pass_stmt(self, nodelist):
+        n = Pass()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def break_stmt(self, nodelist):
+        n = Break()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def continue_stmt(self, nodelist):
+        n = Continue()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def return_stmt(self, nodelist):
+        # return: [testlist]
+        if len(nodelist) < 2:
+            # Bare 'return' returns None.
+            n = Return(Const(None))
+            n.lineno = nodelist[0][2]
+            return n
+        n = Return(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def yield_stmt(self, nodelist):
+        n = Yield(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def raise_stmt(self, nodelist):
+        # raise: [test [',' test [',' test]]]
+        # Optional parts are filled in from the right: traceback, value, type.
+        if len(nodelist) > 5:
+            expr3 = self.com_node(nodelist[5])
+        else:
+            expr3 = None
+        if len(nodelist) > 3:
+            expr2 = self.com_node(nodelist[3])
+        else:
+            expr2 = None
+        if len(nodelist) > 1:
+            expr1 = self.com_node(nodelist[1])
+        else:
+            expr1 = None
+        n = Raise(expr1, expr2, expr3)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def import_stmt(self, nodelist):
+        # import_stmt: 'import' dotted_as_name (',' dotted_as_name)* |
+        # from: 'from' dotted_name 'import'
+        #                        ('*' | import_as_name (',' import_as_name)*)
+        if nodelist[0][1] == 'from':
+            names = []
+            if nodelist[3][0] == token.NAME:
+                # Bare NAME tokens: no 'as' clauses.
+                for i in range(3, len(nodelist), 2):
+                    names.append((nodelist[i][1], None))
+            else:
+                for i in range(3, len(nodelist), 2):
+                    names.append(self.com_import_as_name(nodelist[i]))
+            n = From(self.com_dotted_name(nodelist[1]), names)
+            n.lineno = nodelist[0][2]
+            return n
+
+        if nodelist[1][0] == symbol.dotted_name:
+            names = [(self.com_dotted_name(nodelist[1][1:]), None)]
+        else:
+            names = []
+            for i in range(1, len(nodelist), 2):
+                names.append(self.com_dotted_as_name(nodelist[i]))
+        n = Import(names)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def global_stmt(self, nodelist):
+        # global: NAME (',' NAME)*
+        names = []
+        for i in range(1, len(nodelist), 2):
+            names.append(nodelist[i][1])
+        n = Global(names)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def exec_stmt(self, nodelist):
+        # exec_stmt: 'exec' expr ['in' expr [',' expr]]
+        expr1 = self.com_node(nodelist[1])
+        if len(nodelist) >= 4:
+            expr2 = self.com_node(nodelist[3])
+            if len(nodelist) >= 6:
+                expr3 = self.com_node(nodelist[5])
+            else:
+                expr3 = None
+        else:
+            expr2 = expr3 = None
+
+        n = Exec(expr1, expr2, expr3)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def assert_stmt(self, nodelist):
+        # 'assert': test, [',' test]
+        expr1 = self.com_node(nodelist[1])
+        if (len(nodelist) == 4):
+            expr2 = self.com_node(nodelist[3])
+        else:
+            expr2 = None
+        n = Assert(expr1, expr2)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def if_stmt(self, nodelist):
+        # if: test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        tests = []
+        for i in range(0, len(nodelist) - 3, 4):
+            testNode = self.com_node(nodelist[i + 1])
+            suiteNode = self.com_node(nodelist[i + 3])
+            tests.append((testNode, suiteNode))
+
+        if len(nodelist) % 4 == 3:
+            # Trailing 'else' ':' suite present.
+            elseNode = self.com_node(nodelist[-1])
+##      elseNode.lineno = nodelist[-1][1][2]
+        else:
+            elseNode = None
+        n = If(tests, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def while_stmt(self, nodelist):
+        # 'while' test ':' suite ['else' ':' suite]
+
+        testNode = self.com_node(nodelist[1])
+        bodyNode = self.com_node(nodelist[3])
+
+        if len(nodelist) > 4:
+            elseNode = self.com_node(nodelist[6])
+        else:
+            elseNode = None
+
+        n = While(testNode, bodyNode, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def for_stmt(self, nodelist):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+
+        assignNode = self.com_assign(nodelist[1], OP_ASSIGN)
+        listNode = self.com_node(nodelist[3])
+        bodyNode = self.com_node(nodelist[5])
+
+        if len(nodelist) > 8:
+            elseNode = self.com_node(nodelist[8])
+        else:
+            elseNode = None
+
+        n = For(assignNode, listNode, bodyNode, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def try_stmt(self, nodelist):
+        # 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        # | 'try' ':' suite 'finally' ':' suite
+        if nodelist[3][0] != symbol.except_clause:
+            return self.com_try_finally(nodelist)
+
+        return self.com_try_except(nodelist)
+
+    def suite(self, nodelist):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if len(nodelist) == 1:
+            return self.com_stmt(nodelist[0])
+
+        stmts = []
+        for node in nodelist:
+            if node[0] == symbol.stmt:
+                self.com_append_stmt(stmts, node)
+        return Stmt(stmts)
+
+    # --------------------------------------------------------------
+    #
+    # EXPRESSION NODES  (invoked by com_node())
+    #
+
+    def testlist(self, nodelist):
+        # testlist: expr (',' expr)* [',']
+        # testlist_safe: test [(',' test)+ [',']]
+        # exprlist: expr (',' expr)* [',']
+        return self.com_binary(Tuple, nodelist)
+
+    testlist_safe = testlist # XXX
+    exprlist = testlist
+
+    def test(self, nodelist):
+        # and_test ('or' and_test)* | lambdef
+        # A lone lambdef child is a lambda expression; everything else is
+        # an 'or' chain.
+        if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
+            return self.lambdef(nodelist[0])
+        return self.com_binary(Or, nodelist)
+
+    def and_test(self, nodelist):
+        # not_test ('and' not_test)*
+        # Folds the chain into a single And node (or the lone operand).
+        return self.com_binary(And, nodelist)
+
+    def not_test(self, nodelist):
+        # 'not' not_test | comparison
+        # Two children means the 'not' keyword is present; wrap the
+        # operand in a Not node, otherwise pass the comparison through.
+        result = self.com_node(nodelist[-1])
+        if len(nodelist) == 2:
+            n = Not(result)
+            n.lineno = nodelist[0][2]
+            return n
+        return result
+
+    def comparison(self, nodelist):
+        # comparison: expr (comp_op expr)*
+        node = self.com_node(nodelist[0])
+        if len(nodelist) == 1:
+            return node
+
+        # Collect (operator-string, operand-node) pairs; operators sit at
+        # the odd indices, operands at the even ones.
+        results = []
+        for i in range(2, len(nodelist), 2):
+            nl = nodelist[i-1]
+
+            # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
+            #          | 'in' | 'not' 'in' | 'is' | 'is' 'not'
+            n = nl[1]
+            if n[0] == token.NAME:
+                # Keyword operator: 'in', 'is', or the two-word forms
+                # 'not in' / 'is not' (three children in the comp_op node).
+                type = n[1]
+                if len(nl) == 3:
+                    if type == 'not':
+                        type = 'not in'
+                    else:
+                        type = 'is not'
+            else:
+                type = _cmp_types[n[0]]
+
+            # lineno of the last operator wins for the Compare node below.
+            lineno = nl[1][2]
+            results.append((type, self.com_node(nodelist[i])))
+
+        # we need a special "compare" node so that we can distinguish
+        #   3 < x < 5   from    (3 < x) < 5
+        # the two have very different semantics and results (note that the
+        # latter form is always true)
+
+        n = Compare(node, results)
+        n.lineno = lineno
+        return n
+
+    def expr(self, nodelist):
+        # expr: xor_expr ('|' xor_expr)*
+        return self.com_binary(Bitor, nodelist)
+
+    def xor_expr(self, nodelist):
+        # xor_expr: and_expr ('^' and_expr)*
+        return self.com_binary(Bitxor, nodelist)
+
+    def and_expr(self, nodelist):
+        # and_expr: shift_expr ('&' shift_expr)*
+        # (the original comment repeated the xor_expr production)
+        return self.com_binary(Bitand, nodelist)
+
+    def shift_expr(self, nodelist):
+        # shift_expr ('<<'|'>>' shift_expr)*
+        # Left-associative fold: each step wraps the accumulated node and
+        # the next operand in a LeftShift/RightShift node.
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            if nodelist[i-1][0] == token.LEFTSHIFT:
+                node = LeftShift([node, right])
+                # NOTE(review): lineno comes from nodelist[1] (the first
+                # operator) on every iteration, not nodelist[i-1] -- confirm
+                # before changing; this mirrors the era's compile.c behaviour.
+                node.lineno = nodelist[1][2]
+            elif nodelist[i-1][0] == token.RIGHTSHIFT:
+                node = RightShift([node, right])
+                node.lineno = nodelist[1][2]
+            else:
+                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
+        return node
+
+    def arith_expr(self, nodelist):
+        # arith_expr: term (('+'|'-') term)*
+        # Left-associative fold into Add/Sub nodes.
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            if nodelist[i-1][0] == token.PLUS:
+                node = Add([node, right])
+                node.lineno = nodelist[1][2]
+            elif nodelist[i-1][0] == token.MINUS:
+                node = Sub([node, right])
+                node.lineno = nodelist[1][2]
+            else:
+                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
+        return node
+
+    def term(self, nodelist):
+        # term: factor (('*'|'/'|'%'|'//') factor)*
+        # Left-associative fold into Mul/Div/Mod/FloorDiv nodes.
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            t = nodelist[i-1][0]
+            if t == token.STAR:
+                node = Mul([node, right])
+            elif t == token.SLASH:
+                node = Div([node, right])
+            elif t == token.PERCENT:
+                node = Mod([node, right])
+            elif t == token.DOUBLESLASH:
+                node = FloorDiv([node, right])
+            else:
+                raise ValueError, "unexpected token: %s" % t
+            node.lineno = nodelist[1][2]
+        return node
+
+    def factor(self, nodelist):
+        # factor: ('+'|'-'|'~') factor | power
+        # Unary prefix operators; a bare power is returned unchanged.
+        elt = nodelist[0]
+        t = elt[0]
+        node = self.com_node(nodelist[-1])
+        if t == token.PLUS:
+            node = UnaryAdd(node)
+            node.lineno = elt[2]
+        elif t == token.MINUS:
+            node = UnarySub(node)
+            node.lineno = elt[2]
+        elif t == token.TILDE:
+            node = Invert(node)
+            node.lineno = elt[2]
+        return node
+
+    def power(self, nodelist):
+        # power: atom trailer* ('**' factor)*
+        # Apply each trailer (call / attribute / subscript) in turn; a
+        # DOUBLESTAR ends the trailer chain and builds a Power node from
+        # the accumulated base and the exponent that follows it.
+        node = self.com_node(nodelist[0])
+        for i in range(1, len(nodelist)):
+            elt = nodelist[i]
+            if elt[0] == token.DOUBLESTAR:
+                n = Power([node, self.com_node(nodelist[i+1])])
+                n.lineno = elt[2]
+                return n
+
+            node = self.com_apply_trailer(node, elt)
+
+        return node
+
+    def atom(self, nodelist):
+        # Dispatch on the first child's token type to the matching
+        # atom_* handler, then stamp the line number.
+        n = self._atom_dispatch[nodelist[0][0]](nodelist)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_lpar(self, nodelist):
+        # '(' [testlist] ')' -- '()' is the empty tuple; otherwise the
+        # parenthesized expression compiles to whatever is inside.
+        if nodelist[1][0] == token.RPAR:
+            n = Tuple(())
+            n.lineno = nodelist[0][2]
+            return n
+        return self.com_node(nodelist[1])
+
+    def atom_lsqb(self, nodelist):
+        # '[' [listmaker] ']' -- '[]' is the empty list; otherwise defer
+        # to the list-constructor / list-comprehension handler.
+        if nodelist[1][0] == token.RSQB:
+            n = List(())
+            n.lineno = nodelist[0][2]
+            return n
+        return self.com_list_constructor(nodelist[1])
+
+    def atom_lbrace(self, nodelist):
+        # '{' [dictmaker] '}' -- '{}' is the empty dict (lineno is set
+        # by the atom() dispatcher).
+        if nodelist[1][0] == token.RBRACE:
+            return Dict(())
+        return self.com_dictmaker(nodelist[1])
+
+    def atom_backquote(self, nodelist):
+        # '`' testlist '`' -- the repr() shorthand.
+        n = Backquote(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_number(self, nodelist):
+        ### need to verify this matches compile.c
+        # eval() of the NUMBER token text yields the constant value; the
+        # token comes from the tokenizer, not from untrusted input.
+        k = eval(nodelist[0][1])
+        n = Const(k)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_string(self, nodelist):
+        ### need to verify this matches compile.c
+        # Adjacent string literals concatenate; eval() each STRING token
+        # to apply escape/prefix processing.
+        k = ''
+        for node in nodelist:
+            k = k + eval(node[1])
+        n = Const(k)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_name(self, nodelist):
+        ### any processing to do?
+        # Plain identifier reference.
+        n = Name(nodelist[0][1])
+        n.lineno = nodelist[0][2]
+        return n
+
+    # --------------------------------------------------------------
+    #
+    # INTERNAL PARSING UTILITIES
+    #
+
+    # The use of com_node() introduces a lot of extra stack frames,
+    # enough to cause a stack overflow compiling test.test_parser with
+    # the standard interpreter recursionlimit.  The com_node() is a
+    # convenience function that hides the dispatch details, but comes
+    # at a very high cost.  It is more efficient to dispatch directly
+    # in the callers.  In these cases, use lookup_node() and call the
+    # dispatched node directly.
+
+    def lookup_node(self, node):
+        # Return the bound handler for this node type without calling it
+        # (cheaper than com_node when the caller dispatches directly).
+        return self._dispatch[node[0]]
+
+    def com_node(self, node):
+        # Note: compile.c has handling in com_node for del_stmt, pass_stmt,
+        #       break_stmt, stmt, small_stmt, flow_stmt, simple_stmt,
+        #       and compound_stmt.
+        #       We'll just dispatch them.
+        # Dispatch the node's children to the handler registered for its
+        # symbol/token type.
+        return self._dispatch[node[0]](node[1:])
+
+    def com_NEWLINE(self, *args):
+        # A ';' at the end of a line can make a NEWLINE token appear
+        # here, Render it harmless. (genc discards ('discard',
+        # ('const', xxxx)) Nodes)
+        return Discard(Const(None))
+
+    def com_arglist(self, nodelist):
+        # varargslist:
+        #     (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
+        #   | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+        # fpdef: NAME | '(' fplist ')'
+        # fplist: fpdef (',' fpdef)* [',']
+        # Returns (names, defaults, flags) for a function signature, where
+        # flags carries CO_VARARGS / CO_VARKEYWORDS bits.
+        names = []
+        defaults = []
+        flags = 0
+
+        i = 0
+        while i < len(nodelist):
+            node = nodelist[i]
+            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
+                # Star args terminate the positional-parameter section.
+                if node[0] == token.STAR:
+                    node = nodelist[i+1]
+                    if node[0] == token.NAME:
+                        names.append(node[1])
+                        flags = flags | CO_VARARGS
+                        i = i + 3
+
+                if i < len(nodelist):
+                    # should be DOUBLESTAR
+                    t = nodelist[i][0]
+                    if t == token.DOUBLESTAR:
+                        node = nodelist[i+1]
+                    else:
+                        raise ValueError, "unexpected token: %s" % t
+                    names.append(node[1])
+                    flags = flags | CO_VARKEYWORDS
+
+                break
+
+            # fpdef: NAME | '(' fplist ')'
+            names.append(self.com_fpdef(node))
+
+            i = i + 1
+            if i >= len(nodelist):
+                break
+
+            if nodelist[i][0] == token.EQUAL:
+                defaults.append(self.com_node(nodelist[i + 1]))
+                i = i + 2
+            elif len(defaults):
+                # Treat "(a=1, b)" as "(a=1, b=None)"
+                defaults.append(Const(None))
+
+            # skip the trailing comma
+            i = i + 1
+
+        return names, defaults, flags
+
+    def com_fpdef(self, node):
+        # fpdef: NAME | '(' fplist ')'
+        # A parenthesized fpdef is a nested tuple parameter.
+        if node[1][0] == token.LPAR:
+            return self.com_fplist(node[2])
+        return node[1][1]
+
+    def com_fplist(self, node):
+        # fplist: fpdef (',' fpdef)* [',']
+        # A single fpdef is returned bare; otherwise return a tuple of
+        # the (possibly nested) parameter names.
+        if len(node) == 2:
+            return self.com_fpdef(node[1])
+        list = []
+        for i in range(1, len(node), 2):
+            list.append(self.com_fpdef(node[i]))
+        return tuple(list)
+
+    def com_dotted_name(self, node):
+        # String together the dotted names and return the string
+        # (joins the NAME tokens with '.'; the slice drops the final dot).
+        name = ""
+        for n in node:
+            if type(n) == type(()) and n[0] == 1:
+                name = name + n[1] + '.'
+        return name[:-1]
+
+    def com_dotted_as_name(self, node):
+        dot = self.com_dotted_name(node[1])
+        if len(node) <= 2:
+            return dot, None
+        if node[0] == symbol.dotted_name:
+            pass
+        else:
+            assert node[2][1] == 'as'
+            assert node[3][0] == token.NAME
+            return dot, node[3][1]
+
+    def com_import_as_name(self, node):
+        # Return (name, alias_or_None) for one item of a 'from' import;
+        # '*' imports everything and has no alias.
+        if node[0] == token.STAR:
+            return '*', None
+        assert node[0] == symbol.import_as_name
+        node = node[1:]
+        if len(node) == 1:
+            assert node[0][0] == token.NAME
+            return node[0][1], None
+
+        assert node[1][1] == 'as', node
+        assert node[2][0] == token.NAME
+        return node[0][1], node[2][1]
+
+    def com_bases(self, node):
+        # Compile each base-class expression in a class statement's
+        # comma-separated base list.
+        bases = []
+        for i in range(1, len(node), 2):
+            bases.append(self.com_node(node[i]))
+        return bases
+
+    def com_try_finally(self, nodelist):
+        # try_fin_stmt: "try" ":" suite "finally" ":" suite
+        n = TryFinally(self.com_node(nodelist[2]),
+                       self.com_node(nodelist[5]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def com_try_except(self, nodelist):
+        # try_except: 'try' ':' suite (except_clause ':' suite)* ['else' suite]
+        #tryexcept:  [TryNode, [except_clauses], elseNode)]
+        stmt = self.com_node(nodelist[2])
+        clauses = []
+        elseNode = None
+        for i in range(3, len(nodelist), 3):
+            node = nodelist[i]
+            if node[0] == symbol.except_clause:
+                # except_clause: 'except' [expr [',' expr]] */
+                if len(node) > 2:
+                    expr1 = self.com_node(node[2])
+                    if len(node) > 4:
+                        expr2 = self.com_assign(node[4], OP_ASSIGN)
+                    else:
+                        expr2 = None
+                else:
+                    expr1 = expr2 = None
+                clauses.append((expr1, expr2, self.com_node(nodelist[i+2])))
+
+            if node[0] == token.NAME:
+                elseNode = self.com_node(nodelist[i+2])
+        n = TryExcept(self.com_node(nodelist[2]), clauses, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def com_augassign_op(self, node):
+        # Return the operator token ('+=' etc.) of an augassign node.
+        assert node[0] == symbol.augassign
+        return node[1]
+
+    def com_augassign(self, node):
+        """Return node suitable for lvalue of augmented assignment
+
+        Names, slices, and attributes are the only allowable nodes.
+        """
+        l = self.com_node(node)
+        if l.__class__ in (Name, Slice, Subscript, Getattr):
+            return l
+        raise SyntaxError, "can't assign to %s" % l.__class__.__name__
+
+    def com_assign(self, node, assigning):
+        # return a node suitable for use as an "lvalue"
+        # loop to avoid trivial recursion
+        # Descends through single-child wrapper nodes until it reaches a
+        # construct that can actually be assigned to, rejecting anything
+        # that is an operator application or a literal.
+        while 1:
+            t = node[0]
+            if t == symbol.exprlist or t == symbol.testlist:
+                # Multi-element target: tuple unpacking.
+                if len(node) > 2:
+                    return self.com_assign_tuple(node, assigning)
+                node = node[1]
+            elif t in _assign_types:
+                # An actual operator (len > 2) is not a valid lvalue.
+                if len(node) > 2:
+                    raise SyntaxError, "can't assign to operator"
+                node = node[1]
+            elif t == symbol.power:
+                if node[1][0] != symbol.atom:
+                    raise SyntaxError, "can't assign to operator"
+                if len(node) > 2:
+                    # Trailers present: apply all but the last, then turn
+                    # the final trailer into the assignment target.
+                    primary = self.com_node(node[1])
+                    for i in range(2, len(node)-1):
+                        ch = node[i]
+                        if ch[0] == token.DOUBLESTAR:
+                            raise SyntaxError, "can't assign to operator"
+                        primary = self.com_apply_trailer(primary, ch)
+                    return self.com_assign_trailer(primary, node[-1],
+                                                   assigning)
+                node = node[1]
+            elif t == symbol.atom:
+                t = node[1][0]
+                if t == token.LPAR:
+                    node = node[2]
+                    if node[0] == token.RPAR:
+                        raise SyntaxError, "can't assign to ()"
+                elif t == token.LSQB:
+                    node = node[2]
+                    if node[0] == token.RSQB:
+                        raise SyntaxError, "can't assign to []"
+                    return self.com_assign_list(node, assigning)
+                elif t == token.NAME:
+                    return self.com_assign_name(node[1], assigning)
+                else:
+                    raise SyntaxError, "can't assign to literal"
+            else:
+                raise SyntaxError, "bad assignment"
+
+    def com_assign_tuple(self, node, assigning):
+        # Build an AssTuple from the comma-separated targets.
+        assigns = []
+        for i in range(1, len(node), 2):
+            assigns.append(self.com_assign(node[i], assigning))
+        return AssTuple(assigns)
+
+    def com_assign_list(self, node, assigning):
+        # Build an AssList from the targets inside '[...]'; a list_for
+        # child would mean a comprehension, which is not assignable.
+        assigns = []
+        for i in range(1, len(node), 2):
+            if i + 1 < len(node):
+                if node[i + 1][0] == symbol.list_for:
+                    raise SyntaxError, "can't assign to list comprehension"
+                assert node[i + 1][0] == token.COMMA, node[i + 1]
+            assigns.append(self.com_assign(node[i], assigning))
+        return AssList(assigns)
+
+    def com_assign_name(self, node, assigning):
+        # Simple NAME target: OP_ASSIGN or OP_DELETE per 'assigning'.
+        n = AssName(node[1], assigning)
+        n.lineno = node[2]
+        return n
+
+    def com_assign_trailer(self, primary, node, assigning):
+        # The final trailer of an assignment target: attribute or
+        # subscript are legal, a call is not.
+        t = node[1][0]
+        if t == token.DOT:
+            return self.com_assign_attr(primary, node[2], assigning)
+        if t == token.LSQB:
+            return self.com_subscriptlist(primary, node[2], assigning)
+        if t == token.LPAR:
+            raise SyntaxError, "can't assign to function call"
+        raise SyntaxError, "unknown trailer type: %s" % t
+
+    def com_assign_attr(self, primary, node, assigning):
+        # Attribute target: primary.NAME = ...
+        return AssAttr(primary, node[1], assigning)
+
+    def com_binary(self, constructor, nodelist):
+        "Compile 'NODE (OP NODE)*' into (type, [ node1, ..., nodeN ])."
+        # A single operand is passed through undecorated; otherwise the
+        # operands (at the even indices) are wrapped by 'constructor'.
+        l = len(nodelist)
+        if l == 1:
+            n = nodelist[0]
+            return self.lookup_node(n)(n[1:])
+        items = []
+        for i in range(0, l, 2):
+            n = nodelist[i]
+            items.append(self.lookup_node(n)(n[1:]))
+        return constructor(items)
+
+    def com_stmt(self, node):
+        # Compile a statement node, always returning a Stmt wrapper.
+        result = self.lookup_node(node)(node[1:])
+        assert result is not None
+        if isinstance(result, Stmt):
+            return result
+        return Stmt([result])
+
+    def com_append_stmt(self, stmts, node):
+        # Compile 'node' and append it to 'stmts', flattening nested
+        # Stmt wrappers so the result list stays one level deep.
+        result = self.com_node(node)
+        assert result is not None
+        if isinstance(result, Stmt):
+            stmts.extend(result.nodes)
+        else:
+            stmts.append(result)
+
+    # List-comprehension support only exists when the running grammar
+    # defines the list_for symbol; otherwise fall back to the plain
+    # list-display constructor.
+    if hasattr(symbol, 'list_for'):
+        def com_list_constructor(self, nodelist):
+            # listmaker: test ( list_for | (',' test)* [','] )
+            values = []
+            for i in range(1, len(nodelist)):
+                if nodelist[i][0] == symbol.list_for:
+                    # A list_for must be the last child; the single value
+                    # before it is the comprehension's expression.
+                    assert len(nodelist[i:]) == 1
+                    return self.com_list_comprehension(values[0],
+                                                       nodelist[i])
+                elif nodelist[i][0] == token.COMMA:
+                    continue
+                values.append(self.com_node(nodelist[i]))
+            return List(values)
+
+        def com_list_comprehension(self, expr, node):
+            # list_iter: list_for | list_if
+            # list_for: 'for' exprlist 'in' testlist [list_iter]
+            # list_if: 'if' test [list_iter]
+
+            # XXX should raise SyntaxError for assignment
+
+            # Walk the chain of for/if clauses; each 'for' opens a new
+            # ListCompFor and subsequent 'if's attach to the latest one.
+            lineno = node[1][2]
+            fors = []
+            while node:
+                t = node[1][1]
+                if t == 'for':
+                    assignNode = self.com_assign(node[2], OP_ASSIGN)
+                    listNode = self.com_node(node[4])
+                    newfor = ListCompFor(assignNode, listNode, [])
+                    newfor.lineno = node[1][2]
+                    fors.append(newfor)
+                    if len(node) == 5:
+                        node = None
+                    else:
+                        node = self.com_list_iter(node[5])
+                elif t == 'if':
+                    test = self.com_node(node[2])
+                    newif = ListCompIf(test)
+                    newif.lineno = node[1][2]
+                    newfor.ifs.append(newif)
+                    if len(node) == 3:
+                        node = None
+                    else:
+                        node = self.com_list_iter(node[3])
+                else:
+                    raise SyntaxError, \
+                          ("unexpected list comprehension element: %s %d"
+                           % (node, lineno))
+            n = ListComp(expr, fors)
+            n.lineno = lineno
+            return n
+
+        def com_list_iter(self, node):
+            # Unwrap a list_iter to its single list_for/list_if child.
+            assert node[0] == symbol.list_iter
+            return node[1]
+    else:
+        def com_list_constructor(self, nodelist):
+            # Pre-comprehension grammar: plain comma-separated values.
+            values = []
+            for i in range(1, len(nodelist), 2):
+                values.append(self.com_node(nodelist[i]))
+            return List(values)
+
+    def com_dictmaker(self, nodelist):
+        # dictmaker: test ':' test (',' test ':' value)* [',']
+        # Each key/value pair occupies four slots (key ':' value ','),
+        # hence the step of 4.
+        items = []
+        for i in range(1, len(nodelist), 4):
+            items.append((self.com_node(nodelist[i]),
+                          self.com_node(nodelist[i+2])))
+        return Dict(items)
+
+    def com_apply_trailer(self, primaryNode, nodelist):
+        # trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+        t = nodelist[1][0]
+        if t == token.LPAR:
+            return self.com_call_function(primaryNode, nodelist[2])
+        if t == token.DOT:
+            return self.com_select_member(primaryNode, nodelist[2])
+        if t == token.LSQB:
+            return self.com_subscriptlist(primaryNode, nodelist[2], OP_APPLY)
+
+        raise SyntaxError, 'unknown node type: %s' % t
+
+    def com_select_member(self, primaryNode, nodelist):
+        # Attribute access: primary.NAME
+        if nodelist[0] != token.NAME:
+            raise SyntaxError, "member must be a name"
+        n = Getattr(primaryNode, nodelist[1])
+        n.lineno = nodelist[2]
+        return n
+
+    def com_call_function(self, primaryNode, nodelist):
+        if nodelist[0] == token.RPAR:
+            return CallFunc(primaryNode, [])
+        args = []
+        kw = 0
+        len_nodelist = len(nodelist)
+        for i in range(1, len_nodelist, 2):
+            node = nodelist[i]
+            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
+                break
+            kw, result = self.com_argument(node, kw)
+            args.append(result)
+        else:
+            # No broken by star arg, so skip the last one we processed.
+            i = i + 1
+        if i < len_nodelist and nodelist[i][0] == token.COMMA:
+            # need to accept an application that looks like "f(a, b,)"
+            i = i + 1
+        star_node = dstar_node = None
+        while i < len_nodelist:
+            tok = nodelist[i]
+            ch = nodelist[i+1]
+            i = i + 3
+            if tok[0]==token.STAR:
+                if star_node is not None:
+                    raise SyntaxError, 'already have the varargs indentifier'
+                star_node = self.com_node(ch)
+            elif tok[0]==token.DOUBLESTAR:
+                if dstar_node is not None:
+                    raise SyntaxError, 'already have the kwargs indentifier'
+                dstar_node = self.com_node(ch)
+            else:
+                raise SyntaxError, 'unknown node type: %s' % tok
+
+        return CallFunc(primaryNode, args, star_node, dstar_node)
+
+    def com_argument(self, nodelist, kw):
+        if len(nodelist) == 2:
+            if kw:
+                raise SyntaxError, "non-keyword arg after keyword arg"
+            return 0, self.com_node(nodelist[1])
+        result = self.com_node(nodelist[3])
+        n = nodelist[1]
+        while len(n) == 2 and n[0] != token.NAME:
+            n = n[1]
+        if n[0] != token.NAME:
+            raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
+        node = Keyword(n[1], result)
+        node.lineno = n[2]
+        return 1, node
+
+    def com_subscriptlist(self, primary, nodelist, assigning):
+        # slicing:      simple_slicing | extended_slicing
+        # simple_slicing:   primary "[" short_slice "]"
+        # extended_slicing: primary "[" slice_list "]"
+        # slice_list:   slice_item ("," slice_item)* [","]
+
+        # backwards compat slice for '[i:j]'
+        # A single subscript containing a colon (and no sliceop stride)
+        # compiles to the old-style Slice node rather than Subscript.
+        if len(nodelist) == 2:
+            sub = nodelist[1]
+            if (sub[1][0] == token.COLON or \
+                            (len(sub) > 2 and sub[2][0] == token.COLON)) and \
+                            sub[-1][0] != symbol.sliceop:
+                return self.com_slice(primary, sub, assigning)
+
+        subscripts = []
+        for i in range(1, len(nodelist), 2):
+            subscripts.append(self.com_subscript(nodelist[i]))
+
+        return Subscript(primary, assigning, subscripts)
+
+    def com_subscript(self, node):
+        # slice_item: expression | proper_slice | ellipsis
+        # '...' arrives as consecutive DOT tokens; a leading colon or
+        # more than one child means a proper slice.
+        ch = node[1]
+        t = ch[0]
+        if t == token.DOT and node[2][0] == token.DOT:
+            return Ellipsis()
+        if t == token.COLON or len(node) > 2:
+            return self.com_sliceobj(node)
+        return self.com_node(ch)
+
+    def com_sliceobj(self, node):
+        # proper_slice: short_slice | long_slice
+        # short_slice:  [lower_bound] ":" [upper_bound]
+        # long_slice:   short_slice ":" [stride]
+        # lower_bound:  expression
+        # upper_bound:  expression
+        # stride:       expression
+        #
+        # Note: a stride may be further slicing...
+        # Missing bounds become Const(None) so Sliceobj always gets
+        # explicit items.
+
+        items = []
+
+        if node[1][0] == token.COLON:
+            items.append(Const(None))
+            i = 2
+        else:
+            items.append(self.com_node(node[1]))
+            # i == 2 is a COLON
+            i = 3
+
+        if i < len(node) and node[i][0] == symbol.test:
+            items.append(self.com_node(node[i]))
+            i = i + 1
+        else:
+            items.append(Const(None))
+
+        # a short_slice has been built. look for long_slice now by looking
+        # for strides...
+        for j in range(i, len(node)):
+            ch = node[j]
+            if len(ch) == 2:
+                # sliceop with no expression: stride defaults to None.
+                items.append(Const(None))
+            else:
+                items.append(self.com_node(ch[2]))
+
+        return Sliceobj(items)
+
+    def com_slice(self, primary, node, assigning):
+        # short_slice:  [lower_bound] ":" [upper_bound]
+        # Old-style two-bound slice; either bound may be absent.
+        lower = upper = None
+        if len(node) == 3:
+            if node[1][0] == token.COLON:
+                upper = self.com_node(node[2])
+            else:
+                lower = self.com_node(node[1])
+        elif len(node) == 4:
+            lower = self.com_node(node[1])
+            upper = self.com_node(node[3])
+        return Slice(primary, assigning, lower, upper)
+
+    def get_docstring(self, node, n=None):
+        # Return the docstring of a suite/module parse tree, or None.
+        # Recursively unwraps single-expression statements until it finds
+        # (or fails to find) a leading STRING atom.
+        if n is None:
+            n = node[0]
+            node = node[1:]
+        if n == symbol.suite:
+            if len(node) == 1:
+                return self.get_docstring(node[0])
+            for sub in node:
+                if sub[0] == symbol.stmt:
+                    return self.get_docstring(sub)
+            return None
+        if n == symbol.file_input:
+            for sub in node:
+                if sub[0] == symbol.stmt:
+                    return self.get_docstring(sub)
+            return None
+        if n == symbol.atom:
+            if node[0][0] == token.STRING:
+                # Concatenate adjacent string literals, as the compiler does.
+                s = ''
+                for t in node:
+                    s = s + eval(t[1])
+                return s
+            return None
+        if n == symbol.stmt or n == symbol.simple_stmt \
+           or n == symbol.small_stmt:
+            return self.get_docstring(node[0])
+        if n in _doc_nodes and len(node) == 1:
+            return self.get_docstring(node[0])
+        return None
+
+
+# Expression node types that may wrap a docstring expression; used by
+# get_docstring() to keep unwrapping single-child nodes.
+_doc_nodes = [
+    symbol.expr_stmt,
+    symbol.testlist,
+    symbol.testlist_safe,
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    symbol.power,
+    ]
+
+# comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
+#             | 'in' | 'not' 'in' | 'is' | 'is' 'not'
+# Maps comparison-operator token ids to their operator strings; the
+# deprecated single '=' comparison maps to '==' as well.
+_cmp_types = {
+    token.LESS : '<',
+    token.GREATER : '>',
+    token.EQEQUAL : '==',
+    token.EQUAL : '==',
+    token.LESSEQUAL : '<=',
+    token.GREATEREQUAL : '>=',
+    token.NOTEQUAL : '!=',
+    }
+
+# All grammar symbols the transformer knows how to dispatch on.
+_legal_node_types = [
+    symbol.funcdef,
+    symbol.classdef,
+    symbol.stmt,
+    symbol.small_stmt,
+    symbol.flow_stmt,
+    symbol.simple_stmt,
+    symbol.compound_stmt,
+    symbol.expr_stmt,
+    symbol.print_stmt,
+    symbol.del_stmt,
+    symbol.pass_stmt,
+    symbol.break_stmt,
+    symbol.continue_stmt,
+    symbol.return_stmt,
+    symbol.raise_stmt,
+    symbol.import_stmt,
+    symbol.global_stmt,
+    symbol.exec_stmt,
+    symbol.assert_stmt,
+    symbol.if_stmt,
+    symbol.while_stmt,
+    symbol.for_stmt,
+    symbol.try_stmt,
+    symbol.suite,
+    symbol.testlist,
+    symbol.testlist_safe,
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.exprlist,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    symbol.power,
+    symbol.atom,
+    ]
+
+# yield only exists in grammars with generators enabled.
+if hasattr(symbol, 'yield_stmt'):
+    _legal_node_types.append(symbol.yield_stmt)
+
+# Wrapper node types that may appear around an assignment target; see
+# com_assign().
+_assign_types = [
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    ]
+
+import types
+# Combined id -> name map over grammar symbols and tokens, used by
+# debug_tree() to render parse trees readably.
+_names = {}
+for k, v in symbol.sym_name.items():
+    _names[k] = v
+for k, v in token.tok_name.items():
+    _names[k] = v
+
+def debug_tree(tree):
+    # Return a copy of a parse tree with numeric symbol/token ids
+    # replaced by their names, for human-readable debugging output.
+    l = []
+    for elt in tree:
+        if type(elt) == types.IntType:
+            l.append(_names.get(elt, elt))
+        elif type(elt) == types.StringType:
+            l.append(elt)
+        else:
+            l.append(debug_tree(elt))
+    return l
diff --git a/lib-python/2.2/compiler/visitor.py b/lib-python/2.2/compiler/visitor.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/visitor.py
@@ -0,0 +1,121 @@
+from compiler import ast
+
+# XXX should probably rename ASTVisitor to ASTWalker
+# XXX can it be made even more generic?
+
+class ASTVisitor:
+    """Performs a depth-first walk of the AST
+
+    The ASTVisitor will walk the AST, performing either a preorder or
+    postorder traversal depending on which method is called.
+
+    methods:
+    preorder(tree, visitor)
+    postorder(tree, visitor)
+        tree: an instance of ast.Node
+        visitor: an instance with visitXXX methods
+
+    The ASTVisitor is responsible for walking over the tree in the
+    correct order.  For each node, it checks the visitor argument for
+    a method named 'visitNodeType' where NodeType is the name of the
+    node's class, e.g. Class.  If the method exists, it is called
+    with the node as its sole argument.
+
+    The visitor method for a particular node type can control how
+    child nodes are visited during a preorder walk.  (It can't control
+    the order during a postorder walk, because it is called _after_
+    the walk has occurred.)  The ASTVisitor modifies the visitor
+    argument by adding a visit method to the visitor; this method can
+    be used to visit a particular child node.  If the visitor method
+    returns a true value, the ASTVisitor will not traverse the child
+    nodes.
+
+    XXX The interface for controlling the preorder walk needs to be
+    re-considered.  The current interface is convenient for visitors
+    that mostly let the ASTVisitor do everything.  For something like
+    a code generator, where you want to walk to occur in a specific
+    order, it's a pain to add "return 1" to the end of each method.
+    """
+
+    VERBOSE = 0
+
+    def __init__(self):
+        # node: the node currently being dispatched
+        # _cache: per-node-class memo of the resolved visitor method
+        self.node = None
+        self._cache = {}
+
+    def default(self, node, *args):
+        # Fallback when the visitor has no visitXXX method: recurse into
+        # the node's children.
+        for child in node.getChildNodes():
+            self.dispatch(child, *args)
+
+    def dispatch(self, node, *args):
+        # Resolve 'visit<ClassName>' on the visitor (cached per class)
+        # and invoke it, defaulting to self.default.
+        self.node = node
+        klass = node.__class__
+        meth = self._cache.get(klass, None)
+        if meth is None:
+            className = klass.__name__
+            meth = getattr(self.visitor, 'visit' + className, self.default)
+            self._cache[klass] = meth
+##        if self.VERBOSE > 0:
+##            className = klass.__name__
+##            if self.VERBOSE == 1:
+##                if meth == 0:
+##                    print "dispatch", className
+##            else:
+##                print "dispatch", className, (meth and meth.__name__ or '')
+        return meth(node, *args)
+
+    def preorder(self, tree, visitor, *args):
+        """Do preorder walk of tree using visitor"""
+        # Exposes dispatch() on the visitor as 'visit' so visitXXX
+        # methods can drive traversal of selected children themselves.
+        self.visitor = visitor
+        visitor.visit = self.dispatch
+        self.dispatch(tree, *args) # XXX *args make sense?
+
+class ExampleASTVisitor(ASTVisitor):
+    """Prints examples of the nodes that aren't visited
+
+    This visitor-driver is only useful for development, when it's
+    helpful to develop a visitor incremently, and get feedback on what
+    you still have to do.
+    """
+    # examples: node classes already reported, so each is printed once.
+    examples = {}
+
+    def dispatch(self, node, *args):
+        # Like ASTVisitor.dispatch, but caches a false value (0) for
+        # missing methods and, when VERBOSE, prints one example dump of
+        # each unvisited node class before recursing via default().
+        self.node = node
+        meth = self._cache.get(node.__class__, None)
+        className = node.__class__.__name__
+        if meth is None:
+            meth = getattr(self.visitor, 'visit' + className, 0)
+            self._cache[node.__class__] = meth
+        if self.VERBOSE > 1:
+            print "dispatch", className, (meth and meth.__name__ or '')
+        if meth:
+            meth(node, *args)
+        elif self.VERBOSE > 0:
+            klass = node.__class__
+            if not self.examples.has_key(klass):
+                self.examples[klass] = klass
+                print
+                print self.visitor
+                print klass
+                for attr in dir(node):
+                    if attr[0] != '_':
+                        print "\t", "%-12.12s" % attr, getattr(node, attr)
+                print
+            return self.default(node, *args)
+
+# XXX this is an API change
+
+# Default walker class used when walk() is not given one explicitly.
+_walker = ASTVisitor
+def walk(tree, visitor, walker=None, verbose=None):
+    # Convenience entry point: preorder-walk 'tree' with 'visitor',
+    # optionally using a custom walker class instance and verbosity.
+    if walker is None:
+        walker = _walker()
+    if verbose is not None:
+        walker.VERBOSE = verbose
+    walker.preorder(tree, visitor)
+    return walker.visitor
+
+def dumpNode(node):
+    # Debug helper: print a node's class and its public attributes.
+    print node.__class__
+    for attr in dir(node):
+        if attr[0] != '_':
+            print "\t", "%-10.10s" % attr, getattr(node, attr)
diff --git a/lib-python/2.2/copy.py b/lib-python/2.2/copy.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/copy.py
@@ -0,0 +1,381 @@
+"""Generic (shallow and deep) copying operations.
+
+Interface summary:
+
+        import copy
+
+        x = copy.copy(y)        # make a shallow copy of y
+        x = copy.deepcopy(y)    # make a deep copy of y
+
+For module specific errors, copy.error is raised.
+
+The difference between shallow and deep copying is only relevant for
+compound objects (objects that contain other objects, like lists or
+class instances).
+
+- A shallow copy constructs a new compound object and then (to the
+  extent possible) inserts *the same objects* into it that the
+  original contains.
+
+- A deep copy constructs a new compound object and then, recursively,
+  inserts *copies* into it of the objects found in the original.
+
+Two problems often exist with deep copy operations that don't exist
+with shallow copy operations:
+
+ a) recursive objects (compound objects that, directly or indirectly,
+    contain a reference to themselves) may cause a recursive loop
+
+ b) because deep copy copies *everything* it may copy too much, e.g.
+    administrative data structures that should be shared even between
+    copies
+
+Python's deep copy operation avoids these problems by:
+
+ a) keeping a table of objects already copied during the current
+    copying pass
+
+ b) letting user-defined classes override the copying operation or the
+    set of components copied
+
+This version does not copy types like module, class, function, method,
+nor stack trace, stack frame, nor file, socket, window, nor array, nor
+any similar types.
+
+Classes can use the same interfaces to control copying that they use
+to control pickling: they can define methods called __getinitargs__(),
+__getstate__() and __setstate__().  See the documentation for module
+"pickle" for information on these methods.
+"""
+
+# XXX need to support copy_reg here too...
+
+import types
+
+class Error(Exception):
+    pass
+error = Error   # backward compatibility
+
+try:
+    from org.python.core import PyStringMap
+except ImportError:
+    PyStringMap = None
+
+__all__ = ["Error", "error", "copy", "deepcopy"]
+
+def copy(x):
+    """Shallow copy operation on arbitrary Python objects.
+
+    See the module's __doc__ string for more info.
+    """
+
+    # Dispatch order: exact-type table entry, then the __copy__ hook,
+    # then the pickle-style __reduce__ protocol; otherwise uncopyable.
+    try:
+        copierfunction = _copy_dispatch[type(x)]
+    except KeyError:
+        try:
+            copier = x.__copy__
+        except AttributeError:
+            try:
+                reductor = x.__reduce__
+            except AttributeError:
+                raise error, \
+                      "un(shallow)copyable object of type %s" % type(x)
+            else:
+                # deep=0: rebuild from the reduce tuple without copying
+                # the constructor arguments or state.
+                y = _reconstruct(x, reductor(), 0)
+        else:
+            y = copier()
+    else:
+        y = copierfunction(x)
+    return y
+
+_copy_dispatch = d = {}
+
+def _copy_atomic(x):
+    # Immutable/atomic objects are "copied" by returning them unchanged.
+    return x
+d[types.NoneType] = _copy_atomic
+d[types.IntType] = _copy_atomic
+d[types.LongType] = _copy_atomic
+d[types.FloatType] = _copy_atomic
+# The try/except AttributeError guards below allow builds of Python
+# compiled without the corresponding optional types.
+try:
+    d[types.ComplexType] = _copy_atomic
+except AttributeError:
+    pass
+d[types.StringType] = _copy_atomic
+try:
+    d[types.UnicodeType] = _copy_atomic
+except AttributeError:
+    pass
+try:
+    d[types.CodeType] = _copy_atomic
+except AttributeError:
+    pass
+d[types.TypeType] = _copy_atomic
+d[types.XRangeType] = _copy_atomic
+d[types.ClassType] = _copy_atomic
+d[types.BuiltinFunctionType] = _copy_atomic
+
+def _copy_list(x):
+    # Full-slice makes a new list sharing the same element objects.
+    return x[:]
+d[types.ListType] = _copy_list
+
+def _copy_tuple(x):
+    # Tuples are immutable; the slice returns the same object, which
+    # is fine for a shallow copy.
+    return x[:]
+d[types.TupleType] = _copy_tuple
+
+def _copy_dict(x):
+    return x.copy()
+d[types.DictionaryType] = _copy_dict
+if PyStringMap is not None:
+    # Jython's string-keyed map behaves like a dict for copying.
+    d[PyStringMap] = _copy_dict
+
+def _copy_inst(x):
+    # Shallow-copy a classic-class instance, honoring the pickle-style
+    # hooks __copy__, __getinitargs__, __getstate__ and __setstate__.
+    if hasattr(x, '__copy__'):
+        return x.__copy__()
+    if hasattr(x, '__getinitargs__'):
+        args = x.__getinitargs__()
+        y = apply(x.__class__, args)
+    else:
+        # Create an instance without running __init__ by swapping the
+        # class of an empty placeholder object.
+        y = _EmptyClass()
+        y.__class__ = x.__class__
+    if hasattr(x, '__getstate__'):
+        state = x.__getstate__()
+    else:
+        state = x.__dict__
+    if hasattr(y, '__setstate__'):
+        y.__setstate__(state)
+    else:
+        y.__dict__.update(state)
+    return y
+d[types.InstanceType] = _copy_inst
+
+# The alias 'd' was only for brevity while building the table.
+del d
+
+def deepcopy(x, memo = None):
+    """Deep copy operation on arbitrary Python objects.
+
+    See the module's __doc__ string for more info.
+    """
+
+    # memo maps id(original) -> copy, breaking cycles and preserving
+    # object sharing within one deepcopy pass.
+    if memo is None:
+        memo = {}
+    d = id(x)
+    if memo.has_key(d):
+        return memo[d]
+    try:
+        copierfunction = _deepcopy_dispatch[type(x)]
+    except KeyError:
+        try:
+            # Subclasses of type (i.e. classes/metaclasses) are copied
+            # atomically via the types.TypeType table entry.
+            issc = issubclass(type(x), type)
+        except TypeError:
+            issc = 0
+        if issc:
+            y = _deepcopy_dispatch[type](x, memo)
+        else:
+            try:
+                copier = x.__deepcopy__
+            except AttributeError:
+                try:
+                    reductor = x.__reduce__
+                except AttributeError:
+                    raise error, \
+                       "un-deep-copyable object of type %s" % type(x)
+                else:
+                    # deep=1: copy constructor args and state as well.
+                    y = _reconstruct(x, reductor(), 1, memo)
+            else:
+                y = copier(memo)
+    else:
+        y = copierfunction(x, memo)
+    memo[d] = y
+    _keep_alive(x, memo) # Make sure x lives at least as long as d
+    return y
+
+_deepcopy_dispatch = d = {}
+
+def _deepcopy_atomic(x, memo):
+    # Immutable/atomic objects need no copying even for deepcopy.
+    return x
+d[types.NoneType] = _deepcopy_atomic
+d[types.IntType] = _deepcopy_atomic
+d[types.LongType] = _deepcopy_atomic
+d[types.FloatType] = _deepcopy_atomic
+# Guards allow interpreter builds lacking these optional types.
+try:
+    d[types.ComplexType] = _deepcopy_atomic
+except AttributeError:
+    pass
+d[types.StringType] = _deepcopy_atomic
+try:
+    d[types.UnicodeType] = _deepcopy_atomic
+except AttributeError:
+    pass
+try:
+    d[types.CodeType] = _deepcopy_atomic
+except AttributeError:
+    pass
+d[types.TypeType] = _deepcopy_atomic
+d[types.XRangeType] = _deepcopy_atomic
+d[types.ClassType] = _deepcopy_atomic
+d[types.BuiltinFunctionType] = _deepcopy_atomic
+
+def _deepcopy_list(x, memo):
+    y = []
+    # Register the (still empty) copy first so cyclic references back
+    # to x resolve to y instead of recursing forever.
+    memo[id(x)] = y
+    for a in x:
+        y.append(deepcopy(a, memo))
+    return y
+d[types.ListType] = _deepcopy_list
+
+def _deepcopy_tuple(x, memo):
+    # Tuples are immutable, so the copy cannot be registered in memo
+    # before its items exist; copy the items first, then check whether
+    # a cycle through those items already produced a copy of x.
+    y = []
+    for a in x:
+        y.append(deepcopy(a, memo))
+    d = id(x)
+    try:
+        return memo[d]
+    except KeyError:
+        pass
+    # If no item actually changed, reuse the original tuple.
+    for i in range(len(x)):
+        if x[i] is not y[i]:
+            y = tuple(y)
+            break
+    else:
+        y = x
+    memo[d] = y
+    return y
+d[types.TupleType] = _deepcopy_tuple
+
+def _deepcopy_dict(x, memo):
+    y = {}
+    # Register before recursing so cycles through values terminate.
+    memo[id(x)] = y
+    for key in x.keys():
+        y[deepcopy(key, memo)] = deepcopy(x[key], memo)
+    return y
+d[types.DictionaryType] = _deepcopy_dict
+if PyStringMap is not None:
+    d[PyStringMap] = _deepcopy_dict
+
+def _keep_alive(x, memo):
+    """Keeps a reference to the object x in the memo.
+
+    Because we remember objects by their id, we have
+    to assure that possibly temporary objects are kept
+    alive by referencing them.
+    We store a reference at the id of the memo, which should
+    normally not be used unless someone tries to deepcopy
+    the memo itself...
+    """
+    try:
+        memo[id(memo)].append(x)
+    except KeyError:
+        # aha, this is the first one :-)
+        memo[id(memo)]=[x]
+
+def _deepcopy_inst(x, memo):
+    # Deep-copy a classic-class instance, honoring the hooks
+    # __deepcopy__, __getinitargs__, __getstate__ and __setstate__.
+    if hasattr(x, '__deepcopy__'):
+        return x.__deepcopy__(memo)
+    if hasattr(x, '__getinitargs__'):
+        args = x.__getinitargs__()
+        args = deepcopy(args, memo)
+        y = apply(x.__class__, args)
+    else:
+        # Create an instance without running __init__.
+        y = _EmptyClass()
+        y.__class__ = x.__class__
+    # Register before copying state so self-references resolve to y.
+    memo[id(x)] = y
+    if hasattr(x, '__getstate__'):
+        state = x.__getstate__()
+    else:
+        state = x.__dict__
+    state = deepcopy(state, memo)
+    if hasattr(y, '__setstate__'):
+        y.__setstate__(state)
+    else:
+        y.__dict__.update(state)
+    return y
+d[types.InstanceType] = _deepcopy_inst
+
+def _reconstruct(x, info, deep, memo=None):
+    # Rebuild an object from the result of x.__reduce__().  *info* is
+    # either a global-name string (copy by reference: return x) or a
+    # (callable, args[, state]) tuple; *deep* selects deepcopy of args
+    # and state.
+    if isinstance(info, str):
+        return x
+    assert isinstance(info, tuple)
+    if memo is None:
+        memo = {}
+    n = len(info)
+    assert n in (2, 3)
+    callable, args = info[:2]
+    if n > 2:
+        state = info[2]
+    else:
+        state = {}
+    if deep:
+        args = deepcopy(args, memo)
+    y = callable(*args)
+    if state:
+        if deep:
+            state = deepcopy(state, memo)
+        if hasattr(y, '__setstate__'):
+            y.__setstate__(state)
+        else:
+            y.__dict__.update(state)
+    return y
+
+# Drop module-construction temporaries from the namespace.
+del d
+
+del types
+
+# Helper for instance creation without calling __init__
+class _EmptyClass:
+    pass
+
+def _test():
+    """Smoke-test copy() and deepcopy() on builtins and instances."""
+    l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
+         {'abc': 'ABC'}, (), [], {}]
+    l1 = copy(l)
+    print l1==l
+    l1 = map(copy, l)
+    print l1==l
+    l1 = deepcopy(l)
+    print l1==l
+    # A classic class exercising the __getstate__/__setstate__ and
+    # __deepcopy__ hooks; the open/close in __init__ ensures state
+    # copying does not re-run __init__'s side effects.
+    class C:
+        def __init__(self, arg=None):
+            self.a = 1
+            self.arg = arg
+            if __name__ == '__main__':
+                import sys
+                file = sys.argv[0]
+            else:
+                file = __file__
+            self.fp = open(file)
+            self.fp.close()
+        def __getstate__(self):
+            return {'a': self.a, 'arg': self.arg}
+        def __setstate__(self, state):
+            for key in state.keys():
+                setattr(self, key, state[key])
+        def __deepcopy__(self, memo = None):
+            new = self.__class__(deepcopy(self.arg, memo))
+            new.a = self.a
+            return new
+    c = C('argument sketch')
+    l.append(c)
+    l2 = copy(l)
+    print l == l2
+    print l
+    print l2
+    l2 = deepcopy(l)
+    print l == l2
+    print l
+    print l2
+    # Self-referential structure: deepcopy must terminate via the memo.
+    l.append({l[1]: l, 'xyz': l[2]})
+    l3 = copy(l)
+    import repr
+    print map(repr.repr, l)
+    print map(repr.repr, l1)
+    print map(repr.repr, l2)
+    print map(repr.repr, l3)
+    l3 = deepcopy(l)
+    import repr
+    print map(repr.repr, l)
+    print map(repr.repr, l1)
+    print map(repr.repr, l2)
+    print map(repr.repr, l3)
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/copy_reg.py b/lib-python/2.2/copy_reg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/copy_reg.py
@@ -0,0 +1,78 @@
+"""Helper to provide extensibility for pickle/cPickle.
+
+This is only useful to add pickle support for extension types defined in
+C, not for instances of user-defined classes.
+"""
+
+from types import ClassType as _ClassType
+
+__all__ = ["pickle","constructor"]
+
+dispatch_table = {}
+safe_constructors = {}
+
+def pickle(ob_type, pickle_function, constructor_ob=None):
+    """Register *pickle_function* as the reducer for *ob_type*.
+
+    Optionally also registers *constructor_ob* as safe for unpickling.
+    """
+    if type(ob_type) is _ClassType:
+        raise TypeError("copy_reg is not intended for use with classes")
+
+    if not callable(pickle_function):
+        raise TypeError("reduction functions must be callable")
+    dispatch_table[ob_type] = pickle_function
+
+    if constructor_ob is not None:
+        constructor(constructor_ob)
+
+def constructor(object):
+    """Declare *object* a safe constructor for use during unpickling."""
+    if not callable(object):
+        raise TypeError("constructors must be callable")
+    safe_constructors[object] = 1
+
+# Example: provide pickling support for complex numbers.
+
+# Only register complex support if this build has the complex type.
+try:
+    complex
+except NameError:
+    pass
+else:
+
+    def pickle_complex(c):
+        # Reduce a complex number to (constructor, (real, imag)).
+        return complex, (c.real, c.imag)
+
+    pickle(complex, pickle_complex, complex)
+
+# Support for pickling new-style objects
+
+def _reconstructor(cls, base, state):
+    # Recreate a new-style instance: allocate via the base type's
+    # __new__, then run the base __init__ on the saved state.
+    obj = base.__new__(cls, state)
+    base.__init__(obj, state)
+    return obj
+_reconstructor.__safe_for_unpickling__ = 1
+
+# Py_TPFLAGS_HEAPTYPE: distinguishes Python-defined from C-defined types.
+_HEAPTYPE = 1<<9
+
+def _reduce(self):
+    # Generic __reduce__ for new-style objects: find the outermost
+    # non-heap (i.e. C-implemented) base to capture primitive state.
+    for base in self.__class__.__mro__:
+        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
+            break
+    else:
+        base = object # not really reachable
+    if base is object:
+        state = None
+    else:
+        if base is self.__class__:
+            raise TypeError, "can't pickle %s objects" % base.__name__
+        state = base(self)
+    args = (self.__class__, base, state)
+    try:
+        getstate = self.__getstate__
+    except AttributeError:
+        try:
+            dict = self.__dict__
+        except AttributeError:
+            dict = None
+    else:
+        dict = getstate()
+    # NOTE: an *empty* dict/state falls through to the 2-tuple form,
+    # deliberately omitting the state item from the reduce value.
+    if dict:
+        return _reconstructor, args, dict
+    else:
+        return _reconstructor, args
diff --git a/lib-python/2.2/curses/__init__.py b/lib-python/2.2/curses/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/__init__.py
@@ -0,0 +1,54 @@
+"""curses
+
+The main package for curses support for Python.  Normally used by importing
+the package, and perhaps a particular module inside it.
+
+   import curses
+   from curses import textpad
+   curses.initscr()
+   ...
+   
+"""
+
+__revision__ = "$Id$"
+
+from _curses import *
+from curses.wrapper import wrapper
+
+# Some constants, most notably the ACS_* ones, are only added to the C
+# _curses module's dictionary after initscr() is called.  (Some
+# versions of SGI's curses don't define values for those constants
+# until initscr() has been called.)  This wrapper function calls the
+# underlying C initscr(), and then copies the constants from the
+# _curses module to the curses package's dictionary.  Don't do 'from
+# curses import *' if you'll be needing the ACS_* constants.
+
+def initscr():
+    """Initialize curses and copy late-bound constants into this package.
+
+    Wraps _curses.initscr(); afterwards the ACS_* and LINES/COLS values
+    (only defined by C curses after initialization) are published as
+    attributes of the curses package.  Returns the stdscr window.
+    """
+    import _curses, curses
+    stdscr = _curses.initscr()
+    for key, value in _curses.__dict__.items():
+        if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
+            setattr(curses, key, value)
+    
+    return stdscr
+
+# This is a similar wrapper for start_color(), which adds the COLORS and
+# COLOR_PAIRS variables which are only available after start_color() is
+# called.
+ 
+def start_color():
+    """Start color support and publish COLORS/COLOR_PAIRS on the package.
+
+    Wraps _curses.start_color(); those two values only exist in the C
+    module after the call succeeds.  Returns the C function's result.
+    """
+    import _curses, curses
+    retval = _curses.start_color()
+    if hasattr(_curses, 'COLORS'):
+        curses.COLORS = _curses.COLORS
+    if hasattr(_curses, 'COLOR_PAIRS'):
+        curses.COLOR_PAIRS = _curses.COLOR_PAIRS
+    return retval 
+
+# Import Python has_key() implementation if _curses doesn't contain has_key()
+
+try:
+    has_key
+except NameError:
+    from has_key import has_key
+
diff --git a/lib-python/2.2/curses/ascii.py b/lib-python/2.2/curses/ascii.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/ascii.py
@@ -0,0 +1,100 @@
+"""Constants and membership tests for ASCII characters"""
+
+NUL	= 0x00	# ^@
+SOH	= 0x01	# ^A
+STX	= 0x02	# ^B
+ETX	= 0x03	# ^C
+EOT	= 0x04	# ^D
+ENQ	= 0x05	# ^E
+ACK	= 0x06	# ^F
+BEL	= 0x07	# ^G
+BS	= 0x08	# ^H
+TAB	= 0x09	# ^I
+HT	= 0x09	# ^I
+LF	= 0x0a	# ^J
+NL	= 0x0a	# ^J
+VT	= 0x0b	# ^K
+FF	= 0x0c	# ^L
+CR	= 0x0d	# ^M
+SO	= 0x0e	# ^N
+SI	= 0x0f	# ^O
+DLE	= 0x10	# ^P
+DC1	= 0x11	# ^Q
+DC2	= 0x12	# ^R
+DC3	= 0x13	# ^S
+DC4	= 0x14	# ^T
+NAK	= 0x15	# ^U
+SYN	= 0x16	# ^V
+ETB	= 0x17	# ^W
+CAN	= 0x18	# ^X
+EM	= 0x19	# ^Y
+SUB	= 0x1a	# ^Z
+ESC	= 0x1b	# ^[
+FS	= 0x1c	# ^\
+GS	= 0x1d	# ^]
+RS	= 0x1e	# ^^
+US	= 0x1f	# ^_
+SP	= 0x20	# space
+DEL	= 0x7f	# delete
+
+controlnames = [
+"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
+"BS",  "HT",  "LF",  "VT",  "FF",  "CR",  "SO",  "SI",
+"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
+"CAN", "EM",  "SUB", "ESC", "FS",  "GS",  "RS",  "US",
+"SP"
+]
+
+def _ctoi(c):
+    # Normalize a one-character string or an int to its character code.
+    if type(c) == type(""):
+        return ord(c)
+    else:
+        return c
+
+def isalnum(c): return isalpha(c) or isdigit(c)
+def isalpha(c): return isupper(c) or islower(c)
+def isascii(c): return _ctoi(c) <= 127		# ?
+def isblank(c): return _ctoi(c) in (8,32)
+def iscntrl(c): return _ctoi(c) <= 31
+def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
+def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
+def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
+def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
+def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
+def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
+def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
+def isxdigit(c): return isdigit(c) or \
+    (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
+def isctrl(c): return _ctoi(c) < 32
+def ismeta(c): return _ctoi(c) > 127
+
+def ascii(c):
+    # Strip the high bit, returning the same kind (str or int) given.
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x7f)
+    else:
+        return _ctoi(c) & 0x7f
+
+def ctrl(c):
+    # Map a character to its control-key equivalent (e.g. 'a' -> ^A).
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x1f)
+    else:
+        return _ctoi(c) & 0x1f
+
+def alt(c):
+    # Set the meta bit (0x80), giving the Alt-modified character.
+    if type(c) == type(""):
+        return chr(_ctoi(c) | 0x80)
+    else:
+        return _ctoi(c) | 0x80
+
+def unctrl(c):
+    # Printable representation of a character: control characters as
+    # "^X", DEL as "^?", with a leading "!" when the meta bit is set.
+    bits = _ctoi(c)
+    if bits == 0x7f:
+        rep = "^?"
+    elif bits & 0x20:
+        rep = chr((bits & 0x7f) | 0x20)
+    else:
+        rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
+    if bits & 0x80:
+        return "!" + rep
+    return rep
+
diff --git a/lib-python/2.2/curses/has_key.py b/lib-python/2.2/curses/has_key.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/has_key.py
@@ -0,0 +1,189 @@
+
+#
+# Emulation of has_key() function for platforms that don't use ncurses
+#
+
+import _curses
+
+# Table mapping curses keys to the terminfo capability name
+
+_capability_names = {
+    _curses.KEY_A1: 'ka1', 
+    _curses.KEY_A3: 'ka3', 
+    _curses.KEY_B2: 'kb2', 
+    _curses.KEY_BACKSPACE: 'kbs', 
+    _curses.KEY_BEG: 'kbeg', 
+    _curses.KEY_BTAB: 'kcbt', 
+    _curses.KEY_C1: 'kc1', 
+    _curses.KEY_C3: 'kc3', 
+    _curses.KEY_CANCEL: 'kcan', 
+    _curses.KEY_CATAB: 'ktbc', 
+    _curses.KEY_CLEAR: 'kclr', 
+    _curses.KEY_CLOSE: 'kclo', 
+    _curses.KEY_COMMAND: 'kcmd', 
+    _curses.KEY_COPY: 'kcpy', 
+    _curses.KEY_CREATE: 'kcrt', 
+    _curses.KEY_CTAB: 'kctab', 
+    _curses.KEY_DC: 'kdch1', 
+    _curses.KEY_DL: 'kdl1', 
+    _curses.KEY_DOWN: 'kcud1', 
+    _curses.KEY_EIC: 'krmir', 
+    _curses.KEY_END: 'kend', 
+    _curses.KEY_ENTER: 'kent', 
+    _curses.KEY_EOL: 'kel', 
+    _curses.KEY_EOS: 'ked', 
+    _curses.KEY_EXIT: 'kext', 
+    _curses.KEY_F0: 'kf0', 
+    _curses.KEY_F1: 'kf1', 
+    _curses.KEY_F10: 'kf10', 
+    _curses.KEY_F11: 'kf11', 
+    _curses.KEY_F12: 'kf12', 
+    _curses.KEY_F13: 'kf13', 
+    _curses.KEY_F14: 'kf14', 
+    _curses.KEY_F15: 'kf15', 
+    _curses.KEY_F16: 'kf16', 
+    _curses.KEY_F17: 'kf17', 
+    _curses.KEY_F18: 'kf18', 
+    _curses.KEY_F19: 'kf19', 
+    _curses.KEY_F2: 'kf2', 
+    _curses.KEY_F20: 'kf20', 
+    _curses.KEY_F21: 'kf21', 
+    _curses.KEY_F22: 'kf22', 
+    _curses.KEY_F23: 'kf23', 
+    _curses.KEY_F24: 'kf24', 
+    _curses.KEY_F25: 'kf25', 
+    _curses.KEY_F26: 'kf26', 
+    _curses.KEY_F27: 'kf27', 
+    _curses.KEY_F28: 'kf28', 
+    _curses.KEY_F29: 'kf29', 
+    _curses.KEY_F3: 'kf3', 
+    _curses.KEY_F30: 'kf30', 
+    _curses.KEY_F31: 'kf31', 
+    _curses.KEY_F32: 'kf32', 
+    _curses.KEY_F33: 'kf33', 
+    _curses.KEY_F34: 'kf34', 
+    _curses.KEY_F35: 'kf35', 
+    _curses.KEY_F36: 'kf36', 
+    _curses.KEY_F37: 'kf37', 
+    _curses.KEY_F38: 'kf38', 
+    _curses.KEY_F39: 'kf39', 
+    _curses.KEY_F4: 'kf4', 
+    _curses.KEY_F40: 'kf40', 
+    _curses.KEY_F41: 'kf41', 
+    _curses.KEY_F42: 'kf42', 
+    _curses.KEY_F43: 'kf43', 
+    _curses.KEY_F44: 'kf44', 
+    _curses.KEY_F45: 'kf45', 
+    _curses.KEY_F46: 'kf46', 
+    _curses.KEY_F47: 'kf47', 
+    _curses.KEY_F48: 'kf48', 
+    _curses.KEY_F49: 'kf49', 
+    _curses.KEY_F5: 'kf5', 
+    _curses.KEY_F50: 'kf50', 
+    _curses.KEY_F51: 'kf51', 
+    _curses.KEY_F52: 'kf52', 
+    _curses.KEY_F53: 'kf53', 
+    _curses.KEY_F54: 'kf54', 
+    _curses.KEY_F55: 'kf55', 
+    _curses.KEY_F56: 'kf56', 
+    _curses.KEY_F57: 'kf57', 
+    _curses.KEY_F58: 'kf58', 
+    _curses.KEY_F59: 'kf59', 
+    _curses.KEY_F6: 'kf6', 
+    _curses.KEY_F60: 'kf60', 
+    _curses.KEY_F61: 'kf61', 
+    _curses.KEY_F62: 'kf62', 
+    _curses.KEY_F63: 'kf63', 
+    _curses.KEY_F7: 'kf7', 
+    _curses.KEY_F8: 'kf8', 
+    _curses.KEY_F9: 'kf9', 
+    _curses.KEY_FIND: 'kfnd', 
+    _curses.KEY_HELP: 'khlp', 
+    _curses.KEY_HOME: 'khome', 
+    _curses.KEY_IC: 'kich1', 
+    _curses.KEY_IL: 'kil1', 
+    _curses.KEY_LEFT: 'kcub1', 
+    _curses.KEY_LL: 'kll', 
+    _curses.KEY_MARK: 'kmrk', 
+    _curses.KEY_MESSAGE: 'kmsg', 
+    _curses.KEY_MOVE: 'kmov', 
+    _curses.KEY_NEXT: 'knxt', 
+    _curses.KEY_NPAGE: 'knp', 
+    _curses.KEY_OPEN: 'kopn', 
+    _curses.KEY_OPTIONS: 'kopt', 
+    _curses.KEY_PPAGE: 'kpp', 
+    _curses.KEY_PREVIOUS: 'kprv', 
+    _curses.KEY_PRINT: 'kprt', 
+    _curses.KEY_REDO: 'krdo', 
+    _curses.KEY_REFERENCE: 'kref', 
+    _curses.KEY_REFRESH: 'krfr', 
+    _curses.KEY_REPLACE: 'krpl', 
+    _curses.KEY_RESTART: 'krst', 
+    _curses.KEY_RESUME: 'kres', 
+    _curses.KEY_RIGHT: 'kcuf1', 
+    _curses.KEY_SAVE: 'ksav', 
+    _curses.KEY_SBEG: 'kBEG', 
+    _curses.KEY_SCANCEL: 'kCAN', 
+    _curses.KEY_SCOMMAND: 'kCMD', 
+    _curses.KEY_SCOPY: 'kCPY', 
+    _curses.KEY_SCREATE: 'kCRT', 
+    _curses.KEY_SDC: 'kDC', 
+    _curses.KEY_SDL: 'kDL', 
+    _curses.KEY_SELECT: 'kslt', 
+    _curses.KEY_SEND: 'kEND', 
+    _curses.KEY_SEOL: 'kEOL', 
+    _curses.KEY_SEXIT: 'kEXT', 
+    _curses.KEY_SF: 'kind', 
+    _curses.KEY_SFIND: 'kFND', 
+    _curses.KEY_SHELP: 'kHLP', 
+    _curses.KEY_SHOME: 'kHOM', 
+    _curses.KEY_SIC: 'kIC', 
+    _curses.KEY_SLEFT: 'kLFT', 
+    _curses.KEY_SMESSAGE: 'kMSG', 
+    _curses.KEY_SMOVE: 'kMOV', 
+    _curses.KEY_SNEXT: 'kNXT', 
+    _curses.KEY_SOPTIONS: 'kOPT', 
+    _curses.KEY_SPREVIOUS: 'kPRV', 
+    _curses.KEY_SPRINT: 'kPRT', 
+    _curses.KEY_SR: 'kri', 
+    _curses.KEY_SREDO: 'kRDO', 
+    _curses.KEY_SREPLACE: 'kRPL', 
+    _curses.KEY_SRIGHT: 'kRIT', 
+    _curses.KEY_SRSUME: 'kRES', 
+    _curses.KEY_SSAVE: 'kSAV', 
+    _curses.KEY_SSUSPEND: 'kSPD', 
+    _curses.KEY_STAB: 'khts', 
+    _curses.KEY_SUNDO: 'kUND', 
+    _curses.KEY_SUSPEND: 'kspd', 
+    _curses.KEY_UNDO: 'kund', 
+    _curses.KEY_UP: 'kcuu1'
+    }
+
+def has_key(ch):
+    """Return 1 if the current terminal defines keycode *ch*, else 0.
+
+    *ch* may be a one-character string or an integer keycode.
+    NOTE(review): keycodes absent from _capability_names raise
+    KeyError rather than returning 0 -- confirm callers expect that.
+    """
+    if type(ch) == type( '' ): ch = ord(ch)
+
+    # Figure out the correct capability name for the keycode.
+    capability_name = _capability_names[ch]
+
+    #Check the current terminal description for that capability;
+    #if present, return true, else return false.
+    if _curses.tigetstr( capability_name ): return 1
+    else: return 0
+
+if __name__ == '__main__':
+    # Compare the output of this implementation and the ncurses has_key,
+    # on platforms where has_key is already available
+    # NOTE(review): if initscr() itself fails, L is unbound when the
+    # finally clause runs -- acceptable for a manual self-test.
+    try:
+        L = []
+        _curses.initscr()
+        for key in _capability_names.keys():
+            system = _curses.has_key(key)
+            python = has_key(key)
+            if system != python:
+                L.append( 'Mismatch for key %s, system=%i, Python=%i'
+                          % (_curses.keyname( key ), system, python) )
+    finally:
+        # Restore the terminal before printing any mismatches.
+        _curses.endwin()
+        for i in L: print i
+        
+    
diff --git a/lib-python/2.2/curses/panel.py b/lib-python/2.2/curses/panel.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/panel.py
@@ -0,0 +1,9 @@
+"""curses.panel
+
+Module for using panels with curses.
+"""
+
+__revision__ = "$Id$"
+
+from _curses_panel import *
+
diff --git a/lib-python/2.2/curses/textpad.py b/lib-python/2.2/curses/textpad.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/textpad.py
@@ -0,0 +1,167 @@
+"""Simple textbox editing widget with Emacs-like keybindings."""
+
+import sys, curses, ascii
+
+def rectangle(win, uly, ulx, lry, lrx):
+    """Draw a rectangle with upper-left (uly, ulx), lower-right (lry, lrx).
+
+    Uses the alternate-character-set line and corner glyphs on *win*.
+    """
+    win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1)
+    win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
+    win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
+    win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1)
+    win.addch(uly, ulx, curses.ACS_ULCORNER)
+    win.addch(uly, lrx, curses.ACS_URCORNER)
+    win.addch(lry, lrx, curses.ACS_LRCORNER)
+    win.addch(lry, ulx, curses.ACS_LLCORNER)
+
+class Textbox:
+    """Editing widget using the interior of a window object.
+     Supports the following Emacs-like key bindings:
+
+    Ctrl-A      Go to left edge of window.
+    Ctrl-B      Cursor left, wrapping to previous line if appropriate.
+    Ctrl-D      Delete character under cursor.
+    Ctrl-E      Go to right edge (stripspaces off) or end of line (stripspaces on).
+    Ctrl-F      Cursor right, wrapping to next line when appropriate.
+    Ctrl-G      Terminate, returning the window contents.
+    Ctrl-H      Delete character backward.
+    Ctrl-J      Terminate if the window is 1 line, otherwise insert newline.
+    Ctrl-K      If line is blank, delete it, otherwise clear to end of line.
+    Ctrl-L      Refresh screen.
+    Ctrl-N      Cursor down; move down one line.
+    Ctrl-O      Insert a blank line at cursor location.
+    Ctrl-P      Cursor up; move up one line.
+
+    Move operations do nothing if the cursor is at an edge where the movement
+    is not possible.  The following synonyms are supported where possible:
+
+    KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
+    KEY_BACKSPACE = Ctrl-h
+    """
+    def __init__(self, win):
+        self.win = win
+        # maxy/maxx are the highest addressable coordinates (0-based).
+        (self.maxy, self.maxx) = win.getmaxyx()
+        self.maxy = self.maxy - 1
+        self.maxx = self.maxx - 1
+        # When true, trailing blanks are stripped by gather().
+        self.stripspaces = 1
+        self.lastcmd = None
+        win.keypad(1)
+
+    def _end_of_line(self, y):
+        "Go to the location of the first blank on the given line."
+        # Scan right-to-left for the last non-blank; return the column
+        # just past it (or 0 for an all-blank line).
+        last = self.maxx
+        while 1:
+            if ascii.ascii(self.win.inch(y, last)) != ascii.SP:
+                last = last + 1
+                break
+            elif last == 0:
+                break
+            last = last - 1
+        return last
+
+    def do_command(self, ch):
+        "Process a single editing command."
+        # Returns 0 to terminate editing, 1 to continue.
+        (y, x) = self.win.getyx()
+        self.lastcmd = ch
+        if ascii.isprint(ch):
+            if y < self.maxy or x < self.maxx:
+                # The try-catch ignores the error we trigger from some curses
+                # versions by trying to write into the lowest-rightmost spot
+                # in the window.
+                try:
+                    self.win.addch(ch)
+                except curses.error:
+                    pass
+        elif ch == ascii.SOH:				# ^a
+            self.win.move(y, 0)
+        elif ch in (ascii.STX,curses.KEY_LEFT, ascii.BS,curses.KEY_BACKSPACE):
+            if x > 0:
+                self.win.move(y, x-1)
+            elif y == 0:
+                pass
+            elif self.stripspaces:
+                self.win.move(y-1, self._end_of_line(y-1))
+            else:
+                self.win.move(y-1, self.maxx)
+            # Backspace additionally deletes the character moved onto.
+            if ch in (ascii.BS, curses.KEY_BACKSPACE):
+                self.win.delch()
+        elif ch == ascii.EOT:				# ^d
+            self.win.delch()
+        elif ch == ascii.ENQ:				# ^e
+            if self.stripspaces:
+                self.win.move(y, self._end_of_line(y))
+            else:
+                self.win.move(y, self.maxx)
+        elif ch in (ascii.ACK, curses.KEY_RIGHT):	# ^f
+            if x < self.maxx:
+                self.win.move(y, x+1)
+            elif y == self.maxy:
+                pass
+            else:
+                self.win.move(y+1, 0)
+        elif ch == ascii.BEL:				# ^g
+            return 0
+        elif ch == ascii.NL:				# ^j
+            if self.maxy == 0:
+                return 0
+            elif y < self.maxy:
+                self.win.move(y+1, 0)
+        elif ch == ascii.VT:				# ^k
+            if x == 0 and self._end_of_line(y) == 0:
+                self.win.deleteln()
+            else:
+                self.win.clrtoeol()
+        elif ch == ascii.FF:				# ^l
+            self.win.refresh()
+        elif ch in (ascii.SO, curses.KEY_DOWN):		# ^n
+            if y < self.maxy:
+                self.win.move(y+1, x)
+                # Clamp the cursor to the end of the shorter line below.
+                if x > self._end_of_line(y+1):
+                    self.win.move(y+1, self._end_of_line(y+1))
+        elif ch == ascii.SI:				# ^o
+            self.win.insertln()
+        elif ch in (ascii.DLE, curses.KEY_UP):		# ^p
+            if y > 0:
+                self.win.move(y-1, x)
+                if x > self._end_of_line(y-1):
+                    self.win.move(y-1, self._end_of_line(y-1))
+        return 1
+        
+    def gather(self):
+        "Collect and return the contents of the window."
+        result = ""
+        for y in range(self.maxy+1):
+            self.win.move(y, 0)
+            stop = self._end_of_line(y)
+            #sys.stderr.write("y=%d, _end_of_line(y)=%d\n" % (y, stop))
+            # Skip entirely blank lines when stripping spaces.
+            if stop == 0 and self.stripspaces:
+                continue
+            for x in range(self.maxx+1):
+                if self.stripspaces and x == stop:
+                    break
+                result = result + chr(ascii.ascii(self.win.inch(y, x)))
+            if self.maxy > 0:
+                result = result + "\n"
+        return result
+
+    def edit(self, validate=None):
+        "Edit in the widget window and collect the results."
+        # Reads keys until do_command() returns 0; an optional validate
+        # callable may translate or suppress (by returning falsy) keys.
+        while 1:
+            ch = self.win.getch()
+            if validate:
+                ch = validate(ch)
+            if not ch:
+                continue
+            if not self.do_command(ch):
+                break
+            self.win.refresh()
+        return self.gather()
+
+if __name__ == '__main__':
+    # Manual demo: a 4x9 edit box with a drawn border; type ^G to end.
+    def test_editbox(stdscr):
+        win = curses.newwin(4, 9, 15, 20)
+        rectangle(stdscr, 14, 19, 19, 29)
+        stdscr.refresh()
+        return Textbox(win).edit()
+
+    str = curses.wrapper(test_editbox)
+    print str
diff --git a/lib-python/2.2/curses/wrapper.py b/lib-python/2.2/curses/wrapper.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/wrapper.py
@@ -0,0 +1,63 @@
+"""curses.wrapper
+
+Contains one function, wrapper(), which runs another function which
+should be the rest of your curses-based application.  If the
+application raises an exception, wrapper() will restore the terminal
+to a sane state so you can read the resulting traceback.
+
+"""
+
+import sys, curses
+
+def wrapper(func, *rest):
+    """Wrapper function that initializes curses and calls another function,
+    restoring normal keyboard/screen behavior on error.
+    The callable object 'func' is then passed the main window 'stdscr'
+    as its first argument, followed by any other arguments passed to
+    wrapper().
+    """
+    
+    res = None
+    try:
+        # Initialize curses
+        stdscr=curses.initscr()
+        
+        # Turn off echoing of keys, and enter cbreak mode,
+        # where no buffering is performed on keyboard input
+        curses.noecho()
+        curses.cbreak()
+
+        # In keypad mode, escape sequences for special keys
+        # (like the cursor keys) will be interpreted and
+        # a special value like curses.KEY_LEFT will be returned
+        stdscr.keypad(1)
+
+        # Start color, too.  Harmless if the terminal doesn't have
+        # color; user can test with has_color() later on.  The try/catch
+        # works around a minor bit of over-conscientiousness in the curses
+        # module -- the error return from C start_color() is ignorable.
+        try:
+            curses.start_color()
+        except:
+            pass
+
+        res = apply(func, (stdscr,) + rest)
+    except:
+        # In the event of an error, restore the terminal
+        # to a sane state.
+        stdscr.keypad(0)
+        curses.echo()
+        curses.nocbreak()
+        curses.endwin()
+        
+        # Pass the exception upwards
+        (exc_type, exc_value, exc_traceback) = sys.exc_info()
+        raise exc_type, exc_value, exc_traceback
+    else:
+        # Set everything back to normal
+        stdscr.keypad(0)
+        curses.echo()
+        curses.nocbreak()
+        curses.endwin()		 # Terminate curses
+
+        return res
diff --git a/lib-python/2.2/dbhash.py b/lib-python/2.2/dbhash.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dbhash.py
@@ -0,0 +1,16 @@
"""Provide a (g)dbm-compatible interface to bsdhash.hashopen."""

import sys
try:
    import bsddb
except ImportError:
    # prevent a second import of this module from spuriously succeeding
    del sys.modules[__name__]
    raise

__all__ = ["error","open"]

error = bsddb.error                     # Exported for anydbm

def open(file, flag = 'r', mode=0666):
    # Open database 'file', mirroring the (g)dbm calling convention:
    # 'flag' is one of 'r'/'w'/'c'/'n' and 'mode' gives the Unix
    # permission bits used if the file must be created.
    return bsddb.hashopen(file, flag, mode)
diff --git a/lib-python/2.2/difflib.py b/lib-python/2.2/difflib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/difflib.py
@@ -0,0 +1,1088 @@
+#! /usr/bin/env python
+
+from __future__ import generators
+
+"""
+Module difflib -- helpers for computing deltas between objects.
+
+Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
+    Use SequenceMatcher to return list of the best "good enough" matches.
+
+Function ndiff(a, b):
+    Return a delta: the difference between `a` and `b` (lists of strings).
+
+Function restore(delta, which):
+    Return one of the two sequences that generated an ndiff delta.
+
+Class SequenceMatcher:
+    A flexible class for comparing pairs of sequences of any type.
+
+Class Differ:
+    For producing human-readable deltas from sequences of lines of text.
+"""
+
+__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
+           'Differ', 'IS_CHARACTER_JUNK', 'IS_LINE_JUNK']
+
+def _calculate_ratio(matches, length):
+    if length:
+        return 2.0 * matches / length
+    return 1.0
+
+class SequenceMatcher:
+
+    """
+    SequenceMatcher is a flexible class for comparing pairs of sequences of
+    any type, so long as the sequence elements are hashable.  The basic
+    algorithm predates, and is a little fancier than, an algorithm
+    published in the late 1980's by Ratcliff and Obershelp under the
+    hyperbolic name "gestalt pattern matching".  The basic idea is to find
+    the longest contiguous matching subsequence that contains no "junk"
+    elements (R-O doesn't address junk).  The same idea is then applied
+    recursively to the pieces of the sequences to the left and to the right
+    of the matching subsequence.  This does not yield minimal edit
+    sequences, but does tend to yield matches that "look right" to people.
+
+    SequenceMatcher tries to compute a "human-friendly diff" between two
+    sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+    longest *contiguous* & junk-free matching subsequence.  That's what
+    catches peoples' eyes.  The Windows(tm) windiff has another interesting
+    notion, pairing up elements that appear uniquely in each sequence.
+    That, and the method here, appear to yield more intuitive difference
+    reports than does diff.  This method appears to be the least vulnerable
+    to synching up on blocks of "junk lines", though (like blank lines in
+    ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+    because this is the only method of the 3 that has a *concept* of
+    "junk" <wink>.
+
+    Example, comparing two strings, and considering blanks to be "junk":
+
+    >>> s = SequenceMatcher(lambda x: x == " ",
+    ...                     "private Thread currentThread;",
+    ...                     "private volatile Thread currentThread;")
+    >>>
+
+    .ratio() returns a float in [0, 1], measuring the "similarity" of the
+    sequences.  As a rule of thumb, a .ratio() value over 0.6 means the
+    sequences are close matches:
+
+    >>> print round(s.ratio(), 3)
+    0.866
+    >>>
+
+    If you're only interested in where the sequences match,
+    .get_matching_blocks() is handy:
+
+    >>> for block in s.get_matching_blocks():
+    ...     print "a[%d] and b[%d] match for %d elements" % block
+    a[0] and b[0] match for 8 elements
+    a[8] and b[17] match for 6 elements
+    a[14] and b[23] match for 15 elements
+    a[29] and b[38] match for 0 elements
+
+    Note that the last tuple returned by .get_matching_blocks() is always a
+    dummy, (len(a), len(b), 0), and this is the only case in which the last
+    tuple element (number of elements matched) is 0.
+
+    If you want to know how to change the first sequence into the second,
+    use .get_opcodes():
+
+    >>> for opcode in s.get_opcodes():
+    ...     print "%6s a[%d:%d] b[%d:%d]" % opcode
+     equal a[0:8] b[0:8]
+    insert a[8:8] b[8:17]
+     equal a[8:14] b[17:23]
+     equal a[14:29] b[23:38]
+
+    See the Differ class for a fancy human-friendly file differencer, which
+    uses SequenceMatcher both to compare sequences of lines, and to compare
+    sequences of characters within similar (near-matching) lines.
+
+    See also function get_close_matches() in this module, which shows how
+    simple code building on SequenceMatcher can be used to do useful work.
+
+    Timing:  Basic R-O is cubic time worst case and quadratic time expected
+    case.  SequenceMatcher is quadratic time for the worst case and has
+    expected-case behavior dependent in a complicated way on how many
+    elements the sequences have in common; best case time is linear.
+
+    Methods:
+
+    __init__(isjunk=None, a='', b='')
+        Construct a SequenceMatcher.
+
+    set_seqs(a, b)
+        Set the two sequences to be compared.
+
+    set_seq1(a)
+        Set the first sequence to be compared.
+
+    set_seq2(b)
+        Set the second sequence to be compared.
+
+    find_longest_match(alo, ahi, blo, bhi)
+        Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+    get_matching_blocks()
+        Return list of triples describing matching subsequences.
+
+    get_opcodes()
+        Return list of 5-tuples describing how to turn a into b.
+
+    ratio()
+        Return a measure of the sequences' similarity (float in [0,1]).
+
+    quick_ratio()
+        Return an upper bound on .ratio() relatively quickly.
+
+    real_quick_ratio()
+        Return an upper bound on ratio() very quickly.
+    """
+
    def __init__(self, isjunk=None, a='', b=''):
        """Construct a SequenceMatcher.

        Optional arg isjunk is None (the default), or a one-argument
        function that takes a sequence element and returns true iff the
        element is junk.  None is equivalent to passing "lambda x: 0", i.e.
        no elements are considered to be junk.  For example, pass
            lambda x: x in " \\t"
        if you're comparing lines as sequences of characters, and don't
        want to synch up on blanks or hard tabs.

        Optional arg a is the first of two sequences to be compared.  By
        default, an empty string.  The elements of a must be hashable.  See
        also .set_seqs() and .set_seq1().

        Optional arg b is the second of two sequences to be compared.  By
        default, an empty string.  The elements of b must be hashable. See
        also .set_seqs() and .set_seq2().
        """

        # Members:
        # a
        #      first sequence
        # b
        #      second sequence; differences are computed as "what do
        #      we need to do to 'a' to change it into 'b'?"
        # b2j
        #      for x in b, b2j[x] is a list of the indices (into b)
        #      at which x appears; junk elements do not appear
        # b2jhas
        #      b2j.has_key
        # fullbcount
        #      for x in b, fullbcount[x] == the number of times x
        #      appears in b; only materialized if really needed (used
        #      only for computing quick_ratio())
        # matching_blocks
        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
        #      ascending & non-overlapping in i and in j; terminated by
        #      a dummy (len(a), len(b), 0) sentinel
        # opcodes
        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
        #      one of
        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
        #          'delete'    a[i1:i2] should be deleted
        #          'insert'    b[j1:j2] should be inserted
        #          'equal'     a[i1:i2] == b[j1:j2]
        # isjunk
        #      a user-supplied function taking a sequence element and
        #      returning true iff the element is "junk" -- this has
        #      subtle but helpful effects on the algorithm, which I'll
        #      get around to writing up someday <0.9 wink>.
        #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
        # isbjunk
        #      for x in b, isbjunk(x) == isjunk(x) but much faster;
        #      it's really the has_key method of a hidden dict.
        #      DOES NOT WORK for x in a!

        self.isjunk = isjunk
        # Start with sequences no real input can be identical to (the
        # 'is' checks in set_seq1/set_seq2), so set_seqs() does real work.
        self.a = self.b = None
        self.set_seqs(a, b)
+
    def set_seqs(self, a, b):
        """Set the two sequences to be compared.

        >>> s = SequenceMatcher()
        >>> s.set_seqs("abcd", "bcde")
        >>> s.ratio()
        0.75
        """

        # set_seq1() only invalidates cached results; set_seq2() also
        # rebuilds the b-keyed index (see __chain_b).
        self.set_seq1(a)
        self.set_seq2(b)
+
    def set_seq1(self, a):
        """Set the first sequence to be compared.

        The second sequence to be compared is not changed.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq1("bcde")
        >>> s.ratio()
        1.0
        >>>

        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.

        See also set_seqs() and set_seq2().
        """

        if a is self.a:
            # Same object as the current first sequence; cached
            # matching_blocks/opcodes remain valid.
            return
        self.a = a
        # Invalidate everything computed from the old 'a'.
        self.matching_blocks = self.opcodes = None
+
    def set_seq2(self, b):
        """Set the second sequence to be compared.

        The first sequence to be compared is not changed.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq2("abcd")
        >>> s.ratio()
        1.0
        >>>

        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.

        See also set_seqs() and set_seq1().
        """

        if b is self.b:
            # Same object as the current second sequence; caches valid.
            return
        self.b = b
        self.matching_blocks = self.opcodes = None
        self.fullbcount = None
        # Rebuild the b2j index and the fast junk classifier for the new b.
        self.__chain_b()
+
+    # For each element x in b, set b2j[x] to a list of the indices in
+    # b where x appears; the indices are in increasing order; note that
+    # the number of times x appears in b is len(b2j[x]) ...
+    # when self.isjunk is defined, junk elements don't show up in this
+    # map at all, which stops the central find_longest_match method
+    # from starting any matching block at a junk element ...
+    # also creates the fast isbjunk function ...
+    # note that this is only called when b changes; so for cross-product
+    # kinds of matches, it's best to call set_seq2 once, then set_seq1
+    # repeatedly
+
    def __chain_b(self):
        # Because isjunk is a user-defined (not C) function, and we test
        # for junk a LOT, it's important to minimize the number of calls.
        # Before the tricks described here, __chain_b was by far the most
        # time-consuming routine in the whole module!  If anyone sees
        # Jim Roskind, thank him again for profile.py -- I never would
        # have guessed that.
        # The first trick is to build b2j ignoring the possibility
        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
        # out the junk later is much cheaper than building b2j "right"
        # from the start.
        b = self.b
        self.b2j = b2j = {}
        self.b2jhas = b2jhas = b2j.has_key  # bound method, localized for speed
        for i in xrange(len(b)):
            elt = b[i]
            if b2jhas(elt):
                b2j[elt].append(i)
            else:
                b2j[elt] = [i]

        # Now b2j.keys() contains elements uniquely, and especially when
        # the sequence is a string, that's usually a good deal smaller
        # than len(string).  The difference is the number of isjunk calls
        # saved.
        isjunk, junkdict = self.isjunk, {}
        if isjunk:
            for elt in b2j.keys():
                if isjunk(elt):
                    junkdict[elt] = 1   # value irrelevant; it's a set
                    del b2j[elt]

        # Now for x in b, isjunk(x) == junkdict.has_key(x), but the
        # latter is much faster.  Note too that while there may be a
        # lot of junk in the sequence, the number of *unique* junk
        # elements is probably small.  So the memory burden of keeping
        # this dict alive is likely trivial compared to the size of b2j.
        self.isbjunk = junkdict.has_key
+
    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (0, 4, 5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (1, 0, 4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        (0, 0, 0)
        """

        # CAUTION:  stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.

        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in xrange(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get    # localize the bound method for speed
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                # extend the match ending at a[i-1]/b[j-1], if any
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            j2len = newj2len

        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        return besti, bestj, bestsize
+
    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.

        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.

        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.

        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> s.get_matching_blocks()
        [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
        """

        if self.matching_blocks is not None:
            # Cached by a previous call and not invalidated by
            # set_seq1/set_seq2 since.
            return self.matching_blocks
        self.matching_blocks = []
        la, lb = len(self.a), len(self.b)
        self.__helper(0, la, 0, lb, self.matching_blocks)
        # Terminating sentinel; the only triple whose size is 0.
        self.matching_blocks.append( (la, lb, 0) )
        return self.matching_blocks
+
+    # builds list of matching blocks covering a[alo:ahi] and
+    # b[blo:bhi], appending them in increasing order to answer
+
+    def __helper(self, alo, ahi, blo, bhi, answer):
+        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
+        # a[alo:i] vs b[blo:j] unknown
+        # a[i:i+k] same as b[j:j+k]
+        # a[i+k:ahi] vs b[j+k:bhi] unknown
+        if k:
+            if alo < i and blo < j:
+                self.__helper(alo, i, blo, j, answer)
+            answer.append(x)
+            if i+k < ahi and j+k < bhi:
+                self.__helper(i+k, ahi, j+k, bhi, answer)
+
+    def get_opcodes(self):
+        """Return list of 5-tuples describing how to turn a into b.
+
+        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+        tuple preceding it, and likewise for j1 == the previous j2.
+
+        The tags are strings, with these meanings:
+
+        'replace':  a[i1:i2] should be replaced by b[j1:j2]
+        'delete':   a[i1:i2] should be deleted.
+                    Note that j1==j2 in this case.
+        'insert':   b[j1:j2] should be inserted at a[i1:i1].
+                    Note that i1==i2 in this case.
+        'equal':    a[i1:i2] == b[j1:j2]
+
+        >>> a = "qabxcd"
+        >>> b = "abycdf"
+        >>> s = SequenceMatcher(None, a, b)
+        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
+        ...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
+        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
+         delete a[0:1] (q) b[0:0] ()
+          equal a[1:3] (ab) b[0:2] (ab)
+        replace a[3:4] (x) b[2:3] (y)
+          equal a[4:6] (cd) b[3:5] (cd)
+         insert a[6:6] () b[5:6] (f)
+        """
+
+        if self.opcodes is not None:
+            return self.opcodes
+        i = j = 0
+        self.opcodes = answer = []
+        for ai, bj, size in self.get_matching_blocks():
+            # invariant:  we've pumped out correct diffs to change
+            # a[:i] into b[:j], and the next matching block is
+            # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
+            # out a diff to change a[i:ai] into b[j:bj], pump out
+            # the matching block, and move (i,j) beyond the match
+            tag = ''
+            if i < ai and j < bj:
+                tag = 'replace'
+            elif i < ai:
+                tag = 'delete'
+            elif j < bj:
+                tag = 'insert'
+            if tag:
+                answer.append( (tag, i, ai, j, bj) )
+            i, j = ai+size, bj+size
+            # the list of matching blocks is terminated by a
+            # sentinel with size 0
+            if size:
+                answer.append( ('equal', ai, i, bj, j) )
+        return answer
+
+    def ratio(self):
+        """Return a measure of the sequences' similarity (float in [0,1]).
+
+        Where T is the total number of elements in both sequences, and
+        M is the number of matches, this is 2,0*M / T.
+        Note that this is 1 if the sequences are identical, and 0 if
+        they have nothing in common.
+
+        .ratio() is expensive to compute if you haven't already computed
+        .get_matching_blocks() or .get_opcodes(), in which case you may
+        want to try .quick_ratio() or .real_quick_ratio() first to get an
+        upper bound.
+
+        >>> s = SequenceMatcher(None, "abcd", "bcde")
+        >>> s.ratio()
+        0.75
+        >>> s.quick_ratio()
+        0.75
+        >>> s.real_quick_ratio()
+        1.0
+        """
+
+        matches = reduce(lambda sum, triple: sum + triple[-1],
+                         self.get_matching_blocks(), 0)
+        return _calculate_ratio(matches, len(self.a) + len(self.b))
+
    def quick_ratio(self):
        """Return an upper bound on ratio() relatively quickly.

        This isn't defined beyond that it is an upper bound on .ratio(), and
        is faster to compute.
        """

        # viewing a and b as multisets, set matches to the cardinality
        # of their intersection; this counts the number of matches
        # without regard to order, so is clearly an upper bound
        if self.fullbcount is None:
            # Lazily build and cache the element -> count map for b.
            self.fullbcount = fullbcount = {}
            for elt in self.b:
                fullbcount[elt] = fullbcount.get(elt, 0) + 1
        fullbcount = self.fullbcount
        # avail[x] is the number of times x appears in 'b' less the
        # number of times we've seen it in 'a' so far ... kinda
        avail = {}
        availhas, matches = avail.has_key, 0
        for elt in self.a:
            if availhas(elt):
                numb = avail[elt]
            else:
                numb = fullbcount.get(elt, 0)
            # Decrement even below zero so repeats in 'a' beyond b's
            # supply of elt stop counting as matches.
            avail[elt] = numb - 1
            if numb > 0:
                matches = matches + 1
        return _calculate_ratio(matches, len(self.a) + len(self.b))
+
+    def real_quick_ratio(self):
+        """Return an upper bound on ratio() very quickly.
+
+        This isn't defined beyond that it is an upper bound on .ratio(), and
+        is faster to compute than either .ratio() or .quick_ratio().
+        """
+
+        la, lb = len(self.a), len(self.b)
+        # can't have more matches than the number of elements in the
+        # shorter sequence
+        return _calculate_ratio(min(la, lb), la + lb)
+
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.

    word is a sequence for which close matches are desired (typically a
    string).

    possibilities is a list of sequences against which to match word
    (typically a list of strings).

    Optional arg n (default 3) is the maximum number of close matches to
    return.  n must be > 0.

    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
    that don't score at least that similar to word are ignored.

    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.

    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """

    if not n >  0:
        raise ValueError("n must be > 0: " + `n`)
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: " + `cutoff`)
    result = []
    s = SequenceMatcher()
    # The expensive per-word caches are keyed on seq2, so build them once
    # here and vary only seq1 inside the loop.
    s.set_seq2(word)
    for x in possibilities:
        s.set_seq1(x)
        # Cheapest upper bound first; the expensive s.ratio() is only
        # computed when both quick bounds already clear the cutoff.
        if s.real_quick_ratio() >= cutoff and \
           s.quick_ratio() >= cutoff and \
           s.ratio() >= cutoff:
            result.append((s.ratio(), x))
    # Sort by score.
    result.sort()
    # Retain only the best n.
    result = result[-n:]
    # Move best-scorer to head of list.
    result.reverse()
    # Strip scores.
    return [x for score, x in result]
+
+
+def _count_leading(line, ch):
+    """
+    Return number of `ch` characters at the start of `line`.
+
+    Example:
+
+    >>> _count_leading('   abc', ' ')
+    3
+    """
+
+    i, n = 0, len(line)
+    while i < n and line[i] == ch:
+        i += 1
+    return i
+
+class Differ:
+    r"""
+    Differ is a class for comparing sequences of lines of text, and
+    producing human-readable differences or deltas.  Differ uses
+    SequenceMatcher both to compare sequences of lines, and to compare
+    sequences of characters within similar (near-matching) lines.
+
+    Each line of a Differ delta begins with a two-letter code:
+
+        '- '    line unique to sequence 1
+        '+ '    line unique to sequence 2
+        '  '    line common to both sequences
+        '? '    line not present in either input sequence
+
+    Lines beginning with '? ' attempt to guide the eye to intraline
+    differences, and were not present in either input sequence.  These lines
+    can be confusing if the sequences contain tab characters.
+
+    Note that Differ makes no claim to produce a *minimal* diff.  To the
+    contrary, minimal diffs are often counter-intuitive, because they synch
+    up anywhere possible, sometimes accidental matches 100 pages apart.
+    Restricting synch points to contiguous matches preserves some notion of
+    locality, at the occasional cost of producing a longer diff.
+
+    Example: Comparing two texts.
+
+    First we set up the texts, sequences of individual single-line strings
+    ending with newlines (such sequences can also be obtained from the
+    `readlines()` method of file-like objects):
+
+    >>> text1 = '''  1. Beautiful is better than ugly.
+    ...   2. Explicit is better than implicit.
+    ...   3. Simple is better than complex.
+    ...   4. Complex is better than complicated.
+    ... '''.splitlines(1)
+    >>> len(text1)
+    4
+    >>> text1[0][-1]
+    '\n'
+    >>> text2 = '''  1. Beautiful is better than ugly.
+    ...   3.   Simple is better than complex.
+    ...   4. Complicated is better than complex.
+    ...   5. Flat is better than nested.
+    ... '''.splitlines(1)
+
+    Next we instantiate a Differ object:
+
+    >>> d = Differ()
+
+    Note that when instantiating a Differ object we may pass functions to
+    filter out line and character 'junk'.  See Differ.__init__ for details.
+
+    Finally, we compare the two:
+
+    >>> result = list(d.compare(text1, text2))
+
+    'result' is a list of strings, so let's pretty-print it:
+
+    >>> from pprint import pprint as _pprint
+    >>> _pprint(result)
+    ['    1. Beautiful is better than ugly.\n',
+     '-   2. Explicit is better than implicit.\n',
+     '-   3. Simple is better than complex.\n',
+     '+   3.   Simple is better than complex.\n',
+     '?     ++\n',
+     '-   4. Complex is better than complicated.\n',
+     '?            ^                     ---- ^\n',
+     '+   4. Complicated is better than complex.\n',
+     '?           ++++ ^                      ^\n',
+     '+   5. Flat is better than nested.\n']
+
+    As a single multi-line string it looks like this:
+
+    >>> print ''.join(result),
+        1. Beautiful is better than ugly.
+    -   2. Explicit is better than implicit.
+    -   3. Simple is better than complex.
+    +   3.   Simple is better than complex.
+    ?     ++
+    -   4. Complex is better than complicated.
+    ?            ^                     ---- ^
+    +   4. Complicated is better than complex.
+    ?           ++++ ^                      ^
+    +   5. Flat is better than nested.
+
+    Methods:
+
+    __init__(linejunk=None, charjunk=None)
+        Construct a text differencer, with optional filters.
+
+    compare(a, b)
+        Compare two sequences of lines; generate the resulting delta.
+    """
+
    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.

        The two optional keyword parameters are for filter functions:

        - `linejunk`: A function that should accept a single string argument,
          and return true iff the string is junk. The module-level function
          `IS_LINE_JUNK` may be used to filter out lines without visible
          characters, except for at most one splat ('#').

        - `charjunk`: A function that should accept a string of length 1. The
          module-level function `IS_CHARACTER_JUNK` may be used to filter out
          whitespace characters (a blank or tab; **note**: bad idea to include
          newline in this!).
        """

        # Stored as-is; None means "no filtering" for either hook.
        self.linejunk = linejunk
        self.charjunk = charjunk
+
    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.

        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects.  The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writeline()
        method of a file-like object.

        Example:

        >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
        ...                                'ore\ntree\nemu\n'.splitlines(1))),
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """

        # A line-level SequenceMatcher decides which regions changed;
        # each opcode tag is rendered by the matching helper generator.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError, 'unknown tag ' + `tag`

            # Each helper returns a generator; flatten it into the delta.
            for line in g:
                yield line
+
+    def _dump(self, tag, x, lo, hi):
+        """Generate comparison results for a same-tagged range."""
+        for i in xrange(lo, hi):
+            yield '%s %s' % (tag, x[i])
+
+    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
+        assert alo < ahi and blo < bhi
+        # dump the shorter block first -- reduces the burden on short-term
+        # memory if the blocks are of very different sizes
+        if bhi - blo < ahi - alo:
+            first  = self._dump('+', b, blo, bhi)
+            second = self._dump('-', a, alo, ahi)
+        else:
+            first  = self._dump('-', a, alo, ahi)
+            second = self._dump('+', b, blo, bhi)
+
+        for g in first, second:
+            for line in g:
+                yield line
+
    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print ''.join(results),
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """

        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None   # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't want to synch up
        # on junk -- unless we have to)
        for j in xrange(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in xrange(alo, ahi):
                ai = a[i]
                if ai == bj:
                    # remember only the first identical pair
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
                    yield line
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical

        # pump out diffs from before the synch point
        for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
            yield line

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError, 'unknown tag ' + `tag`
            for line in self._qformat(aelt, belt, atags, btags):
                yield line
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
            yield line
+
+    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
+        g = []
+        if alo < ahi:
+            if blo < bhi:
+                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
+            else:
+                g = self._dump('-', a, alo, ahi)
+        elif blo < bhi:
+            g = self._dump('+', b, blo, bhi)
+
+        for line in g:
+            yield line
+
+    def _qformat(self, aline, bline, atags, btags):
+        r"""
+        Format "?" output and deal with leading tabs.
+
+        Example:
+
+        >>> d = Differ()
+        >>> d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
+        ...            '  ^ ^  ^      ', '+  ^ ^  ^      ')
+        >>> for line in d.results: print repr(line)
+        ...
+        '- \tabcDefghiJkl\n'
+        '? \t ^ ^  ^\n'
+        '+ \t\tabcdefGhijkl\n'
+        '? \t  ^ ^  ^\n'
+        """
+
+        # Can hurt, but will probably help most of the time.
+        common = min(_count_leading(aline, "\t"),
+                     _count_leading(bline, "\t"))
+        common = min(common, _count_leading(atags[:common], " "))
+        atags = atags[common:].rstrip()
+        btags = btags[common:].rstrip()
+
+        yield "- " + aline
+        if atags:
+            yield "? %s%s\n" % ("\t" * common, atags)
+
+        yield "+ " + bline
+        if btags:
+            yield "? %s%s\n" % ("\t" * common, btags)
+
+# With respect to junk, an earlier version of ndiff simply refused to
+# *start* a match with a junk element.  The result was cases like this:
+#     before: private Thread currentThread;
+#     after:  private volatile Thread currentThread;
+# If you consider whitespace to be junk, the longest contiguous match
+# not starting with junk is "e Thread currentThread".  So ndiff reported
+# that "e volatil" was inserted between the 't' and the 'e' in "private".
+# While an accurate view, to people that's absurd.  The current version
+# looks for matching blocks that are entirely junk-free, then extends the
+# longest one of those as far as possible but only with matching junk.
+# So now "currentThread" is matched, then extended to suck up the
+# preceding blank; then "private" is matched, and extended to suck up the
+# following blank; then "Thread" is matched; and finally ndiff reports
+# that "volatile " was inserted before "Thread".  The only quibble
+# remaining is that perhaps it was really the case that " volatile"
+# was inserted after "private".  I can live with that <wink>.
+
+import re
+
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.

    The matcher is bound once, at definition time, via the `pat` default
    argument (which is why the module can `del re` afterwards).

    Examples:

    >>> IS_LINE_JUNK('\n')
    1
    >>> IS_LINE_JUNK('  #   \n')
    1
    >>> IS_LINE_JUNK('hello\n')
    0
    """

    match = pat(line)
    return match is not None
+
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return 1 for ignorable character: iff `ch` is a space or tab.

    `ws` is bound once at definition time; the membership test covers
    exactly blank and tab (deliberately not newline).

    Examples:

    >>> IS_CHARACTER_JUNK(' ')
    1
    >>> IS_CHARACTER_JUNK('\t')
    1
    >>> IS_CHARACTER_JUNK('\n')
    0
    >>> IS_CHARACTER_JUNK('x')
    0
    """

    return ch in ws
+
+del re
+
def ndiff(a, b, linejunk=IS_LINE_JUNK, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.

    Optional keyword parameters `linejunk` and `charjunk` are junk-filter
    functions (or None):

    - linejunk: A function that accepts a single string argument and
      returns true iff the string is junk.  The default, module-level
      IS_LINE_JUNK, filters out lines without visible characters, except
      for at most one splat ('#').

    - charjunk: A function that accepts a string of length 1.  The
      default, module-level IS_CHARACTER_JUNK, filters out whitespace
      characters (a blank or tab; note: bad idea to include newline in
      this!).

    Tools/scripts/ndiff.py is a command-line front-end to this function.

    Example:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> print ''.join(diff),
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
+
def restore(delta, which):
    r"""
    Generate one of the two sequences that generated a delta.

    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
    lines originating from file 1 or 2 (parameter `which`), stripping off line
    prefixes.

    Note this is a generator, so a bad `which` raises ValueError only
    when the result is first iterated, not at call time.

    Examples:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> diff = list(diff)
    >>> print ''.join(restore(diff, 1)),
    one
    two
    three
    >>> print ''.join(restore(diff, 2)),
    ore
    tree
    emu
    """
    try:
        tag = {1: "- ", 2: "+ "}[int(which)]
    except KeyError:
        raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
                           % which)
    # Keep unchanged lines ("  ") plus those tagged for the chosen file.
    prefixes = ("  ", tag)
    for line in delta:
        if line[:2] in prefixes:
            yield line[2:]
+
def _test():
    # Run this module's doctests; importing difflib (rather than using
    # the current module object directly) follows stdlib convention.
    import doctest, difflib
    return doctest.testmod(difflib)

if __name__ == "__main__":
    _test()
diff --git a/lib-python/2.2/dircache.py b/lib-python/2.2/dircache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dircache.py
@@ -0,0 +1,44 @@
+"""Read and cache directory listings.
+
+The listdir() routine returns a sorted list of the files in a directory,
+using a cache to avoid reading the directory more often than necessary.
+The annotate() routine appends slashes to directories."""
+
+import os
+
+__all__ = ["listdir", "opendir", "annotate", "reset"]
+
+cache = {}
+
def reset():
    """Reset the cache completely."""
    # Rebind (rather than mutate) the module-level dict, so any code
    # still holding the old mapping keeps a consistent snapshot.
    global cache
    cache = dict()
+
def listdir(path):
    """List directory contents, using cache."""
    # Remove any cached entry while we validate it; it is re-inserted
    # (possibly refreshed) below.
    try:
        cached_mtime, entries = cache[path]
        del cache[path]
    except KeyError:
        cached_mtime, entries = -1, []
    # A failed stat means "no such directory": report no contents.
    try:
        mtime = os.stat(path)[8]          # index 8 == st_mtime
    except os.error:
        return []
    if mtime != cached_mtime:
        # Directory changed since the cached read -- reread and resort.
        try:
            entries = os.listdir(path)
        except os.error:
            return []
        entries.sort()
    cache[path] = mtime, entries
    return entries

opendir = listdir # XXX backward compatibility
+
def annotate(head, list):
    """Add '/' suffixes to directories."""
    # Mutates `list` in place; `head` is the directory the names live in.
    n = len(list)
    for i in range(n):
        full = os.path.join(head, list[i])
        if os.path.isdir(full):
            list[i] = list[i] + '/'
diff --git a/lib-python/2.2/dis.py b/lib-python/2.2/dis.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dis.py
@@ -0,0 +1,327 @@
+"""Disassembler of Python byte code into mnemonics."""
+
+import sys
+import types
+
+__all__ = ["dis","disassemble","distb","disco","opname","cmp_op",
+           "hasconst","hasname","hasjrel","hasjabs","haslocal",
+           "hascompare", "hasfree"]
+
def dis(x=None):
    """Disassemble classes, methods, functions, or code.

    With no argument, disassemble the last traceback.

    """
    if not x:
        distb()
        return
    # Peel wrappers down to something disassemblable: instance -> class,
    # method -> function, function -> code object.
    if type(x) is types.InstanceType:
        x = x.__class__
    if hasattr(x, 'im_func'):
        x = x.im_func
    if hasattr(x, 'func_code'):
        x = x.func_code
    if hasattr(x, '__dict__'):
        # A class or module: disassemble each code-bearing attribute,
        # in sorted name order.
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if type(x1) in (types.MethodType,
                            types.FunctionType,
                            types.CodeType):
                print "Disassembly of %s:" % name
                try:
                    dis(x1)
                except TypeError, msg:
                    print "Sorry:", msg
                print
    elif hasattr(x, 'co_code'):
        disassemble(x)
    else:
        raise TypeError, \
              "don't know how to disassemble %s objects" % \
              type(x).__name__
+
def distb(tb=None):
    """Disassemble a traceback (default: last traceback)."""
    if not tb:
        try:
            # sys.last_traceback is set only after an uncaught exception
            # reaches the interactive top level.
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError, "no last traceback to disassemble"
        # Walk to the innermost frame, where the exception was raised.
        while tb.tb_next: tb = tb.tb_next
    # tb_lasti marks the instruction to highlight with '-->'.
    disassemble(tb.tb_frame.f_code, tb.tb_lasti)
+
def disassemble(co, lasti=-1):
    """Disassemble a code object.

    If 'lasti' is a valid bytecode offset, that instruction is marked
    with '-->' (this is how distb() flags the failing instruction).
    """
    code = co.co_code
    labels = findlabels(code)
    n = len(code)
    i = 0
    extended_arg = 0
    free = None                 # lazily-built cell + free variable names
    while i < n:
        c = code[i]
        op = ord(c)
        if op == SET_LINENO and i > 0: print # Extra blank line
        if i == lasti: print '-->',
        else: print '   ',
        if i in labels: print '>>',
        else: print '  ',
        print `i`.rjust(4),
        print opname[op].ljust(20),
        i = i+1
        if op >= HAVE_ARGUMENT:
            # Arguments are two bytes, little-endian, possibly widened
            # by a preceding EXTENDED_ARG instruction.
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
            if op == EXTENDED_ARG:
                extended_arg = oparg*65536L
            print `oparg`.rjust(5),
            # Decode the argument according to the opcode's class.
            if op in hasconst:
                print '(' + `co.co_consts[oparg]` + ')',
            elif op in hasname:
                print '(' + co.co_names[oparg] + ')',
            elif op in hasjrel:
                print '(to ' + `i + oparg` + ')',
            elif op in haslocal:
                print '(' + co.co_varnames[oparg] + ')',
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
            elif op in hasfree:
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                print '(' + free[oparg] + ')',
        print

disco = disassemble                     # XXX For backwards compatibility
+
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets.

    """
    targets = []
    size = len(code)
    offset = 0
    while offset < size:
        op = ord(code[offset])
        offset = offset + 1
        if op < HAVE_ARGUMENT:
            continue
        # Two-byte little-endian argument follows the opcode.
        arg = ord(code[offset]) + ord(code[offset+1])*256
        offset = offset + 2
        if op in hasjrel:
            dest = offset + arg
        elif op in hasjabs:
            dest = arg
        else:
            continue
        if dest >= 0 and dest not in targets:
            targets.append(dest)
    return targets
+
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
        'is not', 'exception match', 'BAD')

# Opcode classification lists; the registration helpers below (plus a
# few explicit appends among the opcode definitions) fill these in.
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []

# Seed every slot with a '<NNN>' placeholder; registered opcodes
# overwrite their slot with a real name.  repr() replaces the
# deprecated backquote syntax (identical output for small ints).
opname = [''] * 256
for op in range(256): opname[op] = '<' + repr(op) + '>'

def def_op(name, op):
    """Register a plain opcode name."""
    opname[op] = name

def name_op(name, op):
    """Register an opcode whose argument indexes the names list."""
    def_op(name, op)
    hasname.append(op)

def jrel_op(name, op):
    """Register an opcode whose argument is a relative jump target."""
    def_op(name, op)
    hasjrel.append(op)

def jabs_op(name, op):
    """Register an opcode whose argument is an absolute jump target."""
    def_op(name, op)
    hasjabs.append(op)
+
# Instruction opcodes for compiled code

# Opcodes below HAVE_ARGUMENT (90) take no argument.

def_op('STOP_CODE', 0)
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('ROT_FOUR', 5)

def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_CONVERT', 13)

def_op('UNARY_INVERT', 15)

def_op('BINARY_POWER', 19)

def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_DIVIDE', 21)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)

def_op('SLICE+0', 30)
def_op('SLICE+1', 31)
def_op('SLICE+2', 32)
def_op('SLICE+3', 33)

def_op('STORE_SLICE+0', 40)
def_op('STORE_SLICE+1', 41)
def_op('STORE_SLICE+2', 42)
def_op('STORE_SLICE+3', 43)

def_op('DELETE_SLICE+0', 50)
def_op('DELETE_SLICE+1', 51)
def_op('DELETE_SLICE+2', 52)
def_op('DELETE_SLICE+3', 53)

def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_DIVIDE', 58)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)

def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)

def_op('PRINT_EXPR', 70)
def_op('PRINT_ITEM', 71)
def_op('PRINT_NEWLINE', 72)
def_op('PRINT_ITEM_TO', 73)
def_op('PRINT_NEWLINE_TO', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)

def_op('LOAD_LOCALS', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('EXEC_STMT', 85)
def_op('YIELD_STMT', 86)

def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)

HAVE_ARGUMENT = 90              # Opcodes from here have an argument:

name_op('STORE_NAME', 90)       # Index in name list
name_op('DELETE_NAME', 91)      # ""
def_op('UNPACK_SEQUENCE', 92)   # Number of tuple items
jrel_op('FOR_ITER', 93)

name_op('STORE_ATTR', 95)       # Index in name list
name_op('DELETE_ATTR', 96)      # ""
name_op('STORE_GLOBAL', 97)     # ""
name_op('DELETE_GLOBAL', 98)    # ""
def_op('DUP_TOPX', 99)          # number of items to duplicate
def_op('LOAD_CONST', 100)       # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101)       # Index in name list
def_op('BUILD_TUPLE', 102)      # Number of tuple items
def_op('BUILD_LIST', 103)       # Number of list items
def_op('BUILD_MAP', 104)        # Always zero for now
name_op('LOAD_ATTR', 105)       # Index in name list
def_op('COMPARE_OP', 106)       # Comparison operator
hascompare.append(106)
name_op('IMPORT_NAME', 107)     # Index in name list
name_op('IMPORT_FROM', 108)     # Index in name list

jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
jrel_op('JUMP_IF_FALSE', 111)   # ""
jrel_op('JUMP_IF_TRUE', 112)    # ""
jabs_op('JUMP_ABSOLUTE', 113)   # Target byte offset from beginning of code
jrel_op('FOR_LOOP', 114)        # Number of bytes to skip

name_op('LOAD_GLOBAL', 116)     # Index in name list

jabs_op('CONTINUE_LOOP', 119)   # Target address
jrel_op('SETUP_LOOP', 120)      # Distance to target address
jrel_op('SETUP_EXCEPT', 121)    # ""
jrel_op('SETUP_FINALLY', 122)   # ""

def_op('LOAD_FAST', 124)        # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125)       # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126)      # Local variable number
haslocal.append(126)

# SET_LINENO and EXTENDED_ARG also get module-level constants because
# disassemble() special-cases them.
def_op('SET_LINENO', 127)       # Current line number
SET_LINENO = 127

def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131)    # #args + (#kwargs << 8)
def_op('MAKE_FUNCTION', 132)    # Number of args with default values
def_op('BUILD_SLICE', 133)      # Number of items

def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)

def_op('CALL_FUNCTION_VAR', 140)     # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_KW', 141)      # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_VAR_KW', 142)  # #args + (#kwargs << 8)

def_op('EXTENDED_ARG', 143)
EXTENDED_ARG = 143
+
def _test():
    """Simple test program to disassemble a file."""
    # At most one command-line argument: a filename, or "-" (or nothing)
    # to read the program from stdin.
    if sys.argv[2:]:
        sys.stderr.write("usage: python dis.py [-|file]\n")
        sys.exit(2)
    fn = None
    if sys.argv[1:]:
        arg = sys.argv[1]
        if arg and arg != "-":
            fn = arg
    if fn is None:
        f = sys.stdin
    else:
        f = open(fn)
    source = f.read()
    if fn is None:
        fn = "<stdin>"
    else:
        f.close()
    code = compile(source, fn, "exec")
    dis(code)

if __name__ == "__main__":
    _test()
diff --git a/lib-python/2.2/distutils/README b/lib-python/2.2/distutils/README
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/README
@@ -0,0 +1,18 @@
+This directory contains only a subset of the Distutils, specifically the
+Python modules in the 'distutils' and 'distutils.command' packages.
+Technically, this is all you need to distribute and install Python modules
+using the Distutils.  Most people will want some documentation and other
+help, though.  Currently, everything can be found at the Distutils web page:
+
+    http://www.python.org/sigs/distutils-sig/
+
+From there you can access the latest documentation, or download a standalone
+Distutils release that includes all the code in this directory, plus
+documentation, test scripts, examples, etc.
+
+The Distutils documentation isn't yet part of the standard Python
+documentation set, but will be soon.
+
+        Greg Ward (gward at python.net)
+
+$Id$
diff --git a/lib-python/2.2/distutils/__init__.py b/lib-python/2.2/distutils/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/__init__.py
@@ -0,0 +1,13 @@
+"""distutils
+
+The main package for the Python Module Distribution Utilities.  Normally
+used from a setup script as
+
+   from distutils.core import setup
+
+   setup (...)
+"""
+
+__revision__ = "$Id$"
+
+__version__ = "1.0.3"
diff --git a/lib-python/2.2/distutils/archive_util.py b/lib-python/2.2/distutils/archive_util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/archive_util.py
@@ -0,0 +1,173 @@
+"""distutils.archive_util
+
+Utility functions for creating archive files (tarballs, zip files,
+that sort of thing)."""
+
+# created 2000/04/03, Greg Ward (extracted from util.py)
+
+__revision__ = "$Id$"
+
+import os
+from distutils.errors import DistutilsExecError
+from distutils.spawn import spawn
+from distutils.dir_util import mkpath
+
def make_tarball (base_name, base_dir, compress="gzip",
                  verbose=0, dry_run=0):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.  'compress' must be "gzip" (the default), "compress",
    "bzip2", or None.  Both "tar" and the compression utility named by
    'compress' must be on the default program search path, so this is
    probably Unix-specific.  The output tar file will be named 'base_dir' +
    ".tar", possibly plus the appropriate compression extension (".gz",
    ".bz2" or ".Z").  Return the output filename.
    """
    # XXX GNU tar 1.13 has a nifty option to add a prefix directory.
    # It's pretty new, though, so we certainly can't require it --
    # but it would be nice to take advantage of it to skip the
    # "create a tree of hardlinks" step!  (Would also be nice to
    # detect GNU tar to use its 'z' option and save a step.)

    # Map each supported compressor to the extension it appends.
    compress_ext = { 'gzip': ".gz",
                     'bzip2': '.bz2',
                     'compress': ".Z" }

    # flags for compression program, each element of list will be an argument
    compress_flags = {'gzip': ["-f9"],
                      'compress': ["-f"],
                      'bzip2': ['-f9']}

    # Validate 'compress' up front, before doing any work.  (Bug fix:
    # the original message omitted 'bzip2', which is accepted.)
    if compress is not None and compress not in compress_ext.keys():
        raise ValueError(
              "bad value for 'compress': must be None, 'gzip', 'bzip2', "
              "or 'compress'")

    archive_name = base_name + ".tar"
    mkpath(os.path.dirname(archive_name), verbose=verbose, dry_run=dry_run)
    cmd = ["tar", "-cf", archive_name, base_dir]
    spawn(cmd, verbose=verbose, dry_run=dry_run)

    if compress:
        # Compress in place; the compressor renames the file itself.
        spawn([compress] + compress_flags[compress] + [archive_name],
              verbose=verbose, dry_run=dry_run)
        return archive_name + compress_ext[compress]
    else:
        return archive_name

# make_tarball ()
+
+
def make_zipfile (base_name, base_dir, verbose=0, dry_run=0):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses either the InfoZIP
    "zip" utility (if installed and found on the default search path) or
    the "zipfile" Python module (if available).  If neither tool is
    available, raises DistutilsExecError.  Returns the name of the output
    zip file.
    """
    # This initially assumed the Unix 'zip' utility -- but
    # apparently InfoZIP's zip.exe works the same under Windows, so
    # no changes needed!

    zip_filename = base_name + ".zip"
    mkpath(os.path.dirname(zip_filename), verbose=verbose, dry_run=dry_run)
    # First choice: the external 'zip' utility.
    try:
        spawn(["zip", "-rq", zip_filename, base_dir],
              verbose=verbose, dry_run=dry_run)
    except DistutilsExecError:

        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed" -- shouldn't try
        # again in the latter case.  (I think fixing this will
        # require some cooperation from the spawn module -- perhaps
        # a utility function to search the path, so we can fallback
        # on zipfile.py without the failed spawn.)
        # Second choice: the pure-Python 'zipfile' module.
        try:
            import zipfile
        except ImportError:
            raise DistutilsExecError, \
                  ("unable to create zip file '%s': " +
                   "could neither find a standalone zip utility nor " +
                   "import the 'zipfile' module") % zip_filename

        if verbose:
            print "creating '%s' and adding '%s' to it" % \
                  (zip_filename, base_dir)

        # Callback for os.path.walk(): add every plain file to archive 'z'.
        def visit (z, dirname, names):
            for name in names:
                path = os.path.normpath(os.path.join(dirname, name))
                if os.path.isfile(path):
                    z.write(path, path)

        if not dry_run:
            z = zipfile.ZipFile(zip_filename, "w",
                                compression=zipfile.ZIP_DEFLATED)

            os.path.walk(base_dir, visit, z)
            z.close()

    return zip_filename

# make_zipfile ()
+
+
# Registry of supported formats:
#   format name -> (creation function, extra keyword args, description)
ARCHIVE_FORMATS = {
    'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'ztar':  (make_tarball, [('compress', 'compress')], "compressed tar file"),
    'tar':   (make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip':   (make_zipfile, [],"ZIP file")
    }

def check_archive_formats (formats):
    """Return the first format name in 'formats' that is not registered
    in ARCHIVE_FORMATS, or None if all are known."""
    for format in formats:
        if not ARCHIVE_FORMATS.has_key(format):
            return format
    else:
        return None
+
def make_archive (base_name, format,
                  root_dir=None, base_dir=None,
                  verbose=0, dry_run=0):
    """Create an archive file (eg. zip or tar).  'base_name' is the name
    of the file to create, minus any format-specific extension; 'format'
    is the archive format: one of "zip", "tar", "ztar", or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive.  'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive.  'root_dir' and 'base_dir' both default
    to the current directory.  Returns the name of the archive file.

    Raises ValueError for an unknown 'format'.
    """
    # Remember the original cwd so we can chdir back after archiving.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if verbose:
            print "changing into '%s'" % root_dir
        # Absolutize the output name *before* chdir'ing, so a relative
        # 'base_name' stays relative to the original cwd.
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)

    if base_dir is None:
        base_dir = os.curdir

    kwargs = { 'verbose': verbose,
               'dry_run': dry_run }

    try:
        format_info = ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError, "unknown archive format '%s'" % format

    # Merge in the format's fixed extra arguments (eg. 'compress').
    func = format_info[0]
    for (arg,val) in format_info[1]:
        kwargs[arg] = val
    filename = apply(func, (base_name, base_dir), kwargs)

    if root_dir is not None:
        if verbose:
            print "changing back to '%s'" % save_cwd
        os.chdir(save_cwd)

    return filename

# make_archive ()
diff --git a/lib-python/2.2/distutils/bcppcompiler.py b/lib-python/2.2/distutils/bcppcompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/bcppcompiler.py
@@ -0,0 +1,409 @@
+"""distutils.bcppcompiler
+
+Contains BorlandCCompiler, an implementation of the abstract CCompiler class
+for the Borland C++ compiler.
+"""
+
+# This implementation by Lyle Johnson, based on the original msvccompiler.py
+# module and using the directions originally published by Gordon Williams.
+
+# XXX looks like there's a LOT of overlap between these two classes:
+# someone should sit down and factor out the common code as
+# WindowsCCompiler!  --GPW
+
+__revision__ = "$Id$"
+
+
+import sys, os
+from distutils.errors import \
+     DistutilsExecError, DistutilsPlatformError, \
+     CompileError, LibError, LinkError, UnknownFileError
+from distutils.ccompiler import \
+     CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.file_util import write_file
+from distutils.dep_util import newer
+
+class BCPPCompiler(CCompiler) :
+    """Concrete class that implements an interface to the Borland C/C++
+    compiler, as defined by the CCompiler abstract class.
+    """
+
+    compiler_type = 'bcpp'
+
+    # Just set this so CCompiler's constructor doesn't barf.  We currently
+    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+    # as it really isn't necessary for this sort of single-compiler class.
+    # Would be nice to have a consistent interface with UnixCCompiler,
+    # though, so it's worth thinking about.
+    executables = {}
+
+    # Private class data (need to distinguish C from C++ source for compiler)
+    _c_extensions = ['.c']
+    _cpp_extensions = ['.cc', '.cpp', '.cxx']
+
+    # Needed for the filename generation methods provided by the
+    # base class, CCompiler.
+    src_extensions = _c_extensions + _cpp_extensions
+    obj_extension = '.obj'
+    static_lib_extension = '.lib'
+    shared_lib_extension = '.dll'
+    static_lib_format = shared_lib_format = '%s%s'
+    exe_extension = '.exe'
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+        """Initialize the compiler with hard-coded Borland tool names and
+        Borland-specific compile/link option sets.
+        """
+
+        CCompiler.__init__ (self, verbose, dry_run, force)
+
+        # These executables are assumed to all be in the path.
+        # Borland doesn't seem to use any special registry settings to
+        # indicate their installation locations.
+
+        self.cc = "bcc32.exe"
+        self.linker = "ilink32.exe"
+        self.lib = "tlib.exe"
+
+        self.preprocess_options = None
+        self.compile_options = ['/tWM', '/O2', '/q', '/g0']
+        self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
+
+        self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
+        self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
+        self.ldflags_static = []
+        self.ldflags_exe = ['/Gn', '/q', '/x']
+        self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
+
+
+    # -- Worker methods ------------------------------------------------
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        """Compile each file in 'sources' to an object file.  '.res'
+        files are passed through as-is, '.rc' files are compiled with
+        brcc32, and everything else is handed to bcc32.  Returns the
+        list of object filenames.  Raises CompileError on failure.
+        """
+
+        (output_dir, macros, include_dirs) = \
+            self._fix_compile_args (output_dir, macros, include_dirs)
+        (objects, skip_sources) = self._prep_compile (sources, output_dir)
+
+        if extra_postargs is None:
+            extra_postargs = []
+
+        pp_opts = gen_preprocess_options (macros, include_dirs)
+        compile_opts = extra_preargs or []
+        compile_opts.append ('-c')
+        if debug:
+            compile_opts.extend (self.compile_options_debug)
+        else:
+            compile_opts.extend (self.compile_options)
+
+        for i in range (len (sources)):
+            src = sources[i] ; obj = objects[i]
+            ext = (os.path.splitext (src))[1]
+
+            if skip_sources[src]:
+                self.announce ("skipping %s (%s up-to-date)" % (src, obj))
+            else:
+                src = os.path.normpath(src)
+                obj = os.path.normpath(obj)
+                self.mkpath(os.path.dirname(obj))
+
+                if ext == '.res':
+                    # This is already a binary file -- skip it.
+                    continue # the 'for' loop
+                if ext == '.rc':
+                    # This needs to be compiled to a .res file -- do it now.
+                    try:
+                        self.spawn (["brcc32", "-fo", obj, src])
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+                    continue # the 'for' loop
+
+                # The next two are both for the real compiler.
+                if ext in self._c_extensions:
+                    input_opt = ""
+                elif ext in self._cpp_extensions:
+                    input_opt = "-P"
+                else:
+                    # Unknown file type -- no extra options.  The compiler
+                    # will probably fail, but let it just in case this is a
+                    # file the compiler recognizes even if we don't.
+                    input_opt = ""
+
+                output_opt = "-o" + obj
+
+                # Compiler command line syntax is: "bcc32 [options] file(s)".
+                # Note that the source file names must appear at the end of
+                # the command line.
+                try:
+                    self.spawn ([self.cc] + compile_opts + pp_opts +
+                                [input_opt, output_opt] +
+                                extra_postargs + [src])
+                except DistutilsExecError, msg:
+                    raise CompileError, msg
+
+        return objects
+
+    # compile ()
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0,
+                           extra_preargs=None,
+                           extra_postargs=None):
+        """Bundle 'objects' into a static library using tlib.  '/u'
+        replaces (updates) any modules already present in the library.
+        Raises LibError if tlib fails.
+        """
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        output_filename = \
+            self.library_filename (output_libname, output_dir=output_dir)
+
+        if self._need_link (objects, output_filename):
+            lib_args = [output_filename, '/u'] + objects
+            if debug:
+                pass                    # XXX what goes here?
+            if extra_preargs:
+                lib_args[:0] = extra_preargs
+            if extra_postargs:
+                lib_args.extend (extra_postargs)
+            try:
+                self.spawn ([self.lib] + lib_args)
+            except DistutilsExecError, msg:
+                raise LibError, msg
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # create_static_lib ()
+
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        """Link 'objects' into an executable or shared library with
+        ilink32.  Builds the peculiar comma-delimited Borland command
+        line (objects, output file, map file, libraries, .def file,
+        resources) and spawns the linker.  Raises LinkError on failure.
+        """
+
+        # XXX this ignores 'build_temp'!  should follow the lead of
+        # msvccompiler.py
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        (libraries, library_dirs, runtime_library_dirs) = \
+            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+        if runtime_library_dirs:
+            self.warn ("I don't know what to do with 'runtime_library_dirs': "
+                       + str (runtime_library_dirs))
+
+        if output_dir is not None:
+            output_filename = os.path.join (output_dir, output_filename)
+
+        if self._need_link (objects, output_filename):
+
+            # Figure out linker args based on type of target.
+            # c0w32/c0d32 are Borland's startup objects for Windows
+            # executables and DLLs respectively.
+            if target_desc == CCompiler.EXECUTABLE:
+                startup_obj = 'c0w32'
+                if debug:
+                    ld_args = self.ldflags_exe_debug[:]
+                else:
+                    ld_args = self.ldflags_exe[:]
+            else:
+                startup_obj = 'c0d32'
+                if debug:
+                    ld_args = self.ldflags_shared_debug[:]
+                else:
+                    ld_args = self.ldflags_shared[:]
+
+
+            # Create a temporary exports file for use by the linker
+            if export_symbols is None:
+                def_file = ''
+            else:
+                head, tail = os.path.split (output_filename)
+                modname, ext = os.path.splitext (tail)
+                temp_dir = os.path.dirname(objects[0]) # preserve tree structure
+                def_file = os.path.join (temp_dir, '%s.def' % modname)
+                contents = ['EXPORTS']
+                for sym in (export_symbols or []):
+                    # map exported name to its underscore-prefixed C symbol
+                    contents.append('  %s=_%s' % (sym, sym))
+                self.execute(write_file, (def_file, contents),
+                             "writing %s" % def_file)
+
+            # Borland C++ has problems with '/' in paths
+            objects2 = map(os.path.normpath, objects)
+            # split objects in .obj and .res files
+            # Borland C++ needs them at different positions in the command line
+            objects = [startup_obj]
+            resources = []
+            for file in objects2:
+                (base, ext) = os.path.splitext(os.path.normcase(file))
+                if ext == '.res':
+                    resources.append(file)
+                else:
+                    objects.append(file)
+
+
+            for l in library_dirs:
+                ld_args.append("/L%s" % os.path.normpath(l))
+            ld_args.append("/L.") # we sometimes use relative paths
+
+            # list of object files
+            ld_args.extend(objects)
+
+            # XXX the command-line syntax for Borland C++ is a bit wonky;
+            # certain filenames are jammed together in one big string, but
+            # comma-delimited.  This doesn't mesh too well with the
+            # Unix-centric attitude (with a DOS/Windows quoting hack) of
+            # 'spawn()', so constructing the argument list is a bit
+            # awkward.  Note that doing the obvious thing and jamming all
+            # the filenames and commas into one argument would be wrong,
+            # because 'spawn()' would quote any filenames with spaces in
+            # them.  Arghghh!.  Apparently it works fine as coded...
+
+            # name of dll/exe file
+            ld_args.extend([',',output_filename])
+            # no map file and start libraries
+            ld_args.append(',,')
+
+            for lib in libraries:
+                # see if we find it and if there is a bcpp specific lib
+                # (xxx_bcpp.lib)
+                libfile = self.find_library_file(library_dirs, lib, debug)
+                if libfile is None:
+                    ld_args.append(lib)
+                    # probably a BCPP internal library -- don't warn
+                    #    self.warn('library %s not found.' % lib)
+                else:
+                    # full name which prefers bcpp_xxx.lib over xxx.lib
+                    ld_args.append(libfile)
+
+            # some default libraries
+            ld_args.append ('import32')
+            ld_args.append ('cw32mt')
+
+            # def file for export symbols
+            ld_args.extend([',',def_file])
+            # add resource files
+            ld_args.append(',')
+            ld_args.extend(resources)
+
+
+            if extra_preargs:
+                ld_args[:0] = extra_preargs
+            if extra_postargs:
+                ld_args.extend(extra_postargs)
+
+            self.mkpath (os.path.dirname (output_filename))
+            try:
+                self.spawn ([self.linker] + ld_args)
+            except DistutilsExecError, msg:
+                raise LinkError, msg
+
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # link ()
+
+    # -- Miscellaneous methods -----------------------------------------
+
+
+    def find_library_file (self, dirs, lib, debug=0):
+        """Search 'dirs' for the library 'lib', preferring a '_bcpp'
+        suffixed variant (and, when 'debug' is true, a '_d' debug
+        variant).  Return the full path of the first match, or None.
+        """
+        # List of effective library names to try, in order of preference:
+        # xxx_bcpp.lib is better than xxx.lib
+        # and xxx_d.lib is better than xxx.lib if debug is set
+        #
+        # The "_bcpp" suffix is to handle a Python installation for people
+        # with multiple compilers (primarily Distutils hackers, I suspect
+        # ;-).  The idea is they'd have one static library for each
+        # compiler they care about, since (almost?) every Windows compiler
+        # seems to have a different format for static libraries.
+        if debug:
+            dlib = (lib + "_d")
+            try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
+        else:
+            try_names = (lib + "_bcpp", lib)
+
+        for dir in dirs:
+            for name in try_names:
+                libfile = os.path.join(dir, self.library_filename(name))
+                if os.path.exists(libfile):
+                    return libfile
+        else:
+            # Oops, didn't find it in *any* of 'dirs'
+            return None
+
+    # overwrite the one from CCompiler to support rc and res-files
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        """Map source filenames to object filenames: '.res' stays '.res',
+        '.rc' becomes '.res', everything else gets self.obj_extension.
+        Raises UnknownFileError for unrecognized extensions.
+        """
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+            (base, ext) = os.path.splitext (os.path.normcase(src_name))
+            if ext not in (self.src_extensions + ['.rc','.res']):
+                raise UnknownFileError, \
+                      "unknown file type '%s' (from '%s')" % \
+                      (ext, src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            if ext == '.res':
+                # these can go unchanged
+                obj_names.append (os.path.join (output_dir, base + ext))
+            elif ext == '.rc':
+                # these need to be compiled to .res-files
+                obj_names.append (os.path.join (output_dir, base + '.res'))
+            else:
+                obj_names.append (os.path.join (output_dir,
+                                            base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+    def preprocess (self,
+                    source,
+                    output_file=None,
+                    macros=None,
+                    include_dirs=None,
+                    extra_preargs=None,
+                    extra_postargs=None):
+        """Run cpp32.exe over 'source', writing to 'output_file' if
+        given.  Only runs when forced or when the source is newer than
+        the target.  Raises CompileError on preprocessor failure.
+        """
+
+        (_, macros, include_dirs) = \
+            self._fix_compile_args(None, macros, include_dirs)
+        pp_opts = gen_preprocess_options(macros, include_dirs)
+        pp_args = ['cpp32.exe'] + pp_opts
+        if output_file is not None:
+            pp_args.append('-o' + output_file)
+        if extra_preargs:
+            pp_args[:0] = extra_preargs
+        if extra_postargs:
+            pp_args.extend(extra_postargs)
+        pp_args.append(source)
+
+        # We need to preprocess: either we're being forced to, or the
+        # source file is newer than the target (or the target doesn't
+        # exist).
+        if self.force or output_file is None or newer(source, output_file):
+            if output_file:
+                self.mkpath(os.path.dirname(output_file))
+            try:
+                self.spawn(pp_args)
+            except DistutilsExecError, msg:
+                print msg
+                raise CompileError, msg
+
+    # preprocess()
diff --git a/lib-python/2.2/distutils/ccompiler.py b/lib-python/2.2/distutils/ccompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/ccompiler.py
@@ -0,0 +1,1046 @@
+"""distutils.ccompiler
+
+Contains CCompiler, an abstract base class that defines the interface
+for the Distutils compiler abstraction model."""
+
+# created 1999/07/05, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, re
+from types import *
+from copy import copy
+from distutils.errors import *
+from distutils.spawn import spawn
+from distutils.file_util import move_file
+from distutils.dir_util import mkpath
+from distutils.dep_util import newer_pairwise, newer_group
+from distutils.util import split_quoted, execute
+
+
+class CCompiler:
+    """Abstract base class to define the interface that must be implemented
+    by real compiler classes.  Also has some utility methods used by
+    several compiler classes.
+
+    The basic idea behind a compiler abstraction class is that each
+    instance can be used for all the compile/link steps in building a
+    single project.  Thus, attributes common to all of those compile and
+    link steps -- include directories, macros to define, libraries to link
+    against, etc. -- are attributes of the compiler instance.  To allow for
+    variability in how individual files are treated, most of those
+    attributes may be varied on a per-compilation or per-link basis.
+    """
+
+    # 'compiler_type' is a class attribute that identifies this class.  It
+    # keeps code that wants to know what kind of compiler it's dealing with
+    # from having to import all possible compiler classes just to do an
+    # 'isinstance'.  In concrete CCompiler subclasses, 'compiler_type'
+    # should really, really be one of the keys of the 'compiler_class'
+    # dictionary (see below -- used by the 'new_compiler()' factory
+    # function) -- authors of new compiler interface classes are
+    # responsible for updating 'compiler_class'!
+    compiler_type = None
+
+    # XXX things not handled by this compiler abstraction model:
+    #   * client can't provide additional options for a compiler,
+    #     e.g. warning, optimization, debugging flags.  Perhaps this
+    #     should be the domain of concrete compiler abstraction classes
+    #     (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
+    #     class should have methods for the common ones.
+    #   * can't completely override the include or library searchg
+    #     path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
+    #     I'm not sure how widely supported this is even by Unix
+    #     compilers, much less on other platforms.  And I'm even less
+    #     sure how useful it is; maybe for cross-compiling, but
+    #     support for that is a ways off.  (And anyways, cross
+    #     compilers probably have a dedicated binary with the
+    #     right paths compiled in.  I hope.)
+    #   * can't do really freaky things with the library list/library
+    #     dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
+    #     different versions of libfoo.a in different locations.  I
+    #     think this is useless without the ability to null out the
+    #     library search path anyways.
+
+
+    # Subclasses that rely on the standard filename generation methods
+    # implemented below should override these; see the comment near
+    # those methods ('object_filenames()' et. al.) for details:
+    src_extensions = None               # list of strings
+    obj_extension = None                # string
+    static_lib_extension = None
+    shared_lib_extension = None         # string
+    static_lib_format = None            # format string, e.g. 'lib%s%s'
+    shared_lib_format = None            # prob. same as static_lib_format
+    exe_extension = None                # string
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+        """Initialize the per-instance option lists (macros, include and
+        library search paths, libraries, extra objects) and install the
+        default executables declared by the subclass.
+        """
+
+        self.verbose = verbose
+        self.dry_run = dry_run
+        self.force = force
+
+        # 'output_dir': a common output directory for object, library,
+        # shared object, and shared library files
+        self.output_dir = None
+
+        # 'macros': a list of macro definitions (or undefinitions).  A
+        # macro definition is a 2-tuple (name, value), where the value is
+        # either a string or None (no explicit value).  A macro
+        # undefinition is a 1-tuple (name,).
+        self.macros = []
+
+        # 'include_dirs': a list of directories to search for include files
+        self.include_dirs = []
+
+        # 'libraries': a list of libraries to include in any link
+        # (library names, not filenames: eg. "foo" not "libfoo.a")
+        self.libraries = []
+
+        # 'library_dirs': a list of directories to search for libraries
+        self.library_dirs = []
+
+        # 'runtime_library_dirs': a list of directories to search for
+        # shared libraries/objects at runtime
+        self.runtime_library_dirs = []
+
+        # 'objects': a list of object files (or similar, such as explicitly
+        # named library files) to include on any link
+        self.objects = []
+
+        # Seed instance attributes from the subclass's 'executables'
+        # class attribute (empty for classes like BCPPCompiler that
+        # hard-code their tool names).
+        for key in self.executables.keys():
+            self.set_executable(key, self.executables[key])
+
+    # __init__ ()
+
+
+    def set_executables (self, **args):
+
+        """Define the executables (and options for them) that will be run
+        to perform the various stages of compilation.  The exact set of
+        executables that may be specified here depends on the compiler
+        class (via the 'executables' class attribute), but most will have:
+          compiler      the C/C++ compiler
+          linker_so     linker used to create shared objects and libraries
+          linker_exe    linker used to create binary executables
+          archiver      static library creator
+
+        On platforms with a command-line (Unix, DOS/Windows), each of these
+        is a string that will be split into executable name and (optional)
+        list of arguments.  (Splitting the string is done similarly to how
+        Unix shells operate: words are delimited by spaces, but quotes and
+        backslashes can override this.  See
+        'distutils.util.split_quoted()'.)
+
+        Raises ValueError for any keyword not present in the subclass's
+        'executables' class attribute.
+        """
+
+        # Note that some CCompiler implementation classes will define class
+        # attributes 'cpp', 'cc', etc. with hard-coded executable names;
+        # this is appropriate when a compiler class is for exactly one
+        # compiler/OS combination (eg. MSVCCompiler).  Other compiler
+        # classes (UnixCCompiler, in particular) are driven by information
+        # discovered at run-time, since there are many different ways to do
+        # basically the same things with Unix C compilers.
+
+        for key in args.keys():
+            if not self.executables.has_key(key):
+                raise ValueError, \
+                      "unknown executable '%s' for class %s" % \
+                      (key, self.__class__.__name__)
+            self.set_executable(key, args[key])
+
+    # set_executables ()
+
+    def set_executable(self, key, value):
+        """Set attribute 'key' to 'value', shell-splitting 'value' into a
+        command list when it is a string (see 'split_quoted()').
+        """
+        if type(value) is StringType:
+            setattr(self, key, split_quoted(value))
+        else:
+            setattr(self, key, value)
+
+
+
+    def _find_macro (self, name):
+        """Return the index of macro 'name' in self.macros, or None if it
+        is not (un)defined there.
+        """
+        i = 0
+        for defn in self.macros:
+            if defn[0] == name:
+                return i
+            i = i + 1
+
+        return None
+
+
+    def _check_macro_definitions (self, definitions):
+        """Ensures that every element of 'definitions' is a valid macro
+        definition, ie. either (name,value) 2-tuple or a (name,) tuple.  Do
+        nothing if all definitions are OK, raise TypeError otherwise.
+        """
+        for defn in definitions:
+            # Valid forms: (str,), (str, str), (str, None)
+            if not (type (defn) is TupleType and
+                    (len (defn) == 1 or
+                     (len (defn) == 2 and
+                      (type (defn[1]) is StringType or defn[1] is None))) and
+                    type (defn[0]) is StringType):
+                raise TypeError, \
+                      ("invalid macro definition '%s': " % defn) + \
+                      "must be tuple (string,), (string, string), or " + \
+                      "(string, None)"
+
+
+    # -- Bookkeeping methods -------------------------------------------
+
+    def define_macro (self, name, value=None):
+        """Define a preprocessor macro for all compilations driven by this
+        compiler object.  The optional parameter 'value' should be a
+        string; if it is not supplied, then the macro will be defined
+        without an explicit value and the exact outcome depends on the
+        compiler used (XXX true? does ANSI say anything about this?)
+        """
+        # Delete from the list of macro definitions/undefinitions if
+        # already there (so that this one will take precedence).
+        i = self._find_macro (name)
+        if i is not None:
+            del self.macros[i]
+
+        # A 2-tuple marks a definition (value may be None).
+        defn = (name, value)
+        self.macros.append (defn)
+
+
+    def undefine_macro (self, name):
+        """Undefine a preprocessor macro for all compilations driven by
+        this compiler object.  If the same macro is defined by
+        'define_macro()' and undefined by 'undefine_macro()' the last call
+        takes precedence (including multiple redefinitions or
+        undefinitions).  If the macro is redefined/undefined on a
+        per-compilation basis (ie. in the call to 'compile()'), then that
+        takes precedence.
+        """
+        # Delete from the list of macro definitions/undefinitions if
+        # already there (so that this one will take precedence).
+        i = self._find_macro (name)
+        if i is not None:
+            del self.macros[i]
+
+        # A 1-tuple marks an undefinition.
+        undefn = (name,)
+        self.macros.append (undefn)
+
+
+    def add_include_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        header files.  The compiler is instructed to search directories in
+        the order in which they are supplied by successive calls to
+        'add_include_dir()'.
+        """
+        self.include_dirs.append (dir)
+
+    def set_include_dirs (self, dirs):
+        """Set the list of directories that will be searched to 'dirs' (a
+        list of strings).  Overrides any preceding calls to
+        'add_include_dir()'; subsequence calls to 'add_include_dir()' add
+        to the list passed to 'set_include_dirs()'.  This does not affect
+        any list of standard include directories that the compiler may
+        search by default.
+        """
+        # copy() so later mutation of the caller's list doesn't leak in
+        self.include_dirs = copy (dirs)
+
+
+    def add_library (self, libname):
+        """Add 'libname' to the list of libraries that will be included in
+        all links driven by this compiler object.  Note that 'libname'
+        should *not* be the name of a file containing a library, but the
+        name of the library itself: the actual filename will be inferred by
+        the linker, the compiler, or the compiler class (depending on the
+        platform).
+
+        The linker will be instructed to link against libraries in the
+        order they were supplied to 'add_library()' and/or
+        'set_libraries()'.  It is perfectly valid to duplicate library
+        names; the linker will be instructed to link against libraries as
+        many times as they are mentioned.
+        """
+        self.libraries.append (libname)
+
+    def set_libraries (self, libnames):
+        """Set the list of libraries to be included in all links driven by
+        this compiler object to 'libnames' (a list of strings).  This does
+        not affect any standard system libraries that the linker may
+        include by default.
+        """
+        self.libraries = copy (libnames)
+
+
+    def add_library_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        libraries specified to 'add_library()' and 'set_libraries()'.  The
+        linker will be instructed to search for libraries in the order they
+        are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
+        """
+        self.library_dirs.append (dir)
+
+    def set_library_dirs (self, dirs):
+        """Set the list of library search directories to 'dirs' (a list of
+        strings).  This does not affect any standard library search path
+        that the linker may search by default.
+        """
+        self.library_dirs = copy (dirs)
+
+
+    def add_runtime_library_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        shared libraries at runtime.
+        """
+        self.runtime_library_dirs.append (dir)
+
+    def set_runtime_library_dirs (self, dirs):
+        """Set the list of directories to search for shared libraries at
+        runtime to 'dirs' (a list of strings).  This does not affect any
+        standard search path that the runtime linker may search by
+        default.
+        """
+        self.runtime_library_dirs = copy (dirs)
+
+
+    def add_link_object (self, object):
+        """Add 'object' to the list of object files (or analogues, such as
+        explicitly named library files or the output of "resource
+        compilers") to be included in every link driven by this compiler
+        object.
+        """
+        self.objects.append (object)
+
+    def set_link_objects (self, objects):
+        """Set the list of object files (or analogues) to be included in
+        every link to 'objects'.  This does not affect any standard object
+        files that the linker may include by default (such as system
+        libraries).
+        """
+        self.objects = copy (objects)
+
+
+    # -- Priviate utility methods --------------------------------------
+    # (here for the convenience of subclasses)
+
+    def _fix_compile_args (self, output_dir, macros, include_dirs):
+        """Typecheck and fix-up some of the arguments to the 'compile()'
+        method, and return fixed-up values.  Specifically: if 'output_dir'
+        is None, replaces it with 'self.output_dir'; ensures that 'macros'
+        is a list, and augments it with 'self.macros'; ensures that
+        'include_dirs' is a list, and augments it with 'self.include_dirs'.
+        Guarantees that the returned values are of the correct type,
+        i.e. for 'output_dir' either string or None, and for 'macros' and
+        'include_dirs' either list or None.
+        """
+        if output_dir is None:
+            output_dir = self.output_dir
+        elif type (output_dir) is not StringType:
+            raise TypeError, "'output_dir' must be a string or None"
+
+        if macros is None:
+            macros = self.macros
+        elif type (macros) is ListType:
+            # per-call macros come first so they take precedence
+            macros = macros + (self.macros or [])
+        else:
+            raise TypeError, \
+                  "'macros' (if supplied) must be a list of tuples"
+
+        if include_dirs is None:
+            include_dirs = self.include_dirs
+        elif type (include_dirs) in (ListType, TupleType):
+            include_dirs = list (include_dirs) + (self.include_dirs or [])
+        else:
+            raise TypeError, \
+                  "'include_dirs' (if supplied) must be a list of strings"
+
+        return (output_dir, macros, include_dirs)
+
+    # _fix_compile_args ()
+
+
+    def _prep_compile (self, sources, output_dir):
+        """Determine the list of object files corresponding to 'sources',
+        and figure out which ones really need to be recompiled.  Return a
+        list of all object files and a dictionary telling which source
+        files can be skipped (source filename -> 1 to skip, 0 to rebuild).
+        """
+        # Get the list of expected output (object) files
+        objects = self.object_filenames (sources,
+                                         strip_dir=1,
+                                         output_dir=output_dir)
+
+        if self.force:
+            skip_source = {}            # rebuild everything
+            for source in sources:
+                skip_source[source] = 0
+        else:
+            # Figure out which source files we have to recompile according
+            # to a simplistic check -- we just compare the source and
+            # object file, no deep dependency checking involving header
+            # files.
+            skip_source = {}            # rebuild everything
+            for source in sources:      # no wait, rebuild nothing
+                skip_source[source] = 1
+
+            # newer_pairwise() returns the (source, object) pairs where
+            # the source is newer than (or the object is missing for) it.
+            (n_sources, n_objects) = newer_pairwise (sources, objects)
+            for source in n_sources:    # no really, only rebuild what's
+                skip_source[source] = 0 # out-of-date
+
+        return (objects, skip_source)
+
+    # _prep_compile ()
+
+
+    def _fix_object_args (self, objects, output_dir):
+        """Typecheck and fix up some arguments supplied to various methods.
+        Specifically: ensure that 'objects' is a list; if output_dir is
+        None, replace with self.output_dir.  Return fixed versions of
+        'objects' and 'output_dir'.
+        """
+        if type (objects) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'objects' must be a list or tuple of strings"
+        objects = list (objects)
+
+        if output_dir is None:
+            output_dir = self.output_dir
+        elif type (output_dir) is not StringType:
+            raise TypeError, "'output_dir' must be a string or None"
+
+        return (objects, output_dir)
+
+
+    def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
+        """Typecheck and fix up some of the arguments supplied to the
+        'link_*' methods.  Specifically: ensure that all arguments are
+        lists, and augment them with their permanent versions
+        (eg. 'self.libraries' augments 'libraries').  Return a tuple with
+        fixed versions of all arguments.
+
+        Each argument may be None (use the compiler-wide value alone) or
+        a list/tuple (copied and extended with the compiler-wide value);
+        anything else raises TypeError.
+        """
+        if libraries is None:
+            libraries = self.libraries
+        elif type (libraries) in (ListType, TupleType):
+            libraries = list (libraries) + (self.libraries or [])
+        else:
+            raise TypeError, \
+                  "'libraries' (if supplied) must be a list of strings"
+
+        if library_dirs is None:
+            library_dirs = self.library_dirs
+        elif type (library_dirs) in (ListType, TupleType):
+            library_dirs = list (library_dirs) + (self.library_dirs or [])
+        else:
+            raise TypeError, \
+                  "'library_dirs' (if supplied) must be a list of strings"
+
+        if runtime_library_dirs is None:
+            runtime_library_dirs = self.runtime_library_dirs
+        elif type (runtime_library_dirs) in (ListType, TupleType):
+            runtime_library_dirs = (list (runtime_library_dirs) +
+                                    (self.runtime_library_dirs or []))
+        else:
+            raise TypeError, \
+                  "'runtime_library_dirs' (if supplied) " + \
+                  "must be a list of strings"
+
+        return (libraries, library_dirs, runtime_library_dirs)
+
+    # _fix_lib_args ()
+
+
+    def _need_link (self, objects, output_file):
+        """Return true if we need to relink the files listed in 'objects'
+        to recreate 'output_file'.
+        """
+        if self.force:
+            return 1
+        else:
+            if self.dry_run:
+                newer = newer_group (objects, output_file, missing='newer')
+            else:
+                newer = newer_group (objects, output_file)
+            return newer
+
+    # _need_link ()
+
+
+    # -- Worker methods ------------------------------------------------
+    # (must be implemented by subclasses)
+
+    def preprocess (self,
+                    source,
+                    output_file=None,
+                    macros=None,
+                    include_dirs=None,
+                    extra_preargs=None,
+                    extra_postargs=None):
+        """Preprocess a single C/C++ source file, named in 'source'.
+        Output will be written to file named 'output_file', or stdout if
+        'output_file' not supplied.  'macros' is a list of macro
+        definitions as for 'compile()', which will augment the macros set
+        with 'define_macro()' and 'undefine_macro()'.  'include_dirs' is a
+        list of directory names that will be added to the default list.
+
+        Raises PreprocessError on failure.
+        """
+        # Default implementation is a no-op -- concrete compiler classes
+        # are expected to override this.
+        pass
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        """Compile one or more source files.  'sources' must be a list of
+        filenames, most likely C/C++ files, but in reality anything that
+        can be handled by a particular compiler and compiler class
+        (eg. MSVCCompiler can handle resource files in 'sources').  Return
+        a list of object filenames, one per source filename in 'sources'.
+        Depending on the implementation, not all source files will
+        necessarily be compiled, but all corresponding object filenames
+        will be returned.
+
+        If 'output_dir' is given, object files will be put under it, while
+        retaining their original path component.  That is, "foo/bar.c"
+        normally compiles to "foo/bar.o" (for a Unix implementation); if
+        'output_dir' is "build", then it would compile to
+        "build/foo/bar.o".
+
+        'macros', if given, must be a list of macro definitions.  A macro
+        definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
+        The former defines a macro; if the value is None, the macro is
+        defined without an explicit value.  The 1-tuple case undefines a
+        macro.  Later definitions/redefinitions/undefinitions take
+        precedence.
+
+        'include_dirs', if given, must be a list of strings, the
+        directories to add to the default include file search path for this
+        compilation only.
+
+        'debug' is a boolean; if true, the compiler will be instructed to
+        output debug symbols in (or alongside) the object file(s).
+
+        'extra_preargs' and 'extra_postargs' are implementation-dependent.
+        On platforms that have the notion of a command-line (e.g. Unix,
+        DOS/Windows), they are most likely lists of strings: extra
+        command-line arguments to prepend/append to the compiler command
+        line.  On other platforms, consult the implementation class
+        documentation.  In any event, they are intended as an escape hatch
+        for those occasions when the abstract compiler framework doesn't
+        cut the mustard.
+
+        Raises CompileError on failure.
+        """
+        # No default implementation -- subclasses must override.
+        pass
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0):
+        """Link a bunch of stuff together to create a static library file.
+        The "bunch of stuff" consists of the list of object files supplied
+        as 'objects', the extra object files supplied to
+        'add_link_object()' and/or 'set_link_objects()', and the libraries
+        supplied to 'add_library()' and/or 'set_libraries()'.  (Note that
+        this method takes no per-call 'libraries' argument.)
+
+        'output_libname' should be a library name, not a filename; the
+        filename will be inferred from the library name.  'output_dir' is
+        the directory where the library file will be put.
+
+        'debug' is a boolean; if true, debugging information will be
+        included in the library (note that on most platforms, it is the
+        compile step where this matters: the 'debug' flag is included here
+        just for consistency).
+
+        Raises LibError on failure.
+        """
+        # No default implementation -- subclasses must override.
+        pass
+
+
+    # values for target_desc parameter in link(): they select what kind
+    # of output file the linker should produce.
+    SHARED_OBJECT = "shared_object"
+    SHARED_LIBRARY = "shared_library"
+    EXECUTABLE = "executable"
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        """Link a bunch of stuff together to create an executable or
+        shared library file.
+
+        'target_desc' is one of the class constants SHARED_OBJECT,
+        SHARED_LIBRARY or EXECUTABLE.
+
+        The "bunch of stuff" consists of the list of object files supplied
+        as 'objects'.  'output_filename' should be a filename.  If
+        'output_dir' is supplied, 'output_filename' is relative to it
+        (i.e. 'output_filename' can provide directory components if
+        needed).
+
+        'libraries' is a list of libraries to link against.  These are
+        library names, not filenames, since they're translated into
+        filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
+        on Unix and "foo.lib" on DOS/Windows).  However, they can include a
+        directory component, which means the linker will look in that
+        specific directory rather than searching all the normal locations.
+
+        'library_dirs', if supplied, should be a list of directories to
+        search for libraries that were specified as bare library names
+        (ie. no directory component).  These are on top of the system
+        default and those supplied to 'add_library_dir()' and/or
+        'set_library_dirs()'.  'runtime_library_dirs' is a list of
+        directories that will be embedded into the shared library and used
+        to search for other shared libraries that *it* depends on at
+        run-time.  (This may only be relevant on Unix.)
+
+        'export_symbols' is a list of symbols that the shared library will
+        export.  (This appears to be relevant only on Windows.)
+
+        'debug' is as for 'compile()' and 'create_static_lib()', with the
+        slight distinction that it actually matters on most platforms (as
+        opposed to 'create_static_lib()', which includes a 'debug' flag
+        mostly for form's sake).
+
+        'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
+        of course that they supply command-line arguments for the
+        particular linker being used).
+
+        'build_temp', if given, names a scratch directory; whether and
+        how it is used is implementation-dependent (TODO confirm against
+        concrete subclasses).
+
+        Raises LinkError on failure.
+        """
+        raise NotImplementedError
+
+
+    # Old 'link_*()' methods, rewritten to use the new 'link()' method.
+
+    def link_shared_lib (self,
+                         objects,
+                         output_libname,
+                         output_dir=None,
+                         libraries=None,
+                         library_dirs=None,
+                         runtime_library_dirs=None,
+                         export_symbols=None,
+                         debug=0,
+                         extra_preargs=None,
+                         extra_postargs=None,
+                         build_temp=None):
+        self.link(CCompiler.SHARED_LIBRARY, objects,
+                  self.library_filename(output_libname, lib_type='shared'),
+                  output_dir,
+                  libraries, library_dirs, runtime_library_dirs,
+                  export_symbols, debug,
+                  extra_preargs, extra_postargs, build_temp)
+
+
+    def link_shared_object (self,
+                            objects,
+                            output_filename,
+                            output_dir=None,
+                            libraries=None,
+                            library_dirs=None,
+                            runtime_library_dirs=None,
+                            export_symbols=None,
+                            debug=0,
+                            extra_preargs=None,
+                            extra_postargs=None,
+                            build_temp=None):
+        self.link(CCompiler.SHARED_OBJECT, objects,
+                  output_filename, output_dir,
+                  libraries, library_dirs, runtime_library_dirs,
+                  export_symbols, debug,
+                  extra_preargs, extra_postargs, build_temp)
+
+
+    def link_executable (self,
+                         objects,
+                         output_progname,
+                         output_dir=None,
+                         libraries=None,
+                         library_dirs=None,
+                         runtime_library_dirs=None,
+                         debug=0,
+                         extra_preargs=None,
+                         extra_postargs=None):
+        self.link(CCompiler.EXECUTABLE, objects,
+                  self.executable_filename(output_progname), output_dir,
+                  libraries, library_dirs, runtime_library_dirs, None,
+                  debug, extra_preargs, extra_postargs, None)
+
+
+    # -- Miscellaneous methods -----------------------------------------
+    # These are all used by the 'gen_lib_options()' function; there is
+    # no appropriate default implementation so subclasses should
+    # implement all of these.
+
+    def library_dir_option (self, dir):
+        """Return the compiler option to add 'dir' to the list of
+        directories searched for libraries.
+
+        Must be overridden; the base implementation raises
+        NotImplementedError.
+        """
+        raise NotImplementedError
+
+    def runtime_library_dir_option (self, dir):
+        """Return the compiler option to add 'dir' to the list of
+        directories searched for runtime libraries.
+
+        Must be overridden; the base implementation raises
+        NotImplementedError.
+        """
+        raise NotImplementedError
+
+    def library_option (self, lib):
+        """Return the compiler option to add 'lib' to the list of libraries
+        linked into the shared library or executable.
+
+        Must be overridden; the base implementation raises
+        NotImplementedError.
+        """
+        raise NotImplementedError
+
+    def find_library_file (self, dirs, lib, debug=0):
+        """Search the specified list of directories for a static or shared
+        library file 'lib' and return the full path to that file.  If
+        'debug' true, look for a debugging version (if that makes sense on
+        the current platform).  Return None if 'lib' wasn't found in any of
+        the specified directories.
+
+        Must be overridden; the base implementation raises
+        NotImplementedError.
+        """
+        raise NotImplementedError
+
+
+    # -- Filename generation methods -----------------------------------
+
+    # The default implementation of the filename generating methods are
+    # prejudiced towards the Unix/DOS/Windows view of the world:
+    #   * object files are named by replacing the source file extension
+    #     (eg. .c/.cpp -> .o/.obj)
+    #   * library files (shared or static) are named by plugging the
+    #     library name and extension into a format string, eg.
+    #     "lib%s.%s" % (lib_name, ".a") for Unix static libraries
+    #   * executables are named by appending an extension (possibly
+    #     empty) to the program name: eg. progname + ".exe" for
+    #     Windows
+    #
+    # To reduce redundant code, these methods expect to find
+    # several attributes in the current object (presumably defined
+    # as class attributes):
+    #   * src_extensions -
+    #     list of C/C++ source file extensions, eg. ['.c', '.cpp']
+    #   * obj_extension -
+    #     object file extension, eg. '.o' or '.obj'
+    #   * static_lib_extension -
+    #     extension for static library files, eg. '.a' or '.lib'
+    #   * shared_lib_extension -
+    #     extension for shared library/object files, eg. '.so', '.dll'
+    #   * static_lib_format -
+    #     format string for generating static library filenames,
+    #     eg. 'lib%s.%s' or '%s.%s'
+    #   * shared_lib_format
+    #     format string for generating shared library filenames
+    #     (probably same as static_lib_format, since the extension
+    #     is one of the intended parameters to the format string)
+    #   * exe_extension -
+    #     extension for executable files, eg. '' or '.exe'
+
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        """Map each source filename to the object filename the compiler
+        will produce for it, by swapping the source extension for
+        'self.obj_extension'.  If 'strip_dir' is true, the directory part
+        of each source name is discarded; 'output_dir' (None is treated
+        like '') is prepended to every result.  Raises UnknownFileError
+        for a source whose extension is not in 'self.src_extensions'.
+        """
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            (base, ext) = os.path.splitext (src_name)
+            if ext not in self.src_extensions:
+                raise UnknownFileError, \
+                      "unknown file type '%s' (from '%s')" % \
+                      (ext, src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            obj_names.append (os.path.join (output_dir,
+                                            base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+
+    def shared_object_filename (self,
+                                basename,
+                                strip_dir=0,
+                                output_dir=''):
+        if output_dir is None: output_dir = ''
+        if strip_dir:
+            basename = os.path.basename (basename)
+        return os.path.join (output_dir, basename + self.shared_lib_extension)
+
+    def executable_filename (self,
+                                basename,
+                                strip_dir=0,
+                                output_dir=''):
+        if output_dir is None: output_dir = ''
+        if strip_dir:
+            basename = os.path.basename (basename)
+        return os.path.join(output_dir, basename + (self.exe_extension or ''))
+
+    def library_filename (self,
+                          libname,
+                          lib_type='static',     # or 'shared'
+                          strip_dir=0,
+                          output_dir=''):
+        """Return the filename for a library named 'libname' of the given
+        'lib_type' ("static", "shared" or "dylib"), built from the class
+        attributes '<lib_type>_lib_format' and '<lib_type>_lib_extension'.
+        Any directory component of 'libname' is kept unless 'strip_dir'
+        is true; 'output_dir' (None is treated like '') is prepended.
+        """
+        if output_dir is None: output_dir = ''
+        if lib_type not in ("static","shared","dylib"):
+            raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
+        fmt = getattr (self, lib_type + "_lib_format")
+        ext = getattr (self, lib_type + "_lib_extension")
+
+        (dir, base) = os.path.split (libname)
+        filename = fmt % (base, ext)
+        if strip_dir:
+            dir = ''
+
+        return os.path.join (output_dir, dir, filename)
+
+
+    # -- Utility methods -----------------------------------------------
+
+    def announce (self, msg, level=1):
+        if self.verbose >= level:
+            print msg
+
+    def debug_print (self, msg):
+        """Print 'msg' only if the distutils-wide DEBUG flag is set."""
+        # Imported here rather than at module level -- presumably to
+        # avoid an import cycle with distutils.core (TODO confirm).
+        from distutils.core import DEBUG
+        if DEBUG:
+            print msg
+
+    def warn (self, msg):
+        """Write "warning: <msg>" to stderr."""
+        sys.stderr.write ("warning: %s\n" % msg)
+
+    def execute (self, func, args, msg=None, level=1):
+        """Run 'func' with 'args' via the module-level 'execute()' helper,
+        honouring this compiler's verbosity threshold and dry-run flag.
+        """
+        execute(func, args, msg, self.verbose >= level, self.dry_run)
+
+    def spawn (self, cmd):
+        """Run the external command 'cmd' via the module-level 'spawn()'
+        helper, passing along this compiler's verbosity and dry-run flags.
+        """
+        spawn (cmd, verbose=self.verbose, dry_run=self.dry_run)
+
+    def move_file (self, src, dst):
+        """Move 'src' to 'dst' via the module-level 'move_file()' helper
+        (respects dry-run); return its result.
+        """
+        return move_file (src, dst, verbose=self.verbose, dry_run=self.dry_run)
+
+    def mkpath (self, name, mode=0777):
+        """Create directory 'name' with 'mode' via the module-level
+        'mkpath()' helper (respects verbosity and dry-run).
+        """
+        mkpath (name, mode, self.verbose, self.dry_run)
+
+
+# class CCompiler
+
+
+# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
+# type for that platform. Keys are interpreted as re match
+# patterns. Order is important; platform mappings are preferred over
+# OS names.
+# Each entry is a (pattern, compiler_type) pair consumed by
+# 'get_default_compiler()' below: the first pattern matching either
+# sys.platform or os.name wins.
+_default_compilers = (
+
+    # Platform string mappings
+
+    # on a cygwin built python we can use gcc like an ordinary UNIXish
+    # compiler
+    ('cygwin.*', 'unix'),
+
+    # OS name mappings
+    ('posix', 'unix'),
+    ('nt', 'msvc'),
+    ('mac', 'mwerks'),
+
+    )
+
+def get_default_compiler(osname=None, platform=None):
+
+    """ Determine the default compiler to use for the given platform.
+
+        osname should be one of the standard Python OS names (i.e. the
+        ones returned by os.name) and platform the common value
+        returned by sys.platform for the platform in question.
+
+        The default values are os.name and sys.platform in case the
+        parameters are not given.
+
+    """
+    if osname is None:
+        osname = os.name
+    if platform is None:
+        platform = sys.platform
+    for pattern, compiler in _default_compilers:
+        if re.match(pattern, platform) is not None or \
+           re.match(pattern, osname) is not None:
+            return compiler
+    # Default to Unix compiler
+    return 'unix'
+
+# Map compiler types to (module_name, class_name, description) triples
+# -- ie. where to find the code that implements an interface to this
+# compiler.  (The module is assumed to be in the 'distutils' package;
+# the description is what 'show_compilers()' prints.)
+compiler_class = { 'unix':    ('unixccompiler', 'UnixCCompiler',
+                               "standard UNIX-style compiler"),
+                   'msvc':    ('msvccompiler', 'MSVCCompiler',
+                               "Microsoft Visual C++"),
+                   'cygwin':  ('cygwinccompiler', 'CygwinCCompiler',
+                               "Cygwin port of GNU C Compiler for Win32"),
+                   'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
+                               "Mingw32 port of GNU C Compiler for Win32"),
+                   'bcpp':    ('bcppcompiler', 'BCPPCompiler',
+                               "Borland C++ Compiler"),
+                   'mwerks':  ('mwerkscompiler', 'MWerksCompiler',
+                               "MetroWerks CodeWarrior"),
+                 }
+
+def show_compilers():
+    """Print list of available compilers (used by the "--help-compiler"
+    options to "build", "build_ext", "build_clib").
+    """
+    # XXX this "knows" that the compiler option it's describing is
+    # "--compiler", which just happens to be the case for the three
+    # commands that use it.
+    from distutils.fancy_getopt import FancyGetopt
+    compilers = []
+    for compiler in compiler_class.keys():
+        compilers.append(("compiler="+compiler, None,
+                          compiler_class[compiler][2]))
+    compilers.sort()
+    pretty_printer = FancyGetopt(compilers)
+    pretty_printer.print_help("List of available compilers:")
+
+
+def new_compiler (plat=None,
+                  compiler=None,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+    """Generate an instance of some CCompiler subclass for the supplied
+    platform/compiler combination.  'plat' defaults to 'os.name'
+    (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
+    for that platform.  Currently only 'posix' and 'nt' are supported, and
+    the default compilers are "traditional Unix interface" (UnixCCompiler
+    class) and Visual C++ (MSVCCompiler class).  Note that it's perfectly
+    possible to ask for a Unix compiler object under Windows, and a
+    Microsoft compiler object under Unix -- if you supply a value for
+    'compiler', 'plat' is ignored.
+    """
+    if plat is None:
+        plat = os.name
+
+    try:
+        if compiler is None:
+            compiler = get_default_compiler(plat)
+
+        (module_name, class_name, long_description) = compiler_class[compiler]
+    # An unknown compiler type surfaces as a KeyError on the
+    # 'compiler_class' lookup above.
+    except KeyError:
+        msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+        if compiler is not None:
+            msg = msg + " with '%s' compiler" % compiler
+        raise DistutilsPlatformError, msg
+
+    # Import the implementation module dynamically and pull the compiler
+    # class out of it.
+    try:
+        module_name = "distutils." + module_name
+        __import__ (module_name)
+        module = sys.modules[module_name]
+        klass = vars(module)[class_name]
+    except ImportError:
+        raise DistutilsModuleError, \
+              "can't compile C/C++ code: unable to load module '%s'" % \
+              module_name
+    except KeyError:
+        raise DistutilsModuleError, \
+              ("can't compile C/C++ code: unable to find class '%s' " +
+               "in module '%s'") % (class_name, module_name)
+
+    return klass (verbose, dry_run, force)
+
+
+def gen_preprocess_options (macros, include_dirs):
+    """Generate C pre-processor options (-D, -U, -I) as used by at least
+    two types of compilers: the typical Unix compiler and Visual C++.
+    'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
+    means undefine (-U) macro 'name', and (name,value) means define (-D)
+    macro 'name' to 'value'.  'include_dirs' is just a list of directory
+    names to be added to the header file search path (-I).  Returns a list
+    of command-line options suitable for either Unix compilers or Visual
+    C++.
+    """
+    # XXX it would be nice (mainly aesthetic, and so we don't generate
+    # stupid-looking command lines) to go over 'macros' and eliminate
+    # redundant definitions/undefinitions (ie. ensure that only the
+    # latest mention of a particular macro winds up on the command
+    # line).  I don't think it's essential, though, since most (all?)
+    # Unix C compilers only pay attention to the latest -D or -U
+    # mention of a macro on their command line.  Similar situation for
+    # 'include_dirs'.  I'm punting on both for now.  Anyways, weeding out
+    # redundancies like this should probably be the province of
+    # CCompiler, since the data structures used are inherited from it
+    # and therefore common to all CCompiler classes.
+
+    pp_opts = []
+    for macro in macros:
+
+        if not (type (macro) is TupleType and
+                1 <= len (macro) <= 2):
+            raise TypeError, \
+                  ("bad macro definition '%s': " +
+                   "each element of 'macros' list must be a 1- or 2-tuple") % \
+                  macro
+
+        if len (macro) == 1:        # undefine this macro
+            pp_opts.append ("-U%s" % macro[0])
+        elif len (macro) == 2:
+            if macro[1] is None:    # define with no explicit value
+                pp_opts.append ("-D%s" % macro[0])
+            else:
+                # XXX *don't* need to be clever about quoting the
+                # macro value here, because we're going to avoid the
+                # shell at all costs when we spawn the command!
+                pp_opts.append ("-D%s=%s" % macro)
+
+    for dir in include_dirs:
+        pp_opts.append ("-I%s" % dir)
+
+    return pp_opts
+
+# gen_preprocess_options ()
+
+
+def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
+    """Generate linker options for searching library directories and
+    linking with specific libraries.  'libraries' and 'library_dirs' are,
+    respectively, lists of library names (not filenames!) and search
+    directories.  Returns a list of command-line options suitable for use
+    with some compiler (depending on the two format strings passed in).
+
+    'runtime_library_dirs' lists directories converted via the
+    compiler's 'runtime_library_dir_option()'.  A library name with a
+    directory component is resolved to an explicit file with
+    'find_library_file()', and skipped (with a warning) if not found.
+    """
+    lib_opts = []
+
+    for dir in library_dirs:
+        lib_opts.append (compiler.library_dir_option (dir))
+
+    for dir in runtime_library_dirs:
+        lib_opts.append (compiler.runtime_library_dir_option (dir))
+
+    # XXX it's important that we *not* remove redundant library mentions!
+    # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
+    # resolve all symbols.  I just hope we never have to say "-lfoo obj.o
+    # -lbar" to get things to work -- that's certainly a possibility, but a
+    # pretty nasty way to arrange your C code.
+
+    for lib in libraries:
+        (lib_dir, lib_name) = os.path.split (lib)
+        if lib_dir:
+            lib_file = compiler.find_library_file ([lib_dir], lib_name)
+            if lib_file:
+                lib_opts.append (lib_file)
+            else:
+                compiler.warn ("no library file corresponding to "
+                               "'%s' found (skipping)" % lib)
+        else:
+            lib_opts.append (compiler.library_option (lib))
+
+    return lib_opts
+
+# gen_lib_options ()
diff --git a/lib-python/2.2/distutils/cmd.py b/lib-python/2.2/distutils/cmd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/cmd.py
@@ -0,0 +1,486 @@
+"""distutils.cmd
+
+Provides the Command class, the base class for the command classes
+in the distutils.command package.
+"""
+
+# created 2000/04/03, Greg Ward
+# (extricated from core.py; actually dates back to the beginning)
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.errors import *
+from distutils import util, dir_util, file_util, archive_util, dep_util
+
+
+class Command:
+    """Abstract base class for defining command classes, the "worker bees"
+    of the Distutils.  A useful analogy for command classes is to think of
+    them as subroutines with local variables called "options".  The options
+    are "declared" in 'initialize_options()' and "defined" (given their
+    final values, aka "finalized") in 'finalize_options()', both of which
+    must be defined by every command class.  The distinction between the
+    two is necessary because option values might come from the outside
+    world (command line, config file, ...), and any options dependent on
+    other options must be computed *after* these outside influences have
+    been processed -- hence 'finalize_options()'.  The "body" of the
+    subroutine, where it does all its work based on the values of its
+    options, is the 'run()' method, which must also be implemented by every
+    command class.
+    """
+
+    # 'sub_commands' formalizes the notion of a "family" of commands,
+    # eg. "install" as the parent with sub-commands "install_lib",
+    # "install_headers", etc.  The parent of a family of commands
+    # defines 'sub_commands' as a class attribute; it's a list of
+    #    (command_name : string, predicate : unbound_method | string | None)
+    # tuples, where 'predicate' is a method of the parent command that
+    # determines whether the corresponding command is applicable in the
+    # current situation.  (Eg. we "install_headers" is only applicable if
+    # we have any C header files to install.)  If 'predicate' is None,
+    # that command is always applicable.
+    #
+    # 'sub_commands' is usually defined at the *end* of a class, because
+    # predicates can be unbound methods, so they must already have been
+    # defined.  The canonical example is the "install" command.
+    sub_commands = []
+
+
+    # -- Creation/initialization methods -------------------------------
+
+    def __init__ (self, dist):
+        """Create and initialize a new Command object.  Most importantly,
+        invokes the 'initialize_options()' method, which is the real
+        initializer and depends on the actual command being
+        instantiated.
+        """
+        # late import because of mutual dependence between these classes
+        from distutils.dist import Distribution
+
+        if not isinstance(dist, Distribution):
+            raise TypeError, "dist must be a Distribution instance"
+        if self.__class__ is Command:
+            raise RuntimeError, "Command is an abstract class"
+
+        self.distribution = dist
+        self.initialize_options()
+
+        # Per-command versions of the global flags, so that the user can
+        # customize Distutils' behaviour command-by-command and let some
+        # commands fallback on the Distribution's behaviour.  None means
+        # "not defined, check self.distribution's copy", while 0 or 1 mean
+        # false and true (duh).  Note that this means figuring out the real
+        # value of each flag is a touch complicated -- hence "self.verbose"
+        # (etc.) will be handled by __getattr__, below.
+        self._verbose = None
+        self._dry_run = None
+
+        # Some commands define a 'self.force' option to ignore file
+        # timestamps, but methods defined *here* assume that
+        # 'self.force' exists for all commands.  So define it here
+        # just to be safe.
+        self.force = None
+
+        # The 'help' flag is just used for command-line parsing, so
+        # none of that complicated bureaucracy is needed.
+        self.help = 0
+
+        # 'finalized' records whether or not 'finalize_options()' has been
+        # called.  'finalize_options()' itself should not pay attention to
+        # this flag: it is the business of 'ensure_finalized()', which
+        # always calls 'finalize_options()', to respect/update it.
+        self.finalized = 0
+
+    # __init__ ()
+
+
+    def __getattr__ (self, attr):
+        # 'verbose' and 'dry_run' are backed by '_verbose'/'_dry_run'
+        # (set in __init__); a stored value of None means "inherit the
+        # Distribution object's setting".
+        if attr in ('verbose', 'dry_run'):
+            myval = getattr(self, "_" + attr)
+            if myval is None:
+                return getattr(self.distribution, attr)
+            else:
+                return myval
+        else:
+            raise AttributeError, attr
+
+
+    def ensure_finalized (self):
+        """Call 'finalize_options()' if it has not been called yet, and
+        record that it has now been done."""
+        if not self.finalized:
+            self.finalize_options()
+        self.finalized = 1
+
+
+    # Subclasses must define:
+    #   initialize_options()
+    #     provide default values for all options; may be customized by
+    #     setup script, by options from config file(s), or by command-line
+    #     options
+    #   finalize_options()
+    #     decide on the final values for all options; this is called
+    #     after all possible intervention from the outside world
+    #     (command-line, option file, etc.) has been processed
+    #   run()
+    #     run the command: do whatever it is we're here to do,
+    #     controlled by the command's various option values
+
+    def initialize_options (self):
+        """Set default values for all the options that this command
+        supports.  Note that these defaults may be overridden by other
+        commands, by the setup script, by config files, or by the
+        command-line.  Thus, this is not the place to code dependencies
+        between options; generally, 'initialize_options()' implementations
+        are just a bunch of "self.foo = None" assignments.
+
+        This method must be implemented by all command classes.
+        """
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+    def finalize_options (self):
+        """Set final values for all the options that this command supports.
+        This is always called as late as possible, ie.  after any option
+        assignments from the command-line or from other commands have been
+        done.  Thus, this is the place to to code option dependencies: if
+        'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
+        long as 'foo' still has the same value it was assigned in
+        'initialize_options()'.
+
+        This method must be implemented by all command classes.
+        """
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+
+    def dump_options (self, header=None, indent=""):
+        """Print this command's option values, one "name = value" line per
+        option, under 'header' (which defaults to a line naming the
+        command).  Debugging aid."""
+        from distutils.fancy_getopt import longopt_xlate
+        if header is None:
+            header = "command options for '%s':" % self.get_command_name()
+        print indent + header
+        indent = indent + "  "
+        for (option, _, _) in self.user_options:
+            # map the long-option spelling to the attribute name, and
+            # strip the trailing '=' from options that take a value
+            option = string.translate(option, longopt_xlate)
+            if option[-1] == "=":
+                option = option[:-1]
+            value = getattr(self, option)
+            print indent + "%s = %s" % (option, value)
+
+
+    def run (self):
+        """A command's raison d'etre: carry out the action it exists to
+        perform, controlled by the options initialized in
+        'initialize_options()', customized by other commands, the setup
+        script, the command-line, and config files, and finalized in
+        'finalize_options()'.  All terminal output and filesystem
+        interaction should be done by 'run()'.
+
+        This method must be implemented by all command classes.
+        """
+
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+    def announce (self, msg, level=1):
+        """If the current verbosity level is of greater than or equal to
+        'level' print 'msg' to stdout.
+        """
+        if self.verbose >= level:
+            print msg
+            sys.stdout.flush()
+
+    def debug_print (self, msg):
+        """Print 'msg' to stdout if the global DEBUG (taken from the
+        DISTUTILS_DEBUG environment variable) flag is true.
+        """
+        from distutils.core import DEBUG
+        if DEBUG:
+            print msg
+            sys.stdout.flush()
+
+
+
+    # -- Option validation methods -------------------------------------
+    # (these are very handy in writing the 'finalize_options()' method)
+    #
+    # NB. the general philosophy here is to ensure that a particular option
+    # value meets certain type and value constraints.  If not, we try to
+    # force it into conformance (eg. if we expect a list but have a string,
+    # split the string on comma and/or whitespace).  If we can't force the
+    # option into conformance, raise DistutilsOptionError.  Thus, command
+    # classes need do nothing more than (eg.)
+    #   self.ensure_string_list('foo')
+    # and they can be guaranteed that thereafter, self.foo will be
+    # a list of strings.
+
+    def _ensure_stringlike (self, option, what, default=None):
+        """Return the value of 'option', storing and returning 'default'
+        if it is None; raise DistutilsOptionError (using 'what' to name
+        the expected type) if the value is not a string."""
+        val = getattr(self, option)
+        if val is None:
+            setattr(self, option, default)
+            return default
+        elif type(val) is not StringType:
+            raise DistutilsOptionError, \
+                  "'%s' must be a %s (got `%s`)" % (option, what, val)
+        return val
+
+    def ensure_string (self, option, default=None):
+        """Ensure that 'option' is a string; if not defined, set it to
+        'default'.
+        """
+        self._ensure_stringlike(option, "string", default)
+
+    def ensure_string_list (self, option):
+        """Ensure that 'option' is a list of strings.  If 'option' is
+        currently a string, we split it either on /,\s*/ or /\s+/, so
+        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
+        ["foo", "bar", "baz"].
+        """
+        val = getattr(self, option)
+        if val is None:
+            return
+        elif type(val) is StringType:
+            setattr(self, option, re.split(r',\s*|\s+', val))
+        else:
+            if type(val) is ListType:
+                types = map(type, val)
+                ok = (types == [StringType] * len(val))
+            else:
+                ok = 0
+
+            if not ok:
+                raise DistutilsOptionError, \
+                      "'%s' must be a list of strings (got %s)" % \
+                      (option, `val`)
+
+    def _ensure_tested_string (self, option, tester,
+                               what, error_fmt, default=None):
+        """Like '_ensure_stringlike()', but additionally require that a
+        non-None value satisfy 'tester'; 'error_fmt' is interpolated with
+        the value for the error message."""
+        val = self._ensure_stringlike(option, what, default)
+        if val is not None and not tester(val):
+            raise DistutilsOptionError, \
+                  ("error in '%s' option: " + error_fmt) % (option, val)
+
+    def ensure_filename (self, option):
+        """Ensure that 'option' is the name of an existing file."""
+        self._ensure_tested_string(option, os.path.isfile,
+                                   "filename",
+                                   "'%s' does not exist or is not a file")
+
+    def ensure_dirname (self, option):
+        """Ensure that 'option' is the name of an existing directory."""
+        self._ensure_tested_string(option, os.path.isdir,
+                                   "directory name",
+                                   "'%s' does not exist or is not a directory")
+
+
+    # -- Convenience methods for commands ------------------------------
+
+    def get_command_name (self):
+        """Return this command's name: the 'command_name' attribute if it
+        is defined, otherwise the class name."""
+        if hasattr(self, 'command_name'):
+            return self.command_name
+        else:
+            return self.__class__.__name__
+
+
+    def set_undefined_options (self, src_cmd, *option_pairs):
+        """Set the values of any "undefined" options from corresponding
+        option values in some other command object.  "Undefined" here means
+        "is None", which is the convention used to indicate that an option
+        has not been changed between 'initialize_options()' and
+        'finalize_options()'.  Usually called from 'finalize_options()' for
+        options that depend on some other command rather than another
+        option of the same command.  'src_cmd' is the other command from
+        which option values will be taken (a command object will be created
+        for it if necessary); the remaining arguments are
+        '(src_option,dst_option)' tuples which mean "take the value of
+        'src_option' in the 'src_cmd' command object, and copy it to
+        'dst_option' in the current command object".
+        """
+
+        # Option_pairs: list of (src_option, dst_option) tuples
+
+        src_cmd_obj = self.distribution.get_command_obj(src_cmd)
+        src_cmd_obj.ensure_finalized()
+        for (src_option, dst_option) in option_pairs:
+            if getattr(self, dst_option) is None:
+                setattr(self, dst_option,
+                        getattr(src_cmd_obj, src_option))
+
+
+    def get_finalized_command (self, command, create=1):
+        """Wrapper around Distribution's 'get_command_obj()' method: find
+        (create if necessary and 'create' is true) the command object for
+        'command', call its 'ensure_finalized()' method, and return the
+        finalized command object.
+        """
+        cmd_obj = self.distribution.get_command_obj(command, create)
+        cmd_obj.ensure_finalized()
+        return cmd_obj
+
+    # XXX rename to 'get_reinitialized_command()'? (should do the
+    # same in dist.py, if so)
+    def reinitialize_command (self, command, reinit_subcommands=0):
+        """Delegate to Distribution's 'reinitialize_command()' method."""
+        return self.distribution.reinitialize_command(
+            command, reinit_subcommands)
+
+    def run_command (self, command):
+        """Run some other command: uses the 'run_command()' method of
+        Distribution, which creates and finalizes the command object if
+        necessary and then invokes its 'run()' method.
+        """
+        self.distribution.run_command(command)
+
+
+    def get_sub_commands (self):
+        """Determine the sub-commands that are relevant in the current
+        distribution (ie., that need to be run).  This is based on the
+        'sub_commands' class attribute: each tuple in that list may include
+        a method that we call to determine if the subcommand needs to be
+        run for the current distribution.  Return a list of command names.
+        """
+        commands = []
+        for (cmd_name, method) in self.sub_commands:
+            if method is None or method(self):
+                commands.append(cmd_name)
+        return commands
+
+
+    # -- External world manipulation -----------------------------------
+
+    def warn (self, msg):
+        """Write a warning message, tagged with this command's name, to
+        stderr."""
+        sys.stderr.write("warning: %s: %s\n" %
+                         (self.get_command_name(), msg))
+
+
+    def execute (self, func, args, msg=None, level=1):
+        """Perform 'func(*args)' via 'util.execute()', respecting the
+        verbosity 'level' and the dry-run flag."""
+        util.execute(func, args, msg, self.verbose >= level, self.dry_run)
+
+
+    def mkpath (self, name, mode=0777):
+        """Create directory 'name' (and any missing ancestors) respecting
+        verbose and dry-run flags."""
+        dir_util.mkpath(name, mode,
+                        self.verbose, self.dry_run)
+
+
+    def copy_file (self, infile, outfile,
+                   preserve_mode=1, preserve_times=1, link=None, level=1):
+        """Copy a file respecting verbose, dry-run and force flags.  (The
+        former two default to whatever is in the Distribution object, and
+        the latter defaults to false for commands that don't define it.)"""
+
+        return file_util.copy_file(
+            infile, outfile,
+            preserve_mode, preserve_times,
+            not self.force,
+            link,
+            self.verbose >= level,
+            self.dry_run)
+
+
+    def copy_tree (self, infile, outfile,
+                   preserve_mode=1, preserve_times=1, preserve_symlinks=0,
+                   level=1):
+        """Copy an entire directory tree respecting verbose, dry-run,
+        and force flags.
+        """
+        return dir_util.copy_tree(
+            infile, outfile,
+            preserve_mode,preserve_times,preserve_symlinks,
+            not self.force,
+            self.verbose >= level,
+            self.dry_run)
+
+
+    def move_file (self, src, dst, level=1):
+        """Move a file respecting verbose and dry-run flags."""
+        return file_util.move_file(src, dst,
+                                   self.verbose >= level,
+                                   self.dry_run)
+
+
+    def spawn (self, cmd, search_path=1, level=1):
+        """Spawn an external command respecting verbose and dry-run flags."""
+        from distutils.spawn import spawn
+        spawn(cmd, search_path,
+              self.verbose >= level,
+              self.dry_run)
+
+
+    def make_archive (self, base_name, format,
+                      root_dir=None, base_dir=None):
+        """Create an archive via 'archive_util.make_archive()', respecting
+        verbose and dry-run flags."""
+        return archive_util.make_archive(
+            base_name, format, root_dir, base_dir,
+            self.verbose, self.dry_run)
+
+
+    def make_file (self, infiles, outfile, func, args,
+                   exec_msg=None, skip_msg=None, level=1):
+        """Special case of 'execute()' for operations that process one or
+        more input files and generate one output file.  Works just like
+        'execute()', except the operation is skipped and a different
+        message printed if 'outfile' already exists and is newer than all
+        files listed in 'infiles'.  If the command defined 'self.force',
+        and it is true, then the command is unconditionally run -- does no
+        timestamp checks.
+        """
+        if exec_msg is None:
+            exec_msg = "generating %s from %s" % \
+                       (outfile, string.join(infiles, ', '))
+        if skip_msg is None:
+            skip_msg = "skipping %s (inputs unchanged)" % outfile
+
+
+        # Allow 'infiles' to be a single string
+        if type(infiles) is StringType:
+            infiles = (infiles,)
+        elif type(infiles) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'infiles' must be a string, or a list or tuple of strings"
+
+        # If 'outfile' must be regenerated (either because it doesn't
+        # exist, is out-of-date, or the 'force' flag is true) then
+        # perform the action that presumably regenerates it
+        if self.force or dep_util.newer_group (infiles, outfile):
+            self.execute(func, args, exec_msg, level)
+
+        # Otherwise, print the "skip" message
+        else:
+            self.announce(skip_msg, level)
+
+    # make_file ()
+
+# class Command
+
+
+# XXX 'install_misc' class not currently used -- it was the base class for
+# both 'install_scripts' and 'install_data', but they outgrew it.  It might
+# still be useful for 'install_headers', though, so I'm keeping it around
+# for the time being.
+
+class install_misc (Command):
+    """Common base class for installing some files in a subdirectory.
+    Currently used by install_data and install_scripts.
+    """
+
+    user_options = [('install-dir=', 'd', "directory to install the files to")]
+
+    def initialize_options (self):
+        # 'install_dir' is normally filled in from the 'install' command
+        # via '_install_dir_from()' below.
+        self.install_dir = None
+        self.outfiles = []
+
+    def _install_dir_from (self, dirname):
+        """Take 'install_dir' from the 'dirname' option of the 'install'
+        command (only if it is still undefined here)."""
+        self.set_undefined_options('install', (dirname, 'install_dir'))
+
+    def _copy_files (self, filelist):
+        """Copy each file in 'filelist' into 'install_dir' (creating it
+        first if necessary), recording the installed paths in
+        'outfiles'."""
+        self.outfiles = []
+        if not filelist:
+            return
+        self.mkpath(self.install_dir)
+        for f in filelist:
+            self.copy_file(f, self.install_dir)
+            self.outfiles.append(os.path.join(self.install_dir, f))
+
+    def get_outputs (self):
+        """Return the list of files installed by '_copy_files()'."""
+        return self.outfiles
+
+
+# Trivial smoke test when this module is run as a script.
+if __name__ == "__main__":
+    print "ok"
diff --git a/lib-python/2.2/distutils/command/__init__.py b/lib-python/2.2/distutils/command/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/__init__.py
@@ -0,0 +1,24 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands."""
+
+__revision__ = "$Id$"
+
+# Names of the standard Distutils command modules contained in this
+# package (each module defines a command class of the same name).
+__all__ = ['build',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'clean',
+           'install',
+           'install_lib',
+           'install_headers',
+           'install_scripts',
+           'install_data',
+           'sdist',
+           'bdist',
+           'bdist_dumb',
+           'bdist_rpm',
+           'bdist_wininst',
+          ]
diff --git a/lib-python/2.2/distutils/command/bdist.py b/lib-python/2.2/distutils/command/bdist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist.py
@@ -0,0 +1,139 @@
+"""distutils.command.bdist
+
+Implements the Distutils 'bdist' command (create a built [binary]
+distribution)."""
+
+# created 2000/03/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import os, string
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import get_platform
+
+
+def show_formats ():
+    """Print list of available formats (arguments to "--format" option).
+    """
+    from distutils.fancy_getopt import FancyGetopt
+    # Build (option, short-option, description) triples from the bdist
+    # class's format tables, then let FancyGetopt pretty-print them.
+    formats=[]
+    for format in bdist.format_commands:
+        formats.append(("formats=" + format, None,
+                        bdist.format_command[format][1]))
+    pretty_printer = FancyGetopt(formats)
+    pretty_printer.print_help("List of available distribution formats:")
+
+
+class bdist (Command):
+
+    description = "create a built (binary) distribution"
+
+    user_options = [('bdist-base=', 'b',
+                     "temporary directory for creating built distributions"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('formats=', None,
+                     "formats for distribution (comma-separated list)"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in "
+                     "[default: dist]"),
+                   ]
+
+    help_options = [
+        ('help-formats', None,
+         "lists available distribution formats", show_formats),
+        ]
+
+    # The following commands do not take a format option from bdist
+    no_format_option = ('bdist_rpm',)
+
+    # This won't do in reality: will need to distinguish RPM-ish Linux,
+    # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip', }
+
+    # Establish the preferred order (for the --help-formats option).
+    format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
+                       'wininst', 'zip']
+
+    # And the real information.
+    format_command = { 'rpm':   ('bdist_rpm',  "RPM distribution"),
+                       'gztar': ('bdist_dumb', "gzip'ed tar file"),
+                       'bztar': ('bdist_dumb', "bzip2'ed tar file"),
+                       'ztar':  ('bdist_dumb', "compressed tar file"),
+                       'tar':   ('bdist_dumb', "tar file"),
+                       'wininst': ('bdist_wininst',
+                                   "Windows executable installer"),
+                       'zip':   ('bdist_dumb', "ZIP file"),
+                     }
+
+
+    def initialize_options (self):
+        """Declare all options as undefined (see Command)."""
+        self.bdist_base = None
+        self.plat_name = None
+        self.formats = None
+        self.dist_dir = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        # have to finalize 'plat_name' before 'bdist_base'
+        if self.plat_name is None:
+            self.plat_name = get_platform()
+
+        # 'bdist_base' -- parent of per-built-distribution-format
+        # temporary directories (eg. we'll probably have
+        # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
+        if self.bdist_base is None:
+            build_base = self.get_finalized_command('build').build_base
+            self.bdist_base = os.path.join(build_base,
+                                           'bdist.' + self.plat_name)
+
+        self.ensure_string_list('formats')
+        if self.formats is None:
+            try:
+                self.formats = [self.default_format[os.name]]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      "don't know how to create built distributions " + \
+                      "on platform %s" % os.name
+
+        if self.dist_dir is None:
+            self.dist_dir = "dist"
+
+    # finalize_options()
+
+
+    def run (self):
+        """Run the appropriate 'bdist_*' sub-command for each requested
+        format, raising DistutilsOptionError on an unknown format."""
+
+        # Figure out which sub-commands we need to run.
+        commands = []
+        for format in self.formats:
+            try:
+                commands.append(self.format_command[format][0])
+            except KeyError:
+                raise DistutilsOptionError, "invalid format '%s'" % format
+
+        # Reinitialize and run each command.
+        for i in range(len(self.formats)):
+            cmd_name = commands[i]
+            sub_cmd = self.reinitialize_command(cmd_name)
+            if cmd_name not in self.no_format_option:
+                sub_cmd.format = self.formats[i]
+
+            # NOTE(review): unconditional trace output -- not gated on
+            # verbosity or DEBUG; presumably leftover debugging.
+            print ("bdist.run: format=%s, command=%s, rest=%s" %
+                   (self.formats[i], cmd_name, commands[i+1:]))
+
+            # If we're going to need to run this command again, tell it to
+            # keep its temporary files around so subsequent runs go faster.
+            if cmd_name in commands[i+1:]:
+                sub_cmd.keep_temp = 1
+            self.run_command(cmd_name)
+
+    # run()
+
+# class bdist
diff --git a/lib-python/2.2/distutils/command/bdist_dumb.py b/lib-python/2.2/distutils/command/bdist_dumb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_dumb.py
@@ -0,0 +1,96 @@
+"""distutils.command.bdist_dumb
+
+Implements the Distutils 'bdist_dumb' command (create a "dumb" built
+distribution -- i.e., just an archive to be unpacked under $prefix or
+$exec_prefix)."""
+
+# created 2000/03/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import create_tree, remove_tree
+from distutils.errors import *
+
+class bdist_dumb (Command):
+
+    description = "create a \"dumb\" built distribution"
+
+    user_options = [('bdist-dir=', 'd',
+                     "temporary directory for creating the distribution"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('format=', 'f',
+                     "archive format to create (tar, ztar, gztar, zip)"),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    # NOTE(review): short option 'd' is also used by
+                    # 'bdist-dir=' above -- looks like a clash; confirm
+                    # against fancy_getopt's handling.
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                   ]
+
+    boolean_options = ['keep-temp']
+
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip', }
+
+
+    def initialize_options (self):
+        """Declare all options as undefined (see Command)."""
+        self.bdist_dir = None
+        self.plat_name = None
+        self.format = None
+        self.keep_temp = 0
+        self.dist_dir = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+
+        # default 'bdist_dir' to "<bdist_base>/dumb"
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'dumb')
+
+        if self.format is None:
+            try:
+                self.format = self.default_format[os.name]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      ("don't know how to create dumb built distributions " +
+                       "on platform %s") % os.name
+
+        self.set_undefined_options('bdist',
+                                   ('dist_dir', 'dist_dir'),
+                                   ('plat_name', 'plat_name'))
+
+    # finalize_options()
+
+
+    def run (self):
+        """Build the distribution, pseudo-install it under 'bdist_dir',
+        and archive the result into 'dist_dir'."""
+
+        self.run_command('build')
+
+        install = self.reinitialize_command('install', reinit_subcommands=1)
+        install.root = self.bdist_dir
+        install.warn_dir = 0
+
+        self.announce("installing to %s" % self.bdist_dir)
+        self.run_command('install')
+
+        # And make an archive relative to the root of the
+        # pseudo-installation tree.
+        archive_basename = "%s.%s" % (self.distribution.get_fullname(),
+                                      self.plat_name)
+        self.make_archive(os.path.join(self.dist_dir, archive_basename),
+                          self.format,
+                          root_dir=self.bdist_dir)
+
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
+
+    # run()
+
+# class bdist_dumb
diff --git a/lib-python/2.2/distutils/command/bdist_rpm.py b/lib-python/2.2/distutils/command/bdist_rpm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_rpm.py
@@ -0,0 +1,488 @@
+"""distutils.command.bdist_rpm
+
+Implements the Distutils 'bdist_rpm' command (create RPM source and binary
+distributions)."""
+
+# created 2000/04/25, by Harry Henry Gebel
+
+__revision__ = "$Id$"
+
+import sys, os, string
+import glob
+from types import *
+from distutils.core import Command, DEBUG
+from distutils.util import get_platform
+from distutils.file_util import write_file
+from distutils.errors import *
+
+class bdist_rpm (Command):
+    """Implement the 'bdist_rpm' command: generate a .spec file from the
+    distribution meta-data (plus the RPM-specific options below), make an
+    sdist tarball, and drive rpm/rpmbuild to produce source and/or binary
+    RPMs in dist_dir."""
+
+    description = "create an RPM distribution"
+
+    user_options = [
+        ('bdist-base=', None,
+         "base directory for creating built distributions"),
+        ('rpm-base=', None,
+         "base directory for creating RPMs (defaults to \"rpm\" under "
+         "--bdist-base; must be specified for RPM 2)"),
+        ('dist-dir=', 'd',
+         "directory to put final RPM files in "
+         "(and .spec files if --spec-only)"),
+        ('python=', None,
+         "path to Python interpreter to hard-code in the .spec file "
+         "(default: \"python\")"),
+        ('fix-python', None,
+         "hard-code the exact path to the current Python interpreter in "
+         "the .spec file"),
+        ('spec-only', None,
+         "only regenerate spec file"),
+        ('source-only', None,
+         "only generate source RPM"),
+        ('binary-only', None,
+         "only generate binary RPM"),
+        ('use-bzip2', None,
+         "use bzip2 instead of gzip to create source distribution"),
+
+        # More meta-data: too RPM-specific to put in the setup script,
+        # but needs to go in the .spec file -- so we make these options
+        # to "bdist_rpm".  The idea is that packagers would put this
+        # info in setup.cfg, although they are of course free to
+        # supply it on the command line.
+        ('distribution-name=', None,
+         "name of the (Linux) distribution to which this "
+         "RPM applies (*not* the name of the module distribution!)"),
+        ('group=', None,
+         "package classification [default: \"Development/Libraries\"]"),
+        ('release=', None,
+         "RPM release number"),
+        ('serial=', None,
+         "RPM serial number"),
+        ('vendor=', None,
+         "RPM \"vendor\" (eg. \"Joe Blow <joe at example.com>\") "
+         "[default: maintainer or author from setup script]"),
+        ('packager=', None,
+         "RPM packager (eg. \"Jane Doe <jane at example.net>\")"
+         "[default: vendor]"),
+        ('doc-files=', None,
+         "list of documentation files (space or comma-separated)"),
+        ('changelog=', None,
+         "RPM changelog"),
+        ('icon=', None,
+         "name of icon file"),
+        ('provides=', None,
+         "capabilities provided by this package"),
+        ('requires=', None,
+         "capabilities required by this package"),
+        ('conflicts=', None,
+         "capabilities which conflict with this package"),
+        ('build-requires=', None,
+         "capabilities required to build this package"),
+        ('obsoletes=', None,
+         "capabilities made obsolete by this package"),
+
+        # Actions to take when building RPM
+        ('keep-temp', 'k',
+         "don't clean up RPM build directory"),
+        ('no-keep-temp', None,
+         "clean up RPM build directory [default]"),
+        ('use-rpm-opt-flags', None,
+         "compile with RPM_OPT_FLAGS when building from source RPM"),
+        ('no-rpm-opt-flags', None,
+         "do not pass any RPM CFLAGS to compiler"),
+        ('rpm3-mode', None,
+         "RPM 3 compatibility mode (default)"),
+        ('rpm2-mode', None,
+         "RPM 2 compatibility mode"),
+       ]
+
+    boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode']
+
+    negative_opt = {'no-keep-temp': 'keep-temp',
+                    'no-rpm-opt-flags': 'use-rpm-opt-flags',
+                    'rpm2-mode': 'rpm3-mode'}
+
+
+    def initialize_options (self):
+        """Set every option to an 'undecided' default; concrete values
+        are filled in by finalize_options()/finalize_package_data()."""
+        self.bdist_base = None
+        self.rpm_base = None
+        self.dist_dir = None
+        self.python = None
+        self.fix_python = None
+        self.spec_only = None
+        self.binary_only = None
+        self.source_only = None
+        self.use_bzip2 = None
+
+        self.distribution_name = None
+        self.group = None
+        self.release = None
+        self.serial = None
+        self.vendor = None
+        self.packager = None
+        self.doc_files = None
+        self.changelog = None
+        self.icon = None
+
+        # script/hook attributes below are consumed by _make_spec_file()
+        self.prep_script = None
+        self.build_script = None
+        self.install_script = None
+        self.clean_script = None
+        self.pre_install = None
+        self.post_install = None
+        self.pre_uninstall = None
+        self.post_uninstall = None
+        self.prep = None
+        self.provides = None
+        self.requires = None
+        self.conflicts = None
+        self.build_requires = None
+        self.obsoletes = None
+
+        self.keep_temp = 0
+        self.use_rpm_opt_flags = 1
+        self.rpm3_mode = 1
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        """Resolve defaults and sanity-check the option combination.
+        Raises DistutilsOptionError for conflicting options and
+        DistutilsPlatformError on non-POSIX platforms."""
+        self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
+        if self.rpm_base is None:
+            # RPM 2 has no per-build _topdir, so the caller must say
+            # where to build; RPM 3 mode defaults to <bdist_base>/rpm
+            if not self.rpm3_mode:
+                raise DistutilsOptionError, \
+                      "you must specify --rpm-base in RPM 2 mode"
+            self.rpm_base = os.path.join(self.bdist_base, "rpm")
+
+        if self.python is None:
+            if self.fix_python:
+                self.python = sys.executable
+            else:
+                self.python = "python"
+        elif self.fix_python:
+            raise DistutilsOptionError, \
+                  "--python and --fix-python are mutually exclusive options"
+
+        if os.name != 'posix':
+            raise DistutilsPlatformError, \
+                  ("don't know how to create RPM "
+                   "distributions on platform %s" % os.name)
+        if self.binary_only and self.source_only:
+            raise DistutilsOptionError, \
+                  "cannot supply both '--source-only' and '--binary-only'"
+
+        # don't pass CFLAGS to pure python distributions
+        if not self.distribution.has_ext_modules():
+            self.use_rpm_opt_flags = 0
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+        self.finalize_package_data()
+
+    # finalize_options()
+
+    def finalize_package_data (self):
+        """Coerce the RPM meta-data options to the expected string/list
+        form and supply sensible defaults (called by finalize_options())."""
+        self.ensure_string('group', "Development/Libraries")
+        self.ensure_string('vendor',
+                           "%s <%s>" % (self.distribution.get_contact(),
+                                        self.distribution.get_contact_email()))
+        self.ensure_string('packager')
+        self.ensure_string_list('doc_files')
+        if type(self.doc_files) is ListType:
+            # automatically include a README if present and not listed
+            for readme in ('README', 'README.txt'):
+                if os.path.exists(readme) and readme not in self.doc_files:
+                    self.doc_files.append(readme)
+
+        self.ensure_string('release', "1")
+        self.ensure_string('serial')   # should it be an int?
+
+        self.ensure_string('distribution_name')
+
+        self.ensure_string('changelog')
+          # Format changelog correctly
+        self.changelog = self._format_changelog(self.changelog)
+
+        self.ensure_filename('icon')
+
+        self.ensure_filename('prep_script')
+        self.ensure_filename('build_script')
+        self.ensure_filename('install_script')
+        self.ensure_filename('clean_script')
+        self.ensure_filename('pre_install')
+        self.ensure_filename('post_install')
+        self.ensure_filename('pre_uninstall')
+        self.ensure_filename('post_uninstall')
+
+        # XXX don't forget we punted on summaries and descriptions -- they
+        # should be handled here eventually!
+
+        # Now *this* is some meta-data that belongs in the setup script...
+        self.ensure_string_list('provides')
+        self.ensure_string_list('requires')
+        self.ensure_string_list('conflicts')
+        self.ensure_string_list('build_requires')
+        self.ensure_string_list('obsoletes')
+
+    # finalize_package_data ()
+
+
+    def run (self):
+        """Write the .spec file and, unless --spec-only, build an sdist
+        tarball and run rpm/rpmbuild over it, moving the resulting RPM
+        files into dist_dir."""
+
+        if DEBUG:
+            print "before _get_package_data():"
+            print "vendor =", self.vendor
+            print "packager =", self.packager
+            print "doc_files =", self.doc_files
+            print "changelog =", self.changelog
+
+        # make directories
+        if self.spec_only:
+            spec_dir = self.dist_dir
+            self.mkpath(spec_dir)
+        else:
+            # standard rpmbuild directory layout under rpm_base
+            rpm_dir = {}
+            for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
+                rpm_dir[d] = os.path.join(self.rpm_base, d)
+                self.mkpath(rpm_dir[d])
+            spec_dir = rpm_dir['SPECS']
+
+        # Spec file goes into 'dist_dir' if '--spec-only specified',
+        # build/rpm.<plat> otherwise.
+        spec_path = os.path.join(spec_dir,
+                                 "%s.spec" % self.distribution.get_name())
+        self.execute(write_file,
+                     (spec_path,
+                      self._make_spec_file()),
+                     "writing '%s'" % spec_path)
+
+        if self.spec_only: # stop if requested
+            return
+
+        # Make a source distribution and copy to SOURCES directory with
+        # optional icon.
+        sdist = self.reinitialize_command('sdist')
+        if self.use_bzip2:
+            sdist.formats = ['bztar']
+        else:
+            sdist.formats = ['gztar']
+        self.run_command('sdist')
+
+        source = sdist.get_archive_files()[0]
+        source_dir = rpm_dir['SOURCES']
+        self.copy_file(source, source_dir)
+
+        if self.icon:
+            if os.path.exists(self.icon):
+                self.copy_file(self.icon, source_dir)
+            else:
+                raise DistutilsFileError, \
+                      "icon file '%s' does not exist" % self.icon
+
+
+        # build package
+        self.announce('building RPMs')
+        # prefer 'rpmbuild' when installed (newer RPM releases moved the
+        # build modes out of the plain 'rpm' binary)
+        rpm_cmd = ['rpm']
+        if os.path.exists('/usr/bin/rpmbuild') or \
+           os.path.exists('/bin/rpmbuild'):
+            rpm_cmd = ['rpmbuild']
+        if self.source_only: # what kind of RPMs?
+            rpm_cmd.append('-bs')
+        elif self.binary_only:
+            rpm_cmd.append('-bb')
+        else:
+            rpm_cmd.append('-ba')
+        if self.rpm3_mode:
+            rpm_cmd.extend(['--define',
+                             '_topdir %s/%s' % (os.getcwd(), self.rpm_base),])
+        if not self.keep_temp:
+            rpm_cmd.append('--clean')
+        rpm_cmd.append(spec_path)
+        self.spawn(rpm_cmd)
+
+        # XXX this is a nasty hack -- we really should have a proper way to
+        # find out the names of the RPM files created; also, this assumes
+        # that RPM creates exactly one source and one binary RPM.
+        if not self.dry_run:
+            if not self.binary_only:
+                srpms = glob.glob(os.path.join(rpm_dir['SRPMS'], "*.rpm"))
+                assert len(srpms) == 1, \
+                       "unexpected number of SRPM files found: %s" % srpms
+                self.move_file(srpms[0], self.dist_dir)
+
+            if not self.source_only:
+                rpms = glob.glob(os.path.join(rpm_dir['RPMS'], "*/*.rpm"))
+                assert len(rpms) == 1, \
+                       "unexpected number of RPM files found: %s" % rpms
+                self.move_file(rpms[0], self.dist_dir)
+
+    # run()
+
+
+    def _make_spec_file(self):
+        """Generate the text of an RPM spec file and return it as a
+        list of strings (one per line).
+        """
+        # definitions and headers
+        spec_file = [
+            '%define name ' + self.distribution.get_name(),
+            '%define version ' + self.distribution.get_version(),
+            '%define release ' + self.release,
+            '',
+            'Summary: ' + self.distribution.get_description(),
+            ]
+
+        # put locale summaries into spec file
+        # XXX not supported for now (hard to put a dictionary
+        # in a config file -- arg!)
+        #for locale in self.summaries.keys():
+        #    spec_file.append('Summary(%s): %s' % (locale,
+        #                                          self.summaries[locale]))
+
+        spec_file.extend([
+            'Name: %{name}',
+            'Version: %{version}',
+            'Release: %{release}',])
+
+        # XXX yuck! this filename is available from the "sdist" command,
+        # but only after it has run: and we create the spec file before
+        # running "sdist", in case of --spec-only.
+        if self.use_bzip2:
+            spec_file.append('Source0: %{name}-%{version}.tar.bz2')
+        else:
+            spec_file.append('Source0: %{name}-%{version}.tar.gz')
+
+        spec_file.extend([
+            'Copyright: ' + self.distribution.get_license(),
+            'Group: ' + self.group,
+            'BuildRoot: %{_tmppath}/%{name}-buildroot',
+            'Prefix: %{_prefix}', ])
+
+        # noarch if no extension modules
+        if not self.distribution.has_ext_modules():
+            spec_file.append('BuildArchitectures: noarch')
+
+        # optional tags: list-valued attributes are space-joined,
+        # scalar ones emitted as-is, None skipped entirely
+        for field in ('Vendor',
+                      'Packager',
+                      'Provides',
+                      'Requires',
+                      'Conflicts',
+                      'Obsoletes',
+                      ):
+            val = getattr(self, string.lower(field))
+            if type(val) is ListType:
+                spec_file.append('%s: %s' % (field, string.join(val)))
+            elif val is not None:
+                spec_file.append('%s: %s' % (field, val))
+
+
+        if self.distribution.get_url() != 'UNKNOWN':
+            spec_file.append('Url: ' + self.distribution.get_url())
+
+        if self.distribution_name:
+            spec_file.append('Distribution: ' + self.distribution_name)
+
+        if self.build_requires:
+            spec_file.append('BuildRequires: ' +
+                             string.join(self.build_requires))
+
+        if self.icon:
+            spec_file.append('Icon: ' + os.path.basename(self.icon))
+
+        spec_file.extend([
+            '',
+            '%description',
+            self.distribution.get_long_description()
+            ])
+
+        # put locale descriptions into spec file
+        # XXX again, suppressed because config file syntax doesn't
+        # easily support this ;-(
+        #for locale in self.descriptions.keys():
+        #    spec_file.extend([
+        #        '',
+        #        '%description -l ' + locale,
+        #        self.descriptions[locale],
+        #        ])
+
+        # rpm scripts
+        # figure out default build script
+        def_build = "%s setup.py build" % self.python
+        if self.use_rpm_opt_flags:
+            def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
+
+        # insert contents of files
+
+        # XXX this is kind of misleading: user-supplied options are files
+        # that we open and interpolate into the spec file, but the defaults
+        # are just text that we drop in as-is.  Hmmm.
+
+        script_options = [
+            ('prep', 'prep_script', "%setup"),
+            ('build', 'build_script', def_build),
+            ('install', 'install_script',
+             ("%s setup.py install "
+              "--root=$RPM_BUILD_ROOT "
+              "--record=INSTALLED_FILES") % self.python),
+            ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
+            ('pre', 'pre_install', None),
+            ('post', 'post_install', None),
+            ('preun', 'pre_uninstall', None),
+            ('postun', 'post_uninstall', None),
+        ]
+
+        for (rpm_opt, attr, default) in script_options:
+            # Insert contents of file referred to, if no file is refered to
+            # use 'default' as contents of script
+            val = getattr(self, attr)
+            if val or default:
+                spec_file.extend([
+                    '',
+                    '%' + rpm_opt,])
+                if val:
+                    spec_file.extend(string.split(open(val, 'r').read(), '\n'))
+                else:
+                    spec_file.append(default)
+
+
+        # files section
+        spec_file.extend([
+            '',
+            '%files -f INSTALLED_FILES',
+            '%defattr(-,root,root)',
+            ])
+
+        if self.doc_files:
+            spec_file.append('%doc ' + string.join(self.doc_files))
+
+        if self.changelog:
+            spec_file.extend([
+                '',
+                '%changelog',])
+            spec_file.extend(self.changelog)
+
+        return spec_file
+
+    # _make_spec_file ()
+
+    def _format_changelog(self, changelog):
+        """Format the changelog correctly and convert it to a list of strings
+        """
+        if not changelog:
+            return changelog
+        new_changelog = []
+        # NOTE(review): an empty line inside the changelog text would make
+        # line[0] below raise IndexError -- entries must not contain
+        # blank interior lines.
+        for line in string.split(string.strip(changelog), '\n'):
+            line = string.strip(line)
+            if line[0] == '*':
+                new_changelog.extend(['', line])
+            elif line[0] == '-':
+                new_changelog.append(line)
+            else:
+                new_changelog.append('  ' + line)
+
+        # strip trailing newline inserted by first changelog entry
+        if not new_changelog[0]:
+            del new_changelog[0]
+
+        return new_changelog
+
+    # _format_changelog()
+
+# class bdist_rpm
diff --git a/lib-python/2.2/distutils/command/bdist_wininst.py b/lib-python/2.2/distutils/command/bdist_wininst.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_wininst.py
@@ -0,0 +1,570 @@
+"""distutils.command.bdist_wininst
+
+Implements the Distutils 'bdist_wininst' command: create a windows installer
+exe-program."""
+
+# created 2000/06/02, Thomas Heller
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import create_tree, remove_tree
+from distutils.errors import *
+
+class bdist_wininst (Command):
+    """Implement the 'bdist_wininst' command: install the distribution
+    into a scratch tree, zip it, and prepend a self-extracting Windows
+    installer stub (decoded from the module-level EXEDATA constant)."""
+
+    description = "create an executable installer for MS Windows"
+
+    user_options = [('bdist-dir=', None,
+                     "temporary directory for creating the distribution"),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    ('target-version=', 'v',
+                     "require a specific python version" +
+                     " on the target system"),
+                    ('no-target-compile', 'c',
+                     "do not compile .py to .pyc on the target system"),
+                    ('no-target-optimize', 'o',
+                     "do not compile .py to .pyo (optimized)"
+                     "on the target system"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                    ('bitmap=', 'b',
+                     "bitmap to use for the installer instead of python-powered logo"),
+                    ('title=', 't',
+                     "title to display on the installer background instead of default"),
+                   ]
+
+    # NOTE(review): 'skip-build' is listed here but has no matching entry
+    # in user_options and is never initialized below -- confirm intended.
+    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+                       'skip-build']
+
+    def initialize_options (self):
+        """Set every option to an 'undecided' default; real values are
+        filled in by finalize_options()."""
+        self.bdist_dir = None
+        self.keep_temp = 0
+        self.no_target_compile = 0
+        self.no_target_optimize = 0
+        self.target_version = None
+        self.dist_dir = None
+        self.bitmap = None
+        self.title = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        """Default bdist_dir under bdist's base, and pin target_version to
+        the running interpreter's version when extension modules make the
+        installer version-specific."""
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'wininst')
+        if not self.target_version:
+            self.target_version = ""
+        if self.distribution.has_ext_modules():
+            short_version = sys.version[:3]
+            if self.target_version and self.target_version != short_version:
+                # NOTE(review): message lacks a space before the version
+                raise DistutilsOptionError, \
+                      "target version can only be" + short_version
+            self.target_version = short_version
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+    # finalize_options()
+
+
+    def run (self):
+        """Build, pseudo-install, zip the tree, and wrap the zip in a
+        self-extracting .exe placed in dist_dir."""
+        if (sys.platform != "win32" and
+            (self.distribution.has_ext_modules() or
+             self.distribution.has_c_libraries())):
+            raise DistutilsPlatformError \
+                  ("distribution contains extensions and/or C libraries; "
+                   "must be compiled on a Windows 32 platform")
+
+        self.run_command('build')
+
+        install = self.reinitialize_command('install', reinit_subcommands=1)
+        install.root = self.bdist_dir
+        install.warn_dir = 0
+
+        install_lib = self.reinitialize_command('install_lib')
+        # we do not want to include pyc or pyo files
+        install_lib.compile = 0
+        install_lib.optimize = 0
+
+        # Use a custom scheme for the zip-file, because we have to decide
+        # at installation time which scheme to use.
+        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
+            value = string.upper(key)
+            if key == 'headers':
+                value = value + '/Include/$dist_name'
+            setattr(install,
+                    'install_' + key,
+                    value)
+
+        self.announce("installing to %s" % self.bdist_dir)
+        install.ensure_finalized()
+
+        # avoid warning of 'install_lib' about installing
+        # into a directory not in sys.path
+        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+        install.run()
+
+        del sys.path[0]
+
+        # And make an archive relative to the root of the
+        # pseudo-installation tree.
+        from tempfile import mktemp
+        archive_basename = mktemp()
+        fullname = self.distribution.get_fullname()
+        arcname = self.make_archive(archive_basename, "zip",
+                                    root_dir=self.bdist_dir)
+        # create an exe containing the zip-file
+        self.create_exe(arcname, fullname, self.bitmap)
+        # remove the zip-file again
+        self.announce("removing temporary file '%s'" % arcname)
+        os.remove(arcname)
+
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
+
+    # run()
+
+    def get_inidata (self):
+        # Return data describing the installation, as the ini-style
+        # string the installer stub reads at run time.
+
+        lines = []
+        metadata = self.distribution.metadata
+
+        # Write the [metadata] section.  Values are written with
+        # repr()[1:-1], so they do not contain unprintable characters, and
+        # are not surrounded by quote chars.
+        lines.append("[metadata]")
+
+        # 'info' will be displayed in the installer's dialog box,
+        # describing the items to be installed.
+        info = (metadata.long_description or '') + '\n'
+
+        for name in ["author", "author_email", "description", "maintainer",
+                     "maintainer_email", "name", "url", "version"]:
+            data = getattr(metadata, name, "")
+            if data:
+                info = info + ("\n    %s: %s" % \
+                               (string.capitalize(name), data))
+                lines.append("%s=%s" % (name, repr(data)[1:-1]))
+
+        # The [setup] section contains entries controlling
+        # the installer runtime.
+        lines.append("\n[Setup]")
+        lines.append("info=%s" % repr(info)[1:-1])
+        lines.append("target_compile=%d" % (not self.no_target_compile))
+        lines.append("target_optimize=%d" % (not self.no_target_optimize))
+        if self.target_version:
+            lines.append("target_version=%s" % self.target_version)
+
+        title = self.title or self.distribution.get_fullname()
+        lines.append("title=%s" % repr(title)[1:-1])
+        import time
+        import distutils
+        build_info = "Build %s with distutils-%s" % \
+                     (time.ctime(time.time()), distutils.__version__)
+        lines.append("build_info=%s" % build_info)
+        return string.join(lines, "\n")
+
+    # get_inidata()
+
+    def create_exe (self, arcname, fullname, bitmap=None):
+        """Concatenate stub exe + optional bitmap + ini data + trailer
+        header + zip archive into the final installer in dist_dir."""
+        import struct
+
+        self.mkpath(self.dist_dir)
+
+        cfgdata = self.get_inidata()
+
+        if self.target_version:
+            # if we create an installer for a specific python version,
+            # it's better to include this in the name
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.win32-py%s.exe" %
+                                           (fullname, self.target_version))
+        else:
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.win32.exe" % fullname)
+        self.announce("creating %s" % installer_name)
+
+        if bitmap:
+            bitmapdata = open(bitmap, "rb").read()
+            bitmaplen = len(bitmapdata)
+        else:
+            bitmaplen = 0
+
+        # NOTE(review): file handles below are never explicitly closed;
+        # this relies on CPython refcounting.
+        file = open(installer_name, "wb")
+        file.write(self.get_exe_bytes())
+        if bitmap:
+            file.write(bitmapdata)
+
+        file.write(cfgdata)
+        # trailer lets the stub locate the config data and bitmap sizes
+        header = struct.pack("<iii",
+                             0x1234567A,       # tag
+                             len(cfgdata),     # length
+                             bitmaplen,        # number of bytes in bitmap
+                             )
+        file.write(header)
+        file.write(open(arcname, "rb").read())
+
+    # create_exe()
+
+    def get_exe_bytes (self):
+        """Return the raw installer stub decoded from EXEDATA (defined at
+        module level, below)."""
+        import base64
+        return base64.decodestring(EXEDATA)
+# class bdist_wininst
+
+if __name__ == '__main__':
+    # Developer utility: recreate the EXEDATA constant from the compiled
+    # wininst.exe stub by rewriting this source file in place.
+    import re, base64
+    moddata = open("bdist_wininst.py", "r").read()
+    exedata = open("../../misc/wininst.exe", "rb").read()
+    print "wininst.exe length is %d bytes" % len(exedata)
+    print "wininst.exe encoded length is %d bytes" % len(base64.encodestring(exedata))
+    # 'EXE'+'DATA' is split so the pattern does not match its own source
+    exp = re.compile('EXE'+'DATA = """\\\\(\n.*)*\n"""', re.M)
+    data = exp.sub('EXE' + 'DATA = """\\\\\n%s"""' %
+                    base64.encodestring(exedata), moddata)
+    open("bdist_wininst.py", "w").write(data)
+    print "bdist_wininst.py recreated"
+
+EXEDATA = """\
+TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAA8AAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1v
+ZGUuDQ0KJAAAAAAAAAA/SHa+eykY7XspGO17KRjtADUU7XkpGO0UNhLtcCkY7fg1Fu15KRjtFDYc
+7XkpGO0ZNgvtcykY7XspGe0GKRjteykY7XYpGO19ChLteSkY7bwvHu16KRjtUmljaHspGO0AAAAA
+AAAAAAAAAAAAAAAAUEUAAEwBAwCUrh88AAAAAAAAAADgAA8BCwEGAABQAAAAEAAAAKAAANDuAAAA
+sAAAAAABAAAAQAAAEAAAAAIAAAQAAAAAAAAABAAAAAAAAAAAEAEAAAQAAAAAAAACAAAAAAAQAAAQ
+AAAAABAAABAAAAAAAAAQAAAAAAAAAAAAAAAwAQEAbAEAAAAAAQAwAQAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVUFgwAAAAAACgAAAAEAAAAAAAAAAEAAAA
+AAAAAAAAAAAAAACAAADgVVBYMQAAAAAAUAAAALAAAABCAAAABAAAAAAAAAAAAAAAAAAAQAAA4C5y
+c3JjAAAAABAAAAAAAQAABAAAAEYAAAAAAAAAAAAAAAAAAEAAAMAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAkSW5mbzogVGhpcyBmaWxlIGlz
+IHBhY2tlZCB3aXRoIHRoZSBVUFggZXhlY3V0YWJsZSBwYWNrZXIgaHR0cDovL3VweC50c3gub3Jn
+ICQKACRJZDogVVBYIDEuMDEgQ29weXJpZ2h0IChDKSAxOTk2LTIwMDAgdGhlIFVQWCBUZWFtLiBB
+bGwgUmlnaHRzIFJlc2VydmVkLiAkCgBVUFghDAkCCjD69l3lQx/kVsgAAME+AAAAsAAAJgEA4P/b
+//9TVVaLdCQUhfZXdH2LbCQci3wMgD4AdHBqXFb/5vZv/xU0YUAAi/BZHVl0X4AmAFcRvGD9v/n+
+2IP7/3Unag+4hcB1E4XtdA9XaBBw/d/+vw1qBf/Vg8QM6wdXagEJWVn2wxB1HGi3ABOyna0ALbQp
+Dcb3/3/7BlxGdYssWF9eXVvDVYvsg+wMU1ZXiz3ALe/uf3cz9rs5wDl1CHUHx0UIAQxWaIBMsf9v
+bxFWVlMFDP/Xg/j/iUX8D4WIY26+vZnUEQN1GyEg/3UQ6Bf/b7s31wBopw+EA0HrsR9QdAmPbduz
+UI/rL1wgGOpTDGoCrM2W7f9VIPDALmcQZronYy91JS67aFTH6Xbf891TAes7B1kO8yR0Cq3QHvkT
+A41F9G4GAgx7n4UYQtB9/BIDvO7NNEioNBR1CQvIlgbTfTN/DlZqBFYQxBD7GlyEyHyJfg9hOIKz
+3drmPOsmpSsCUyqs+b5tW1OnCCWLBDvGdRcnEMKGNuEoco4KM8BsC+3/5FvJOIN9EAhTi10IaUOS
+druwffI4k8jdUOjITCJFsnzb3AwvUMgIFEBqAcz+c7ftGF4G2CVoqFEq8VCJXdS/sHDrLSIbfRw7
+dGn/dChQaO72+b6QmBlLBCPsjnQTGnOd+5YNfIsEyYr2IR8byFn3Inw6Lh9kQ+2w0VoDxUUSPsgP
+3ea+U5ccGY1e8MwUxuPO8GHOgewo4auLVRBExv/tf4tMAvqNXALqV5/gK0MMK8GD6BaLG//L7f/P
+gTtQSwUGiX3o8GsCg2UUAGaDewoA/5v77g+OYA7rCYtN7D/M6ItEESqNNBEDttkubzP6gT4BAjA6
+gT8Lv3Wf7QMEPC7BLpSJMQPKD79WbY/dth4I9AZOIAwcA1UV0W370rwITxyJwVcaA9CbEBYjNP72
+6I1EAipF3I2F2P6baTShezdgCy7dgLwF1w9cMseY4VkYaLATHShPFz4bMyvtlGX4hoQFtrRhexFS
+5PaDOMA+Cn5jL9vwDfzw/zBSUAp19HyWzNYNNOwPEMoA2zn3Py38/0X4g8AILzU9dciruTo3e0ca
+UGU0aEAKsIG8zJWwBXitrPuG25p0SqZmi0YMUAQOQ5prbDh2ueRQVCyrvwb30UclIicbCBt2FMyz
+/bZRDdxKAfqZGNJ7+9g2mRjJFXlQKUMKUEO7PbexagbBGLwPtRQ5Aiq4rnsPjE1h6ZFw+7pg7rFf
+pjTIjRzIlv9zBNSo2W/uNig3XxrwJvQDyCvYGSY5hIWz/HYQKksgHMDeL1mNT8H+tr2KCID5MwUE
+L3UCQEtT9p1hKQg3W+A6oQQbj9elLHjrA+quXCT937eGBAcRO4TJdAs6A8YAXEB175OtLyeXQAwP
+dBfGFI2d2VzxTthIBmncTd/YSzbAV1AU1GPYnF1b/m+2DAzQagqZWff5M8lolHFRAPmc4zYeaLyu
+CZVgakvNVk8wUELXGrHtZusxFBVIvuCPBFb0d7fa1llQUg8BGh04GP/TaGAdvAzH9BdgIzvwvnYK
+ARXTqShfmjlzpjyfnszwvm331vnywhAA2rgABgA9PI6tWejhTumUgQkQGrsbcEtSrAz8fQhXIQdD
+3Y6G7aF0ozxZJRQ8aHIiaDWd++ABBAB5pFUG7FBsLrT/Ko/HBCSAoQyfAPCgcRvGZ8Ea6DXkvBrs
+9u/WXegDQdZoQJAWaP0MeBvZHOChAARfrF7rJ7oCT+93gXgIOAEZW35wNfO95x3RdFzgKZ9tw/3V
+gz1sp6tCCHR1OW8kuq1WxyQpqEGhHbh1629Ai1AKjUgOAlFS21FWs3TvWUZEozAsDPwN0nBCXvwQ
+3vCwq7BFiMPXKNaswRpotFoFnsKS9FvfNt4RK9ArflIP+CtV8GNSs612+pkrwtH46xW0xw3LjcgI
+hax1g3wC2K7Q8X4GuOjAw64OCAIMxUH4+30FuLgTuAwRnouEdQEXrgwLIzdOV/2wu7ks5QK0JCAT
+iy1/LcIATWdY2PQrSLYVRS4ov91utrv+C2Y7xxQAwegQHoQBaYjwo062C13PzhB79gsJC9W7MKJA
+OlNoZgwHWbiAV1bbuAtsvOnaANkWAUZIixMvbZ0Z1ViJUxB0EqqD7WADRUjCaIYZiyvQvbvDdDSA
+PWWxUy/GOpeMX324ciZMFRw2hpthIWk3fFHkkLGFz0NAKSBu4QCZdOtKRBc8H7r3ahBoNB5JtMMd
+7ANjwehmSV/TQ8OI2MK5ILvECE7x11JW6ChBkb+ojjtkG9v6aO+j0wjwYJsVpIuBSgqgjgQWwdJ4
+dnav1hvsGGiZeLs+DtNcss2WNFNfKeSKYMh0g0BAWCbTQcaOb1pQHolo0Eps0LnuZKMEHP4bHHPc
+sfZ+m90CdR//NSEFIrpDiJkMrkQQMBDLXqHZDFDrQOvNS08Da5c79u4NjegmUWiDj0MKKAu80RDK
+x3wEF5MRdU8IdBMYrv93BBMcL1n4gg4KWfHw60uMQzIdySz9O/TT7AgfM/9XV6do/i0D4dbChq91
+BKvrIANXYJejhDUf1PmjdNZpgcRU/oHc4O14+5Le+FMz23cZAALxUHPSEVigKb38XBhgIUfthjYH
+NrgMxFMAKmyGZn5TUI1FmMc5oyf41lkgshwx9R+FZkeHPCO8YUU/C3ywWxM7wy90GDgYP43M9mjs
+TZhR8nLeNpx4NhfoU1AvSP8oc9bWRi32wxBk/gHubJ5tcNcc6Cn4/vxys7O1kmVi7MdmS9ZmIKrG
+Newzy9be8P1OABbwDAiahtgbEB8bKXBZLmDD7jfoaJp09xhYwT2w/PKEG6BYEsGETLxGUP2mg4+W
+WbboW7iu6fpZpV4t8QKsPaZjlShvuGplXQIEAJGwHUSYcs7EaOwQ2+B25uROEsBhjA6dHbBOWZNG
+ATXr2dgGkyXkFiBomlYGkwO4c5TuH33JdGybxUAIPTHsnqUeW3k9iZ/rAxJjIQyLNVM9PiT+NL7M
+iT1EoqiAuF8jFSjkC58XC1ZjZ7AgHKAUE+I1paQCYZK7nQvP0e0jaKS7U2ako2gML77fYiApQKAF
+xDggpptK7TMCybGAOTW8vfBKBdyjcEiTODpyd/uL2mcOoVke1KEZE2hs0G5WSGcaBRV1iUlMwiRd
+EVpbPGlzpI5XdRkPVs906/BOaA+sNeg6S/3bLyZw4fiFVQWDyP/rclNv5CY2IZhg7qx0QJgHSXPS
+bdH4Coz8eOg0J83waPRYVnczUWB1o2jAhIkJ2HRbiX8C+O70D1mqRCthx+UQsblUHtmPnF5bX+lQ
+AWoFE2+htkMSToUFKOTZlQYG2B9aq/8EQev2D7fBweBn2MNWuw2zMR5SNlPQKb1rXbIfoVZVEEEU
+yVFPROLthT2EkQB1Gfe4u9742BvAg+D0wGOJ7/82/wURSMYPGGjYdG0dwB0p9/+UJHQv/3QEJrAC
+LXZvKjS5LDCb2pgsywOSEF4kuGUzAogseZfLvX2LdgSUdYSLUvXzhTcCPP2UANTc0AObrVsQEAD8
+VQyiq8OLbW0xtcR9RnbDKQbpAJt0e6yb27ICXiEPhf6hZKEFhc+T4JmDPHURS05kOGBoXucCVjub
+9MgGNIem1i59LdmpV/QQqDmtHIMXnJNQPScR28imd6wZajAbtTR1CPZ00sButPRzinDS2dfchiAg
+ZNZqfIHZHBUNYE5H7iTwSgsfgHU2H8avYH9osuuWfWlvWWdFQibs6xtXCCaxlKuH6nMIgcYReBiT
+eD+JBmkYo10ikkYEAyJev4VLS3wyVjlivgp0NRaa4wuCTQhQUbwFgxHe7GxUiYyGFXBWkSWu5HOQ
+MwmxXY4GiB0olLqvATudK/B5ElY04PG6mE0j/FS5z9QhsiFAMVa8f6Xm0gFe+qCOPdMHAnQ4GO7F
+jWrE3HU1BFO3EiNxtF0GwQQHdad3Q+zNxwUq8+vBiT5l08RJucNRMBwC3EuVaKWaX6Y95LnONTTW
+Qx8Km8gbaJzMnAnEIuvbAUAD5Sw4HDy/2cCd7alLiP705NsZLPIMdw0ISWwNOTg0o6qooYTC7Hsj
+gGgP0wVipHNn7th2ETgFYylU9gyzQ9EXJEYswjB3rF1ozDART0etzcVIKkQ/ZSVZRMrVGNYfJwPj
+HTYZFbg1n5jAbALIYc4JM0jc+6x2vBRbYe2ssPxQQVxQAIzU7OyJVwBdGxM/JFL2SpRff4N98AEk
+7CzYdRw9jHzChGYOaApVIPwQms0mbI4ZuN+RGvYYQIkCYHZq/LM3zCwUZBRpDQQbkEBXYqYwuSQP
+hHawLHZUGbQ4sS48HZy0RpZGts147GR0QLDSO8w1/gMjpRuwxw8cQL7w36pWpsKwsiNWonkYDWUy
+qwyDVjwbsXdyNfxsBTwgMl5Innf0ETYL3SvWGzO7JLReJoh9p3TByllnZSiLJ8F0SWF1ZamWeMBS
+knDBllBgaCHkegsGcYRcSBYEXfF20yG4dFRqC1kRjX3EpRvdLgPzqwb0iQCrqwDbNvahaLsMqxqQ
+E4wbv9bcyMEACO5UwDCJL7WgFYUveUh7O7CwHNwcJJsb2HEWB2DigVsGzGv+1qzoTOdcaCsSbCAT
+nlwy1kEZ9G3LHM6TS074buEldOdm2J+JjlyMNHyYy2baHJgFlCwFrIzbsv12f5Agm3W0AryoD6Qo
+QvmgBHco2eINE8uE9D81aKPdxFsaa2y9GVcUVbvow6Ve1hO+yGLAd9+ptHs41xgQaoQqPhimztyB
+iUoTQVWQ2AvasrgoDr0n2wvabCM9xSisjtTBJnstVCNonHSHTsYV1aOaFIznm6GEkEUKaDCi30g3
+kHxbgHSyaLB9WDMZqhkOUCGigSaZD6IWWlWZqw0Xr6E7DW9b0QzGc0A2E4AH5BaPRygrKgAQLfHW
+1LdWGld0b7wQFP8pDG0aZoP/AnZhl7e3v191TopIAUAIMHxKBDN+Hm5032L/lwxydTtAxgYNRusz
+BgMKRk9PxBIOGqfkJlH8NXrSOXw8CgsfT4gG2qJ/M9QG6wWIDkZAT72ZjNXbFXhrgCaoRiiPwqEk
+wbW8qOn4yI1WK9gD3JYVQADkFsCxYeADAH+xIYkuA4++P/CAnVzhH2YTlUxghdi7CHjvmq0AmyL4
+5CX0ZoR52Op3Fx/8TdxdHYOBJnbY7yrQ02Y4kHdWjYzQZRKlWKLtEWlktNasuQQLLUauV8gRDbhi
+cbgKEhLtTAQG+GEhni2Tg+OTVaQEI1sbYIhTuHikRGRzO36g/VR4xjoZ7AsRUGytZD07FfvtRNk9
+O78420hm4BA3ETkcEoFgL5gQRR1o8MKxngepakQlqF5WNcDBSMYiMTpXXXMjXdSeThxQty3cZru3
+U1NEKlNmTdgfps7APqlDFufx7bXdAV3Wag8YoC8KW5ztUMYNZLdgNvdtI+wsyAjWLBNcujiEdzUj
+U0w0hZZ6fWpb2PfYsrJ036Db2UNqXVMN+P8IuSr9PIAnAEcsTOAcskTrA4AXqQhTSzwISKVEMgQU
+0k2eYAhpXXKUHDI9LQjVKzZSLTpi313oJdF1AjmhmA5GgzgBftx/PvgQD74GajaUWesRiw2QCcdW
+re4ViwmKqlkIrwYCO2nQVsBeT3w0kjxFdBSObaEaG7IIwLX4AhNcbpT0CV388ArGmtrRbQlT7+R5
+kD2LG+AsSbXqCf92VR4/Y+xnzB5oyBsIrFk7w1mkprtUFXUWH7BTaUzqI8HXH95qKE+Rdu83cDv7
+dQtooCIZHpiLNWl6neyii2gWs818gLM5Wx8X8hChcFkMBAMVQw4E94Q16xoIFv7GEmYaDUBO68SA
+pDWiJMT7SwBSGOGtwd/TiQSPQTtNhAl8G4MKHDMHvIPDKFNssySJA5nZjcathVWxTXIFgzNcJHRr
+neoSMecvaJhMZlORoox+ipB5ajO1hEwmhT+xuV0Mnlj4uk+OHw91LfEsc9wCSvjxXxdMRtwe/zBT
+ZISubIxgOxSLGNC2PFj04fdu1mo7x3VFLhwzuxv/AMfhofONSn7YFgEKSYagmS+2AmC2UpjfCI85
+sMhmMfxeKIEc2AF0GngQwF6ezxAb46L060Mp2CMD8vx4CA/rICH2yIEc6Ad5WYPqIhcKhUula/P+
+ve+sMX5YnwVkFBG0kTCgs2fs/LhmBhpRCl+dQGCpFlZuePpLvpl7FOsbFh8ccME3CA+xQPRcFmrU
+EieCRZgSwKF0hWFqmQUMnoNYXTpZw1e+bQBiiwPvVjT/VaAOOPExHCC6WaPGquGZaYMOCHpLtOt7
+wI5fEGKi9cIhMmCW6ILEo2jTEBdaYYv0iA2mKCEHOZqOtW8sDDAOfRyDBz9/SPACYB4fQHQcagzp
+cI0Gc2e1MFDClSpZABKq6FQDo5ESQUlAJM5fUaKrMToXrTPRCIDiqaX4iMACgz0rTQkoTYBkgCiA
+BLja+wxXNAFLdQShG0wE5GIJgl/XljCQ2KiECAhGAJbK/TeLVQgai0zAW9HeZCtBEAJ2IoE5d9TG
+HV6NNBAIw9s+elZqbjLbNBILtziMrwBOo/5vUfLZDYvWK1YEK9GJFSC1sVu1K0a9ELtX/gzUkkS/
+gIkBK34EarFGd5Yoe1H8m4hWtmYld1Lq0hbajGugsxM/hBs2dHbICALQmiLyeQnYvRUISAx0LhdX
+UEkjxBcEqsyG68bCcFszbEWiRt/+Pw7MzEgz0jvCVnQzi0hLynQsiUL/L9tQFAIIGItxDPfeG/ZS
+g+bYF7C7/Ykxi0AcIBRRPyhMcDPAUoWvJ7icCAVHMOyQAH0JZqED+PZ0OotGEzMXJEtD7bYsPRQN
+ClbmNgjptnhzHhooUFHkJA3H+AhgbgAAVOJWsDVfLQMp94oBDVBmYRemOsF/597BYbvNGDgK3ILA
+O/d1Ck3fYsQ/TmQgiX4YzwprfENvYCDwR7x+KDl+JIQOcI1deSQQSIFqGGGEpGubIUMniYY+/PcX
+fptMJYl4FItWF8+Jegx9DLR1b/9+99nHQAwBePkIfFkED39UH7h/YWv/EdPgiUoQUtdRN9ob0lD3
+0gTu0/aB4sBGZVJ+KMwZHYL/NXhBT1Y5ehR1DyOxBvCWbg5PC4zwZLNWG8lfuPppECrPE5ZxU1UQ
+ux3Kpc4EBHYK+QOhE4Xmtj4AE/ADVCOC/fsOevoEv/tzlcNLvQXB4/uJXB3w7aEZiQjIDQ+HxBok
+NFvh4Y0QOBkEtj2ISbe2o20eiQ3fQYsvBYsO9RvfxooRHAQ1FhAEg+EPQuDc3yixLhZ0FccADVXd
+fXfJvGwY5Hpy66Iii1AQwenJgd24KMEIXXYYJNDztbaB8CQuFwW9BG+7ws0RSDPJjmYIQHaLXhzY
+HremiUsGib0fAxOJ93+Jt3ZDBMFmA8H39YXSdCHHA1Y8Xcy9lNHdX7hoZ3Mbn/bBICWBYykHtByx
+YSYc2HHeKrh+2il8X6Ri/XUYZigE+6MCVfNaLLa1DQqCApIiAU8Al9rsaQJzoDONSDbnIm0CUh4S
+RFQMyTfbOvkL2Aw54wh7wZx5LQJj5O3hzzXemkrcweEYSAvk+Lpka0k0CfhKVqEw1m6DSEKJBjoc
+FJAG7G9dgUg34hADyolIOQpILpJLvgibLblkC4Q2P5Y53Jg5SDQSNoLZDBHr5TNZ6QMhIAegpDvo
+h2xoAnUJi8dlwggOzrllp2dyamN3JrPQpBZQR27HAQOWEB5gORZITzfOlqXhigobUOHRPlaTHCBH
+AgQO0mGHECYgiSizEkJKGCEfstdsF3hOMPMGuPg7hmlYRmkskHAsm80KACVqW5JvKwD9DEMBKf3w
+i7klBjgLRzTZLLtGAgO0Nu4tN9Msm2ZotDU1otfLLJtlETZL7Df4W4sHksB/01fyKgGH23o8iUNC
+rcXWFrIEDwQFTL46sMEb60coUqZXygqcut51BnUNPldPKtuNdwTqKMfyAUY0AjBsLQhsDjjuUQgg
+1oUJ+HQOMbLQH4FkbbdgRzDAw9/8nWsQX21qqmRjIFDiixK+SfbYjkYXcsEHTyhcSYEDrgUYGl/Z
+s9IFQ5d6VyiMkEXuMQbqw3JAc9ActrNQKCgfnyusgXOnUR4uojZ1qxokAiAD2JCYLbweiV4svDjI
+BHrZi8U9qgCD7NBadB+VOFNvOFX7bq2xNSlDsmsSSC5LNLb4ZgJeEDBWO8iwVNeWf9cKFURzBSvB
+SOsFLAce8Ll8AYwDg/gJGQyFLDDUb3BAfhiD/QNzPIkN15Oh0JYNxuR//9/2SIoPxxRMlIvRi83T
+4oPFCGML8kfae9d1MYk4iS9yzusEN6+mFvitmQeLyNHotQFyWeCm+4lLGHeRY1SD7QMZAWx6/+3N
+HAfB7gPT7ivpP7MpvirUUR1BSFFSFyd2t41xjQ0wUQ44Us46el3DRxwkXCE0+NpRPlDi7Q8sUhDe
+EDgcOfMV6BSJrrXvXMBiZuxYcQZhFHWHN+QD+P1YFHBuXbzOIHMsqfr6oAY9ly3QP0wsT/Z8QOJC
+m8EnAPLUiovOFnhXaoLhB3LqEDPRr6K6tbf/OO2LwTvF+gSJbFxLJgFbYthBi4kD6UzSF4dNdG68
+KsccBYWdFqsbvmt8GkQ71nUjv4t7KIsUvmu8GYvXO7EVcwcrwkhX1x3bLmQr8nOJNXVntExB7XCh
+QkgE91M0KA6s+2JrB0cwatajTDocbcPbMSvKSf9LLAcEkJHv9j5VdSBi99Znm9x88k6LzsKLyKRe
+oWGYcLALBclp6N5gdp3CO8EFwT4U+yUKrUQwJIEC86WLyi07PP6CjeEDK9DzpNpcJbvRtrdEA1IN
+S10V8CsMlaEzXRaJeBwpwli1uf5o/UMYkwcqOZDHGJYOczgyDxnjKg6S0iX/bLE5uj8lyCCYH4cd
+3bHQtwbW0DzgCIH6oGxdwc0FE/IFegV9H82Z6BtGjYQIAjp3A3w2ztJIKPlQYQyNBSxgvvEOSA7H
+QwhKA0bT2PvrCK5xU5IIEXm60OgKg2Itc2hZMkymkt++NAYDJToiLSwITrGL/Nh+bIoYpEsMxQSR
+YQjDXKE1CAOGamdk74fdcpgwuBOhyHMhPDRzhffaxzFpNaA3IPSl7XRy33AaJG9DEI1TNmdosFFS
+NFfx41BdAzibUUAsEPCFWMi2mSH7COYFguHDV09l0DTiHzc1byfezgJdD4N70lk76HNr78ejM+NK
+OwXr+vmE5t5rSpj29PkHl441N/ou+c2LyUCOXHsHf7kUI8bmVMEBjeY0u9tq0Ha0VRCXNHMbySy4
+1nYr6tEMRYQSiu+2wzVxQKQ3L4AjErnNeDy+0HQDM/KD6BLNWdhfcpcrJPgLH8ALO+lzO5lg3QJ5
+4AQfMJ1cezRy6cnsfHf2Gv3uVYsMjakjziYOFDXea7Vi1JAb1+lcRjgVHOGMCh7c6936A9A7Koep
+ddMqoUaJpTkQ6ZnwfHMxd4KTFQ3aHYr86wIP314qAKgMQUiZj/x19XeJTBxIaF56goWY+kC3vRVA
+JCZRUECNrWMzZt8JLCRRElI8E/DXcDY7P1FCBUOByEYBa88UZcsO86wJB0AGD0WMJHfS9DgfFUwk
+CmuzjyYZCCU0z3c9bN/TgJ88ICsceVDluWMIpE6EVwQEPMC2kAYpSA9zLVoLC15rPDCX2PF1q4sE
+0CudOANWTEGrOXjozk3urdGpr+dRXEmxe12JZp5AdFZdtlQDLl6cAB0nTWcGicI+DSMYsZAmDgUp
+zCEYBuZrJYnSACzjYC7JAKGdz4smttt6bWialtrplUxRdxJozb2F2hewkMNuh1yhMwYww+DNtHFo
+UVxh/csz7blZ4xhgez9VUfLkksF++Ndq/SvRwwPqUE5LWLYnu0yNMYtpOVHQtwibaysBZpLqLxVS
+UdosgWw6Q4Uyrb1l32rHQRhAg0tGQIYw92NISFGJeQRGRBg24cAREUsg6LOsmEVwjfKEp4Qjgb1B
+FVLIxlQ+Ay7eysQAzjkFhU58QQSTiuCewb3R9wPug1FP0VqCKYFYuEXwISwYE5/Pnmr8UNJQCIGU
+eZAcQuEdUozPK44bCaRAGJ39PYyy2XUGW6VPUesYbJGoOtciaJTYMiIkFHyeXasyRruRUgbhwGXd
+UAY1z4J7CWkA2v6BGGKFTP1fLaRzISRMEFkg4YDsGFKEPiOFPUIJO1w9Wyl5SFBSpgcMd4fX60Cm
+ZudBUFZT98hyQnRLU9F0N6HtI7S5e+ggNy6JVgR/ZFuq/FAr1YtuCONufT7GAfKtZggYMbXwPTJD
+LovHTFZVxWmyNWhjQ0tWmRCSXgo7nYQJAemYoJcNQSaBIRiRU2PtqwlPsP5FQ0g3nsJeKkP/1DkU
+zTpyuVxuA0A7azwaPQE+bJbL5VBA90ReRaI4OsyATbNWuDlBGyADXFLvDKIU4AAXuFZXGEfAU7hS
+WGndi36vUS5YRigYDRgIV9gBDnBj6U8xSDUWt7vvQM+AG911CuzCDOO7d7HAXPnbD4bvEVWB+7AV
+mY+7+L3DcgW4CCvYgg+Moa3xW7Ti6MHt22EQihaDxnL2LgobrFbxA/kI8sghhxzz9PUhhxxy9vf4
+hxxyyPn6+/wccsgh/f7/lWCD7QNNvGSf3rbOQFkVFhJGE0h19G3sbqWxDbnx8vfxTL93q233CIs1
+9/fri/WHEzEOLxFbXRdbCV8LwQgm+DEJn5UIUG5QtxDKTdZQHwhGx7s0dEwEww8fHKHRFC2pN7mK
+3nFXqE+jRYhQEFoMiEgR3EFvBHUAAA9IGMNXPBwG3xR/IHbBhKGFzgNGkvCiLUFjVsjabgzC1cLm
+wQw0wX7FB9unabwQwkYsB4kzxQ0o0E063/4GQoc2fmzoT089HBqztiPQnc4QCgqSbGr5g5EoRnos
+iX47Swts5YwpKyJ7rfktldpWhYkGZdxVDTu6LQpFlFZSIk0RT1XdU8eAEHdIfOrIo34zXSuZHLhI
+nSgNGWsFrkCumaMw+r9GA3KldBNJ99kbyf1iqzcZAoPB701hOJ2pVqLPZmMQuBK26q2xYkWyRVj4
+c0TnYsXDQFwEug61AV6xi+0wALKOnPuSe8/T4NAAxwgLyDZ52eiu/eAsQT8KLHK8roVjS3Xf+CMg
+CFbISRh49CsEJRTT6LiCS79GbsFFK/hAigE0WyTexRaLSY+VCAZ0F3rMr6gQdNXgD66Lki7WSq8F
+Ih8C2qK6bUCvRcOo/+O5IWcOJx8Hgr3PHNLaQhqvSNz5hgXsedDn2Ahv5uQjvosETLlNBANda629
+yM6tkbDUcgO1qDYz19OOJBgMzfVFzGVeJYwiOZYDRAFpmIRkDEQEhJsFw4XwUmUMjQzBiAB5gBBB
+2ALkkEOGDAwFQwEKDG9+Azfg2IBrFdV1A8IrOVOTejdA1h8NrxDt7SOWsVoBVeL5Uc2FlywtoLTZ
+Po51IT4wO8ERPTip1FQtKQz7ceqIsAjrD39nhtIkShsUUoVyYpIhMzI8DG1iG+zkBl1jYSJebons
+kI9intsB90kYIZBC8wmISv8R9xQ590FIO1AIZgdOYHN0PAxmSWHPKAIb0mA3sADjk/FRgeBNCogV
+YWDvCkJIRL32LQNPwM8UiysK4gMFjtHHQx8rzRMXJ4mQNRGq9BTDIPDNdEoJMBgofEBiyI/AG1Bl
+av0rzVNtrjA3VlBJEOu08iALlZiKiQN/74eyPoP/B3YVPzyD7whneK8EkUyJTDfqUFhCULaLstgx
+F7TqYrNOIDr4S5neK21uPPlTK/2La0ZYoTdk74kLW/4kEwmPEkEBi2QiWzv+s9xu1ZC0vnFJA0xK
+0C6Xy22OSwcETCJN905vT68MgFkt3/nokUWQDCBRU6djoXhsIPcTdhBVN4NEZ9jbdQnAj4+OoVtZ
+dRyyVlXcQF0XWY26U+sgUlUTla4FowET9LbaMtHcotP+NxoTtX+JW1NSx0cYdI2KV/jtXYo0XV5M
+Hvt0BoN9i5puo5gMH1C+wmGxsJgwKc+7yv09gezwoowk9Ab8tCRugX4Q8O1Xz0QDmqZpmkhMUFRY
+XGmapmlgZGhscFzAm6Z0eHyJrCRvv1BigzIB735chESNRF2gS28DQ0qJuu05CHUf6L/y1XEYgZRu
+wIkpiSrGFl6EFI8anBe5G+ipLxGNmDtDOSjQDeALPUGDwAQmdvNuPD5tdvnNcwaaYroPG/0f+yu0
+eDkudQhKg+4EO9UFO7/Ntov6pSx2JVT6vlGJO+7t/8bT5q9zEo1cjEQrM3glU8ME0REZIrTBcvJv
+laOFF+hWuBwMRI0DK/G6QHm6k82oEBGiA87liPe3NvgsC/ZKhzPbA0wcSEkW4X435YwcF3Xv3T3I
+QF8xi7TN/wHb4dYcFYyEHD0oPN6OdXKMDYlceEKJERIjvmkoexwIQzvZcsVXNjJ2u4vf90KMFDWU
+iSGmocBpXQNxJHOO6HseYcffABJ8xG+nxB08D4+BAjM0hyJRaGWHDbm3wHuBCjtJhdLsKz4gwfbe
+Nv07TQ+OB2AUOFhys8jWLC34bDPR/y+6OAPfK9NFA8871/AmdNQt0RrXHCBJy5n+nwS4jX0BO8d2
+J4PP//fAbViiGi3HbhhBBLtbWFiufb7FbeAfByvHEmPLUrRy7aEkvzvnyFFftouxfAP4gf+ITx/O
+2NjvJiArLMIvjajewd6UhNg2iTgTYden3tkqdDhDiEygtIQsmth+EdbLiAUxvca18Osl14tK/O+L
+9dPB3Y2Fb0Mr8IkUO3Sf6wlKGIoN7z0o4PAGj//tDUfoWoxuitAJHCrTiD0Db3y6MYsIDJF/cgfG
+Du+KbmPA6583KQyT8XMUgXYX/qP+yRvSg+Kg9mCIcesgkPtNVyAUweYCihQxDC3erR1sgMJLNDEh
+sRa+aLkE9g6HJEe62MTWRuK8tDsVcx7Gb9Fdt8UAgzB3iTmNPNWkhG5nOHEEhh1y5tUUel9wZWKN
+wjGBhcJ0CLQW4fYz0NHoB3X4WEoO0UZoOChgjByNBe8K7YMxJE8j+ss6XxiD6AQX7Ee5T4gmK985
+M4xx4lgII3XcdRXIqaEOT0ogK9LCHKePj4dSkEDrwZowvY1XHk6RG0KydFff1zv1dBeRLAF0Tfu4
+gLUWAQwKhMCwCCQPXx7LA62jYThoEncGkAZkGAtfNHA4gWY0VWQY8FaPkzRS09hoGGPYQe4CwJhi
+BBVVUowb1BJwQIXTRVhEIeAk80DamWywTChIOHtGd24nFkwQZFFWHlu/B/aoUlFLdSQngzoWCAAY
+gN+B/Wp3Ez8sJ/CWHavkT1HIhy3ZII4e+3UfHlkQeASO4yP8dMhekg8C4C8jwM6AwUu8QpglMJic
+RSMvLpAkD98NSPyAd4No3gChTAq7m3IXnIkCEJTHAVARxwJxOuDhUIxAyFHtDGoAG7Bja9d7wNt+
+nNp2/cF3dgMVLBFE0PVGe+876FjokYatwzcyIPcI6iCF2kr8VhQrxQPV5jBWllSIX4I4cA6LSzxV
+BT1uAm42QzwSzYv3pKk+YlKmWcqmO8e/cgPFF0ssA/2iCnV+0bmtakFEKA2RdVvYnW4fczTqmivu
+nxCEkOXkCldHV9RYBzlWRzB8zfdaiy1e+IR7guSMnHpRsIphWr5SXTAoVIlRcjUYvXjBEl4fzBdu
+Nw5Z+YtpnFEgO3EwHLtRCzc4HTvuUUEculzUPzlzCSv1Tv7OSSj3qqUxzYE2fEnTTbQOHCwgg/hR
+J5pLPCKLSUEKKLHVEYulyBpb7O3e6QvWRx1y4liiVzDciL/BI8rIihzOjTTOLISOuAK8c8IyTgHT
+6gRnFqB1Ecc5BL4j3T7AD2sMnWBeBDYDy/0DyIE4VXTHg+MPK8P2FeiCNDFODavLIyaZSLakDw8g
+yJQtaTScMTNlI+QFAZTPLuwBeDvDcytZGIP51X4OaOfVh9dBJi1nS3yXcgc8WU76bI7WqM9wwe7H
+9RAKuKJI15QH3+BCvEkoETv3cheLGnywf/dFig5GiE3/BoPrAusB8O1YI+sncSwfO992Ezv7WyuL
+HRwARUZPdfYYKBCWbGcGS57rGb8GCvT83AQZcEVJgWGrH7EjEnI6DnIz+WrrHLVI2LWcEEkEbY5f
+FRN0K/M+rPAR4Bfqsq078w+C3CcCzc22S9h0LdnFZQV67O3B6x7ZcwLeOCv5MzE2xuqNFM2awsQc
++t5BXIIWU0YI6s+JPiuskisUZ1YNVukAe+HUc2IgdFZX2GxWpM9a2712gFwocj8QlWoy8mb+9YhB
+t7adaAMrQVhAizE6eC+xQTl3X4lBZ5r9jeKmd2af/yU4fYyMjGwFPERITFvxit7MzFE90wtyHPt9
+C4fpCy0EhQEXc+xNJW5dmMQMi+Fgz1CpMCPbw8w9UFxFfZcffGr/aIhTEF5koaFQVNx6S3QlBxho
+U7lf/r+lZegz24ld/GoC/xX4WYMNeKM/7Si2swZ8FPy0Dbh35Lk98QgNAGG0oQQMd+8K9wCjgCjr
+/TkdkBh1DGj3z9zK/l1OCGEY6GgMcIIN1PtsCHAn4qGwP/OU280tUWCsDAmcUAOQR7RUNKBcEPUX
+gCFfBDIATqEUu79BfW4wxYA+InU6RgiKBh6Qb/s6w3QEPA3yEgQgdvKLu2bb1NBOpLDB9kXQM+ft
+rWoRvtTrDisgdtgsFS366/VqCliV62jXoJ5Uih/3kTMI9IZfGGtF7FQJiU2Iy7C94OLcWQou/3WI
+HyAVjYyNYyQFHAxhdu2NmAMELC9OEi4krLCsw5IA3fRgqJLtfPBgAABpvgKpVBUQEZqmG6QSCAMH
+CQZpmqZpCgULBAym6ZqmAw0CPw4Bf/t/kA8gaW5mbGF0ZSAxLgEzIENvcHn/3337cmlnaHQPOTk1
+LQQ4IE1hcmsgQWRsZXIg7733ZktXY297g7733nt/e3drX6cTaZqm6bMXGx8jK6ZpmqYzO0NTY56m
+aZpzg6PD4wEZsosQJQEDAiEZkiEDBGWnGZIFAHBft4RZskcvf/eapum+8xk/ITFBYdl1p2mBwUCB
+AwECpmmapgMEBggMmqZpmhAYIDBAYMhGtsLn18eEJCzhBqerrxnkW8KzAwsM0QBBBg3muqoozDWX
+zgMAv12AD0NyZaVEaQZjdG9yeez/n/ogKCVzKY9NYXBWaWV3T2ZGaWxlFbJ3bxYrEB1waW5nF/YT
+YJYQ+kVuZCAZwoK5/3R1cm5zICVkUxcUYGA/WBNJbml0MhjBYNU9NjNcHIywjoBSV4iEB8iyGWx8
+D3RocWDJkwM2AC9McUxxu3/7V1NvZnR3YYBcTWljcm9zDVxX/2/tb5tkb3dzXEOTF250VmVyc2lv
+blxVb+3tl25zdGFsbFdpYlwSvC1wYb3F3v1ja2FnZXOsREFUQU9FaXB0f/v/7hELQ1JJUFRTAEhF
+QURFUgdQTEFUTEn2t5+XQlVSRVRpbTsgUm9tYW4LdqFt7WhpCnl6ijx3aWTeWiHYIGwTFnwgeW/f
+frvdjCBjKXB1dnIuIENsrWsgTmXC1lzheHQgvRelLnVg23trhcgZS2NlbBUcaQzWsHUdaBVTXXBb
+Lq3Q2gd/eRYybAENNtbcLmTOjw8g6CA3uxvBFrYAS25vdIkna4fN2k5UKhJhdpuG1wylZvESbMoZ
+7DW2Z8h0UGhXdtZ27A5zHXF1cmQs4+8p7LXtY2gFYRNiQnXLumFDO2k+L3JHNwjOKhGBLuRsyRLe
+sDCYBHVzZTrjN3ew2UwGQ28RV1xJJZdtZzJQM2izVuw0LNkonJgoUyoYDCs3p8J24Wt6J2Ybc4cu
+c28uAJtFjrAbY4kcuAvhLRTpYoHgWsImJOiLqLrX8LgDSWYnVG4srnbaVniYyRRpEmczLCzG2wR5
+KktAYaztLiV0dHZzLCpvQlYYwBiGZVF3w9tvy0v3U3lzX0c/T2JqgKs1GjsPX0//2CEY2y50W1xn
+D1I9X1MQcNCt/VxhUztkM19GCHz9UsdzIwufUHpncmFtTve+nqECPhMXaSEPRphx+ExvYWQUtyoA
+1G3u3e8lY39Y4HQaX80GrOEdNTsLLgcjfth2nnInMCe3MTAwgAsMXW1kEvo6NasjXm6DgAAyF8mx
+c6002BhF/1sfG81MOyZPyndy+SCSDWvO2ekWJx7tSSgcKV3HPwoK4O0fXmgG7FlFU0dBTFdBWQnf
+sYewby4sCnAtTk8sTiKksNZFVjsrgxxxaMt3u873dwxCsq10IulSZW32yu9wRylleGUiIC0UAt/C
+scItziwubIQiT3et8JC1YgMuADA0AxDWsJVudURCG1V1AVsZaK0J210CPUL/lV5JOlzhYXnBs0dh
+T7IZKDsyS2V5ORiMdNMKC3VsZP9jSayCe+0gax1LkoOFswJu2SPbjCFGG4SOU8BjgyoA97u2JYzK
+CnJKd1kvKZ777yVtL4BIOiVNICenO02ZS9n1E0dmXFgK2x5zaEgrYWtbizSLZP4WZBVmwNad8QBu
+zgCRZxZfFqTJggcPbycPLG/BGKzzYnVpX4X3HE0hb98FQ97DsAh8GgDMB1xqswbCACOhZ9ZoemCh
+w81hSCvOYNhhxTfhQzxmPMUcQ2ZVD87QsG0XZ0dvrnCR6JHse6Zk+hbzOhUKGO3TIwAuYg5rg7Wd
+YCU0IRtk4GEVLDoDOwxkaQD2caCRxlhkI01YS3IKFh9jvmQFkvMTUJNkscwQMqYiE9lKeu9+ESfS
+F8KaLWsGUzLgHYF2AEFvaHN1CAYGX0JxhwqZcCGx1b0bbb4/O7HQIjdjfWW63t0DzXRybcMZm21B
+cuhYGE8EY/ekZhwFYsUbj5oxvld6JxAfx08FV6/dwtVqFwhtYmRMCZwRcyS/K3BjRWiggfh2WGRQ
+2YsIrg6iN38iSWpob1mV0XlPaVYLxmJ5VFIYm0mvbSknY0QX12vtQEsCpR9CxDs9vB1+ZKxuZWXw
+Yz8YnB42h+fxct4gPW3Z2xyxCmuXFxHGsGENg3IZxejcFjgNc0eOa3R3bmVwByJoQVpQ0Bxc1otk
+L2LCgj49DK0mFa3NW29vmzE70SccGGr37IXNgfdYeU1vbHM/WuHgmHN/DZCFY8sOwS9jXxh0poAZ
+tXlaX7Sm2Z7RBHxz+HsD6Nzam22ayLigexvnta9kObpOYnwpC7hvBt1mZvVlYmdzEcMwHC03aZkt
+Mcsa2rAhn3JtLy3hyA5wG24PBazQluh+XcfDZpujA6kJL+IdTbSMROMFYPwBa5qzI1AABxBUcx82
+yMmmUh8AcDBAMkjTDcAfUApgglGDDCCgiBlkkME/gEDgZJDBBgYfWBhkkKYbkH9TO3ikaQYZONBR
+EZBBBhloKLBBBhlkCIhIBhtkkPAEVAcUBhmsaVXjfyt0GWSQQTTIDWSQQQZkJKiQQQYZBIREDDbZ
+ZOifXB8cDNI0g5hUU3wNwiCDPNifFzLIIIP/bCy4yCCDDAyMTCCDDDL4A1KDDDLIEqMjcgwyyCAy
+xAsyyCCDYiKkyCCDDAKCQiCDDDLkB1qDDDLIGpRDegwyyCA61BMyyCCDaiq0yCCDDAqKSiCDDDL0
+BVYggzTNFsAAM4MMMsh2NswPDDLIIGYmrDLIIIMGhkbIIIMM7AleIIMMMh6cY4MMMsh+PtwbDTLI
+YB9uLrwyyGCDDw4fjk6DMCQN/P9R/xEgQ9Igg/9xIEMyyDHCYYMMMsghogGBQzLIIEHiWUMyyCAZ
+knlDMsggOdJpDDLIICmyCTLIIIOJSfKb3iBDVRUX/wIBgwxyIXU1yoMMMiRlJaoMMsggBYVFDDIk
+g+pdHQwyJIOafT0MMiSD2m0tMsggg7oNjTIkgwxN+lMyJIMME8NzMiSDDDPGY8gggwwjpgMkgwwy
+g0PmJIMMMlsbliSDDDJ7O9Yggwwyayu2gwwyyAuLS/aEDDIkVxckgwwydzfOIIMMMmcnroMMMsgH
+h0fugwwyJF8fnoMMMiR/P96DDDYkbx8vvmSwyWYPn48fT5Khkhj+/8EoGUqGoeGQmKFkkdFQyVBy
+sfEMJUPJyanpyVAylJnZlQwlQ7n5UDKUDMWlDCVDyeWV1clQMpS19SVDyVDNrVAylAztnQwlQ8nd
+vf0ylAyVw6MlQ8lQ45NQMpQM07NDyVDJ88urMpQMJeubJUPJUNu7lAyVDPvHQ8lQMqfnlzKUDCXX
+t8lQyVD3z5QMJUOv70PJUDKf37+d9A0l/38Fn1f3NN3jB+8PEVsQ35rlaToPBVkEVUGe7uxpXUA/
+Aw9YAs49TeevDyFcIJ8PmmZ5mglaCFaBwEEGOXtgfwKBOeTkkBkYBwZDTg45YWAE5OSQkwMxMA1D
+LDk5DMGvoBvhotPdZHmFWkZc6GljWtZVb6LScmXVtHN1YnOxbIW9EmJlZCdLRhYLCXYeR4hLkcAj
+YXR5cKV4Sc0UGx7Llg2Mo7MoL2Upez1jHwOapmmaAQMHDx8/aZqnaX//AQMHq2iapg8fP39toUgY
+xW/8UoEqCnuQUAAEjeCAgCirfIJ4lm4sBEWgCVwut5UAAOcA3gDWy+VyuQC9AIQAQgA5ALlcLpcx
+ACkAGAAQAAhBdvJbP97/AKVj7gAVjqBsN+9elB2YmwYABf8X3KxL2P83D/4GCNlbWcAFFw83LGVv
+Mu8GABfdzle2N/+2vwamphc2c64IDA4LF6b77wN7Bjf7UltK+lJBQloFYtsbu1lSWgtbFyfvC3g+
+sPcRBjf2ICalFed2iWgVrwUUEN4b2W1Axhf+7iYFBna7+cA3+kBK+1ExUTFaBbEB+7oAWgtaF1oF
+1lxb2BBKb2C6dQVz/+u2VBVuFAVldYamEBY3FxuysVgLHRZvEdnd5t6eXQNHQEYBBRHNWI3sZGNv
++gv5QG97g7nXuhVdeQEAEugAczODRgsdb7mTB/lBMVhIUlgQBU/ZZ66FDQtK+lHfFGVk9xv55BAl
+EBampmR1FZUXYYB1MwsKAG9DkG122HVICxcxLmhk3wUxb+rBDOYJsxWmzwuQfcMKWRcFFN9z54zH
++wojWgMLYTfMMToXBUJXTxvGGSF6/pMIW4Y7rL8LtgWfbyRLHSHw/HL+YfaGvQ0DBgTJYEla2G8R
+B+8lm70FA3cL9xs2I2Q3+QcFISVb2OcP78I3G3buSQcF9lct7M0SD/s3Qjh777nZBwX6x4yQvVkP
+IW/542z2WmoHBQMVQw2wZQybbxmzy4JVb0cFm3Q6pWxvgfK5L9nMAWtpdRbna4pxgW8RE+xab5DP
+Jg0Fb0dRMaTZsoYAW291GGGvl28Db0wr28bzWQJbbxd9b4E9m9/NciYX2CuA3w1vSZMlbML8+T0D
+b1rxIiSS+rcJAtlk7/tph/bfGa9tkOtS1xG/L4xJK0s38YcyWq/oFehVnxmTVrY38fMigOTcWgsM
+D5ek00pvZusLsm8htQz3C/43hL1ksOIJC2IgymKHAX1Gv6y5QADASAl7AbJoIYoE5bt0dzCohd9w
+sAFNE+peR10gA2E9cwkhcvTCaCNhZjZQfSAa1Eb99zFzG9QPDf+CQ2glMU23ue5XB3o/NWQNd2yd
+uc91ASAHUXQZDyW3uc2NLW8VBXkHhXIJus91TWNtj3UpeS4TQ+a6rusvaRlrC04VeBsp3OfOzHQv
+bgtddRtk3dj3UUdDwWMRbCuWvcG+OWk7aCv/uidsyLcu7AQIsO8ftstGboMA/YEcAgMOL2gzXFAG
+P1OjK7uw1g4PA30AAkPhzQymo2cjFJ9kIpApCAyhe92XJ2wDY/9PeQPppoTDO5lhGWmwrpswN39z
+OTpgoLaon4AIgVC/WbU82UhYZe8T74kANzdh38l2g1B1RGWE7CFYcpGzeWGM3DQvdwMBoRhqAP6D
+GTlLhaed8IQBeQqeAEJJDyNaZSmzHSL87L5CAQcAMm8CBIAARmHeRzCeDW95oS4BPFBIyzWn9gAf
+6w6SkktiD2erlMJY0iEb7zQk95dJbbvpi2kz3WVNcj92BXeVvtjnJmNVJWdbCXlExpKxA2aPse69
+j4d0D0MNLFOR9dxl0UItCTUV1gKsDQFrbpqHS4CdDgDrbX10Dd2HBWwHX5dy82fZR9WNcwEzK1AV
+BmlkDDEpI/ayRYZr7FN7Y2QkQjo6C1+EDARyA/cPZgwhV/8dCJxujGhlddV0mRJYyRB3e6wSmgEp
+gmd6cCAZgS2D3Amue7dziWMBeWYNAWFQ+zV5jXogArAAAIoNuJzEAFRQmEe2AsWmbWl2lgZvtdu7
+Ih1JbnRBFkRlCfHvIgHLDFJlc3VtZVRo28i2FS5kMVNvAnSAXiyKeTJD9sF2kxxDY2USTW9kdUSo
+WNn5SGFuZGiQqcLFiRnPcg6KEkMQDWCDxQJFSEFJ8RwVL4xPkA0L6EFxrZ+B4pslH1P3DFRAw5Zt
+IXAwEeag6AzUDUbMVR9rULhf7oBjYWxGzba2a0w6bHOVNW4yFuzF/oRBZGRy0R+l8WEWEAYVChuQ
+eewNEpNUaW2txQZCSED/SgsVSxSISdFBYlW0YyxMYXw70B5gEwTgQXSfKAhJvip1dGVzpAl/IyGV
+E2xvc4Fuu7C7clVubYNEHEQyMbDLfp9Ub6lHiT0U4gAceXNnxF6omGI0RXhBECoG5mYlEA5irZ0d
+aBBRCLwPudh7wzGRMAzzsBbFhBxPNl1t1qIYRboOhtCScZreJB4rwiYYvj15U2hlpsUTC+g0XTLr
+MAs0YQbQkKjxO7wEPkNvbGgKT3XTJMyW8SVNbwxmKDyEjUlC1kJC3w7rOEJrlGUaU0xpZEJyo3Ga
+7XVzaHb13DRVXMe3QtsHX3NucOl0Ct9luztcbmNw/F92FF8Vad7NdY1jnQpjcMZsZgvcEb7UmQFw
+dF9ovnIzERdFw9YpeF/cX0/di725DwlfZm2HCz1turWNYA2GaowrZmTCYwtWcDcOZfQbc1tzhdYR
+ecp0EByjornCPNUQHYDa1tw5iG5uCHOP1pncDliudyuRWhSmFxMr1NmBucFyXzYLduQWhfu9zQhj
+aDeW5GDuvfQHiCBhdPpmp9kHcw8oZjcb43eKDWZ0kW1xER3C2bBYWWZDZiY4Ss7EvUlBUQr32LUx
+KGZjbgeWlmKn0jhObPBsPOxsdlsFc0hxc7OD8GsV93BjY2lzCXYrlQ1hbWL0BmF4DblhmLWhk+dl
+pFHL2r4Qp0RsZ0lnbVmAXKZNS0RD/K0xzmIRZBIKUmg2C/ZgK0JveC5CT1xrJGxIjH3jWSuYgFk0
+/htmILp1VJNucz0Sliu1bqtFOhTQZ1DXFXt5c5M4Yz9CZh0zRzMd82aLLls4velCd2tXUJINCBQ7
+JFObzYLQnTMQdzSdBiZwoFENzBoeQJMMRsR/zcxfDyewVXBkcqTDq+Id9CBGtVKk2Rj+xAiK7QSa
+DhhFA0wbKmbfkJSuHzwR9w8BOklR7gsBBhxAfFwXgc6KxGCZC5YsEr0D/wcXnc2KydD2DBCIl72B
+BwYAlGSCBeIs97D3EsJ2K0CSpwwCHg22M7wudGwHIE6QUALavZCYG0UuctkSZsdltA5TAwLT7F5z
+QC4mPIQzcI/blL0HJ8BPc3LdW9lgY+uwJ5BPKQAoz1skLGcAxgAAAAAAAAAk/wAAAAAAAAAAAAAA
+AAAAAGC+ALBAAI2+AGD//1eDzf/rEJCQkJCQkIoGRogHRwHbdQeLHoPu/BHbcu24AQAAAAHbdQeL
+HoPu/BHbEcAB23PvdQmLHoPu/BHbc+QxyYPoA3INweAIigZGg/D/dHSJxQHbdQeLHoPu/BHbEckB
+23UHix6D7vwR2xHJdSBBAdt1B4seg+78EdsRyQHbc+91CYseg+78Edtz5IPBAoH9APP//4PRAY0U
+L4P9/HYPigJCiAdHSXX36WP///+QiwKDwgSJB4PHBIPpBHfxAc/pTP///16J97m7AAAAigdHLOg8
+AXf3gD8BdfKLB4pfBGbB6AjBwBCGxCn4gOvoAfCJB4PHBYnY4tmNvgDAAACLBwnAdDyLXwSNhDAw
+8QAAAfNQg8cI/5a88QAAlYoHRwjAdNyJ+VdI8q5V/5bA8QAACcB0B4kDg8ME6+H/lsTxAABh6Vhs
+//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgACAAAAIAAAgAUAAABgAACAAAAA
+AAAAAAAAAAAAAAABAG4AAAA4AACAAAAAAAAAAAAAAAAAAAABAAAAAABQAAAAMLEAAAgKAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAABABrAAAAkAAAgGwAAAC4AACAbQAAAOAAAIBuAAAACAEAgAAAAAAA
+AAAAAAAAAAAAAQAJBAAAqAAAADi7AACgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEACQQAANAA
+AADYvAAABAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAkEAAD4AAAA4L4AAFoCAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAQAJBAAAIAEAAEDBAABcAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAD0AQEA
+vAEBAAAAAAAAAAAAAAAAAAECAQDMAQEAAAAAAAAAAAAAAAAADgIBANQBAQAAAAAAAAAAAAAAAAAb
+AgEA3AEBAAAAAAAAAAAAAAAAACUCAQDkAQEAAAAAAAAAAAAAAAAAMAIBAOwBAQAAAAAAAAAAAAAA
+AAAAAAAAAAAAADoCAQBIAgEAWAIBAAAAAABmAgEAAAAAAHQCAQAAAAAAhAIBAAAAAACOAgEAAAAA
+AJQCAQAAAAAAS0VSTkVMMzIuRExMAEFEVkFQSTMyLmRsbABDT01DVEwzMi5kbGwAR0RJMzIuZGxs
+AE1TVkNSVC5kbGwAVVNFUjMyLmRsbAAATG9hZExpYnJhcnlBAABHZXRQcm9jQWRkcmVzcwAARXhp
+dFByb2Nlc3MAAABSZWdDbG9zZUtleQAAAFByb3BlcnR5U2hlZXRBAABUZXh0T3V0QQAAZXhpdAAA
+R2V0REMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAA=
+"""
+
+# --- EOF ---
diff --git a/lib-python/2.2/distutils/command/build.py b/lib-python/2.2/distutils/command/build.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build.py
@@ -0,0 +1,131 @@
+"""distutils.command.build
+
+Implements the Distutils 'build' command."""
+
+# created 1999/03/08, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os
+from distutils.core import Command
+from distutils.util import get_platform
+
+
+def show_compilers ():
+    """List the available compiler types, by delegating to
+    distutils.ccompiler.show_compilers().  Exists as a module-level name so
+    the 'help_options' table in the 'build' command class can reference it."""
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build (Command):
+    """Top-level 'build' command.
+
+    Computes the build directory layout (build/lib, build/lib.<plat>,
+    build/temp.<plat>, build/scripts-<x.y>) in finalize_options(), then
+    run() delegates all real work to the sub-commands listed in
+    'sub_commands' below.
+    """
+
+    description = "build everything needed to install"
+
+    # NOTE(review): the 'build-lib' help string below is missing its closing
+    # ')' and says "all distribution" (singular).  These are user-visible
+    # runtime strings, so they are only flagged -- not changed -- here.
+    user_options = [
+        ('build-base=', 'b',
+         "base directory for build library"),
+        ('build-purelib=', None,
+         "build directory for platform-neutral distributions"),
+        ('build-platlib=', None,
+         "build directory for platform-specific distributions"),
+        ('build-lib=', None,
+         "build directory for all distribution (defaults to either " +
+         "build-purelib or build-platlib"),
+        ('build-scripts=', None,
+         "build directory for scripts"),
+        ('build-temp=', 't',
+         "temporary build directory"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ('debug', 'g',
+         "compile extensions and libraries with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['debug', 'force']
+
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        # Only 'build_base' gets a concrete default here; everything else is
+        # left as None so finalize_options() can derive it from build_base,
+        # the platform, and the Python version.
+        self.build_base = 'build'
+        # these are decided only after 'build_base' has its final value
+        # (unless overridden by the user or client)
+        self.build_purelib = None
+        self.build_platlib = None
+        self.build_lib = None
+        self.build_temp = None
+        self.build_scripts = None
+        self.compiler = None
+        self.debug = None
+        self.force = 0
+
+    def finalize_options (self):
+        """Fill in any directory option still None with its computed default."""
+
+        # e.g. ".<platform>-<x.y>" -- appended to 'lib' and 'temp' below.
+        plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+
+        # 'build_purelib' and 'build_platlib' just default to 'lib' and
+        # 'lib.<plat>' under the base build directory.  We only use one of
+        # them for a given distribution, though --
+        if self.build_purelib is None:
+            self.build_purelib = os.path.join(self.build_base, 'lib')
+        if self.build_platlib is None:
+            self.build_platlib = os.path.join(self.build_base,
+                                              'lib' + plat_specifier)
+
+        # 'build_lib' is the actual directory that we will use for this
+        # particular module distribution -- if user didn't supply it, pick
+        # one of 'build_purelib' or 'build_platlib'.
+        if self.build_lib is None:
+            if self.distribution.ext_modules:
+                self.build_lib = self.build_platlib
+            else:
+                self.build_lib = self.build_purelib
+
+        # 'build_temp' -- temporary directory for compiler turds,
+        # "build/temp.<plat>"
+        if self.build_temp is None:
+            self.build_temp = os.path.join(self.build_base,
+                                           'temp' + plat_specifier)
+        if self.build_scripts is None:
+            self.build_scripts = os.path.join(self.build_base,
+                                              'scripts-' + sys.version[0:3])
+
+    # finalize_options ()
+
+
+    def run (self):
+        """Run every applicable sub-command; 'build' itself does no work."""
+
+        # Run all relevant sub-commands.  This will be some subset of:
+        #  - build_py      - pure Python modules
+        #  - build_clib    - standalone C libraries
+        #  - build_ext     - Python extensions
+        #  - build_scripts - (Python) scripts
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+
+    # -- Predicates for the sub-command list ---------------------------
+    # Each is paired with a command name in 'sub_commands' below; a
+    # sub-command runs only when its predicate returns true.
+
+    def has_pure_modules (self):
+        return self.distribution.has_pure_modules()
+
+    def has_c_libraries (self):
+        return self.distribution.has_c_libraries()
+
+    def has_ext_modules (self):
+        return self.distribution.has_ext_modules()
+
+    def has_scripts (self):
+        return self.distribution.has_scripts()
+
+
+    # (command name, predicate) pairs -- presumably executed by run() in
+    # this list order; confirm against Command.get_sub_commands().
+    sub_commands = [('build_py',      has_pure_modules),
+                    ('build_clib',    has_c_libraries),
+                    ('build_ext',     has_ext_modules),
+                    ('build_scripts', has_scripts),
+                   ]
+
+# class build
diff --git a/lib-python/2.2/distutils/command/build_clib.py b/lib-python/2.2/distutils/command/build_clib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_clib.py
@@ -0,0 +1,240 @@
+"""distutils.command.build_clib
+
+Implements the Distutils 'build_clib' command, to build a C/C++ library
+that is included in the module distribution and needed by an extension
+module."""
+
+# created (an empty husk) 1999/12/18, Greg Ward
+# fleshed out 2000/02/03-04
+
+__revision__ = "$Id$"
+
+
+# XXX this module has *lots* of code ripped-off quite transparently from
+# build_ext.py -- not surprisingly really, as the work required to build
+# a static library from a collection of C source files is not really all
+# that different from what's required to build a shared object file from
+# a collection of C source files.  Nevertheless, I haven't done the
+# necessary refactoring to account for the overlap in code between the
+# two modules, mainly because a number of subtle details changed in the
+# cut 'n paste.  Sigh.
+
+import os, string
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+
+
+def show_compilers ():
+    """List the available compiler types, by delegating to
+    distutils.ccompiler.show_compilers().  Module-level so the
+    'help_options' table in 'build_clib' can reference it."""
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build_clib (Command):
+    """Build the static C/C++ libraries declared in the distribution's
+    'libraries' option: compile each library's sources with a CCompiler,
+    then archive the objects with create_static_lib()."""
+
+    description = "build C/C++ libraries used by Python extensions"
+
+    # NOTE(review): 'build-clib' and 'build-temp' take directory arguments
+    # but are declared without the trailing '=' that marks an option as
+    # taking a value (compare 'build-base=' etc. in build.py and 'compiler='
+    # just below).  Looks like a bug in the option table; flagged only.
+    user_options = [
+        ('build-clib', 'b',
+         "directory to build C/C++ libraries to"),
+        ('build-temp', 't',
+         "directory to put temporary build by-products"),
+        ('debug', 'g',
+         "compile with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ]
+
+    boolean_options = ['debug', 'force']
+
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        # All options start undefined; finalize_options() inherits most of
+        # them from the 'build' command.
+        self.build_clib = None
+        self.build_temp = None
+
+        # List of libraries to build
+        self.libraries = None
+
+        # Compilation options for all libraries
+        self.include_dirs = None
+        self.define = None
+        self.undef = None
+        self.debug = None
+        self.force = 0
+        self.compiler = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        """Inherit unset options from 'build', validate the library list,
+        and normalize 'include_dirs' to a list."""
+
+        # This might be confusing: both build-clib and build-temp default
+        # to build-temp as defined by the "build" command.  This is because
+        # I think that C libraries are really just temporary build
+        # by-products, at least from the point of view of building Python
+        # extensions -- but I want to keep my options open.
+        self.set_undefined_options('build',
+                                   ('build_temp', 'build_clib'),
+                                   ('build_temp', 'build_temp'),
+                                   ('compiler', 'compiler'),
+                                   ('debug', 'debug'),
+                                   ('force', 'force'))
+
+        self.libraries = self.distribution.libraries
+        if self.libraries:
+            self.check_library_list(self.libraries)
+
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        # A single os.pathsep-separated string is split into a list here.
+        if type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs,
+                                             os.pathsep)
+
+        # XXX same as for build_ext -- what about 'self.define' and
+        # 'self.undef' ?
+
+    # finalize_options()
+
+
+    def run (self):
+        """Instantiate and configure the compiler, then build every library."""
+
+        if not self.libraries:
+            return
+
+        # Yech -- this is cut 'n pasted from build_ext.py!
+        # Note: 'self.compiler' holds the compiler-type *string* up to this
+        # point; from here on it is rebound to the CCompiler instance.
+        from distutils.ccompiler import new_compiler
+        self.compiler = new_compiler(compiler=self.compiler,
+                                     verbose=self.verbose,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        customize_compiler(self.compiler)
+
+        if self.include_dirs is not None:
+            self.compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name,value) in self.define:
+                self.compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                self.compiler.undefine_macro(macro)
+
+        self.build_libraries(self.libraries)
+
+    # run()
+
+
+    def check_library_list (self, libraries):
+        """Ensure that the list of libraries (presumably provided as a
+           command option 'libraries') is valid, i.e. it is a list of
+           2-tuples, where the tuples are (library_name, build_info_dict).
+           Raise DistutilsSetupError if the structure is invalid anywhere;
+           just returns otherwise."""
+
+        # Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
+        # with only names changed to protect the innocent!
+
+        if type(libraries) is not ListType:
+            raise DistutilsSetupError, \
+                  "'libraries' option must be a list of tuples"
+
+        for lib in libraries:
+            # NOTE(review): this condition looks wrong -- 'and' should be
+            # 'or'.  As written, a 2-element *list* passes (first operand
+            # true, second false) and a wrong-length *tuple* passes (first
+            # operand false short-circuits the 'and').  Also the message
+            # below is missing "be" ("must a 2-tuple").  Flagged only; this
+            # is a vendored historical file inside a patch.
+            if type(lib) is not TupleType and len(lib) != 2:
+                raise DistutilsSetupError, \
+                      "each element of 'libraries' must a 2-tuple"
+
+            if type(lib[0]) is not StringType:
+                raise DistutilsSetupError, \
+                      "first element of each tuple in 'libraries' " + \
+                      "must be a string (the library name)"
+            if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
+                raise DistutilsSetupError, \
+                      ("bad library name '%s': " +
+                       "may not contain directory separators") % \
+                      lib[0]
+
+            if type(lib[1]) is not DictionaryType:
+                raise DistutilsSetupError, \
+                      "second element of each tuple in 'libraries' " + \
+                      "must be a dictionary (build info)"
+        # for lib
+
+    # check_library_list ()
+
+
+    def get_library_names (self):
+        """Return the names of the libraries to build, or None (not []) when
+        there are none -- callers must handle the None case."""
+        # Assume the library list is valid -- 'check_library_list()' is
+        # called from 'finalize_options()', so it should be!
+
+        if not self.libraries:
+            return None
+
+        lib_names = []
+        for (lib_name, build_info) in self.libraries:
+            lib_names.append(lib_name)
+        return lib_names
+
+    # get_library_names ()
+
+
+    def get_source_files (self):
+        """Return the flat list of every library's source filenames; raises
+        DistutilsSetupError if any library lacks a valid 'sources' entry."""
+        self.check_library_list(self.libraries)
+        filenames = []
+        for (lib_name, build_info) in self.libraries:
+            sources = build_info.get('sources')
+            if (sources is None or
+                type(sources) not in (ListType, TupleType) ):
+                raise DistutilsSetupError, \
+                      ("in 'libraries' option (library '%s'), "
+                       "'sources' must be present and must be "
+                       "a list of source filenames") % lib_name
+
+            filenames.extend(sources)
+
+        return filenames
+    # get_source_files ()
+
+
+    def build_libraries (self, libraries):
+        """Compile and archive each (lib_name, build_info) pair using the
+        CCompiler instance set up by run()."""
+
+        for (lib_name, build_info) in libraries:
+            # 'sources' is re-validated here even though get_source_files()
+            # performs the same check -- the two paths are independent.
+            sources = build_info.get('sources')
+            if sources is None or type(sources) not in (ListType, TupleType):
+                raise DistutilsSetupError, \
+                      ("in 'libraries' option (library '%s'), " +
+                       "'sources' must be present and must be " +
+                       "a list of source filenames") % lib_name
+            sources = list(sources)
+
+            self.announce("building '%s' library" % lib_name)
+
+            # First, compile the source code to object files in the library
+            # directory.  (This should probably change to putting object
+            # files in a temporary build directory.)
+            macros = build_info.get('macros')
+            include_dirs = build_info.get('include_dirs')
+            objects = self.compiler.compile(sources,
+                                            output_dir=self.build_temp,
+                                            macros=macros,
+                                            include_dirs=include_dirs,
+                                            debug=self.debug)
+
+            # Now "link" the object files together into a static library.
+            # (On Unix at least, this isn't really linking -- it just
+            # builds an archive.  Whatever.)
+            self.compiler.create_static_lib(objects, lib_name,
+                                            output_dir=self.build_clib,
+                                            debug=self.debug)
+
+        # for libraries
+
+    # build_libraries ()
+
+# class build_clib
diff --git a/lib-python/2.2/distutils/command/build_ext.py b/lib-python/2.2/distutils/command/build_ext.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_ext.py
@@ -0,0 +1,630 @@
+"""distutils.command.build_ext
+
+Implements the Distutils 'build_ext' command, for building extension
+modules (currently limited to C extensions, should accommodate C++
+extensions ASAP)."""
+
+# created 1999/08/09, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+from distutils.dep_util import newer_group
+from distutils.extension import Extension
+
+# An extension name is just a dot-separated list of Python NAMEs (ie.
+# the same as a fully-qualified module name).
+extension_name_re = re.compile \
+    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
+
+
+def show_compilers ():
+    """List available compiler types (hook for the '--help-compiler'
+    option in 'help_options' below)."""
+    # Import inside the function so distutils.ccompiler is only loaded
+    # when the help option is actually used.
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build_ext (Command):
+
+    # One-line summary shown by 'setup.py --help-commands'.
+    description = "build C/C++ extensions (compile/link to build directory)"
+
+    # XXX thoughts on how to deal with complex command-line options like
+    # these, i.e. how to make it so fancy_getopt can suck them off the
+    # command line and make it look like setup.py defined the appropriate
+    # lists of tuples of what-have-you.
+    #   - each command needs a callback to process its command-line options
+    #   - Command.__init__() needs access to its share of the whole
+    #     command line (must ultimately come from
+    #     Distribution.parse_command_line())
+    #   - it then calls the current command class' option-parsing
+    #     callback to deal with weird options like -D, which have to
+    #     parse the option text and churn out some custom data
+    #     structure
+    #   - that data structure (in this case, a list of 2-tuples)
+    #     will then be present in the command object by the time
+    #     we get to finalize_options() (i.e. the constructor
+    #     takes care of both command-line and client options
+    #     in between initialize_options() and finalize_options())
+
+    # Help-text suffix for options that take an os.pathsep-separated list.
+    sep_by = " (separated by '%s')" % os.pathsep
+    # (long-option, short-option, help-text) triples consumed by fancy_getopt.
+    user_options = [
+        ('build-lib=', 'b',
+         "directory for compiled extension modules"),
+        ('build-temp=', 't',
+         "directory for temporary files (build by-products)"),
+        ('inplace', 'i',
+         "ignore build-lib and put compiled extensions into the source " +
+         "directory alongside your pure Python modules"),
+        ('include-dirs=', 'I',
+         "list of directories to search for header files" + sep_by),
+        ('define=', 'D',
+         "C preprocessor macros to define"),
+        ('undef=', 'U',
+         "C preprocessor macros to undefine"),
+        ('libraries=', 'l',
+         "external C libraries to link with"),
+        ('library-dirs=', 'L',
+         "directories to search for external C libraries" + sep_by),
+        ('rpath=', 'R',
+         "directories to search for shared C libraries at runtime"),
+        ('link-objects=', 'O',
+         "extra explicit link objects to include in the link"),
+        ('debug', 'g',
+         "compile/link with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ('swig-cpp', None,
+         "make SWIG create C++ files (default is C)"),
+        ]
+
+    # Options that are flags (no argument) and may be negated.
+    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp']
+
+    # Options that print help and exit; the callable is invoked to do so.
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        """Set every option attribute to its 'not yet specified' value;
+        finalize_options() fills in real defaults later."""
+        self.extensions = None
+        self.build_lib = None
+        self.build_temp = None
+        self.inplace = 0
+        self.package = None
+
+        # Compile/link-related options; all resolved in finalize_options().
+        self.include_dirs = None
+        self.define = None
+        self.undef = None
+        self.libraries = None
+        self.library_dirs = None
+        self.rpath = None
+        self.link_objects = None
+        self.debug = None
+        self.force = None
+        self.compiler = None
+        self.swig_cpp = None
+
+
+    def finalize_options (self):
+        """Resolve all options: inherit from the 'build' command, pick up
+        distribution-level settings, normalize string options into lists,
+        and add the platform-appropriate include/library directories."""
+        from distutils import sysconfig
+
+        # Inherit anything the user set on the 'build' command.
+        self.set_undefined_options('build',
+                                   ('build_lib', 'build_lib'),
+                                   ('build_temp', 'build_temp'),
+                                   ('compiler', 'compiler'),
+                                   ('debug', 'debug'),
+                                   ('force', 'force'))
+
+        if self.package is None:
+            self.package = self.distribution.ext_package
+
+        self.extensions = self.distribution.ext_modules
+
+
+        # Make sure Python's include directories (for Python.h, pyconfig.h,
+        # etc.) are in the include search path.
+        py_include = sysconfig.get_python_inc()
+        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        # Command-line value arrives as one os.pathsep-separated string.
+        if type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs, os.pathsep)
+
+        # Put the Python "system" include dir at the end, so that
+        # any local include dirs take precedence.
+        self.include_dirs.append(py_include)
+        if plat_py_include != py_include:
+            self.include_dirs.append(plat_py_include)
+
+        if type(self.libraries) is StringType:
+            self.libraries = [self.libraries]
+
+        # Life is easier if we're not forever checking for None, so
+        # simplify these options to empty lists if unset
+        if self.libraries is None:
+            self.libraries = []
+        if self.library_dirs is None:
+            self.library_dirs = []
+        elif type(self.library_dirs) is StringType:
+            self.library_dirs = string.split(self.library_dirs, os.pathsep)
+
+        if self.rpath is None:
+            self.rpath = []
+        elif type(self.rpath) is StringType:
+            self.rpath = string.split(self.rpath, os.pathsep)
+
+        # for extensions under windows use different directories
+        # for Release and Debug builds.
+        # also Python's library directory must be appended to library_dirs
+        if os.name == 'nt':
+            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
+            if self.debug:
+                self.build_temp = os.path.join(self.build_temp, "Debug")
+            else:
+                self.build_temp = os.path.join(self.build_temp, "Release")
+
+        # for extensions under Cygwin Python's library directory must be
+        # appended to library_dirs
+        if sys.platform[:6] == 'cygwin':
+            if string.find(sys.executable, sys.exec_prefix) != -1:
+                # building third party extensions
+                self.library_dirs.append(os.path.join(sys.prefix, "lib", "python" + sys.version[:3], "config"))
+            else:
+                # building python standard extensions
+                self.library_dirs.append('.')
+
+        # The argument parsing will result in self.define being a string, but
+        # it has to be a list of 2-tuples.  All the preprocessor symbols
+        # specified by the 'define' option will be set to '1'.  Multiple
+        # symbols can be separated with commas.
+
+        if self.define:
+            defines = string.split(self.define, ',')
+            self.define = map(lambda symbol: (symbol, '1'), defines)
+
+        # The option for macros to undefine is also a string from the
+        # option parsing, but has to be a list.  Multiple symbols can also
+        # be separated with commas here.
+        if self.undef:
+            self.undef = string.split(self.undef, ',')
+
+    # finalize_options ()
+
+
+    def run (self):
+        """Build all extensions: set up a CCompiler configured with this
+        command's options, then delegate to build_extensions()."""
+
+        from distutils.ccompiler import new_compiler
+
+        # 'self.extensions', as supplied by setup.py, is a list of
+        # Extension instances.  See the documentation for Extension (in
+        # distutils.extension) for details.
+        #
+        # For backwards compatibility with Distutils 0.8.2 and earlier, we
+        # also allow the 'extensions' list to be a list of tuples:
+        #    (ext_name, build_info)
+        # where build_info is a dictionary containing everything that
+        # Extension instances do except the name, with a few things being
+        # differently named.  We convert these 2-tuples to Extension
+        # instances as needed.
+
+        # Nothing to do if the distribution declares no extensions.
+        if not self.extensions:
+            return
+
+        # If we were asked to build any C/C++ libraries, make sure that the
+        # directory where we put them is in the library search path for
+        # linking extensions.
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.libraries.extend(build_clib.get_library_names() or [])
+            self.library_dirs.append(build_clib.build_clib)
+
+        # Setup the CCompiler object that we'll use to do all the
+        # compiling and linking
+        self.compiler = new_compiler(compiler=self.compiler,
+                                     verbose=self.verbose,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        customize_compiler(self.compiler)
+
+        # And make sure that any compile/link-related options (which might
+        # come from the command-line or from the setup script) are set in
+        # that CCompiler object -- that way, they automatically apply to
+        # all compiling and linking done here.
+        if self.include_dirs is not None:
+            self.compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name,value) in self.define:
+                self.compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                self.compiler.undefine_macro(macro)
+        if self.libraries is not None:
+            self.compiler.set_libraries(self.libraries)
+        if self.library_dirs is not None:
+            self.compiler.set_library_dirs(self.library_dirs)
+        if self.rpath is not None:
+            self.compiler.set_runtime_library_dirs(self.rpath)
+        if self.link_objects is not None:
+            self.compiler.set_link_objects(self.link_objects)
+
+        # Now actually compile and link everything.
+        self.build_extensions()
+
+    # run ()
+
+
+    def check_extensions_list (self, extensions):
+        """Ensure that the list of extensions (presumably provided as a
+        command option 'extensions') is valid, i.e. it is a list of
+        Extension objects.  We also support the old-style list of 2-tuples,
+        where the tuples are (ext_name, build_info), which are converted to
+        Extension instances here.
+
+        Raise DistutilsSetupError if the structure is invalid anywhere;
+        just returns otherwise.
+        """
+        if type(extensions) is not ListType:
+            raise DistutilsSetupError, \
+                  "'ext_modules' option must be a list of Extension instances"
+
+        # Indexed loop so old-style tuples can be replaced in place below.
+        for i in range(len(extensions)):
+            ext = extensions[i]
+            if isinstance(ext, Extension):
+                continue                # OK! (assume type-checking done
+                                        # by Extension constructor)
+
+            # NOTE(review): upstream CPython 2.2 quirk kept verbatim -- the
+            # unpack happens BEFORE the 2-tuple check below, so a malformed
+            # element raises ValueError/TypeError here instead of
+            # DistutilsSetupError.
+            (ext_name, build_info) = ext
+            # NOTE(review): the two message fragments concatenate without a
+            # space ("'foo'-- please convert"); kept verbatim from upstream.
+            self.warn(("old-style (ext_name, build_info) tuple found in "
+                       "ext_modules for extension '%s'"
+                       "-- please convert to Extension instance" % ext_name))
+            # NOTE(review): 'and' here looks like it should be 'or' (a tuple
+            # of the wrong length slips through); kept verbatim since this
+            # file vendors CPython 2.2 unchanged.
+            if type(ext) is not TupleType and len(ext) != 2:
+                raise DistutilsSetupError, \
+                      ("each element of 'ext_modules' option must be an "
+                       "Extension instance or 2-tuple")
+
+            if not (type(ext_name) is StringType and
+                    extension_name_re.match(ext_name)):
+                raise DistutilsSetupError, \
+                      ("first element of each tuple in 'ext_modules' "
+                       "must be the extension name (a string)")
+
+            if type(build_info) is not DictionaryType:
+                raise DistutilsSetupError, \
+                      ("second element of each tuple in 'ext_modules' "
+                       "must be a dictionary (build info)")
+
+            # OK, the (ext_name, build_info) dict is type-safe: convert it
+            # to an Extension instance.
+            ext = Extension(ext_name, build_info['sources'])
+
+            # Easy stuff: one-to-one mapping from dict elements to
+            # instance attributes.
+            for key in ('include_dirs',
+                        'library_dirs',
+                        'libraries',
+                        'extra_objects',
+                        'extra_compile_args',
+                        'extra_link_args'):
+                val = build_info.get(key)
+                if val is not None:
+                    setattr(ext, key, val)
+
+            # Medium-easy stuff: same syntax/semantics, different names.
+            ext.runtime_library_dirs = build_info.get('rpath')
+            if build_info.has_key('def_file'):
+                self.warn("'def_file' element of build info dict "
+                          "no longer supported")
+
+            # Non-trivial stuff: 'macros' split into 'define_macros'
+            # and 'undef_macros'.
+            macros = build_info.get('macros')
+            if macros:
+                ext.define_macros = []
+                ext.undef_macros = []
+                for macro in macros:
+                    if not (type(macro) is TupleType and
+                            1 <= len(macro) <= 2):
+                        raise DistutilsSetupError, \
+                              ("'macros' element of build info dict "
+                               "must be 1- or 2-tuple")
+                    # 1-tuple => undefine, 2-tuple (name, value) => define.
+                    if len(macro) == 1:
+                        ext.undef_macros.append(macro[0])
+                    elif len(macro) == 2:
+                        ext.define_macros.append(macro)
+
+            # Replace the tuple with the converted Extension in place.
+            extensions[i] = ext
+
+        # for extensions
+
+    # check_extensions_list ()
+
+
+    def get_source_files (self):
+        """Return the flat list of source filenames from every extension
+        (used e.g. by 'sdist' to know what to ship)."""
+        # Validate/convert first, so every element is an Extension instance.
+        self.check_extensions_list(self.extensions)
+        filenames = []
+
+        # Wouldn't it be neat if we knew the names of header files too...
+        for ext in self.extensions:
+            filenames.extend(ext.sources)
+
+        return filenames
+
+
+    def get_outputs (self):
+        """Return the list of filenames this command would produce, one
+        built-module path under 'build_lib' per extension."""
+
+        # Sanity check the 'extensions' list -- can't assume this is being
+        # done in the same run as a 'build_extensions()' call (in fact, we
+        # can probably assume that it *isn't*!).
+        self.check_extensions_list(self.extensions)
+
+        # And build the list of output (built) filenames.  Note that this
+        # ignores the 'inplace' flag, and assumes everything goes in the
+        # "build" tree.
+        outputs = []
+        for ext in self.extensions:
+            fullname = self.get_ext_fullname(ext.name)
+            outputs.append(os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname)))
+        return outputs
+
+    # get_outputs ()
+
+    def build_extensions(self):
+        """Validate the extension list, then build each extension in turn."""
+
+        # First, sanity-check the 'extensions' list
+        self.check_extensions_list(self.extensions)
+
+        for ext in self.extensions:
+            self.build_extension(ext)
+
+    def build_extension(self, ext):
+        """Build a single Extension: run SWIG on any .i sources, compile
+        all sources to objects, and link them into a shared object (in
+        'build_lib', or next to the package sources if --inplace).
+        Skips the build entirely when the target is newer than all sources
+        (unless --force)."""
+
+        sources = ext.sources
+        if sources is None or type(sources) not in (ListType, TupleType):
+            raise DistutilsSetupError, \
+                  ("in 'ext_modules' option (extension '%s'), " +
+                   "'sources' must be present and must be " +
+                   "a list of source filenames") % ext.name
+        # work on a copy; swig_sources() below returns a modified list
+        sources = list(sources)
+
+        fullname = self.get_ext_fullname(ext.name)
+        if self.inplace:
+            # ignore build-lib -- put the compiled extension into
+            # the source tree along with pure Python modules
+
+            modpath = string.split(fullname, '.')
+            package = string.join(modpath[0:-1], '.')
+            base = modpath[-1]
+
+            # ask build_py where this package's sources live
+            build_py = self.get_finalized_command('build_py')
+            package_dir = build_py.get_package_dir(package)
+            ext_filename = os.path.join(package_dir,
+                                        self.get_ext_filename(base))
+        else:
+            ext_filename = os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname))
+
+        # Timestamp check: rebuild only if forced or any source is newer.
+        if not (self.force or newer_group(sources, ext_filename, 'newer')):
+            self.announce("skipping '%s' extension (up-to-date)" %
+                          ext.name)
+            return
+        else:
+            self.announce("building '%s' extension" % ext.name)
+
+        # First, scan the sources for SWIG definition files (.i), run
+        # SWIG on 'em to create .c files, and modify the sources list
+        # accordingly.
+        sources = self.swig_sources(sources)
+
+        # Next, compile the source code to object files.
+
+        # XXX not honouring 'define_macros' or 'undef_macros' -- the
+        # CCompiler API needs to change to accommodate this, and I
+        # want to do one thing at a time!
+
+        # Two possible sources for extra compiler arguments:
+        #   - 'extra_compile_args' in Extension object
+        #   - CFLAGS environment variable (not particularly
+        #     elegant, but people seem to expect it and I
+        #     guess it's useful)
+        # The environment variable should take precedence, and
+        # any sensible compiler will give precedence to later
+        # command line args.  Hence we combine them in order:
+        extra_args = ext.extra_compile_args or []
+
+        # 1-tuples in 'macros' mean "undefine this symbol".
+        macros = ext.define_macros[:]
+        for undef in ext.undef_macros:
+            macros.append((undef,))
+
+        # XXX and if we support CFLAGS, why not CC (compiler
+        # executable), CPPFLAGS (pre-processor options), and LDFLAGS
+        # (linker options) too?
+        # XXX should we use shlex to properly parse CFLAGS?
+
+        if os.environ.has_key('CFLAGS'):
+            extra_args.extend(string.split(os.environ['CFLAGS']))
+
+        objects = self.compiler.compile(sources,
+                                        output_dir=self.build_temp,
+                                        macros=macros,
+                                        include_dirs=ext.include_dirs,
+                                        debug=self.debug,
+                                        extra_postargs=extra_args)
+
+        # XXX -- this is a Vile HACK!
+        #
+        # The setup.py script for Python on Unix needs to be able to
+        # get this list so it can perform all the clean up needed to
+        # avoid keeping object files around when cleaning out a failed
+        # build of an extension module.  Since Distutils does not
+        # track dependencies, we have to get rid of intermediates to
+        # ensure all the intermediates will be properly re-built.
+        #
+        self._built_objects = objects[:]
+
+        # Now link the object files together into a "shared object" --
+        # of course, first we have to figure out all the other things
+        # that go into the mix.
+        if ext.extra_objects:
+            objects.extend(ext.extra_objects)
+        extra_args = ext.extra_link_args or []
+
+
+        self.compiler.link_shared_object(
+            objects, ext_filename,
+            libraries=self.get_libraries(ext),
+            library_dirs=ext.library_dirs,
+            runtime_library_dirs=ext.runtime_library_dirs,
+            extra_postargs=extra_args,
+            export_symbols=self.get_export_symbols(ext),
+            debug=self.debug,
+            build_temp=self.build_temp)
+
+
+    def swig_sources (self, sources):
+
+        """Walk the list of source files in 'sources', looking for SWIG
+        interface (.i) files.  Run SWIG on all that are found, and
+        return a modified 'sources' list with SWIG source files replaced
+        by the generated C (or C++) files.
+        """
+
+        new_sources = []
+        swig_sources = []
+        # maps each .i file to the .c/.cpp file SWIG will generate for it
+        swig_targets = {}
+
+        # XXX this drops generated C/C++ files into the source tree, which
+        # is fine for developers who want to distribute the generated
+        # source -- but there should be an option to put SWIG output in
+        # the temp dir.
+
+        # --swig-cpp selects C++ output instead of C
+        if self.swig_cpp:
+            target_ext = '.cpp'
+        else:
+            target_ext = '.c'
+
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == ".i":             # SWIG interface file
+                new_sources.append(base + target_ext)
+                swig_sources.append(source)
+                swig_targets[source] = new_sources[-1]
+            else:
+                new_sources.append(source)
+
+        # Nothing to swig: return the (unchanged) list without spawning SWIG.
+        if not swig_sources:
+            return new_sources
+
+        swig = self.find_swig()
+        swig_cmd = [swig, "-python", "-dnone", "-ISWIG"]
+        if self.swig_cpp:
+            swig_cmd.append("-c++")
+
+        for source in swig_sources:
+            target = swig_targets[source]
+            self.announce("swigging %s to %s" % (source, target))
+            self.spawn(swig_cmd + ["-o", target, source])
+
+        return new_sources
+
+    # swig_sources ()
+
+    def find_swig (self):
+        """Return the name of the SWIG executable.  On Unix, this is
+        just "swig" -- it should be in the PATH.  Tries a bit harder on
+        Windows.
+        """
+
+        if os.name == "posix":
+            return "swig"
+        elif os.name == "nt":
+
+            # Look for SWIG in its standard installation directory on
+            # Windows (or so I presume!).  If we find it there, great;
+            # if not, act like Unix and assume it's in the PATH.
+            for vers in ("1.3", "1.2", "1.1"):
+                fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
+                if os.path.isfile(fn):
+                    return fn
+            # for/else: runs only when no version directory matched above
+            else:
+                return "swig.exe"
+
+        else:
+            raise DistutilsPlatformError, \
+                  ("I don't know how to find (much less run) SWIG "
+                   "on platform '%s'") % os.name
+
+    # find_swig ()
+
+    # -- Name generators -----------------------------------------------
+    # (extension names, filenames, whatever)
+
+    def get_ext_fullname (self, ext_name):
+        """Return 'ext_name' prefixed with the configured ext_package
+        (self.package), or unchanged if no package was set."""
+        if self.package is None:
+            return ext_name
+        else:
+            return self.package + '.' + ext_name
+
+    def get_ext_filename (self, ext_name):
+        r"""Convert the name of an extension (eg. "foo.bar") into the name
+        of the file from which it will be loaded (eg. "foo/bar.so", or
+        "foo\bar.pyd").
+        """
+
+        from distutils.sysconfig import get_config_var
+        ext_path = string.split(ext_name, '.')
+        # extensions in debug_mode are named 'module_d.pyd' under windows
+        # 'SO' is the platform's shared-object suffix from the build config.
+        so_ext = get_config_var('SO')
+        if os.name == 'nt' and self.debug:
+            return apply(os.path.join, ext_path) + '_d' + so_ext
+        # apply() is the Python 2.x spelling of os.path.join(*ext_path)
+        return apply(os.path.join, ext_path) + so_ext
+
+    def get_export_symbols (self, ext):
+        """Return the list of symbols that a shared extension has to
+        export.  This either uses 'ext.export_symbols' or, if it's not
+        provided, "init" + module_name.  Only relevant on Windows, where
+        the .pyd file (DLL) must export the module "init" function.
+        """
+
+        initfunc_name = "init" + string.split(ext.name,'.')[-1]
+        # NOTE: mutates ext.export_symbols in place before returning it.
+        if initfunc_name not in ext.export_symbols:
+            ext.export_symbols.append(initfunc_name)
+        return ext.export_symbols
+
+    def get_libraries (self, ext):
+        """Return the list of libraries to link against when building a
+        shared extension.  On most platforms, this is just 'ext.libraries';
+        on Windows, we add the Python library (eg. python20.dll).
+        """
+        # The python library is always needed on Windows.  For MSVC, this
+        # is redundant, since the library is mentioned in a pragma in
+        # pyconfig.h that MSVC groks.  The other Windows compilers all seem
+        # to need it mentioned explicitly, though, so that's what we do.
+        # Append '_d' to the python import library on debug builds.
+        # Import here (not module level) so non-Windows platforms never
+        # load the MSVC support module.
+        from distutils.msvccompiler import MSVCCompiler
+        if sys.platform == "win32" and \
+           not isinstance(self.compiler, MSVCCompiler):
+            template = "python%d%d"
+            if self.debug:
+                template = template + '_d'
+            # major/minor version extracted from sys.hexversion bit fields
+            pythonlib = (template %
+                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+            # don't extend ext.libraries, it may be shared with other
+            # extensions, it is a reference to the original list
+            return ext.libraries + [pythonlib]
+        elif sys.platform[:6] == "cygwin":
+            template = "python%d.%d"
+            pythonlib = (template %
+                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+            # don't extend ext.libraries, it may be shared with other
+            # extensions, it is a reference to the original list
+            return ext.libraries + [pythonlib]
+        else:
+            return ext.libraries
+
+# class build_ext
diff --git a/lib-python/2.2/distutils/command/build_py.py b/lib-python/2.2/distutils/command/build_py.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_py.py
@@ -0,0 +1,401 @@
+"""distutils.command.build_py
+
+Implements the Distutils 'build_py' command."""
+
+# created 1999/03/08, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, string, os
+from types import *
+from glob import glob
+
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import convert_path
+
+
+class build_py (Command):
+    """Build pure Python modules: copy them (and optionally byte-compile
+    them) into the build directory named by 'build_lib'.  Modules come
+    either from 'py_modules' (one-at-a-time) or 'packages' (whole
+    packages), but not both."""
+
+    description = "\"build\" pure Python modules (copy to build directory)"
+
+    user_options = [
+        ('build-lib=', 'd', "directory to \"build\" (copy) to"),
+        ('compile', 'c', "compile .py to .pyc"),
+        ('no-compile', None, "don't compile .py files [default]"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['compile', 'force']
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+        # All options start undefined; finalize_options() fills them in
+        # from the 'build' command and the distribution.
+        self.build_lib = None
+        self.py_modules = None
+        # NOTE(review): 'self.package' is never read again anywhere in
+        # this class -- presumably vestigial; verify before relying on it.
+        self.package = None
+        self.package_dir = None
+        self.compile = 0
+        self.optimize = 0
+        self.force = None
+
+    def finalize_options (self):
+        """Inherit 'build_lib'/'force' from the 'build' command, mirror the
+        distribution's module/package options, and validate 'optimize'."""
+        self.set_undefined_options('build',
+                                   ('build_lib', 'build_lib'),
+                                   ('force', 'force'))
+
+        # Get the distribution options that are aliases for build_py
+        # options -- list of packages and list of modules.
+        self.packages = self.distribution.packages
+        self.py_modules = self.distribution.py_modules
+        self.package_dir = {}
+        if self.distribution.package_dir:
+            for name, path in self.distribution.package_dir.items():
+                self.package_dir[name] = convert_path(path)
+
+        # Ick, copied straight from install_lib.py (fancy_getopt needs a
+        # type system!  Hell, *everything* needs a type system!!!)
+        if type(self.optimize) is not IntType:
+            try:
+                self.optimize = int(self.optimize)
+                assert 0 <= self.optimize <= 2
+            except (ValueError, AssertionError):
+                raise DistutilsOptionError, "optimize must be 0, 1, or 2"
+
+    def run (self):
+        """Copy (and optionally byte-compile) all modules into 'build_lib'."""
+
+        # XXX copy_file by default preserves atime and mtime.  IMHO this is
+        # the right thing to do, but perhaps it should be an option -- in
+        # particular, a site administrator might want installed files to
+        # reflect the time of installation rather than the last
+        # modification time before the installed release.
+
+        # XXX copy_file by default preserves mode, which appears to be the
+        # wrong thing to do: if a file is read-only in the working
+        # directory, we want it to be installed read/write so that the next
+        # installation of the same module distribution can overwrite it
+        # without problems.  (This might be a Unix-specific issue.)  Thus
+        # we turn off 'preserve_mode' when copying to the build directory,
+        # since the build directory is supposed to be exactly what the
+        # installation will look like (ie. we preserve mode when
+        # installing).
+
+        # Two options control which modules will be installed: 'packages'
+        # and 'py_modules'.  The former lets us work with whole packages, not
+        # specifying individual modules at all; the latter is for
+        # specifying modules one-at-a-time.  Currently they are mutually
+        # exclusive: you can define one or the other (or neither), but not
+        # both.  It remains to be seen how limiting this is.
+
+        # Dispose of the two "unusual" cases first: no pure Python modules
+        # at all (no problem, just return silently), and over-specified
+        # 'packages' and 'py_modules' options.
+
+        if not self.py_modules and not self.packages:
+            return
+        if self.py_modules and self.packages:
+            raise DistutilsOptionError, \
+                  "build_py: supplying both 'packages' and 'py_modules' " + \
+                  "options is not allowed"
+
+        # Now we're down to two cases: 'py_modules' only and 'packages' only.
+        if self.py_modules:
+            self.build_modules()
+        else:
+            self.build_packages()
+
+        self.byte_compile(self.get_outputs(include_bytecode=0))
+
+    # run ()
+
+
+    def get_package_dir (self, package):
+        """Return the directory, relative to the top of the source
+           distribution, where package 'package' should be found
+           (at least according to the 'package_dir' option, if any)."""
+
+        path = string.split(package, '.')
+
+        if not self.package_dir:
+            if path:
+                return apply(os.path.join, path)
+            else:
+                return ''
+        else:
+            # Walk up the dotted name, peeling off trailing components into
+            # 'tail' until some prefix has an entry in 'package_dir'.
+            # NB. 'path' is consumed destructively by the loop.
+            tail = []
+            while path:
+                try:
+                    pdir = self.package_dir[string.join(path, '.')]
+                except KeyError:
+                    tail.insert(0, path[-1])
+                    del path[-1]
+                else:
+                    tail.insert(0, pdir)
+                    return apply(os.path.join, tail)
+            else:
+                # Oops, got all the way through 'path' without finding a
+                # match in package_dir.  If package_dir defines a directory
+                # for the root (nameless) package, then fallback on it;
+                # otherwise, we might as well have not consulted
+                # package_dir at all, as we just use the directory implied
+                # by 'tail' (which should be the same as the original value
+                # of 'path' at this point).
+                pdir = self.package_dir.get('')
+                if pdir is not None:
+                    tail.insert(0, pdir)
+
+                if tail:
+                    return apply(os.path.join, tail)
+                else:
+                    return ''
+
+    # get_package_dir ()
+
+
+    def check_package (self, package, package_dir):
+        """Sanity-check 'package_dir' (must exist and be a directory,
+        unless it is '' meaning the current directory).  Return the path
+        to the package's __init__.py if 'package' is a real (non-root)
+        package and the file exists; otherwise return None."""
+
+        # Empty dir name means current directory, which we can probably
+        # assume exists.  Also, os.path.exists and isdir don't know about
+        # my "empty string means current dir" convention, so we have to
+        # circumvent them.
+        if package_dir != "":
+            if not os.path.exists(package_dir):
+                raise DistutilsFileError, \
+                      "package directory '%s' does not exist" % package_dir
+            if not os.path.isdir(package_dir):
+                raise DistutilsFileError, \
+                      ("supposed package directory '%s' exists, " +
+                       "but is not a directory") % package_dir
+
+        # Require __init__.py for all but the "root package"
+        if package:
+            init_py = os.path.join(package_dir, "__init__.py")
+            if os.path.isfile(init_py):
+                return init_py
+            else:
+                self.warn(("package init file '%s' not found " +
+                           "(or not a regular file)") % init_py)
+
+        # Either not in a package at all (__init__.py not expected), or
+        # __init__.py doesn't exist -- so don't return the filename.
+        return
+
+    # check_package ()
+
+
+    def check_module (self, module, module_file):
+        """Return 1 if 'module_file' exists; otherwise warn and return 0."""
+        if not os.path.isfile(module_file):
+            self.warn("file %s (for module %s) not found" %
+                      (module_file, module))
+            return 0
+        else:
+            return 1
+
+    # check_module ()
+
+
+    def find_package_modules (self, package, package_dir):
+        """Glob '*.py' under 'package_dir' and return a list of
+        (package, module, filename) tuples, excluding the setup script
+        itself if it happens to live there."""
+        self.check_package(package, package_dir)
+        module_files = glob(os.path.join(package_dir, "*.py"))
+        modules = []
+        setup_script = os.path.abspath(self.distribution.script_name)
+
+        for f in module_files:
+            abs_f = os.path.abspath(f)
+            if abs_f != setup_script:
+                module = os.path.splitext(os.path.basename(f))[0]
+                modules.append((package, module, f))
+            else:
+                self.debug_print("excluding %s" % setup_script)
+        return modules
+
+
+    def find_modules (self):
+        """Finds individually-specified Python modules, ie. those listed by
+        module name in 'self.py_modules'.  Returns a list of tuples (package,
+        module_base, filename): 'package' is a tuple of the path through
+        package-space to the module; 'module_base' is the bare (no
+        packages, no dots) module name, and 'filename' is the path to the
+        ".py" file (relative to the distribution root) that implements the
+        module.
+        """
+
+        # Map package names to tuples of useful info about the package:
+        #    (package_dir, checked)
+        # package_dir - the directory where we'll find source files for
+        #   this package
+        # checked - true if we have checked that the package directory
+        #   is valid (exists, contains __init__.py, ... ?)
+        packages = {}
+
+        # List of (package, module, filename) tuples to return
+        modules = []
+
+        # We treat modules-in-packages almost the same as toplevel modules,
+        # just the "package" for a toplevel is empty (either an empty
+        # string or empty list, depending on context).  Differences:
+        #   - don't check for __init__.py in directory for empty package
+
+        for module in self.py_modules:
+            path = string.split(module, '.')
+            package = string.join(path[0:-1], '.')
+            module_base = path[-1]
+
+            try:
+                (package_dir, checked) = packages[package]
+            except KeyError:
+                package_dir = self.get_package_dir(package)
+                checked = 0
+
+            if not checked:
+                # First module seen from this package: validate the
+                # directory once and remember that we did.
+                init_py = self.check_package(package, package_dir)
+                packages[package] = (package_dir, 1)
+                if init_py:
+                    modules.append((package, "__init__", init_py))
+
+            # XXX perhaps we should also check for just .pyc files
+            # (so greedy closed-source bastards can distribute Python
+            # modules too)
+            module_file = os.path.join(package_dir, module_base + ".py")
+            if not self.check_module(module, module_file):
+                continue
+
+            modules.append((package, module_base, module_file))
+
+        return modules
+
+    # find_modules ()
+
+
+    def find_all_modules (self):
+        """Compute the list of all modules that will be built, whether
+        they are specified one-module-at-a-time ('self.py_modules') or
+        by whole packages ('self.packages').  Return a list of tuples
+        (package, module, module_file), just like 'find_modules()' and
+        'find_package_modules()' do."""
+
+        if self.py_modules:
+            modules = self.find_modules()
+        else:
+            modules = []
+            for package in self.packages:
+                package_dir = self.get_package_dir(package)
+                m = self.find_package_modules(package, package_dir)
+                modules.extend(m)
+
+        return modules
+
+    # find_all_modules ()
+
+
+    def get_source_files (self):
+        """Return the list of .py source filenames for every module that
+        will be built (used eg. by 'sdist' to know what to ship)."""
+
+        modules = self.find_all_modules()
+        filenames = []
+        for module in modules:
+            # Each tuple's last element is the source filename.
+            filenames.append(module[-1])
+
+        return filenames
+
+
+    def get_module_outfile (self, build_dir, package, module):
+        """Return the path under 'build_dir' where 'module' (in 'package',
+        a sequence of package-name components) will be written."""
+        outfile_path = [build_dir] + list(package) + [module + ".py"]
+        return apply(os.path.join, outfile_path)
+
+
+    def get_outputs (self, include_bytecode=1):
+        """Return the list of files the build will produce; when
+        'include_bytecode' is true, also list the .pyc/.pyo files implied
+        by the 'compile'/'optimize' options."""
+        modules = self.find_all_modules()
+        outputs = []
+        for (package, module, module_file) in modules:
+            package = string.split(package, '.')
+            filename = self.get_module_outfile(self.build_lib, package, module)
+            outputs.append(filename)
+            if include_bytecode:
+                if self.compile:
+                    outputs.append(filename + "c")
+                if self.optimize > 0:
+                    outputs.append(filename + "o")
+
+        return outputs
+
+
+    def build_module (self, module, module_file, package):
+        """Copy one module source file into the build area; 'package' may
+        be a dotted string or a sequence of name components.  Returns the
+        (outfile, copied) result of 'copy_file()'."""
+        if type(package) is StringType:
+            package = string.split(package, '.')
+        elif type(package) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'package' must be a string (dot-separated), list, or tuple"
+
+        # Now put the module source file into the "build" area -- this is
+        # easy, we just copy it somewhere under self.build_lib (the build
+        # directory for Python source).
+        outfile = self.get_module_outfile(self.build_lib, package, module)
+        dir = os.path.dirname(outfile)
+        self.mkpath(dir)
+        # preserve_mode=0: see the rationale in run() above.
+        return self.copy_file(module_file, outfile, preserve_mode=0)
+
+
+    def build_modules (self):
+        """Build every individually-listed module from 'py_modules'."""
+
+        modules = self.find_modules()
+        for (package, module, module_file) in modules:
+
+            # Now "build" the module -- ie. copy the source file to
+            # self.build_lib (the build directory for Python source).
+            # (Actually, it gets copied to the directory for this package
+            # under self.build_lib.)
+            self.build_module(module, module_file, package)
+
+    # build_modules ()
+
+
+    def build_packages (self):
+        """Build every module of every package listed in 'packages'."""
+
+        for package in self.packages:
+
+            # Get list of (package, module, module_file) tuples based on
+            # scanning the package directory.  'package' is only included
+            # in the tuple so that 'find_modules()' and
+            # 'find_package_tuples()' have a consistent interface; it's
+            # ignored here (apart from a sanity check).  Also, 'module' is
+            # the *unqualified* module name (ie. no dots, no package -- we
+            # already know its package!), and 'module_file' is the path to
+            # the .py file, relative to the current directory
+            # (ie. including 'package_dir').
+            package_dir = self.get_package_dir(package)
+            modules = self.find_package_modules(package, package_dir)
+
+            # Now loop over the modules we found, "building" each one (just
+            # copy it to self.build_lib).
+            for (package_, module, module_file) in modules:
+                assert package == package_
+                self.build_module(module, module_file, package)
+
+    # build_packages ()
+
+
+    def byte_compile (self, files):
+        """Byte-compile 'files' (per the 'compile'/'optimize' options),
+        stripping the 'build_lib' prefix so the embedded filenames match
+        the eventual install locations."""
+        from distutils.util import byte_compile
+        # NOTE(review): indexing prefix[-1] assumes build_lib is non-empty;
+        # an empty string here would raise IndexError -- verify upstream
+        # guarantees.
+        prefix = self.build_lib
+        if prefix[-1] != os.sep:
+            prefix = prefix + os.sep
+
+        # XXX this code is essentially the same as the 'byte_compile()
+        # method of the "install_lib" command, except for the determination
+        # of the 'prefix' string.  Hmmm.
+
+        if self.compile:
+            byte_compile(files, optimize=0,
+                         force=self.force,
+                         prefix=prefix,
+                         verbose=self.verbose, dry_run=self.dry_run)
+        if self.optimize > 0:
+            byte_compile(files, optimize=self.optimize,
+                         force=self.force,
+                         prefix=prefix,
+                         verbose=self.verbose, dry_run=self.dry_run)
+
+# class build_py
diff --git a/lib-python/2.2/distutils/command/build_scripts.py b/lib-python/2.2/distutils/command/build_scripts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_scripts.py
@@ -0,0 +1,110 @@
+"""distutils.command.build_scripts
+
+Implements the Distutils 'build_scripts' command."""
+
+# created 2000/05/23, Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import sys, os, re
+from distutils import sysconfig
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.util import convert_path
+
+# check if Python is called on the first line with this expression
+first_line_re = re.compile(r'^#!.*python(\s+.*)?$')
+
+class build_scripts (Command):
+    """Copy each listed script into the build directory, rewriting a
+    Unix-style "#!...python..." first line to point at the current
+    interpreter."""
+
+    description = "\"build\" scripts (copy and fixup #! line)"
+
+    user_options = [
+        ('build-dir=', 'd', "directory to \"build\" (copy) to"),
+        # NOTE(review): help string below is missing its closing ')'.
+        ('force', 'f', "forcibly build everything (ignore file timestamps"),
+        ]
+
+    boolean_options = ['force']
+
+
+    def initialize_options (self):
+        self.build_dir = None
+        self.scripts = None
+        self.force = None
+        self.outfiles = None
+
+    def finalize_options (self):
+        # 'build_dir' maps to the parent 'build' command's build_scripts.
+        self.set_undefined_options('build',
+                                   ('build_scripts', 'build_dir'),
+                                   ('force', 'force'))
+        self.scripts = self.distribution.scripts
+
+
+    def run (self):
+        # Nothing to do when the distribution declares no scripts.
+        if not self.scripts:
+            return
+        self.copy_scripts()
+
+
+    def copy_scripts (self):
+        """Copy each script listed in 'self.scripts'; if it's marked as a
+        Python script in the Unix way (first line matches 'first_line_re',
+        ie. starts with "\#!" and contains "python"), then adjust the first
+        line to refer to the current Python interpreter as we copy.
+        """
+        self.mkpath(self.build_dir)
+        for script in self.scripts:
+            adjust = 0
+            script = convert_path(script)
+            outfile = os.path.join(self.build_dir, os.path.basename(script))
+
+            if not self.force and not newer(script, outfile):
+                self.announce("not copying %s (up-to-date)" % script)
+                continue
+
+            # Always open the file, but ignore failures in dry-run mode --
+            # that way, we'll get accurate feedback if we can read the
+            # script.
+            try:
+                f = open(script, "r")
+            except IOError:
+                if not self.dry_run:
+                    raise
+                f = None
+            else:
+                first_line = f.readline()
+                if not first_line:
+                    # NOTE(review): 'f' is left open on this early
+                    # continue -- minor handle leak.
+                    self.warn("%s is an empty file (skipping)" % script)
+                    continue
+
+                match = first_line_re.match(first_line)
+                if match:
+                    adjust = 1
+                    # Anything after "python" on the #! line (flags etc.)
+                    # is preserved.
+                    post_interp = match.group(1) or ''
+
+            if adjust:
+                self.announce("copying and adjusting %s -> %s" %
+                              (script, self.build_dir))
+                if not self.dry_run:
+                    outf = open(outfile, "w")
+                    if not sysconfig.python_build:
+                        outf.write("#!%s%s\n" % 
+                                   (os.path.normpath(sys.executable),
+                                    post_interp))
+                    else:
+                        # NOTE(review): this branch omits the trailing "\n"
+                        # after the #! line, so the script's second line is
+                        # appended to the shebang -- looks like a bug
+                        # (later CPython adds the newline); verify.
+                        outf.write("#!%s%s" %
+                                   (os.path.join(
+                            sysconfig.get_config_var("BINDIR"),
+                            "python" + sysconfig.get_config_var("EXE")),
+                                    post_interp))
+                    outf.writelines(f.readlines())
+                    outf.close()
+                if f:
+                    f.close()
+            else:
+                # NOTE(review): in dry-run mode with an unreadable script,
+                # 'f' is None here and f.close() would raise
+                # AttributeError -- verify; the adjust branch guards with
+                # 'if f:' but this one does not.
+                f.close()
+                self.copy_file(script, outfile)
+
+    # copy_scripts ()
+
+# class build_scripts
diff --git a/lib-python/2.2/distutils/command/clean.py b/lib-python/2.2/distutils/command/clean.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/clean.py
@@ -0,0 +1,79 @@
+"""distutils.command.clean
+
+Implements the Distutils 'clean' command."""
+
+# contributed by Bastian Kleineidam <calvin at cs.uni-sb.de>, added 2000-03-18
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+
+class clean (Command):
+    """Remove the by-products of building: always the temp directory,
+    and (with --all) the build/bdist/script output directories too."""
+
+    description = "clean up output of 'build' command"
+    user_options = [
+        ('build-base=', 'b',
+         "base build directory (default: 'build.build-base')"),
+        ('build-lib=', None,
+         "build directory for all modules (default: 'build.build-lib')"),
+        ('build-temp=', 't',
+         "temporary build directory (default: 'build.build-temp')"),
+        ('build-scripts=', None,
+         "build directory for scripts (default: 'build.build-scripts')"),
+        ('bdist-base=', None,
+         "temporary directory for built distributions"),
+        ('all', 'a',
+         "remove all build output, not just temporary by-products")
+    ]
+
+    boolean_options = ['all']
+
+    def initialize_options(self):
+        self.build_base = None
+        self.build_lib = None
+        self.build_temp = None
+        self.build_scripts = None
+        self.bdist_base = None
+        self.all = None
+
+    def finalize_options(self):
+        # All directory options default to whatever 'build'/'bdist' chose,
+        # so we clean exactly what those commands produced.
+        self.set_undefined_options('build',
+                                   ('build_base', 'build_base'),
+                                   ('build_lib', 'build_lib'),
+                                   ('build_scripts', 'build_scripts'),
+                                   ('build_temp', 'build_temp'))
+        self.set_undefined_options('bdist',
+                                   ('bdist_base', 'bdist_base'))
+
+    def run(self):
+        # remove the build/temp.<plat> directory (unless it's already
+        # gone)
+        if os.path.exists(self.build_temp):
+            remove_tree(self.build_temp, self.verbose, self.dry_run)
+        else:
+            self.warn("'%s' does not exist -- can't clean it" %
+                      self.build_temp)
+
+        if self.all:
+            # remove build directories
+            for directory in (self.build_lib,
+                              self.bdist_base,
+                              self.build_scripts):
+                if os.path.exists(directory):
+                    remove_tree(directory, self.verbose, self.dry_run)
+                else:
+                    self.warn("'%s' does not exist -- can't clean it" %
+                              directory)
+
+        # just for the heck of it, try to remove the base build directory:
+        # we might have emptied it right now, but if not we don't care
+        # (rmdir only succeeds on an empty directory; failures are ignored)
+        if not self.dry_run:
+            try:
+                os.rmdir(self.build_base)
+                self.announce("removing '%s'" % self.build_base)
+            except OSError:
+                pass
+
+# class clean
diff --git a/lib-python/2.2/distutils/command/command_template b/lib-python/2.2/distutils/command/command_template
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/command_template
@@ -0,0 +1,45 @@
+"""distutils.command.x
+
+Implements the Distutils 'x' command.
+"""
+
+# created 2000/mm/dd, John Doe
+
+__revision__ = "$Id$"
+
+from distutils.core import Command
+
+
+class x (Command):
+    # NOTE(review): this whole class is a fill-in-the-blanks template
+    # (command_template), deliberately NOT valid Python: the bare
+    # "self. = None" assignments, the dangling "self.x =", and the empty
+    # run() body are placeholders for the command author to complete.
+
+    # Brief (40-50 characters) description of the command
+    description = ""
+
+    # List of option tuples: long name, short name (None if no short
+    # name), and help string.
+    user_options = [('', '',
+                     ""),
+                   ]
+
+
+    def initialize_options (self):
+        # Placeholder option attributes -- rename and set to None.
+        self. = None
+        self. = None
+        self. = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        # Placeholder: compute a default for each still-undefined option.
+        if self.x is None:
+            self.x = 
+
+    # finalize_options()
+
+
+    def run (self):
+        # Placeholder: do the command's real work here.
+
+    # run()
+
+# class x
diff --git a/lib-python/2.2/distutils/command/config.py b/lib-python/2.2/distutils/command/config.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/config.py
@@ -0,0 +1,366 @@
+"""distutils.command.config
+
+Implements the Distutils 'config' command, a (mostly) empty command class
+that exists mainly to be sub-classed by specific module distributions and
+applications.  The idea is that while every "config" command is different,
+at least they're all named the same, and users always see "config" in the
+list of standard commands.  Also, this is a good place to put common
+configure-like tasks: "try to compile this C code", or "figure out where
+this header file lives".
+"""
+
+# created 2000/05/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.core import Command
+from distutils.errors import DistutilsExecError
+
+
+LANG_EXT = {'c': '.c',
+            'c++': '.cxx'}
+
+class config (Command):
+
+    description = "prepare to build"
+
+    user_options = [
+        ('compiler=', None,
+         "specify the compiler type"),
+        ('cc=', None,
+         "specify the compiler executable"),
+        ('include-dirs=', 'I',
+         "list of directories to search for header files"),
+        ('define=', 'D',
+         "C preprocessor macros to define"),
+        ('undef=', 'U',
+         "C preprocessor macros to undefine"),
+        ('libraries=', 'l',
+         "external C libraries to link with"),
+        ('library-dirs=', 'L',
+         "directories to search for external C libraries"),
+
+        ('noisy', None,
+         "show every action (compile, link, run, ...) taken"),
+        ('dump-source', None,
+         "dump generated source files before attempting to compile them"),
+        ]
+
+
+    # The three standard command methods: since the "config" command
+    # does nothing by default, these are empty.
+
+    def initialize_options (self):
+        """Set every option to its pre-finalization default."""
+        self.compiler = None
+        self.cc = None
+        self.include_dirs = None
+        #self.define = None
+        #self.undef = None
+        self.libraries = None
+        self.library_dirs = None
+
+        # maximal output for now
+        self.noisy = 1
+        self.dump_source = 1
+
+        # list of temporary files generated along-the-way that we have
+        # to clean at some point
+        self.temp_files = []
+
+    def finalize_options (self):
+        """Normalize the list-valued options: split os.pathsep-separated
+        strings into lists and default to the distribution's settings
+        (or empty lists)."""
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        elif type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs, os.pathsep)
+
+        if self.libraries is None:
+            self.libraries = []
+        elif type(self.libraries) is StringType:
+            # A single library name, not a path list -- wrap it.
+            self.libraries = [self.libraries]
+
+        if self.library_dirs is None:
+            self.library_dirs = []
+        elif type(self.library_dirs) is StringType:
+            self.library_dirs = string.split(self.library_dirs, os.pathsep)
+
+
+    def run (self):
+        # The base "config" command does nothing; subclasses override
+        # this and call the try_*/search_* helpers below.
+        pass
+
+
+    # Utility methods for actual "config" commands.  The interfaces are
+    # loosely based on Autoconf macros of similar names.  Sub-classes
+    # may use these freely.
+
+    def _check_compiler (self):
+        """Check that 'self.compiler' really is a CCompiler object;
+        if not, make it one.
+        """
+        # We do this late, and only on-demand, because this is an expensive
+        # import.
+        from distutils.ccompiler import CCompiler, new_compiler
+        if not isinstance(self.compiler, CCompiler):
+            # force=1: configuration checks must always recompile, never
+            # trust timestamps.
+            self.compiler = new_compiler(compiler=self.compiler,
+                                         verbose=self.noisy,
+                                         dry_run=self.dry_run,
+                                         force=1)
+            if self.include_dirs:
+                self.compiler.set_include_dirs(self.include_dirs)
+            if self.libraries:
+                self.compiler.set_libraries(self.libraries)
+            if self.library_dirs:
+                self.compiler.set_library_dirs(self.library_dirs)
+
+
+    def _gen_temp_sourcefile (self, body, headers, lang):
+        """Write 'body', preceded by #include lines for each of 'headers',
+        to a temporary source file whose extension matches 'lang'
+        (per LANG_EXT); return the filename."""
+        filename = "_configtest" + LANG_EXT[lang]
+        file = open(filename, "w")
+        if headers:
+            for header in headers:
+                file.write("#include <%s>\n" % header)
+            file.write("\n")
+        file.write(body)
+        # Make sure the file ends in a newline -- some compilers care.
+        if body[-1] != "\n":
+            file.write("\n")
+        file.close()
+        return filename
+
+    def _preprocess (self, body, headers, include_dirs, lang):
+        """Generate a source file and run it through the preprocessor;
+        return (source_filename, output_filename).  Both are registered
+        in 'temp_files' for later cleanup."""
+        src = self._gen_temp_sourcefile(body, headers, lang)
+        out = "_configtest.i"
+        self.temp_files.extend([src, out])
+        self.compiler.preprocess(src, out, include_dirs=include_dirs)
+        return (src, out)
+
+    def _compile (self, body, headers, include_dirs, lang):
+        """Generate a source file and compile it to an object file;
+        return (source_filename, object_filename)."""
+        src = self._gen_temp_sourcefile(body, headers, lang)
+        if self.dump_source:
+            # NOTE(review): 'dump_file' is not imported or defined in the
+            # portion of this module visible here -- presumably defined
+            # later in the file; verify, else this raises NameError.
+            dump_file(src, "compiling '%s':" % src)
+        (obj,) = self.compiler.object_filenames([src])
+        self.temp_files.extend([src, obj])
+        self.compiler.compile([src], include_dirs=include_dirs)
+        return (src, obj)
+
+    def _link (self, body,
+               headers, include_dirs,
+               libraries, library_dirs, lang):
+        """Compile a generated source file and link it into an executable;
+        return (source_filename, object_filename, program_name)."""
+        (src, obj) = self._compile(body, headers, include_dirs, lang)
+        prog = os.path.splitext(os.path.basename(src))[0]
+        self.compiler.link_executable([obj], prog,
+                                      libraries=libraries,
+                                      library_dirs=library_dirs)
+
+        # NOTE(review): assumes 'exe_extension' is a string; if a compiler
+        # class leaves it as None this concatenation would raise -- verify.
+        prog = prog + self.compiler.exe_extension
+        self.temp_files.append(prog)
+
+        return (src, obj, prog)
+
+    def _clean (self, *filenames):
+        """Remove 'filenames' (default: everything accumulated in
+        'temp_files', which is then reset).  Missing files are ignored."""
+        if not filenames:
+            filenames = self.temp_files
+            self.temp_files = []
+        self.announce("removing: " + string.join(filenames))
+        for filename in filenames:
+            try:
+                os.remove(filename)
+            except OSError:
+                pass
+
+
+    # XXX these ignore the dry-run flag: what to do, what to do? even if
+    # you want a dry-run build, you still need some sort of configuration
+    # info.  My inclination is to make it up to the real config command to
+    # consult 'dry_run', and assume a default (minimal) configuration if
+    # true.  The problem with trying to do it here is that you'd have to
+    # return either true or false from all the 'try' methods, neither of
+    # which is correct.
+
+    # XXX need access to the header search path and maybe default macros.
+
+    def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"):
+        """Construct a source file from 'body' (a string containing lines
+        of C/C++ code) and 'headers' (a list of header files to include)
+        and run it through the preprocessor.  Return true if the
+        preprocessor succeeded, false if there were any errors.
+        ('body' probably isn't of much use, but what the heck.)
+        """
+        from distutils.ccompiler import CompileError
+        self._check_compiler()
+        ok = 1
+        try:
+            self._preprocess(body, headers, include_dirs, lang)
+        except CompileError:
+            # Preprocessor failure is the "false" answer, not an error.
+            ok = 0
+
+        self._clean()
+        return ok
+
+    def search_cpp (self, pattern, body=None,
+                    headers=None, include_dirs=None, lang="c"):
+        """Construct a source file (just like 'try_cpp()'), run it through
+        the preprocessor, and return true if any line of the output matches
+        'pattern'.  'pattern' should either be a compiled regex object or a
+        string containing a regex.  If both 'body' and 'headers' are None,
+        preprocesses an empty file -- which can be useful to determine the
+        symbols the preprocessor and compiler set by default.
+        """
+
+        self._check_compiler()
+        (src, out) = self._preprocess(body, headers, include_dirs, lang)
+
+        if type(pattern) is StringType:
+            pattern = re.compile(pattern)
+
+        # Scan the preprocessor output line by line; stop at first match.
+        file = open(out)
+        match = 0
+        while 1:
+            line = file.readline()
+            if line == '':
+                break
+            if pattern.search(line):
+                match = 1
+                break
+
+        file.close()
+        self._clean()
+        return match
+
+    def try_compile (self, body, headers=None, include_dirs=None, lang="c"):
+        """Try to compile a source file built from 'body' and 'headers'.
+        Return true on success, false otherwise.
+        """
+        from distutils.ccompiler import CompileError
+        self._check_compiler()
+        try:
+            self._compile(body, headers, include_dirs, lang)
+            ok = 1
+        except CompileError:
+            ok = 0
+
+        # Log the outcome, then remove the temporary files.
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+    def try_link (self, body,
+                  headers=None, include_dirs=None,
+                  libraries=None, library_dirs=None,
+                  lang="c"):
+        """Try to compile and link a source file, built from 'body' and
+        'headers', to executable form.  Return true on success, false
+        otherwise.
+        """
+        from distutils.ccompiler import CompileError, LinkError
+        self._check_compiler()
+        try:
+            # _link() compiles and links in one step; failure at either
+            # stage counts as failure here.
+            self._link(body, headers, include_dirs,
+                       libraries, library_dirs, lang)
+            ok = 1
+        except (CompileError, LinkError):
+            ok = 0
+
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+    def try_run (self, body,
+                 headers=None, include_dirs=None,
+                 libraries=None, library_dirs=None,
+                 lang="c"):
+        """Try to compile, link to an executable, and run a program
+        built from 'body' and 'headers'.  Return true on success, false
+        otherwise.
+        """
+        from distutils.ccompiler import CompileError, LinkError
+        self._check_compiler()
+        try:
+            src, obj, exe = self._link(body, headers, include_dirs,
+                                       libraries, library_dirs, lang)
+            # Actually execute the freshly-built program; spawn() raises
+            # DistutilsExecError (module-level import, outside this hunk --
+            # NOTE(review): verify) if the program fails to run.
+            self.spawn([exe])
+            ok = 1
+        except (CompileError, LinkError, DistutilsExecError):
+            ok = 0
+
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+
+    # -- High-level methods --------------------------------------------
+    # (these are the ones that are actually likely to be useful
+    # when implementing a real-world config command!)
+
+    def check_func (self, func,
+                    headers=None, include_dirs=None,
+                    libraries=None, library_dirs=None,
+                    decl=0, call=0):
+
+        """Determine if function 'func' is available by constructing a
+        source file that refers to 'func', and compiles and links it.
+        If everything succeeds, returns true; otherwise returns false.
+
+        The constructed source file starts out by including the header
+        files listed in 'headers'.  If 'decl' is true, it then declares
+        'func' (as "int func()"); you probably shouldn't supply 'headers'
+        and set 'decl' true in the same call, or you might get errors about
+        conflicting declarations for 'func'.  Finally, the constructed
+        'main()' function either references 'func' or (if 'call' is true)
+        calls it.  'libraries' and 'library_dirs' are used when
+        linking.
+        """
+
+        self._check_compiler()
+        # Build the C source as a list of lines, then join.
+        body = []
+        if decl:
+            body.append("int %s ();" % func)
+        body.append("int main () {")
+        if call:
+            body.append("  %s();" % func)
+        else:
+            # A bare reference is enough to force the linker to resolve
+            # the symbol without actually invoking it.
+            body.append("  %s;" % func)
+        body.append("}")
+        body = string.join(body, "\n") + "\n"
+
+        return self.try_link(body, headers, include_dirs,
+                             libraries, library_dirs)
+
+    # check_func ()
+
+    def check_lib (self, library, library_dirs=None,
+                   headers=None, include_dirs=None, other_libraries=[]):
+        """Determine if 'library' is available to be linked against,
+        without actually checking that any particular symbols are provided
+        by it.  'headers' will be used in constructing the source file to
+        be compiled, but the only effect of this is to check if all the
+        header files listed are available.  Any libraries listed in
+        'other_libraries' will be included in the link, in case 'library'
+        has symbols that depend on other libraries.
+        """
+        self._check_compiler()
+        # (Mutable default 'other_libraries=[]' is a classic Python
+        # pitfall, but it is only read here -- never mutated -- so it is
+        # harmless in this method.)
+        return self.try_link("int main (void) { }",
+                             headers, include_dirs,
+                             [library]+other_libraries, library_dirs)
+
+    def check_header (self, header, include_dirs=None,
+                      library_dirs=None, lang="c"):
+        """Determine if the system header file named by 'header'
+        exists and can be found by the preprocessor; return true if so,
+        false otherwise.
+        """
+        # NOTE(review): 'library_dirs' and 'lang' are accepted but never
+        # forwarded to try_cpp(), so they have no effect here.
+        return self.try_cpp(body="/* No body */", headers=[header],
+                            include_dirs=include_dirs)
+
+
+# class config
+
+
+def dump_file (filename, head=None):
+    """Dump the contents of 'filename' to stdout, preceded by 'head'
+    (or by "filename:" if 'head' is None).  Debugging helper.
+    """
+    if head is None:
+        print filename + ":"
+    else:
+        print head
+
+    # (Note: 'file' shadows the builtin of that name.)
+    file = open(filename)
+    sys.stdout.write(file.read())
+    file.close()
diff --git a/lib-python/2.2/distutils/command/install.py b/lib-python/2.2/distutils/command/install.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install.py
@@ -0,0 +1,598 @@
+"""distutils.command.install
+
+Implements the Distutils 'install' command."""
+
+# created 1999/03/13, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import *
+from distutils.core import Command, DEBUG
+from distutils.sysconfig import get_config_vars
+from distutils.errors import DistutilsPlatformError
+from distutils.file_util import write_file
+from distutils.util import convert_path, subst_vars, change_root
+from distutils.errors import DistutilsOptionError
+from glob import glob
+
+# Pre-2.2 Windows installs went straight into the prefix; 2.2+ uses
+# Lib/site-packages.  NOTE(review): comparing sys.version as a string is
+# fragile (e.g. "2.10" < "2.2"), though harmless for the versions here.
+if sys.version < "2.2":
+    WINDOWS_SCHEME = {
+        'purelib': '$base',
+        'platlib': '$base',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+    }
+else:
+    WINDOWS_SCHEME = {
+        'purelib': '$base/Lib/site-packages',
+        'platlib': '$base/Lib/site-packages',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+    }
+
+# Maps scheme name -> dict of $-templates; the templates are expanded
+# later (see 'select_scheme' and 'expand_dirs' below) against the
+# 'config_vars' computed in 'finalize_options'.
+INSTALL_SCHEMES = {
+    'unix_prefix': {
+        'purelib': '$base/lib/python$py_version_short/site-packages',
+        'platlib': '$platbase/lib/python$py_version_short/site-packages',
+        'headers': '$base/include/python$py_version_short/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'unix_home': {
+        'purelib': '$base/lib/python',
+        'platlib': '$base/lib/python',
+        'headers': '$base/include/python/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'nt': WINDOWS_SCHEME,
+    'mac': {
+        'purelib': '$base/Lib/site-packages',
+        'platlib': '$base/Lib/site-packages',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+        },
+    # 'java' scheme keyed on os.name -- presumably added for Jython;
+    # NOTE(review): confirm.
+    'java': {
+        'purelib': '$base/Lib',
+        'platlib': '$base/Lib',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+        },
+    }
+
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every installation scheme above,
+# and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+
+
+class install (Command):
+
+    """Distutils 'install' command: computes the installation directories
+    from the user's prefix/home/scheme options and delegates the actual
+    copying to the 'install_*' sub-commands listed in 'sub_commands'.
+    """
+
+    description = "install everything from build directory"
+
+    user_options = [
+        # Select installation scheme and set base director(y|ies)
+        ('prefix=', None,
+         "installation prefix"),
+        ('exec-prefix=', None,
+         "(Unix only) prefix for platform-specific files"),
+        ('home=', None,
+         "(Unix only) home directory to install under"),
+
+        # Or, just set the base director(y|ies)
+        ('install-base=', None,
+         "base installation directory (instead of --prefix or --home)"),
+        ('install-platbase=', None,
+         "base installation directory for platform-specific files " +
+         "(instead of --exec-prefix or --home)"),
+        ('root=', None,
+         "install everything relative to this alternate root directory"),
+
+        # Or, explicitly set the installation scheme
+        ('install-purelib=', None,
+         "installation directory for pure Python module distributions"),
+        ('install-platlib=', None,
+         "installation directory for non-pure module distributions"),
+        ('install-lib=', None,
+         "installation directory for all module distributions " +
+         "(overrides --install-purelib and --install-platlib)"),
+
+        ('install-headers=', None,
+         "installation directory for C/C++ headers"),
+        ('install-scripts=', None,
+         "installation directory for Python scripts"),
+        ('install-data=', None,
+         "installation directory for data files"),
+
+        # Byte-compilation options -- see install_lib.py for details, as
+        # these are duplicated from there (but only install_lib does
+        # anything with them).
+        ('compile', 'c', "compile .py to .pyc [default]"),
+        ('no-compile', None, "don't compile .py files"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+
+        # Miscellaneous control options
+        ('force', 'f',
+         "force installation (overwrite any existing files)"),
+        ('skip-build', None,
+         "skip rebuilding everything (for testing/debugging)"),
+
+        # Where to install documentation (eventually!)
+        #('doc-format=', None, "format of documentation to generate"),
+        #('install-man=', None, "directory for Unix man pages"),
+        #('install-html=', None, "directory for HTML documentation"),
+        #('install-info=', None, "directory for GNU info files"),
+
+        ('record=', None,
+         "filename in which to record list of installed files"),
+        ]
+
+    boolean_options = ['compile', 'force', 'skip-build']
+    # '--no-compile' is the negation of '--compile'.
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+        """Set every option to its 'unset' value; the real values are all
+        computed in 'finalize_options()'."""
+
+        # High-level options: these select both an installation base
+        # and scheme.
+        self.prefix = None
+        self.exec_prefix = None
+        self.home = None
+
+        # These select only the installation base; it's up to the user to
+        # specify the installation scheme (currently, that means supplying
+        # the --install-{platlib,purelib,scripts,data} options).
+        self.install_base = None
+        self.install_platbase = None
+        self.root = None
+
+        # These options are the actual installation directories; if not
+        # supplied by the user, they are filled in using the installation
+        # scheme implied by prefix/exec-prefix/home and the contents of
+        # that installation scheme.
+        self.install_purelib = None     # for pure module distributions
+        self.install_platlib = None     # non-pure (dists w/ extensions)
+        self.install_headers = None     # for C/C++ headers
+        self.install_lib = None         # set to either purelib or platlib
+        self.install_scripts = None
+        self.install_data = None
+
+        self.compile = None
+        self.optimize = None
+
+        # These two are for putting non-packagized distributions into their
+        # own directory and creating a .pth file if it makes sense.
+        # 'extra_path' comes from the setup file; 'install_path_file' can
+        # be turned off if it makes no sense to install a .pth file.  (But
+        # better to install it uselessly than to guess wrong and not
+        # install it when it's necessary and would be used!)  Currently,
+        # 'install_path_file' is always true unless some outsider meddles
+        # with it.
+        self.extra_path = None
+        self.install_path_file = 1
+
+        # 'force' forces installation, even if target files are not
+        # out-of-date.  'skip_build' skips running the "build" command,
+        # handy if you know it's not necessary.  'warn_dir' (which is *not*
+        # a user option, it's just there so the bdist_* commands can turn
+        # it off) determines whether we warn about installing to a
+        # directory not in sys.path.
+        self.force = 0
+        self.skip_build = 0
+        self.warn_dir = 1
+
+        # These are only here as a conduit from the 'build' command to the
+        # 'install_*' commands that do the real work.  ('build_base' isn't
+        # actually used anywhere, but it might be useful in future.)  They
+        # are not user options, because if the user told the install
+        # command where the build directory is, that wouldn't affect the
+        # build command.
+        self.build_base = None
+        self.build_lib = None
+
+        # Not defined yet because we don't know anything about
+        # documentation yet.
+        #self.install_man = None
+        #self.install_html = None
+        #self.install_info = None
+
+        self.record = None
+
+
+    # -- Option finalizing methods -------------------------------------
+    # (This is rather more involved than for most commands,
+    # because this is where the policy for installing third-
+    # party Python modules on various platforms given a wide
+    # array of user input is decided.  Yes, it's quite complex!)
+
+    def finalize_options (self):
+        """Resolve every install_* directory from the user's options,
+        the platform, and the INSTALL_SCHEMES tables."""
+
+        # This method (and its pliant slaves, like 'finalize_unix()',
+        # 'finalize_other()', and 'select_scheme()') is where the default
+        # installation directories for modules, extension modules, and
+        # anything else we care to install from a Python module
+        # distribution.  Thus, this code makes a pretty important policy
+        # statement about how third-party stuff is added to a Python
+        # installation!  Note that the actual work of installation is done
+        # by the relatively simple 'install_*' commands; they just take
+        # their orders from the installation directory options determined
+        # here.
+
+        # Check for errors/inconsistencies in the options; first, stuff
+        # that's wrong on any platform.
+
+        if ((self.prefix or self.exec_prefix or self.home) and
+            (self.install_base or self.install_platbase)):
+            raise DistutilsOptionError, \
+                  ("must supply either prefix/exec-prefix/home or " +
+                   "install-base/install-platbase -- not both")
+
+        # Next, stuff that's wrong (or dubious) only on certain platforms.
+        if os.name == 'posix':
+            if self.home and (self.prefix or self.exec_prefix):
+                raise DistutilsOptionError, \
+                      ("must supply either home or prefix/exec-prefix -- " +
+                       "not both")
+        else:
+            # Non-posix platforms have no exec-prefix/home concept; warn
+            # and discard rather than error out.
+            if self.exec_prefix:
+                self.warn("exec-prefix option ignored on this platform")
+                self.exec_prefix = None
+            if self.home:
+                self.warn("home option ignored on this platform")
+                self.home = None
+
+        # Now the interesting logic -- so interesting that we farm it out
+        # to other methods.  The goal of these methods is to set the final
+        # values for the install_{lib,scripts,data,...}  options, using as
+        # input a heady brew of prefix, exec_prefix, home, install_base,
+        # install_platbase, user-supplied versions of
+        # install_{purelib,platlib,lib,scripts,data,...}, and the
+        # INSTALL_SCHEME dictionary above.  Phew!
+
+        self.dump_dirs("pre-finalize_{unix,other}")
+
+        if os.name == 'posix':
+            self.finalize_unix()
+        else:
+            self.finalize_other()
+
+        self.dump_dirs("post-finalize_{unix,other}()")
+
+        # Expand configuration variables, tilde, etc. in self.install_base
+        # and self.install_platbase -- that way, we can use $base or
+        # $platbase in the other installation directories and not worry
+        # about needing recursive variable expansion (shudder).
+
+        # First whitespace-delimited word of sys.version, e.g. "2.2.1".
+        py_version = (string.split(sys.version))[0]
+        (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
+        self.config_vars = {'dist_name': self.distribution.get_name(),
+                            'dist_version': self.distribution.get_version(),
+                            'dist_fullname': self.distribution.get_fullname(),
+                            'py_version': py_version,
+                            # first three chars, e.g. "2.2" -- would break
+                            # for a two-digit minor version
+                            'py_version_short': py_version[0:3],
+                            'sys_prefix': prefix,
+                            'prefix': prefix,
+                            'sys_exec_prefix': exec_prefix,
+                            'exec_prefix': exec_prefix,
+                           }
+        self.expand_basedirs()
+
+        self.dump_dirs("post-expand_basedirs()")
+
+        # Now define config vars for the base directories so we can expand
+        # everything else.
+        self.config_vars['base'] = self.install_base
+        self.config_vars['platbase'] = self.install_platbase
+
+        if DEBUG:
+            from pprint import pprint
+            print "config vars:"
+            pprint(self.config_vars)
+
+        # Expand "~" and configuration variables in the installation
+        # directories.
+        self.expand_dirs()
+
+        self.dump_dirs("post-expand_dirs()")
+
+        # Pick the actual directory to install all modules to: either
+        # install_purelib or install_platlib, depending on whether this
+        # module distribution is pure or not.  Of course, if the user
+        # already specified install_lib, use their selection.
+        if self.install_lib is None:
+            if self.distribution.ext_modules: # has extensions: non-pure
+                self.install_lib = self.install_platlib
+            else:
+                self.install_lib = self.install_purelib
+
+
+        # Convert directories from Unix /-separated syntax to the local
+        # convention.
+        self.convert_paths('lib', 'purelib', 'platlib',
+                           'scripts', 'data', 'headers')
+
+        # Well, we're not actually fully completely finalized yet: we still
+        # have to deal with 'extra_path', which is the hack for allowing
+        # non-packagized module distributions (hello, Numerical Python!) to
+        # get their own directories.
+        self.handle_extra_path()
+        self.install_libbase = self.install_lib # needed for .pth file
+        self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
+
+        # If a new root directory was supplied, make all the installation
+        # dirs relative to it.
+        if self.root is not None:
+            self.change_roots('libbase', 'lib', 'purelib', 'platlib',
+                              'scripts', 'data', 'headers')
+
+        self.dump_dirs("after prepending root")
+
+        # Find out the build directories, ie. where to install from.
+        self.set_undefined_options('build',
+                                   ('build_base', 'build_base'),
+                                   ('build_lib', 'build_lib'))
+
+        # Punt on doc directories for now -- after all, we're punting on
+        # documentation completely!
+
+    # finalize_options ()
+
+
+    def dump_dirs (self, msg):
+        """Debugging aid: when DEBUG is set, print 'msg' followed by the
+        current value of every user option."""
+        if DEBUG:
+            from distutils.fancy_getopt import longopt_xlate
+            print msg + ":"
+            for opt in self.user_options:
+                opt_name = opt[0]
+                # Strip the trailing '=' marking options that take a value.
+                if opt_name[-1] == "=":
+                    opt_name = opt_name[0:-1]
+                # Translate long-option name to attribute name (e.g. '-'
+                # to '_') using the table from fancy_getopt.
+                opt_name = string.translate(opt_name, longopt_xlate)
+                val = getattr(self, opt_name)
+                print "  %s: %s" % (opt_name, val)
+
+
+    def finalize_unix (self):
+        """Finalize base directories and scheme for posix platforms."""
+
+        # If the user supplied an explicit base, the scheme directories
+        # must be supplied too -- there is no default to fall back on.
+        if self.install_base is not None or self.install_platbase is not None:
+            if ((self.install_lib is None and
+                 self.install_purelib is None and
+                 self.install_platlib is None) or
+                self.install_headers is None or
+                self.install_scripts is None or
+                self.install_data is None):
+                raise DistutilsOptionError, \
+                      "install-base or install-platbase supplied, but " + \
+                      "installation scheme is incomplete"
+            return
+
+        if self.home is not None:
+            # --home: everything goes under one directory.
+            self.install_base = self.install_platbase = self.home
+            self.select_scheme("unix_home")
+        else:
+            if self.prefix is None:
+                if self.exec_prefix is not None:
+                    raise DistutilsOptionError, \
+                          "must not supply exec-prefix without prefix"
+
+                # Default to the interpreter's own prefixes.
+                self.prefix = os.path.normpath(sys.prefix)
+                self.exec_prefix = os.path.normpath(sys.exec_prefix)
+
+            else:
+                if self.exec_prefix is None:
+                    self.exec_prefix = self.prefix
+
+            self.install_base = self.prefix
+            self.install_platbase = self.exec_prefix
+            self.select_scheme("unix_prefix")
+
+    # finalize_unix ()
+
+
+    def finalize_other (self):          # Windows and Mac OS for now
+        """Finalize base directories and scheme for non-posix platforms;
+        the scheme is looked up by os.name ('nt', 'mac', 'java', ...)."""
+
+        if self.prefix is None:
+            self.prefix = os.path.normpath(sys.prefix)
+
+        self.install_base = self.install_platbase = self.prefix
+        try:
+            self.select_scheme(os.name)
+        except KeyError:
+            # os.name has no entry in INSTALL_SCHEMES.
+            raise DistutilsPlatformError, \
+                  "I don't know how to install stuff on '%s'" % os.name
+
+    # finalize_other ()
+
+
+    def select_scheme (self, name):
+        """Fill in any install_* attribute still unset from the named
+        entry in INSTALL_SCHEMES; user-supplied values are preserved."""
+        # it's the caller's problem if they supply a bad name!
+        scheme = INSTALL_SCHEMES[name]
+        for key in SCHEME_KEYS:
+            attrname = 'install_' + key
+            if getattr(self, attrname) is None:
+                setattr(self, attrname, scheme[key])
+
+
+    def _expand_attrs (self, attrs):
+        """Expand '~' (posix only) and $-config-variables in each named
+        attribute that is not None, in place."""
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                if os.name == 'posix':
+                    val = os.path.expanduser(val)
+                val = subst_vars(val, self.config_vars)
+                setattr(self, attr, val)
+
+
+    def expand_basedirs (self):
+        """Expand the base directories first, so $base/$platbase can be
+        used inside the other install_* templates."""
+        self._expand_attrs(['install_base',
+                            'install_platbase',
+                            'root'])
+
+    def expand_dirs (self):
+        """Expand the per-category installation directories."""
+        self._expand_attrs(['install_purelib',
+                            'install_platlib',
+                            'install_lib',
+                            'install_headers',
+                            'install_scripts',
+                            'install_data',])
+
+
+    def convert_paths (self, *names):
+        """Convert each 'install_<name>' attribute from Unix /-separated
+        form to the local path convention, in place."""
+        for name in names:
+            attr = "install_" + name
+            setattr(self, attr, convert_path(getattr(self, attr)))
+
+
+    def handle_extra_path (self):
+        """Resolve 'extra_path' (from the command line or the setup
+        script) into 'self.path_file' (the .pth file base name, or None)
+        and 'self.extra_dirs' (extra subdirectory under install_lib)."""
+
+        if self.extra_path is None:
+            self.extra_path = self.distribution.extra_path
+
+        if self.extra_path is not None:
+            # Accept "name" or "name,dir" as a comma-separated string.
+            if type(self.extra_path) is StringType:
+                self.extra_path = string.split(self.extra_path, ',')
+
+            if len(self.extra_path) == 1:
+                # One element: same name for the .pth file and the dir.
+                path_file = extra_dirs = self.extra_path[0]
+            elif len(self.extra_path) == 2:
+                (path_file, extra_dirs) = self.extra_path
+            else:
+                raise DistutilsOptionError, \
+                      "'extra_path' option must be a list, tuple, or " + \
+                      "comma-separated string with 1 or 2 elements"
+
+            # convert to local form in case Unix notation used (as it
+            # should be in setup scripts)
+            extra_dirs = convert_path(extra_dirs)
+
+        else:
+            path_file = None
+            extra_dirs = ''
+
+        # XXX should we warn if path_file and not extra_dirs? (in which
+        # case the path file would be harmless but pointless)
+        self.path_file = path_file
+        self.extra_dirs = extra_dirs
+
+    # handle_extra_path ()
+
+
+    def change_roots (self, *names):
+        """Re-root each 'install_<name>' directory under self.root (for
+        --root staged installs), in place."""
+        for name in names:
+            attr = "install_" + name
+            setattr(self, attr, change_root(self.root, getattr(self, attr)))
+
+
+    # -- Command execution methods -------------------------------------
+
+    def run (self):
+        """Build (unless --skip-build), run the needed install_* sub-
+        commands, write the .pth file and the --record file if requested,
+        and warn if modules land outside sys.path."""
+
+        # Obviously have to build before we can install
+        if not self.skip_build:
+            self.run_command('build')
+
+        # Run all sub-commands (at least those that need to be run)
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+        if self.path_file:
+            self.create_path_file()
+
+        # write list of installed files, if requested.
+        if self.record:
+            outputs = self.get_outputs()
+            if self.root:               # strip any package prefix
+                root_len = len(self.root)
+                for counter in xrange(len(outputs)):
+                    outputs[counter] = outputs[counter][root_len:]
+            self.execute(write_file,
+                         (self.record, outputs),
+                         "writing list of installed files to '%s'" %
+                         self.record)
+
+        # Normalize both sys.path and install_lib so the membership test
+        # below is not fooled by case or separator differences.
+        sys_path = map(os.path.normpath, sys.path)
+        sys_path = map(os.path.normcase, sys_path)
+        install_lib = os.path.normcase(os.path.normpath(self.install_lib))
+        if (self.warn_dir and
+            not (self.path_file and self.install_path_file) and
+            install_lib not in sys_path):
+            self.warn(("modules installed to '%s', which is not in " +
+                       "Python's module search path (sys.path) -- " +
+                       "you'll have to change the search path yourself") %
+                      self.install_lib)
+
+    # run ()
+
+    def create_path_file (self):
+        """Write '<path_file>.pth' (containing extra_dirs) into the
+        install_libbase directory, unless 'install_path_file' is off."""
+        filename = os.path.join(self.install_libbase,
+                                self.path_file + ".pth")
+        if self.install_path_file:
+            self.execute(write_file,
+                         (filename, [self.extra_dirs]),
+                         "creating %s" % filename)
+        else:
+            self.warn("path file '%s' not created" % filename)
+
+
+    # -- Reporting methods ---------------------------------------------
+
+    def get_outputs (self):
+        """Return the list of files that would be (or were) installed,
+        gathered from all sub-commands, without duplicates."""
+        # Assemble the outputs of all the sub-commands.
+        outputs = []
+        for cmd_name in self.get_sub_commands():
+            cmd = self.get_finalized_command(cmd_name)
+            # Add the contents of cmd.get_outputs(), ensuring
+            # that outputs doesn't contain duplicate entries
+            for filename in cmd.get_outputs():
+                if filename not in outputs:
+                    outputs.append(filename)
+
+        # The .pth file is written by this command itself, not a
+        # sub-command, so account for it here.
+        if self.path_file and self.install_path_file:
+            outputs.append(os.path.join(self.install_libbase,
+                                        self.path_file + ".pth"))
+
+        return outputs
+
+    def get_inputs (self):
+        """Return the list of input files, gathered from all
+        sub-commands (duplicates are not filtered here)."""
+        # XXX gee, this looks familiar ;-(
+        inputs = []
+        for cmd_name in self.get_sub_commands():
+            cmd = self.get_finalized_command(cmd_name)
+            inputs.extend(cmd.get_inputs())
+
+        return inputs
+
+
+    # -- Predicates for sub-command list -------------------------------
+
+    def has_lib (self):
+        """Return true if the current distribution has any Python
+        modules to install."""
+        return (self.distribution.has_pure_modules() or
+                self.distribution.has_ext_modules())
+
+    def has_headers (self):
+        """Return true if the distribution declares C/C++ headers."""
+        return self.distribution.has_headers()
+
+    def has_scripts (self):
+        """Return true if the distribution declares scripts."""
+        return self.distribution.has_scripts()
+
+    def has_data (self):
+        """Return true if the distribution declares data files."""
+        return self.distribution.has_data_files()
+
+
+    # 'sub_commands': a list of commands this command might have to run to
+    # get its work done.  See cmd.py for more info.
+    # Each entry pairs a command name with a predicate (bound above) that
+    # decides whether the sub-command needs to run at all.
+    sub_commands = [('install_lib',     has_lib),
+                    ('install_headers', has_headers),
+                    ('install_scripts', has_scripts),
+                    ('install_data',    has_data),
+                   ]
+
+# class install
diff --git a/lib-python/2.2/distutils/command/install_data.py b/lib-python/2.2/distutils/command/install_data.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_data.py
@@ -0,0 +1,83 @@
+"""distutils.command.install_data
+
+Implements the Distutils 'install_data' command, for installing
+platform-independent data files."""
+
+# contributed by Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import os
+from types import StringType
+from distutils.core import Command
+from distutils.util import change_root, convert_path
+
+class install_data (Command):
+
+    """Distutils 'install_data' command: copies the distribution's
+    'data_files' entries -- plain filenames or (directory, files)
+    tuples -- into the data installation directory.
+    """
+
+    description = "install data files"
+
+    user_options = [
+        ('install-dir=', 'd',
+         "base directory for installing data files "
+         "(default: installation base dir)"),
+        ('root=', None,
+         "install everything relative to this alternate root directory"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ]
+
+    boolean_options = ['force']
+
+    def initialize_options (self):
+        self.install_dir = None
+        self.outfiles = []
+        self.root = None
+        self.force = 0
+
+        self.data_files = self.distribution.data_files
+        # NOTE(review): unlike install.warn_dir, nothing visible here ever
+        # turns this off -- the warning below fires for every bare-string
+        # entry; confirm against callers.
+        self.warn_dir = 1
+
+    def finalize_options (self):
+        # Inherit unset values from the top-level 'install' command.
+        self.set_undefined_options('install',
+                                   ('install_data', 'install_dir'),
+                                   ('root', 'root'),
+                                   ('force', 'force'),
+                                  )
+
+    def run (self):
+        self.mkpath(self.install_dir)
+        for f in self.data_files:
+            if type(f) == StringType:
+                # it's a simple file, so copy it
+                f = convert_path(f)
+                if self.warn_dir:
+                    self.warn("setup script did not provide a directory for "
+                              "'%s' -- installing right in '%s'" %
+                              (f, self.install_dir))
+                (out, _) = self.copy_file(f, self.install_dir)
+                self.outfiles.append(out)
+            else:
+                # it's a tuple with path to install to and a list of files
+                dir = convert_path(f[0])
+                if not os.path.isabs(dir):
+                    dir = os.path.join(self.install_dir, dir)
+                elif self.root:
+                    # NOTE(review): change_root is applied only to absolute
+                    # dirs -- relative ones are already under install_dir,
+                    # which was re-rooted by the 'install' command.
+                    dir = change_root(self.root, dir)
+                self.mkpath(dir)
+
+                if f[1] == []:
+                    # If there are no files listed, the user must be
+                    # trying to create an empty directory, so add the
+                    # directory to the list of output files.
+                    self.outfiles.append(dir)
+                else:
+                    # Copy files, adding them to the list of output files.
+                    for data in f[1]:
+                        data = convert_path(data)
+                        (out, _) = self.copy_file(data, dir)
+                        self.outfiles.append(out)
+
+    def get_inputs (self):
+        return self.data_files or []
+
+    def get_outputs (self):
+        return self.outfiles
diff --git a/lib-python/2.2/distutils/command/install_headers.py b/lib-python/2.2/distutils/command/install_headers.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_headers.py
@@ -0,0 +1,53 @@
+"""distutils.command.install_headers
+
+Implements the Distutils 'install_headers' command, to install C/C++ header
+files to the Python include directory."""
+
+# created 2000/05/26, Greg Ward
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+
+
+class install_headers (Command):
+    """Install C/C++ header files listed in the setup script's 'headers'
+    option into the directory chosen by the umbrella 'install' command.
+    """
+
+    description = "install C/C++ header files"
+
+    user_options = [('install-dir=', 'd',
+                     "directory to install header files to"),
+                    ('force', 'f',
+                     "force installation (overwrite existing files)"),
+                   ]
+
+    boolean_options = ['force']
+
+    def initialize_options (self):
+        """Set the default (undecided) values for all options."""
+        self.install_dir = None
+        self.force = 0
+        self.outfiles = []
+
+    def finalize_options (self):
+        """Inherit 'install_dir' and 'force' from the 'install' command."""
+        self.set_undefined_options('install',
+                                   ('install_headers', 'install_dir'),
+                                   ('force', 'force'))
+
+
+    def run (self):
+        """Copy each declared header into 'install_dir', recording the
+        installed paths in 'self.outfiles'.  A no-op when the setup
+        script declared no headers.
+        """
+        headers = self.distribution.headers
+        if not headers:
+            return
+
+        self.mkpath(self.install_dir)
+        for header in headers:
+            (out, _) = self.copy_file(header, self.install_dir)
+            self.outfiles.append(out)
+
+    def get_inputs (self):
+        """Return the list of headers named in the setup script (or [])."""
+        return self.distribution.headers or []
+
+    def get_outputs (self):
+        """Return the list of header files installed by 'run()'."""
+        return self.outfiles
+
+# class install_headers
diff --git a/lib-python/2.2/distutils/command/install_lib.py b/lib-python/2.2/distutils/command/install_lib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_lib.py
@@ -0,0 +1,213 @@
+# created 1999/03/13, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import IntType
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+from distutils.dir_util import copy_tree
+
+class install_lib (Command):
+    """Install all Python modules (pure Python and extensions) by dumping
+    the build directory into the installation directory, optionally
+    byte-compiling the pure-Python modules afterwards.
+    """
+
+    description = "install all Python modules (extensions and pure Python)"
+
+    # The byte-compilation options are a tad confusing.  Here are the
+    # possible scenarios:
+    #   1) no compilation at all (--no-compile --no-optimize)
+    #   2) compile .pyc only (--compile --no-optimize; default)
+    #   3) compile .pyc and "level 1" .pyo (--compile --optimize)
+    #   4) compile "level 1" .pyo only (--no-compile --optimize)
+    #   5) compile .pyc and "level 2" .pyo (--compile --optimize-more)
+    #   6) compile "level 2" .pyo only (--no-compile --optimize-more)
+    #
+    # The UI for this is two options, 'compile' and 'optimize'.
+    # 'compile' is strictly boolean, and only decides whether to
+    # generate .pyc files.  'optimize' is three-way (0, 1, or 2), and
+    # decides both whether to generate .pyo files and what level of
+    # optimization to use.
+
+    user_options = [
+        ('install-dir=', 'd', "directory to install to"),
+        ('build-dir=','b', "build directory (where to install from)"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ('compile', 'c', "compile .py to .pyc [default]"),
+        ('no-compile', None, "don't compile .py files"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('skip-build', None, "skip the build steps"),
+        ]
+
+    boolean_options = ['force', 'compile', 'skip-build']
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+        """Set the default (undecided) values for all options."""
+        # let the 'install' command dictate our installation directory
+        self.install_dir = None
+        self.build_dir = None
+        self.force = 0
+        self.compile = None
+        self.optimize = None
+        self.skip_build = None
+
+    def finalize_options (self):
+        """Fill in unset options from 'install' and validate 'optimize'."""
+
+        # Get all the information we need to install pure Python modules
+        # from the umbrella 'install' command -- build (source) directory,
+        # install (target) directory, and whether to compile .py files.
+        self.set_undefined_options('install',
+                                   ('build_lib', 'build_dir'),
+                                   ('install_lib', 'install_dir'),
+                                   ('force', 'force'),
+                                   ('compile', 'compile'),
+                                   ('optimize', 'optimize'),
+                                   ('skip_build', 'skip_build'),
+                                  )
+
+        if self.compile is None:
+            self.compile = 1
+        if self.optimize is None:
+            self.optimize = 0
+
+        # 'optimize' may arrive as a string from the command line; coerce
+        # and range-check it here so later code can assume 0, 1, or 2.
+        if type(self.optimize) is not IntType:
+            try:
+                self.optimize = int(self.optimize)
+                assert 0 <= self.optimize <= 2
+            except (ValueError, AssertionError):
+                raise DistutilsOptionError, "optimize must be 0, 1, or 2"
+
+    def run (self):
+        """Build (unless --skip-build), install, then optionally compile."""
+
+        # Make sure we have built everything we need first
+        self.build()
+
+        # Install everything: simply dump the entire contents of the build
+        # directory to the installation directory (that's the beauty of
+        # having a build directory!)
+        outfiles = self.install()
+
+        # (Optionally) compile .py to .pyc
+        if outfiles is not None and self.distribution.has_pure_modules():
+            self.byte_compile(outfiles)
+
+    # run ()
+
+
+    # -- Top-level worker functions ------------------------------------
+    # (called from 'run()')
+
+    def build (self):
+        """Run the build commands needed by this distribution, unless
+        the build steps were explicitly skipped.
+        """
+        if not self.skip_build:
+            if self.distribution.has_pure_modules():
+                self.run_command('build_py')
+            if self.distribution.has_ext_modules():
+                self.run_command('build_ext')
+
+    def install (self):
+        """Copy the whole build tree to 'install_dir'; return the list of
+        installed files, or None (with a warning) when there is no build
+        directory to install from.
+        """
+        if os.path.isdir(self.build_dir):
+            outfiles = self.copy_tree(self.build_dir, self.install_dir)
+        else:
+            self.warn("'%s' does not exist -- no Python modules to install" %
+                      self.build_dir)
+            return
+        return outfiles
+
+    def byte_compile (self, files):
+        """Byte-compile 'files' to .pyc and/or .pyo per the 'compile' and
+        'optimize' options.
+        """
+        from distutils.util import byte_compile
+
+        # Get the "--root" directory supplied to the "install" command,
+        # and use it as a prefix to strip off the purported filename
+        # encoded in bytecode files.  This is far from complete, but it
+        # should at least generate usable bytecode in RPM distributions.
+        install_root = self.get_finalized_command('install').root
+
+        if self.compile:
+            byte_compile(files, optimize=0,
+                         force=self.force,
+                         prefix=install_root,
+                         verbose=self.verbose, dry_run=self.dry_run)
+        if self.optimize > 0:
+            byte_compile(files, optimize=self.optimize,
+                         force=self.force,
+                         prefix=install_root,
+                         verbose=self.verbose, dry_run=self.dry_run)
+
+
+    # -- Utility methods -----------------------------------------------
+
+    def _mutate_outputs (self, has_any, build_cmd, cmd_option, output_dir):
+        """Map the outputs of build command 'build_cmd' from its build
+        directory (named by attribute 'cmd_option') into 'output_dir',
+        i.e. compute where each built file would be installed.  Returns
+        [] when 'has_any' is false.
+        """
+
+        if not has_any:
+            return []
+
+        build_cmd = self.get_finalized_command(build_cmd)
+        build_files = build_cmd.get_outputs()
+        build_dir = getattr(build_cmd, cmd_option)
+
+        # strip "<build_dir>/" off the front of each build filename
+        prefix_len = len(build_dir) + len(os.sep)
+        outputs = []
+        for file in build_files:
+            outputs.append(os.path.join(output_dir, file[prefix_len:]))
+
+        return outputs
+
+    # _mutate_outputs ()
+
+    def _bytecode_filenames (self, py_filenames):
+        """Return the .pyc/.pyo filenames that byte-compiling
+        'py_filenames' would produce, per the current options.
+        """
+        bytecode_files = []
+        for py_file in py_filenames:
+            if self.compile:
+                bytecode_files.append(py_file + "c")
+            if self.optimize > 0:
+                bytecode_files.append(py_file + "o")
+
+        return bytecode_files
+
+
+    # -- External interface --------------------------------------------
+    # (called by outsiders)
+
+    def get_outputs (self):
+        """Return the list of files that would be installed if this command
+        were actually run.  Not affected by the "dry-run" flag or whether
+        modules have actually been built yet.
+        """
+        pure_outputs = \
+            self._mutate_outputs(self.distribution.has_pure_modules(),
+                                 'build_py', 'build_lib',
+                                 self.install_dir)
+        if self.compile:
+            bytecode_outputs = self._bytecode_filenames(pure_outputs)
+        else:
+            bytecode_outputs = []
+
+        ext_outputs = \
+            self._mutate_outputs(self.distribution.has_ext_modules(),
+                                 'build_ext', 'build_lib',
+                                 self.install_dir)
+
+        return pure_outputs + bytecode_outputs + ext_outputs
+
+    # get_outputs ()
+
+    def get_inputs (self):
+        """Get the list of files that are input to this command, ie. the
+        files that get installed as they are named in the build tree.
+        The files in this list correspond one-to-one to the output
+        filenames returned by 'get_outputs()'.
+        """
+        inputs = []
+
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            inputs.extend(build_py.get_outputs())
+
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            inputs.extend(build_ext.get_outputs())
+
+        return inputs
+
+# class install_lib
diff --git a/lib-python/2.2/distutils/command/install_scripts.py b/lib-python/2.2/distutils/command/install_scripts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_scripts.py
@@ -0,0 +1,63 @@
+"""distutils.command.install_scripts
+
+Implements the Distutils 'install_scripts' command, for installing
+Python scripts."""
+
+# contributed by Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from stat import ST_MODE
+
+class install_scripts (Command):
+    """Install scripts (Python or otherwise) from the build directory into
+    the script installation directory, marking them executable on POSIX.
+    """
+
+    description = "install scripts (Python or otherwise)"
+
+    user_options = [
+        ('install-dir=', 'd', "directory to install scripts to"),
+        ('build-dir=','b', "build directory (where to install from)"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ('skip-build', None, "skip the build steps"),
+    ]
+
+    boolean_options = ['force', 'skip-build']
+
+
+    def initialize_options (self):
+        """Set the default (undecided) values for all options."""
+        self.install_dir = None
+        self.force = 0
+        self.build_dir = None
+        self.skip_build = None
+
+    def finalize_options (self):
+        """Fill in unset options from the 'build' and 'install' commands."""
+        self.set_undefined_options('build', ('build_scripts', 'build_dir'))
+        self.set_undefined_options('install',
+                                   ('install_scripts', 'install_dir'),
+                                   ('force', 'force'),
+                                   ('skip_build', 'skip_build'),
+                                  )
+
+    def run (self):
+        """Build scripts (unless --skip-build), copy them into place, and
+        on POSIX set the executable bits on everything installed.
+        """
+        if not self.skip_build:
+            self.run_command('build_scripts')
+        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
+        if os.name == 'posix':
+            # Set the executable bits (owner, group, and world) on
+            # all the scripts we just installed.
+            for file in self.get_outputs():
+                if self.dry_run:
+                    self.announce("changing mode of %s" % file)
+                else:
+                    # add r-x for everyone, keep within the 07777 perm bits
+                    mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
+                    self.announce("changing mode of %s to %o" % (file, mode))
+                    os.chmod(file, mode)
+
+    def get_inputs (self):
+        """Return the scripts named in the setup script (or [])."""
+        return self.distribution.scripts or []
+
+    def get_outputs(self):
+        """Return the list of scripts installed by 'run()' (or [])."""
+        return self.outfiles or []
+
+# class install_scripts
diff --git a/lib-python/2.2/distutils/command/sdist.py b/lib-python/2.2/distutils/command/sdist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/sdist.py
@@ -0,0 +1,475 @@
+"""distutils.command.sdist
+
+Implements the Distutils 'sdist' command (create a source distribution)."""
+
+# created 1999/09/22, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import *
+from glob import glob
+from distutils.core import Command
+from distutils import dir_util, dep_util, file_util, archive_util
+from distutils.text_file import TextFile
+from distutils.errors import *
+from distutils.filelist import FileList
+
+
+def show_formats ():
+    """Print all possible values for the 'formats' option (used by
+    the "--help-formats" command-line option).
+    """
+    from distutils.fancy_getopt import FancyGetopt
+    from distutils.archive_util import ARCHIVE_FORMATS
+    # Build pseudo-options ("formats=<name>") so FancyGetopt can render
+    # each archive format with its description, sorted by name.
+    formats=[]
+    for format in ARCHIVE_FORMATS.keys():
+        formats.append(("formats=" + format, None,
+                        ARCHIVE_FORMATS[format][2]))
+    formats.sort()
+    pretty_printer = FancyGetopt(formats)
+    pretty_printer.print_help(
+        "List of available source distribution formats:")
+
+class sdist (Command):
+    """Create a source distribution: assemble the file list (from a
+    manifest, a manifest template, and/or the default file set), build a
+    release tree, and archive it in the requested formats.
+    """
+
+    description = "create a source distribution (tarball, zip file, etc.)"
+
+    user_options = [
+        ('template=', 't',
+         "name of manifest template file [default: MANIFEST.in]"),
+        ('manifest=', 'm',
+         "name of manifest file [default: MANIFEST]"),
+        ('use-defaults', None,
+         "include the default file set in the manifest "
+         "[default; disable with --no-defaults]"),
+        ('no-defaults', None,
+         "don't include the default file set"),
+        ('prune', None,
+         "specifically exclude files/directories that should not be "
+         "distributed (build tree, RCS/CVS dirs, etc.) "
+         "[default; disable with --no-prune]"),
+        ('no-prune', None,
+         "don't automatically exclude anything"),
+        ('manifest-only', 'o',
+         "just regenerate the manifest and then stop "
+         "(implies --force-manifest)"),
+        ('force-manifest', 'f',
+         "forcibly regenerate the manifest and carry on as usual"),
+        ('formats=', None,
+         "formats for source distribution (comma-separated list)"),
+        ('keep-temp', 'k',
+         "keep the distribution tree around after creating " +
+         "archive file(s)"),
+        ('dist-dir=', 'd',
+         "directory to put the source distribution archive(s) in "
+         "[default: dist]"),
+        ]
+
+    boolean_options = ['use-defaults', 'prune',
+                       'manifest-only', 'force-manifest',
+                       'keep-temp']
+
+    help_options = [
+        ('help-formats', None,
+         "list available distribution formats", show_formats),
+        ]
+
+    negative_opt = {'no-defaults': 'use-defaults',
+                    'no-prune': 'prune' }
+
+    # default archive format, keyed by os.name
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip' }
+
+    def initialize_options (self):
+        """Set the default (undecided) values for all options."""
+        # 'template' and 'manifest' are, respectively, the names of
+        # the manifest template and manifest file.
+        self.template = None
+        self.manifest = None
+
+        # 'use_defaults': if true, we will include the default file set
+        # in the manifest
+        self.use_defaults = 1
+        self.prune = 1
+
+        self.manifest_only = 0
+        self.force_manifest = 0
+
+        self.formats = None
+        self.keep_temp = 0
+        self.dist_dir = None
+
+        self.archive_files = None
+
+
+    def finalize_options (self):
+        """Apply defaults and validate the requested archive formats."""
+        if self.manifest is None:
+            self.manifest = "MANIFEST"
+        if self.template is None:
+            self.template = "MANIFEST.in"
+
+        self.ensure_string_list('formats')
+        if self.formats is None:
+            try:
+                self.formats = [self.default_format[os.name]]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      "don't know how to create source distributions " + \
+                      "on platform %s" % os.name
+
+        bad_format = archive_util.check_archive_formats(self.formats)
+        if bad_format:
+            raise DistutilsOptionError, \
+                  "unknown archive format '%s'" % bad_format
+
+        if self.dist_dir is None:
+            self.dist_dir = "dist"
+
+
+    def run (self):
+        """Check meta-data, build the file list, and (unless
+        --manifest-only) create the distribution archive(s).
+        """
+
+        # 'filelist' contains the list of files that will make up the
+        # manifest
+        self.filelist = FileList()
+
+        # Ensure that all required meta-data is given; warn if not (but
+        # don't die, it's not *that* serious!)
+        self.check_metadata()
+
+        # Do whatever it takes to get the list of files to process
+        # (process the manifest template, read an existing manifest,
+        # whatever).  File list is accumulated in 'self.filelist'.
+        self.get_file_list()
+
+        # If user just wanted us to regenerate the manifest, stop now.
+        if self.manifest_only:
+            return
+
+        # Otherwise, go ahead and create the source distribution tarball,
+        # or zipfile, or whatever.
+        self.make_distribution()
+
+
+    def check_metadata (self):
+        """Ensure that all required elements of meta-data (name, version,
+        URL, (author and author_email) or (maintainer and
+        maintainer_email)) are supplied by the Distribution object; warn if
+        any are missing.
+        """
+        metadata = self.distribution.metadata
+
+        missing = []
+        for attr in ('name', 'version', 'url'):
+            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
+                missing.append(attr)
+
+        if missing:
+            self.warn("missing required meta-data: " +
+                      string.join(missing, ", "))
+
+        if metadata.author:
+            if not metadata.author_email:
+                self.warn("missing meta-data: if 'author' supplied, " +
+                          "'author_email' must be supplied too")
+        elif metadata.maintainer:
+            if not metadata.maintainer_email:
+                self.warn("missing meta-data: if 'maintainer' supplied, " +
+                          "'maintainer_email' must be supplied too")
+        else:
+            self.warn("missing meta-data: either (author and author_email) " +
+                      "or (maintainer and maintainer_email) " +
+                      "must be supplied")
+
+    # check_metadata ()
+
+
+    def get_file_list (self):
+        """Figure out the list of files to include in the source
+        distribution, and put it in 'self.filelist'.  This might involve
+        reading the manifest template (and writing the manifest), or just
+        reading the manifest, or just using the default file set -- it all
+        depends on the user's options and the state of the filesystem.
+        """
+
+        # If we have a manifest template, see if it's newer than the
+        # manifest; if so, we'll regenerate the manifest.
+        template_exists = os.path.isfile(self.template)
+        if template_exists:
+            template_newer = dep_util.newer(self.template, self.manifest)
+
+        # The contents of the manifest file almost certainly depend on the
+        # setup script as well as the manifest template -- so if the setup
+        # script is newer than the manifest, we'll regenerate the manifest
+        # from the template.  (Well, not quite: if we already have a
+        # manifest, but there's no template -- which will happen if the
+        # developer elects to generate a manifest some other way -- then we
+        # can't regenerate the manifest, so we don't.)
+        self.debug_print("checking if %s newer than %s" %
+                         (self.distribution.script_name, self.manifest))
+        setup_newer = dep_util.newer(self.distribution.script_name,
+                                     self.manifest)
+
+        # cases:
+        #   1) no manifest, template exists: generate manifest
+        #      (covered by 2a: no manifest == template newer)
+        #   2) manifest & template exist:
+        #      2a) template or setup script newer than manifest:
+        #          regenerate manifest
+        #      2b) manifest newer than both:
+        #          do nothing (unless --force or --manifest-only)
+        #   3) manifest exists, no template:
+        #      do nothing (unless --force or --manifest-only)
+        #   4) no manifest, no template: generate w/ warning ("defaults only")
+
+        manifest_outofdate = (template_exists and
+                              (template_newer or setup_newer))
+        force_regen = self.force_manifest or self.manifest_only
+        manifest_exists = os.path.isfile(self.manifest)
+        neither_exists = (not template_exists and not manifest_exists)
+
+        # Regenerate the manifest if necessary (or if explicitly told to)
+        if manifest_outofdate or neither_exists or force_regen:
+            if not template_exists:
+                self.warn(("manifest template '%s' does not exist " +
+                           "(using default file list)") %
+                          self.template)
+
+            self.filelist.findall()
+
+            # Add default file set to 'files'
+            if self.use_defaults:
+                self.add_defaults()
+
+            # Read manifest template if it exists
+            if template_exists:
+                self.read_template()
+
+            # Prune away any directories that don't belong in the source
+            # distribution
+            if self.prune:
+                self.prune_file_list()
+
+            # File list now complete -- sort it so that higher-level files
+            # come first
+            self.filelist.sort()
+
+            # Remove duplicates from the file list
+            self.filelist.remove_duplicates()
+
+            # And write complete file list (including default file set) to
+            # the manifest.
+            self.write_manifest()
+
+        # Don't regenerate the manifest, just read it in.
+        else:
+            self.read_manifest()
+
+    # get_file_list ()
+
+
+    def add_defaults (self):
+        """Add all the default files to self.filelist:
+          - README or README.txt
+          - setup.py
+          - test/test*.py
+          - all pure Python modules mentioned in setup script
+          - all C sources listed as part of extensions or C libraries
+            in the setup script (doesn't catch C headers!)
+        Warns if (README or README.txt) or setup.py are missing; everything
+        else is optional.
+        """
+
+        # A tuple entry means "any one of these alternatives will do".
+        standards = [('README', 'README.txt'), self.distribution.script_name]
+        for fn in standards:
+            if type(fn) is TupleType:
+                alts = fn
+                got_it = 0
+                for fn in alts:
+                    if os.path.exists(fn):
+                        got_it = 1
+                        self.filelist.append(fn)
+                        break
+
+                if not got_it:
+                    self.warn("standard file not found: should have one of " +
+                              string.join(alts, ', '))
+            else:
+                if os.path.exists(fn):
+                    self.filelist.append(fn)
+                else:
+                    self.warn("standard file '%s' not found" % fn)
+
+        optional = ['test/test*.py', 'setup.cfg']
+        for pattern in optional:
+            files = filter(os.path.isfile, glob(pattern))
+            if files:
+                self.filelist.extend(files)
+
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            self.filelist.extend(build_py.get_source_files())
+
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            self.filelist.extend(build_ext.get_source_files())
+
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.filelist.extend(build_clib.get_source_files())
+
+    # add_defaults ()
+
+
+    def read_template (self):
+
+        """Read and parse the manifest template file named by
+        'self.template' (usually "MANIFEST.in").  The parsing and
+        processing is done by 'self.filelist', which updates itself
+        accordingly.
+        """
+        self.announce("reading manifest template '%s'" % self.template)
+        template = TextFile(self.template,
+                            strip_comments=1,
+                            skip_blanks=1,
+                            join_lines=1,
+                            lstrip_ws=1,
+                            rstrip_ws=1,
+                            collapse_join=1)
+
+        while 1:
+            line = template.readline()
+            if line is None:            # end of file
+                break
+
+            # Bad template lines are warned about, not fatal -- the rest
+            # of the template is still processed.
+            try:
+                self.filelist.process_template_line(line)
+            except DistutilsTemplateError, msg:
+                self.warn("%s, line %d: %s" % (template.filename,
+                                               template.current_line,
+                                               msg))
+
+    # read_template ()
+
+
+    def prune_file_list (self):
+        """Prune off branches that might slip into the file list as created
+        by 'read_template()', but really don't belong there:
+          * the build tree (typically "build")
+          * the release tree itself (only an issue if we ran "sdist"
+            previously with --keep-temp, or it aborted)
+          * any RCS or CVS directories
+        """
+        build = self.get_finalized_command('build')
+        base_dir = self.distribution.get_fullname()
+
+        self.filelist.exclude_pattern(None, prefix=build.build_base)
+        self.filelist.exclude_pattern(None, prefix=base_dir)
+        self.filelist.exclude_pattern(r'/(RCS|CVS)/.*', is_regex=1)
+
+
+    def write_manifest (self):
+        """Write the file list in 'self.filelist' (presumably as filled in
+        by 'add_defaults()' and 'read_template()') to the manifest file
+        named by 'self.manifest'.
+        """
+        self.execute(file_util.write_file,
+                     (self.manifest, self.filelist.files),
+                     "writing manifest file '%s'" % self.manifest)
+
+    # write_manifest ()
+
+
+    def read_manifest (self):
+        """Read the manifest file (named by 'self.manifest') and use it to
+        fill in 'self.filelist', the list of files to include in the source
+        distribution.
+        """
+        self.announce("reading manifest file '%s'" % self.manifest)
+        manifest = open(self.manifest)
+        while 1:
+            line = manifest.readline()
+            if line == '':              # end of file
+                break
+            if line[-1] == '\n':
+                line = line[0:-1]
+            self.filelist.append(line)
+
+    # read_manifest ()
+
+
+    def make_release_tree (self, base_dir, files):
+        """Create the directory tree that will become the source
+        distribution archive.  All directories implied by the filenames in
+        'files' are created under 'base_dir', and then we hard link or copy
+        (if hard linking is unavailable) those files into place.
+        Essentially, this duplicates the developer's source tree, but in a
+        directory named after the distribution, containing only the files
+        to be distributed.
+        """
+        # Create all the directories under 'base_dir' necessary to
+        # put 'files' there; the 'mkpath()' is just so we don't die
+        # if the manifest happens to be empty.
+        self.mkpath(base_dir)
+        dir_util.create_tree(base_dir, files,
+                             verbose=self.verbose, dry_run=self.dry_run)
+
+        # And walk over the list of files, either making a hard link (if
+        # os.link exists) to each one that doesn't already exist in its
+        # corresponding location under 'base_dir', or copying each file
+        # that's out-of-date in 'base_dir'.  (Usually, all files will be
+        # out-of-date, because by default we blow away 'base_dir' when
+        # we're done making the distribution archives.)
+
+        if hasattr(os, 'link'):        # can make hard links on this system
+            link = 'hard'
+            msg = "making hard links in %s..." % base_dir
+        else:                           # nope, have to copy
+            link = None
+            msg = "copying files to %s..." % base_dir
+
+        if not files:
+            self.warn("no files to distribute -- empty manifest?")
+        else:
+            self.announce(msg)
+        for file in files:
+            if not os.path.isfile(file):
+                self.warn("'%s' not a regular file -- skipping" % file)
+            else:
+                dest = os.path.join(base_dir, file)
+                self.copy_file(file, dest, link=link)
+
+        self.distribution.metadata.write_pkg_info(base_dir)
+
+    # make_release_tree ()
+
+    def make_distribution (self):
+        """Create the source distribution(s).  First, we create the release
+        tree with 'make_release_tree()'; then, we create all required
+        archive files (according to 'self.formats') from the release tree.
+        Finally, we clean up by blowing away the release tree (unless
+        'self.keep_temp' is true).  The list of archive files created is
+        stored so it can be retrieved later by 'get_archive_files()'.
+        """
+        # Don't warn about missing meta-data here -- should be (and is!)
+        # done elsewhere.
+        base_dir = self.distribution.get_fullname()
+        base_name = os.path.join(self.dist_dir, base_dir)
+
+        self.make_release_tree(base_dir, self.filelist.files)
+        archive_files = []              # remember names of files we create
+        for fmt in self.formats:
+            file = self.make_archive(base_name, fmt, base_dir=base_dir)
+            archive_files.append(file)
+
+        self.archive_files = archive_files
+
+        if not self.keep_temp:
+            dir_util.remove_tree(base_dir, self.verbose, self.dry_run)
+
+    def get_archive_files (self):
+        """Return the list of archive files created when the command
+        was run, or None if the command hasn't run yet.
+        """
+        return self.archive_files
+
+# class sdist
diff --git a/lib-python/2.2/distutils/core.py b/lib-python/2.2/distutils/core.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/core.py
@@ -0,0 +1,231 @@
+"""distutils.core
+
+The only module that needs to be imported to use the Distutils; provides
+the 'setup' function (which is to be called from the setup script).  Also
+indirectly provides the Distribution and Command classes, although they are
+really defined in distutils.dist and distutils.cmd.
+"""
+
+# created 1999/03/01, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os
+from types import *
+from distutils.errors import *
+from distutils.util import grok_environment_error
+
+# Mainly import these so setup scripts can "from distutils.core import" them.
+from distutils.dist import Distribution
+from distutils.cmd import Command
+from distutils.extension import Extension
+
+
+# This is a barebones help message generated displayed when the user
+# runs the setup script with no arguments at all.  More useful help
+# is generated with various --help options: global help, list commands,
+# and per-command help.
+USAGE = """\
+usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
+   or: %(script)s --help [cmd1 cmd2 ...]
+   or: %(script)s --help-commands
+   or: %(script)s cmd --help
+"""
+# '%(script)s' is interpolated by gen_usage() below.
+
+
+# If DISTUTILS_DEBUG is anything other than the empty string, we run in
+# debug mode.
+DEBUG = os.environ.get('DISTUTILS_DEBUG')
+
+def gen_usage (script_name):
+    """Return the barebones usage message, with the basename of
+    'script_name' substituted for '%(script)s' in the USAGE template."""
+    script = os.path.basename(script_name)
+    # vars() exposes the local 'script' to the template's %-formatting
+    return USAGE % vars()
+
+
+# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
+# _setup_stop_after: None, or one of 'init'/'config'/'commandline'/'run'
+#   (set by run_setup(), consulted by setup()).
+# _setup_distribution: the Distribution created by the last setup() call.
+_setup_stop_after = None
+_setup_distribution = None
+
+
+def setup (**attrs):
+    """The gateway to the Distutils: do everything your setup script needs
+    to do, in a highly flexible and user-driven way.  Briefly: create a
+    Distribution instance; find and parse config files; parse the command
+    line; run each Distutils command found there, customized by the options
+    supplied to 'setup()' (as keyword arguments), in config files, and on
+    the command line.
+
+    The Distribution instance might be an instance of a class supplied via
+    the 'distclass' keyword argument to 'setup'; if no such class is
+    supplied, then the Distribution class (in dist.py) is instantiated.
+    All other arguments to 'setup' (except for 'cmdclass') are used to set
+    attributes of the Distribution instance.
+
+    The 'cmdclass' argument, if supplied, is a dictionary mapping command
+    names to command classes.  Each command encountered on the command line
+    will be turned into a command class, which is in turn instantiated; any
+    class found in 'cmdclass' is used in place of the default, which is
+    (for command 'foo_bar') class 'foo_bar' in module
+    'distutils.command.foo_bar'.  The command class must provide a
+    'user_options' attribute which is a list of option specifiers for
+    'distutils.fancy_getopt'.  Any command-line options between the current
+    and the next command are used to set attributes of the current command
+    object.
+
+    When the entire command-line has been successfully parsed, calls the
+    'run()' method on each command object in turn.  This method will be
+    driven entirely by the Distribution object (which each command object
+    has a reference to, thanks to its constructor), and the
+    command-specific options that became attributes of each command
+    object.
+    """
+
+    global _setup_stop_after, _setup_distribution
+
+    # Determine the distribution class -- either caller-supplied or
+    # our Distribution (see below).
+    klass = attrs.get('distclass')
+    if klass:
+        del attrs['distclass']
+    else:
+        klass = Distribution
+
+    # Default the script name/args from the real command line, so the
+    # Distribution knows how it was invoked.
+    if not attrs.has_key('script_name'):
+        attrs['script_name'] = sys.argv[0]
+    if not attrs.has_key('script_args'):
+        attrs['script_args'] = sys.argv[1:]
+
+    # Create the Distribution instance, using the remaining arguments
+    # (ie. everything except distclass) to initialize it
+    try:
+        _setup_distribution = dist = klass(attrs)
+    except DistutilsSetupError, msg:
+        raise SystemExit, "error in setup script: %s" % msg
+
+    # _setup_stop_after is set by run_setup() to make us bail out early.
+    if _setup_stop_after == "init":
+        return dist
+
+    # Find and parse the config file(s): they will override options from
+    # the setup script, but be overridden by the command line.
+    dist.parse_config_files()
+
+    if DEBUG:
+        print "options (after parsing config files):"
+        dist.dump_option_dicts()
+
+    if _setup_stop_after == "config":
+        return dist
+
+    # Parse the command line; any command-line errors are the end user's
+    # fault, so turn them into SystemExit to suppress tracebacks.
+    try:
+        ok = dist.parse_command_line()
+    except DistutilsArgError, msg:
+        script = os.path.basename(dist.script_name)
+        raise SystemExit, \
+              gen_usage(dist.script_name) + "\nerror: %s" % msg
+
+    if DEBUG:
+        print "options (after parsing command line):"
+        dist.dump_option_dicts()
+
+    if _setup_stop_after == "commandline":
+        return dist
+
+    # And finally, run all the commands found on the command line.
+    if ok:
+        try:
+            dist.run_commands()
+        except KeyboardInterrupt:
+            raise SystemExit, "interrupted"
+        except (IOError, os.error), exc:
+            error = grok_environment_error(exc)
+
+            if DEBUG:
+                sys.stderr.write(error + "\n")
+                raise
+            else:
+                raise SystemExit, error
+
+        except (DistutilsExecError,
+                DistutilsFileError,
+                DistutilsOptionError,
+                CCompilerError), msg:
+            if DEBUG:
+                raise
+            else:
+                raise SystemExit, "error: " + str(msg)
+
+    return dist
+
+# setup ()
+
+
+def run_setup (script_name, script_args=None, stop_after="run"):
+    """Run a setup script in a somewhat controlled environment, and
+    return the Distribution instance that drives things.  This is useful
+    if you need to find out the distribution meta-data (passed as
+    keyword args from 'script' to 'setup()', or the contents of the
+    config files or command-line.
+
+    'script_name' is a file that will be run with 'execfile()';
+    'sys.argv[0]' will be replaced with 'script' for the duration of the
+    call.  'script_args' is a list of strings; if supplied,
+    'sys.argv[1:]' will be replaced by 'script_args' for the duration of
+    the call.
+
+    'stop_after' tells 'setup()' when to stop processing; possible
+    values:
+      init
+        stop after the Distribution instance has been created and
+        populated with the keyword arguments to 'setup()'
+      config
+        stop after config files have been parsed (and their data
+        stored in the Distribution instance)
+      commandline
+        stop after the command-line ('sys.argv[1:]' or 'script_args')
+        have been parsed (and the data stored in the Distribution)
+      run [default]
+        stop after all commands have been run (the same as if 'setup()'
+        had been called in the usual way
+
+    Returns the Distribution instance, which provides all information
+    used to drive the Distutils.
+    """
+    if stop_after not in ('init', 'config', 'commandline', 'run'):
+        raise ValueError, "invalid value for 'stop_after': %s" % `stop_after`
+
+    global _setup_stop_after, _setup_distribution
+    _setup_stop_after = stop_after
+
+    save_argv = sys.argv
+    # fresh globals/locals namespaces for the exec'd setup script
+    g = {}
+    l = {}
+    try:
+        try:
+            sys.argv[0] = script_name
+            if script_args is not None:
+                sys.argv[1:] = script_args
+            execfile(script_name, g, l)
+        finally:
+            # NOTE(review): 'save_argv' aliases the same list object that
+            # was mutated in place above, so this assignment restores only
+            # the binding, not the original argv contents -- confirm and
+            # consider saving a copy instead.
+            sys.argv = save_argv
+            _setup_stop_after = None
+    except SystemExit:
+        # Hmm, should we do something if exiting with a non-zero code
+        # (ie. error)?
+        pass
+    except:
+        # any other exception propagates unchanged; the explicit clause
+        # merely documents that intent
+        raise
+
+    if _setup_distribution is None:
+        raise RuntimeError, \
+              ("'distutils.core.setup()' was never called -- "
+               "perhaps '%s' is not a Distutils setup script?") % \
+              script_name
+
+    # I wonder if the setup script's namespace -- g and l -- would be of
+    # any interest to callers?
+    #print "_setup_distribution:", _setup_distribution
+    return _setup_distribution
+
+# run_setup ()
diff --git a/lib-python/2.2/distutils/cygwinccompiler.py b/lib-python/2.2/distutils/cygwinccompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/cygwinccompiler.py
@@ -0,0 +1,441 @@
+"""distutils.cygwinccompiler
+
+Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
+handles the Cygwin port of the GNU C compiler to Windows.  It also contains
+the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
+cygwin in no-cygwin mode).
+"""
+
+# problems:
+#
+# * if you use a msvc compiled python version (1.5.2)
+#   1. you have to insert a __GNUC__ section in its config.h
+#   2. you have to generate a import library for its dll
+#      - create a def-file for python??.dll
+#      - create a import library using
+#             dlltool --dllname python15.dll --def python15.def \
+#                       --output-lib libpython15.a
+#
+#   see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+#
+# * We put export_symbols in a def-file, and don't use
+#   --export-all-symbols because it doesn't worked reliable in some
+#   tested configurations. And because other windows compilers also
+#   need their symbols specified this no serious problem.
+#
+# tested configurations:
+#
+# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
+#   (after patching python's config.h and for C++ some other include files)
+#   see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
+#   (ld doesn't support -shared, so we use dllwrap)
+# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
+#   - its dllwrap doesn't work, there is a bug in binutils 2.10.90
+#     see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
+#   - using gcc -mdll instead dllwrap doesn't work without -static because
+#     it tries to link against dlls instead their import libraries. (If
+#     it finds the dll first.)
+#     By specifying -static we force ld to link against the import libraries,
+#     this is windows standard and there are normally not the necessary symbols
+#     in the dlls.
+#   *** only the version of June 2000 shows these problems
+
+# created 2000/05/05, Rene Liebscher
+
+__revision__ = "$Id$"
+
+import os,sys,copy
+from distutils.ccompiler import gen_preprocess_options, gen_lib_options
+from distutils.unixccompiler import UnixCCompiler
+from distutils.file_util import write_file
+from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
+
+class CygwinCCompiler (UnixCCompiler):
+    """UnixCCompiler subclass that drives the Cygwin port of GCC on
+    Windows (see the module docstring for background and caveats)."""
+
+    compiler_type = 'cygwin'
+    # filename conventions for the Cygwin GCC toolchain
+    obj_extension = ".o"
+    static_lib_extension = ".a"
+    shared_lib_extension = ".dll"
+    static_lib_format = "lib%s%s"
+    shared_lib_format = "%s%s"
+    exe_extension = ".exe"
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+
+        UnixCCompiler.__init__ (self, verbose, dry_run, force)
+
+        (status, details) = check_config_h()
+        self.debug_print("Python's GCC status: %s (details: %s)" %
+                         (status, details))
+        # identity compare is safe here: check_config_h() returns these
+        # exact module-level constant objects
+        if status is not CONFIG_H_OK:
+            self.warn(
+                "Python's pyconfig.h doesn't seem to support your compiler.  " +
+                ("Reason: %s." % details) +
+                "Compiling may fail because of undefined preprocessor macros.")
+
+        (self.gcc_version, self.ld_version, self.dllwrap_version) = \
+            get_versions()
+        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
+                         (self.gcc_version,
+                          self.ld_version,
+                          self.dllwrap_version) )
+
+        # ld_version >= "2.10.90" should also be able to use
+        # gcc -mdll instead of dllwrap
+        # Older dllwraps had own version numbers, newer ones use the
+        # same as the rest of binutils ( also ld )
+        # dllwrap 2.10.90 is buggy
+        if self.ld_version >= "2.10.90":
+            self.linker_dll = "gcc"
+        else:
+            self.linker_dll = "dllwrap"
+
+        # Hard-code GCC because that's what this is all about.
+        # XXX optimization, warnings etc. should be customizable.
+        self.set_executables(compiler='gcc -mcygwin -O -Wall',
+                             compiler_so='gcc -mcygwin -mdll -O -Wall',
+                             linker_exe='gcc -mcygwin',
+                             linker_so=('%s -mcygwin -mdll -static' %
+                                        self.linker_dll))
+
+        # cygwin and mingw32 need different sets of libraries
+        if self.gcc_version == "2.91.57":
+            # cygwin shouldn't need msvcrt, but without the dlls will crash
+            # (gcc version 2.91.57) -- perhaps something about initialization
+            self.dll_libraries=["msvcrt"]
+            self.warn(
+                "Consider upgrading to a newer version of gcc")
+        else:
+            self.dll_libraries=[]
+
+    # __init__ ()
+
+    # not much different of the compile method in UnixCCompiler,
+    # but we have to insert some lines in the middle of it, so
+    # we put here a adapted version of it.
+    # (If we would call compile() in the base class, it would do some
+    # initializations a second time, this is why all is done here.)
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+
+        (output_dir, macros, include_dirs) = \
+            self._fix_compile_args (output_dir, macros, include_dirs)
+        # skip_sources presumably maps source -> "already up to date" flag
+        # (set by the base class's _prep_compile) -- TODO confirm
+        (objects, skip_sources) = self._prep_compile (sources, output_dir)
+
+        # Figure out the options for the compiler command line.
+        pp_opts = gen_preprocess_options (macros, include_dirs)
+        cc_args = pp_opts + ['-c']
+        if debug:
+            cc_args[:0] = ['-g']
+        if extra_preargs:
+            cc_args[:0] = extra_preargs
+        if extra_postargs is None:
+            extra_postargs = []
+
+        # Compile all source files that weren't eliminated by
+        # '_prep_compile()'.
+        for i in range (len (sources)):
+            src = sources[i] ; obj = objects[i]
+            ext = (os.path.splitext (src))[1]
+            if skip_sources[src]:
+                self.announce ("skipping %s (%s up-to-date)" % (src, obj))
+            else:
+                self.mkpath (os.path.dirname (obj))
+                if ext == '.rc' or ext == '.res':
+                    # gcc needs '.res' and '.rc' compiled to object files !!!
+                    try:
+                        self.spawn (["windres","-i",src,"-o",obj])
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+                else: # for other files use the C-compiler
+                    try:
+                        self.spawn (self.compiler_so + cc_args +
+                                [src, '-o', obj] +
+                                extra_postargs)
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+
+        # Return *all* object filenames, not just the ones we just built.
+        return objects
+
+    # compile ()
+
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+
+        # use separate copies, so we can modify the lists
+        extra_preargs = copy.copy(extra_preargs or [])
+        libraries = copy.copy(libraries or [])
+        objects = copy.copy(objects or [])
+
+        # Additional libraries
+        libraries.extend(self.dll_libraries)
+
+        # handle export symbols by creating a def-file
+        # with executables this only works with gcc/ld as linker
+        if ((export_symbols is not None) and
+            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+            # (The linker doesn't do anything if output is up-to-date.
+            # So it would probably better to check if we really need this,
+            # but for this we had to insert some unchanged parts of
+            # UnixCCompiler, and this is not what we want.)
+
+            # we want to put some files in the same directory as the
+            # object files are, build_temp doesn't help much
+            # where are the object files
+            temp_dir = os.path.dirname(objects[0])
+            # name of dll to give the helper files the same base name
+            (dll_name, dll_extension) = os.path.splitext(
+                os.path.basename(output_filename))
+
+            # generate the filenames for these files
+            def_file = os.path.join(temp_dir, dll_name + ".def")
+            exp_file = os.path.join(temp_dir, dll_name + ".exp")
+            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
+
+            # Generate .def file
+            contents = [
+                "LIBRARY %s" % os.path.basename(output_filename),
+                "EXPORTS"]
+            for sym in export_symbols:
+                contents.append(sym)
+            # self.execute() honours dry-run mode; write_file does the I/O
+            self.execute(write_file, (def_file, contents),
+                         "writing %s" % def_file)
+
+            # next add options for def-file and to creating import libraries
+
+            # dllwrap uses different options than gcc/ld
+            if self.linker_dll == "dllwrap":
+                extra_preargs.extend([#"--output-exp",exp_file,
+                                       "--output-lib",lib_file,
+                                     ])
+                # for dllwrap we have to use a special option
+                extra_preargs.extend(["--def", def_file])
+            # we use gcc/ld here and can be sure ld is >= 2.9.10
+            else:
+                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
+                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
+                # for gcc/ld the def-file is specified as any other object files
+                objects.append(def_file)
+
+        #end: if ((export_symbols is not None) and
+        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+
+        # who wants symbols and a many times larger output file
+        # should explicitly switch the debug mode on
+        # otherwise we let dllwrap/ld strip the output file
+        # (On my machine: 10KB < stripped_file < ??100KB
+        #   unstripped_file = stripped_file + XXX KB
+        #  ( XXX=254 for a typical python extension))
+        if not debug:
+            extra_preargs.append("-s")
+
+        UnixCCompiler.link(self,
+                           target_desc,
+                           objects,
+                           output_filename,
+                           output_dir,
+                           libraries,
+                           library_dirs,
+                           runtime_library_dirs,
+                           None, # export_symbols, we do this in our def-file
+                           debug,
+                           extra_preargs,
+                           extra_postargs,
+                           build_temp)
+
+    # link ()
+
+    # -- Miscellaneous methods -----------------------------------------
+
+    # overwrite the one from CCompiler to support rc and res-files
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+            (base, ext) = os.path.splitext (os.path.normcase(src_name))
+            if ext not in (self.src_extensions + ['.rc','.res']):
+                raise UnknownFileError, \
+                      "unknown file type '%s' (from '%s')" % \
+                      (ext, src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            if ext == '.res' or ext == '.rc':
+                # these need to be compiled to object files
+                # (keep the original extension in the name, e.g. foo.rc.o)
+                obj_names.append (os.path.join (output_dir,
+                                            base + ext + self.obj_extension))
+            else:
+                obj_names.append (os.path.join (output_dir,
+                                            base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+# class CygwinCCompiler
+
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler (CygwinCCompiler):
+    """CygwinCCompiler variant that drives gcc in no-cygwin (mingw32)
+    mode via the -mno-cygwin switch."""
+
+    compiler_type = 'mingw32'
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+
+        CygwinCCompiler.__init__ (self, verbose, dry_run, force)
+
+        # A real mingw32 doesn't need to specify a different entry point,
+        # but cygwin 2.91.57 in no-cygwin-mode needs it.
+        # NOTE(review): ' at ' below looks like mailing-list mangling of
+        # '@'; the real flag is presumably '--entry _DllMain@12' --
+        # confirm against the upstream CPython source.
+        if self.gcc_version <= "2.91.57":
+            entry_point = '--entry _DllMain at 12'
+        else:
+            entry_point = ''
+
+        self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
+                             compiler_so='gcc -mno-cygwin -mdll -O -Wall',
+                             linker_exe='gcc -mno-cygwin',
+                             linker_so='%s -mno-cygwin -mdll -static %s'
+                                        % (self.linker_dll, entry_point))
+        # Maybe we should also append -mthreads, but then the finished
+        # dlls need another dll (mingwm10.dll see Mingw32 docs)
+        # (-mthreads: Support thread-safe exception handling on `Mingw32')
+
+        # no additional libraries needed (unlike the cygwin base class,
+        # which may add msvcrt for old gcc)
+        self.dll_libraries=[]
+
+    # __init__ ()
+
+# class Mingw32CCompiler
+
+# Because these compilers aren't configured in Python's pyconfig.h file by
+# default, we should at least warn the user if he is using a unmodified
+# version.
+
+# Status constants returned by check_config_h(); callers compare them
+# by identity (see CygwinCCompiler.__init__).
+CONFIG_H_OK = "ok"
+CONFIG_H_NOTOK = "not ok"
+CONFIG_H_UNCERTAIN = "uncertain"
+
+def check_config_h():
+
+    """Check if the current Python installation (specifically, pyconfig.h)
+    appears amenable to building extensions with GCC.  Returns a tuple
+    (status, details), where 'status' is one of the following constants:
+      CONFIG_H_OK
+        all is well, go ahead and compile
+      CONFIG_H_NOTOK
+        doesn't look good
+      CONFIG_H_UNCERTAIN
+        not sure -- unable to read pyconfig.h
+    'details' is a human-readable string explaining the situation.
+
+    Note there are two ways to conclude "OK": either 'sys.version' contains
+    the string "GCC" (implying that this Python was built with GCC), or the
+    installed "pyconfig.h" contains the string "__GNUC__".
+    """
+
+    # XXX since this function also checks sys.version, it's not strictly a
+    # "pyconfig.h" check -- should probably be renamed...
+
+    from distutils import sysconfig
+    import string
+    # if sys.version contains GCC then python was compiled with
+    # GCC, and the pyconfig.h file should be OK
+    if string.find(sys.version,"GCC") >= 0:
+        return (CONFIG_H_OK, "sys.version mentions 'GCC'")
+
+    # full path of the pyconfig.h for this Python installation
+    fn = sysconfig.get_config_h_filename()
+    try:
+        # It would probably better to read single lines to search.
+        # But we do this only once, and it is fast enough
+        f = open(fn)
+        s = f.read()
+        f.close()
+
+    except IOError, exc:
+        # if we can't read this file, we cannot say it is wrong
+        # the compiler will complain later about this file as missing
+        return (CONFIG_H_UNCERTAIN,
+                "couldn't read '%s': %s" % (fn, exc.strerror))
+
+    else:
+        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
+        if string.find(s,"__GNUC__") >= 0:
+            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
+        else:
+            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
+
+
+
+def get_versions():
+    """ Try to find out the versions of gcc, ld and dllwrap.
+        If not possible it returns None for it.
+    """
+    # Each entry comes back as a StrictVersion instance (or None), which
+    # supports the comparisons done in CygwinCCompiler.__init__().
+    from distutils.version import StrictVersion
+    from distutils.spawn import find_executable
+    import re
+
+    gcc_exe = find_executable('gcc')
+    if gcc_exe:
+        out = os.popen(gcc_exe + ' -dumpversion','r')
+        out_string = out.read()
+        out.close()
+        # NOTE(review): the patterns below are not raw strings; '\d' is
+        # harmless here, but r'...' is the conventional spelling.
+        result = re.search('(\d+\.\d+\.\d+)',out_string)
+        if result:
+            gcc_version = StrictVersion(result.group(1))
+        else:
+            gcc_version = None
+    else:
+        gcc_version = None
+    ld_exe = find_executable('ld')
+    if ld_exe:
+        out = os.popen(ld_exe + ' -v','r')
+        out_string = out.read()
+        out.close()
+        result = re.search('(\d+\.\d+\.\d+)',out_string)
+        if result:
+            ld_version = StrictVersion(result.group(1))
+        else:
+            ld_version = None
+    else:
+        ld_version = None
+    dllwrap_exe = find_executable('dllwrap')
+    if dllwrap_exe:
+        out = os.popen(dllwrap_exe + ' --version','r')
+        out_string = out.read()
+        out.close()
+        # leading space skips the program name in "dllwrap x.y.z" output
+        result = re.search(' (\d+\.\d+\.\d+)',out_string)
+        if result:
+            dllwrap_version = StrictVersion(result.group(1))
+        else:
+            dllwrap_version = None
+    else:
+        dllwrap_version = None
+    return (gcc_version, ld_version, dllwrap_version)
diff --git a/lib-python/2.2/distutils/dep_util.py b/lib-python/2.2/distutils/dep_util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/dep_util.py
@@ -0,0 +1,115 @@
+"""distutils.dep_util
+
+Utility functions for simple, timestamp-based dependency of files
+and groups of files; also, function based entirely on such
+timestamp dependency analysis."""
+
+# created 2000/04/03, Greg Ward (extracted from util.py)
+
+__revision__ = "$Id$"
+
+import os
+from distutils.errors import DistutilsFileError
+
+
+def newer (source, target):
+    """Return true if 'source' exists and is more recently modified than
+    'target', or if 'source' exists and 'target' doesn't.  Return false if
+    both exist and 'target' is the same age or younger than 'source'.
+    Raise DistutilsFileError if 'source' does not exist.
+    """
+    if not os.path.exists(source):
+        raise DistutilsFileError, "file '%s' does not exist" % source
+    # a missing target is trivially out-of-date
+    if not os.path.exists(target):
+        return 1
+
+    from stat import ST_MTIME
+    mtime1 = os.stat(source)[ST_MTIME]
+    mtime2 = os.stat(target)[ST_MTIME]
+
+    # strictly newer: equal mtimes count as up-to-date
+    return mtime1 > mtime2
+
+# newer ()
+
+
+def newer_pairwise (sources, targets):
+    """Walk two filename lists in parallel, testing if each source is newer
+    than its corresponding target.  Return a pair of lists (sources,
+    targets) where source is newer than target, according to the semantics
+    of 'newer()'.
+    """
+    if len(sources) != len(targets):
+        raise ValueError, "'sources' and 'targets' must be same length"
+
+    # build a pair of parallel lists (sources, targets) where source is
+    # newer than its target
+    n_sources = []
+    n_targets = []
+    for i in range(len(sources)):
+        if newer(sources[i], targets[i]):
+            n_sources.append(sources[i])
+            n_targets.append(targets[i])
+
+    return (n_sources, n_targets)
+
+# newer_pairwise ()
+
+
+def newer_group (sources, target, missing='error'):
+    """Return true if 'target' is out-of-date with respect to any file
+    listed in 'sources'.  In other words, if 'target' exists and is newer
+    than every file in 'sources', return false; otherwise return true.
+    'missing' controls what we do when a source file is missing; the
+    default ("error") is to blow up with an OSError from inside 'stat()';
+    if it is "ignore", we silently drop any missing source files; if it is
+    "newer", any missing source files make us assume that 'target' is
+    out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
+    carry out commands that wouldn't work because inputs are missing, but
+    that doesn't matter because you're not actually going to run the
+    commands).
+    """
+    # If the target doesn't even exist, then it's definitely out-of-date.
+    if not os.path.exists(target):
+        return 1
+
+    # Otherwise we have to find out the hard way: if *any* source file
+    # is more recent than 'target', then 'target' is out-of-date and
+    # we can immediately return true.  If we fall through to the end
+    # of the loop, then 'target' is up-to-date and we return false.
+    from stat import ST_MTIME
+    target_mtime = os.stat(target)[ST_MTIME]
+    for source in sources:
+        if not os.path.exists(source):
+            if missing == 'error':      # blow up when we stat() the file
+                pass
+            elif missing == 'ignore':   # missing source dropped from
+                continue                #  target's dependency list
+            elif missing == 'newer':    # missing source means target is
+                return 1                #  out-of-date
+
+        # ('error' mode falls through so this os.stat() raises OSError)
+        source_mtime = os.stat(source)[ST_MTIME]
+        if source_mtime > target_mtime:
+            return 1
+    else:
+        # NOTE(review): this 'else' belongs to the 'for'; since the loop
+        # has no 'break', it always runs on normal completion, so a plain
+        # 'return 0' at function level would behave identically.
+        return 0
+
+# newer_group ()
+
+
+# XXX this isn't used anywhere, and worse, it has the same name as a method
+# in Command with subtly different semantics.  (This one just has one
+# source -> one dest; that one has many sources -> one dest.)  Nuke it?
+def make_file (src, dst, func, args,
+               verbose=0, update_message=None, noupdate_message=None):
+    """Makes 'dst' from 'src' (both filenames) by calling 'func' with
+    'args', but only if it needs to: i.e. if 'dst' does not exist or 'src'
+    is newer than 'dst'.
+    """
+    if newer(src, dst):
+        if verbose and update_message:
+            print update_message
+        apply(func, args)               # apply(func, args) == func(*args)
+    else:
+        if verbose and noupdate_message:
+            print noupdate_message
+
+# make_file ()
diff --git a/lib-python/2.2/distutils/dir_util.py b/lib-python/2.2/distutils/dir_util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/dir_util.py
@@ -0,0 +1,219 @@
+"""distutils.dir_util
+
+Utility functions for manipulating directories and directory trees."""
+
+# created 2000/04/03, Greg Ward (extracted from util.py)
+
+__revision__ = "$Id$"
+
+import os
+from types import *
+from distutils.errors import DistutilsFileError, DistutilsInternalError
+
+
+# cache used by mkpath() -- in addition to cheapening redundant calls,
+# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
+_path_created = {}
+
+# I don't use os.makedirs because a) it's new to Python 1.5.2, and
+# b) it blows up if the directory already exists (I want to silently
+# succeed in that case).
+def mkpath (name, mode=0777, verbose=0, dry_run=0):
+    """Create a directory and any missing ancestor directories.  If the
+       directory already exists (or if 'name' is the empty string, which
+       means the current directory, which of course exists), then do
+       nothing.  Raise DistutilsFileError if unable to create some
+       directory along the way (eg. some sub-path exists, but is a file
+       rather than a directory).  If 'verbose' is true, print a one-line
+       summary of each mkdir to stdout.  Return the list of directories
+       actually created."""
+
+    global _path_created
+
+    # NOTE(review): 'mode' is accepted but never passed to os.mkdir below,
+    # so new directories get the process's default mode -- confirm intent.
+
+    # Detect a common bug -- name is None
+    if type(name) is not StringType:
+        raise DistutilsInternalError, \
+              "mkpath: 'name' must be a string (got %s)" % `name`
+
+    # XXX what's the better way to handle verbosity? print as we create
+    # each directory in the path (the current behaviour), or only announce
+    # the creation of the whole path? (quite easy to do the latter since
+    # we're not using a recursive algorithm)
+
+    name = os.path.normpath(name)
+    created_dirs = []
+    # Fast paths: the directory already exists, or a previous call already
+    # created it (the cache also keeps dry-run output from repeating).
+    if os.path.isdir(name) or name == '':
+        return created_dirs
+    if _path_created.get(os.path.abspath(name)):
+        return created_dirs
+
+    (head, tail) = os.path.split(name)
+    tails = [tail]                      # stack of lone dirs to create
+
+    # Walk upwards until we hit a directory that exists, accumulating the
+    # missing path components (highest missing dir first) in 'tails'.
+    while head and tail and not os.path.isdir(head):
+        #print "splitting '%s': " % head,
+        (head, tail) = os.path.split(head)
+        #print "to ('%s','%s')" % (head, tail)
+        tails.insert(0, tail)          # push next higher dir onto stack
+
+    #print "stack of tails:", tails
+
+    # now 'head' contains the deepest directory that already exists
+    # (that is, the child of 'head' in 'name' is the highest directory
+    # that does *not* exist)
+    for d in tails:
+        #print "head = %s, d = %s: " % (head, d),
+        head = os.path.join(head, d)
+        abs_head = os.path.abspath(head)
+
+        if _path_created.get(abs_head):
+            continue
+
+        if verbose:
+            print "creating", head
+
+        if not dry_run:
+            try:
+                os.mkdir(head)
+                created_dirs.append(head)
+            except OSError, exc:
+                # exc[-1] is the strerror text of the OSError
+                raise DistutilsFileError, \
+                      "could not create '%s': %s" % (head, exc[-1])
+
+        # Mark as created even in dry-run mode so repeat calls stay quiet.
+        _path_created[abs_head] = 1
+    return created_dirs
+
+# mkpath ()
+
+
+def create_tree (base_dir, files, mode=0777, verbose=0, dry_run=0):
+
+    """Create all the empty directories under 'base_dir' needed to
+       put 'files' there.  'base_dir' is just the name of a directory
+       which doesn't necessarily exist yet; 'files' is a list of filenames
+       to be interpreted relative to 'base_dir'.  'base_dir' + the
+       directory portion of every file in 'files' will be created if it
+       doesn't already exist.  'mode', 'verbose' and 'dry_run' flags are as
+       for 'mkpath()'."""
+
+    # First get the list of directories to create -- a dict is used to
+    # de-duplicate directories shared by several files.
+    need_dir = {}
+    for file in files:
+        need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
+    need_dirs = need_dir.keys()
+    need_dirs.sort()
+
+    # Now create them (sorted so parents come before their children)
+    for dir in need_dirs:
+        mkpath(dir, mode, verbose, dry_run)
+
+# create_tree ()
+
+
+def copy_tree (src, dst,
+               preserve_mode=1,
+               preserve_times=1,
+               preserve_symlinks=0,
+               update=0,
+               verbose=0,
+               dry_run=0):
+
+    """Copy an entire directory tree 'src' to a new location 'dst'.  Both
+       'src' and 'dst' must be directory names.  If 'src' is not a
+       directory, raise DistutilsFileError.  If 'dst' does not exist, it is
+       created with 'mkpath()'.  The end result of the copy is that every
+       file in 'src' is copied to 'dst', and directories under 'src' are
+       recursively copied to 'dst'.  Return the list of files that were
+       copied or might have been copied, using their output name.  The
+       return value is unaffected by 'update' or 'dry_run': it is simply
+       the list of all files under 'src', with the names changed to be
+       under 'dst'.
+
+       'preserve_mode' and 'preserve_times' are the same as for
+       'copy_file'; note that they only apply to regular files, not to
+       directories.  If 'preserve_symlinks' is true, symlinks will be
+       copied as symlinks (on platforms that support them!); otherwise
+       (the default), the destination of the symlink will be copied.
+       'update' and 'verbose' are the same as for 'copy_file'."""
+
+    # late import because of mutual dependence between these modules
+    from distutils.file_util import copy_file
+
+    # In dry-run mode a missing or unlistable 'src' is tolerated: it may
+    # only "exist" as the output of an earlier step that was skipped.
+    if not dry_run and not os.path.isdir(src):
+        raise DistutilsFileError, \
+              "cannot copy tree '%s': not a directory" % src
+    try:
+        names = os.listdir(src)
+    except os.error, (errno, errstr):
+        if dry_run:
+            names = []
+        else:
+            raise DistutilsFileError, \
+                  "error listing files in '%s': %s" % (src, errstr)
+
+    if not dry_run:
+        mkpath(dst, verbose=verbose)
+
+    outputs = []
+
+    for n in names:
+        src_name = os.path.join(src, n)
+        dst_name = os.path.join(dst, n)
+
+        if preserve_symlinks and os.path.islink(src_name):
+            # Re-create the symlink at the destination rather than copying
+            # the file it points to.
+            link_dest = os.readlink(src_name)
+            if verbose:
+                print "linking %s -> %s" % (dst_name, link_dest)
+            if not dry_run:
+                os.symlink(link_dest, dst_name)
+            outputs.append(dst_name)
+
+        elif os.path.isdir(src_name):
+            # Recurse into subdirectories, propagating all flags unchanged.
+            outputs.extend(
+                copy_tree(src_name, dst_name,
+                          preserve_mode, preserve_times, preserve_symlinks,
+                          update, verbose, dry_run))
+        else:
+            copy_file(src_name, dst_name,
+                      preserve_mode, preserve_times,
+                      update, None, verbose, dry_run)
+            outputs.append(dst_name)
+
+    return outputs
+
+# copy_tree ()
+
+# Helper for remove_tree()
+def _build_cmdtuple(path, cmdtuples):
+    """Append (callable, path) pairs to 'cmdtuples' which, executed in
+    order, delete everything under 'path' and then 'path' itself
+    (children first, so each os.rmdir sees an empty directory)."""
+    for f in os.listdir(path):
+        real_f = os.path.join(path,f)
+        # Recurse only into real subdirectories; symlinks are removed as
+        # plain entries rather than followed.
+        if os.path.isdir(real_f) and not os.path.islink(real_f):
+            _build_cmdtuple(real_f, cmdtuples)
+        else:
+            cmdtuples.append((os.remove, real_f))
+    cmdtuples.append((os.rmdir, path))
+
+
+def remove_tree (directory, verbose=0, dry_run=0):
+    """Recursively remove an entire directory tree.  Any errors are ignored
+    (apart from being reported to stdout if 'verbose' is true).
+    """
+    # imported lazily, at call time
+    from distutils.util import grok_environment_error
+    global _path_created
+
+    if verbose:
+        print "removing '%s' (and everything under it)" % directory
+    if dry_run:
+        return
+    # Build the full deletion plan up front, then execute it.
+    cmdtuples = []
+    _build_cmdtuple(directory, cmdtuples)
+    for cmd in cmdtuples:
+        try:
+            apply(cmd[0], (cmd[1],))
+            # remove dir from cache if it's already there
+            abspath = os.path.abspath(cmd[1])
+            if _path_created.has_key(abspath):
+                del _path_created[abspath]
+        except (IOError, OSError), exc:
+            # Failures are reported (when verbose) but never propagated.
+            if verbose:
+                print grok_environment_error(
+                    exc, "error removing %s: " % directory)
diff --git a/lib-python/2.2/distutils/dist.py b/lib-python/2.2/distutils/dist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/dist.py
@@ -0,0 +1,1086 @@
+"""distutils.dist
+
+Provides the Distribution class, which represents the module distribution
+being built/installed/distributed.
+"""
+
+# created 2000/04/03, Greg Ward
+# (extricated from core.py; actually dates back to the beginning)
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from copy import copy
+
+try:
+    import warnings
+except:
+    warnings = None
+
+from distutils.errors import *
+from distutils.fancy_getopt import FancyGetopt, translate_longopt
+from distutils.util import check_environ, strtobool, rfc822_escape
+
+
+# Regex to define acceptable Distutils command names.  This is not *quite*
+# the same as a Python NAME -- I don't allow leading underscores.  The fact
+# that they're very similar is no coincidence; the default naming scheme is
+# to look for a Python module named after the command.
+command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
+
+
+class Distribution:
+    """The core of the Distutils.  Most of the work hiding behind 'setup'
+    is really done within a Distribution instance, which farms the work out
+    to the Distutils commands specified on the command line.
+
+    Setup scripts will almost never instantiate Distribution directly,
+    unless the 'setup()' function is totally inadequate to their needs.
+    However, it is conceivable that a setup script might wish to subclass
+    Distribution for some specialized purpose, and then pass the subclass
+    to 'setup()' as the 'distclass' keyword argument.  If so, it is
+    necessary to respect the expectations that 'setup' has of Distribution.
+    See the code for 'setup()', in core.py, for details.
+    """
+
+
+    # 'global_options' describes the command-line options that may be
+    # supplied to the setup script prior to any actual commands.
+    # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
+    # these global options.  This list should be kept to a bare minimum,
+    # since every global option is also valid as a command option -- and we
+    # don't want to pollute the commands with too many options that they
+    # have minimal control over.
+    global_options = [('verbose', 'v', "run verbosely (default)"),
+                      ('quiet', 'q', "run quietly (turns verbosity off)"),
+                      ('dry-run', 'n', "don't actually do anything"),
+                      ('help', 'h', "show detailed help message"),
+                     ]
+
+    # options that are not propagated to the commands
+    display_options = [
+        ('help-commands', None,
+         "list all available commands"),
+        ('name', None,
+         "print package name"),
+        ('version', 'V',
+         "print package version"),
+        ('fullname', None,
+         "print <package name>-<version>"),
+        ('author', None,
+         "print the author's name"),
+        ('author-email', None,
+         "print the author's email address"),
+        ('maintainer', None,
+         "print the maintainer's name"),
+        ('maintainer-email', None,
+         "print the maintainer's email address"),
+        ('contact', None,
+         "print the maintainer's name if known, else the author's"),
+        ('contact-email', None,
+         "print the maintainer's email address if known, else the author's"),
+        ('url', None,
+         "print the URL for this package"),
+        ('license', None,
+         "print the license of the package"),
+        ('licence', None,
+         "alias for --license"),
+        ('description', None,
+         "print the package description"),
+        ('long-description', None,
+         "print the long package description"),
+        ('platforms', None,
+         "print the list of platforms"),
+        ('keywords', None,
+         "print the list of keywords"),
+        ]
+    display_option_names = map(lambda x: translate_longopt(x[0]),
+                               display_options)
+
+    # negative options are options that exclude other options
+    negative_opt = {'quiet': 'verbose'}
+
+
+    # -- Creation/initialization methods -------------------------------
+
+    def __init__ (self, attrs=None):
+        """Construct a new Distribution instance: initialize all the
+        attributes of a Distribution, and then use 'attrs' (a dictionary
+        mapping attribute names to values) to assign some of those
+        attributes their "real" values.  (Any attributes not mentioned in
+        'attrs' will be assigned to some null value: 0, None, an empty list
+        or dictionary, etc.)  Most importantly, initialize the
+        'command_obj' attribute to the empty dictionary; this will be
+        filled in with real command objects by 'parse_command_line()'.
+        """
+
+        # Default values for our command-line options
+        self.verbose = 1
+        self.dry_run = 0
+        self.help = 0
+        # Every display option (--name, --version, ...) starts switched off.
+        for attr in self.display_option_names:
+            setattr(self, attr, 0)
+
+        # Store the distribution meta-data (name, version, author, and so
+        # forth) in a separate object -- we're getting to have enough
+        # information here (and enough command-line options) that it's
+        # worth it.  Also delegate 'get_XXX()' methods to the 'metadata'
+        # object in a sneaky and underhanded (but efficient!) way.
+        self.metadata = DistributionMetadata()
+        for basename in self.metadata._METHOD_BASENAMES:
+            method_name = "get_" + basename
+            if hasattr(self.metadata, method_name):
+                setattr(self, method_name, getattr(self.metadata, method_name))
+
+        # 'cmdclass' maps command names to class objects, so we
+        # can 1) quickly figure out which class to instantiate when
+        # we need to create a new command object, and 2) have a way
+        # for the setup script to override command classes
+        self.cmdclass = {}
+
+        # 'script_name' and 'script_args' are usually set to sys.argv[0]
+        # and sys.argv[1:], but they can be overridden when the caller is
+        # not necessarily a setup script run from the command-line.
+        self.script_name = None
+        self.script_args = None
+
+        # 'command_options' is where we store command options between
+        # parsing them (from config files, the command-line, etc.) and when
+        # they are actually needed -- ie. when the command in question is
+        # instantiated.  It is a dictionary of dictionaries of 2-tuples:
+        #   command_options = { command_name : { option : (source, value) } }
+        self.command_options = {}
+
+        # These options are really the business of various commands, rather
+        # than of the Distribution itself.  We provide aliases for them in
+        # Distribution as a convenience to the developer.
+        self.packages = None
+        self.package_dir = None
+        self.py_modules = None
+        self.libraries = None
+        self.headers = None
+        self.ext_modules = None
+        self.ext_package = None
+        self.include_dirs = None
+        self.extra_path = None
+        self.scripts = None
+        self.data_files = None
+
+        # And now initialize bookkeeping stuff that can't be supplied by
+        # the caller at all.  'command_obj' maps command names to
+        # Command instances -- that's how we enforce that every command
+        # class is a singleton.
+        self.command_obj = {}
+
+        # 'have_run' maps command names to boolean values; it keeps track
+        # of whether we have actually run a particular command, to make it
+        # cheap to "run" a command whenever we think we might need to -- if
+        # it's already been done, no need for expensive filesystem
+        # operations, we just check the 'have_run' dictionary and carry on.
+        # It's only safe to query 'have_run' for a command class that has
+        # been instantiated -- a false value will be inserted when the
+        # command object is created, and replaced with a true value when
+        # the command is successfully run.  Thus it's probably best to use
+        # '.get()' rather than a straight lookup.
+        self.have_run = {}
+
+        # Now we'll use the attrs dictionary (ultimately, keyword args from
+        # the setup script) to possibly override any or all of these
+        # distribution options.
+
+        if attrs:
+
+            # Pull out the set of command options and work on them
+            # specifically.  Note that this order guarantees that aliased
+            # command options will override any supplied redundantly
+            # through the general options dictionary.
+            options = attrs.get('options')
+            if options:
+                del attrs['options']
+                for (command, cmd_options) in options.items():
+                    opt_dict = self.get_option_dict(command)
+                    for (opt, val) in cmd_options.items():
+                        opt_dict[opt] = ("setup script", val)
+
+            # Now work on the rest of the attributes.  Any attribute that's
+            # not already defined is invalid!
+            # Metadata attributes take precedence over Distribution's own.
+            for (key,val) in attrs.items():
+                if hasattr(self.metadata, key):
+                    setattr(self.metadata, key, val)
+                elif hasattr(self, key):
+                    setattr(self, key, val)
+                else:
+                    msg = "Unknown distribution option: %s" % repr(key)
+                    # 'warnings' may be None (import guarded at module top)
+                    if warnings is not None:
+                        warnings.warn(msg)
+                    else:
+                        sys.stderr.write(msg + "\n")
+
+        self.finalize_options()
+
+    # __init__ ()
+
+
+    def get_option_dict (self, command):
+        """Get the option dictionary for a given command.  If that
+        command's option dictionary hasn't been created yet, then create it
+        and return the new dictionary; otherwise, return the existing
+        option dictionary.
+        """
+
+        # NB. the local 'dict' shadows the builtin of the same name here.
+        dict = self.command_options.get(command)
+        if dict is None:
+            dict = self.command_options[command] = {}
+        return dict
+
+
+    def dump_option_dicts (self, header=None, commands=None, indent=""):
+        """Debugging aid: pretty-print the per-command option dictionaries
+        to stdout.  'commands' restricts output to those command names
+        (default: all known commands, sorted); 'header' is an optional
+        title line, and 'indent' a prefix prepended to every printed line.
+        """
+        from pprint import pformat
+
+        if commands is None:             # dump all command option dicts
+            commands = self.command_options.keys()
+            commands.sort()
+
+        if header is not None:
+            print indent + header
+            indent = indent + "  "
+
+        if not commands:
+            print indent + "no commands known yet"
+            return
+
+        for cmd_name in commands:
+            opt_dict = self.command_options.get(cmd_name)
+            if opt_dict is None:
+                print indent + "no option dict for '%s' command" % cmd_name
+            else:
+                print indent + "option dict for '%s' command:" % cmd_name
+                out = pformat(opt_dict)
+                # re-indent pformat's multi-line output to match 'indent'
+                for line in string.split(out, "\n"):
+                    print indent + "  " + line
+
+    # dump_option_dicts ()
+
+
+
+    # -- Config file finding/parsing methods ---------------------------
+
+    def find_config_files (self):
+        """Find as many configuration files as should be processed for this
+        platform, and return a list of filenames in the order in which they
+        should be parsed.  The filenames returned are guaranteed to exist
+        (modulo nasty race conditions).
+
+        There are three possible config files: distutils.cfg in the
+        Distutils installation directory (ie. where the top-level
+        Distutils __inst__.py file lives), a file in the user's home
+        directory named .pydistutils.cfg on Unix and pydistutils.cfg
+        on Windows/Mac, and setup.cfg in the current directory.
+        """
+        files = []
+        check_environ()
+
+        # Where to look for the system-wide Distutils config file
+        sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
+
+        # Look for the system config file
+        sys_file = os.path.join(sys_dir, "distutils.cfg")
+        if os.path.isfile(sys_file):
+            files.append(sys_file)
+
+        # What to call the per-user config file
+        if os.name == 'posix':
+            user_filename = ".pydistutils.cfg"
+        else:
+            user_filename = "pydistutils.cfg"
+
+        # And look for the user config file -- only when HOME is set,
+        # which it may not be on non-POSIX platforms.
+        if os.environ.has_key('HOME'):
+            user_file = os.path.join(os.environ.get('HOME'), user_filename)
+            if os.path.isfile(user_file):
+                files.append(user_file)
+
+        # All platforms support local setup.cfg
+        local_file = "setup.cfg"
+        if os.path.isfile(local_file):
+            files.append(local_file)
+
+        # Order matters: later files override earlier ones when parsed.
+        return files
+
+    # find_config_files ()
+
+
+    def parse_config_files (self, filenames=None):
+        """Parse each file in 'filenames' (default: the result of
+        'find_config_files()') with ConfigParser, recording every option
+        in 'command_options' as a (filename, value) pair keyed by section
+        and option name.  A '[global]' section additionally sets
+        Distribution-level flags ('verbose', 'dry_run' and negative
+        aliases such as 'quiet')."""
+
+        from ConfigParser import ConfigParser
+        from distutils.core import DEBUG
+
+        if filenames is None:
+            filenames = self.find_config_files()
+
+        if DEBUG: print "Distribution.parse_config_files():"
+
+        parser = ConfigParser()
+        for filename in filenames:
+            if DEBUG: print "  reading", filename
+            parser.read(filename)
+            for section in parser.sections():
+                options = parser.options(section)
+                opt_dict = self.get_option_dict(section)
+
+                for opt in options:
+                    if opt != '__name__':
+                        val = parser.get(section,opt)
+                        # command-line style '-' becomes attribute-style '_'
+                        opt = string.replace(opt, '-', '_')
+                        opt_dict[opt] = (filename, val)
+
+            # Make the ConfigParser forget everything (so we retain
+            # the original filenames that options come from) -- gag,
+            # retch, puke -- another good reason for a distutils-
+            # specific config parser (sigh...)
+            parser.__init__()
+
+        # If there was a "global" section in the config file, use it
+        # to set Distribution options.
+
+        if self.command_options.has_key('global'):
+            for (opt, (src, val)) in self.command_options['global'].items():
+                alias = self.negative_opt.get(opt)
+                try:
+                    if alias:
+                        # negative alias, eg. 'quiet' clears 'verbose'
+                        setattr(self, alias, not strtobool(val))
+                    elif opt in ('verbose', 'dry_run'): # ugh!
+                        setattr(self, opt, strtobool(val))
+                except ValueError, msg:
+                    # strtobool() rejects anything but a boolean-ish string
+                    raise DistutilsOptionError, msg
+
+    # parse_config_files ()
+
+
+    # -- Command-line parsing methods ----------------------------------
+
+    def parse_command_line (self):
+        """Parse the setup script's command line, taken from the
+        'script_args' instance attribute (which defaults to 'sys.argv[1:]'
+        -- see 'setup()' in core.py).  This list is first processed for
+        "global options" -- options that set attributes of the Distribution
+        instance.  Then, it is alternately scanned for Distutils commands
+        and options for that command.  Each new command terminates the
+        options for the previous command.  The allowed options for a
+        command are determined by the 'user_options' attribute of the
+        command class -- thus, we have to be able to load command classes
+        in order to parse the command line.  Any error in that 'options'
+        attribute raises DistutilsGetoptError; any error on the
+        command-line raises DistutilsArgError.  If no Distutils commands
+        were found on the command line, raises DistutilsArgError.  Return
+        true if command-line was successfully parsed and we should carry
+        on with executing commands; false if no errors but we shouldn't
+        execute commands (currently, this only happens if user asks for
+        help).
+        """
+        #
+        # We now have enough information to show the Macintosh dialog
+        # that allows the user to interactively specify the "command line".
+        #
+        if sys.platform == 'mac':
+            import EasyDialogs
+            cmdlist = self.get_command_list()
+            self.script_args = EasyDialogs.GetArgv(
+                self.global_options + self.display_options, cmdlist)
+
+        # We have to parse the command line a bit at a time -- global
+        # options, then the first command, then its options, and so on --
+        # because each command will be handled by a different class, and
+        # the options that are valid for a particular class aren't known
+        # until we have loaded the command class, which doesn't happen
+        # until we know what the command is.
+
+        self.commands = []
+        parser = FancyGetopt(self.global_options + self.display_options)
+        parser.set_negative_aliases(self.negative_opt)
+        parser.set_aliases({'licence': 'license'})
+        # 'object=self' makes getopt store parsed global options directly
+        # as attributes on this Distribution; what remains is the command
+        # words plus their per-command options.
+        args = parser.getopt(args=self.script_args, object=self)
+        option_order = parser.get_option_order()
+
+        # for display options we return immediately
+        if self.handle_display_options(option_order):
+            return
+
+        # Consume "command [options] command [options] ..." until empty.
+        while args:
+            args = self._parse_command_opts(parser, args)
+            if args is None:            # user asked for help (and got it)
+                return
+
+        # Handle the cases of --help as a "global" option, ie.
+        # "setup.py --help" and "setup.py --help command ...".  For the
+        # former, we show global options (--verbose, --dry-run, etc.)
+        # and display-only options (--name, --version, etc.); for the
+        # latter, we omit the display-only options and show help for
+        # each command listed on the command line.
+        if self.help:
+            self._show_help(parser,
+                            display_options=len(self.commands) == 0,
+                            commands=self.commands)
+            return
+
+        # Oops, no commands found -- an end-user error
+        if not self.commands:
+            raise DistutilsArgError, "no commands supplied"
+
+        # All is well: return true
+        return 1
+
+    # parse_command_line()
+
+    def _parse_command_opts (self, parser, args):
+        """Parse the command-line options for a single command.
+        'parser' must be a FancyGetopt instance; 'args' must be the list
+        of arguments, starting with the current command (whose options
+        we are about to parse).  Returns a new version of 'args' with
+        the next command at the front of the list; will be the empty
+        list if there are no more commands on the command line.  Returns
+        None if the user asked for help on this command.
+        """
+        # late import because of mutual dependence between these modules
+        from distutils.cmd import Command
+
+        # Pull the current command from the head of the command line
+        command = args[0]
+        if not command_re.match(command):
+            raise SystemExit, "invalid command name '%s'" % command
+        self.commands.append(command)
+
+        # Dig up the command class that implements this command, so we
+        # 1) know that it's a valid command, and 2) know which options
+        # it takes.
+        try:
+            cmd_class = self.get_command_class(command)
+        except DistutilsModuleError, msg:
+            raise DistutilsArgError, msg
+
+        # Require that the command class be derived from Command -- want
+        # to be sure that the basic "command" interface is implemented.
+        if not issubclass(cmd_class, Command):
+            raise DistutilsClassError, \
+                  "command class %s must subclass Command" % cmd_class
+
+        # Also make sure that the command object provides a list of its
+        # known options.
+        if not (hasattr(cmd_class, 'user_options') and
+                type(cmd_class.user_options) is ListType):
+            raise DistutilsClassError, \
+                  ("command class %s must provide " +
+                   "'user_options' attribute (a list of tuples)") % \
+                  cmd_class
+
+        # If the command class has a list of negative alias options,
+        # merge it in with the global negative aliases.
+        # (copied first so the class-level dict is never mutated)
+        negative_opt = self.negative_opt
+        if hasattr(cmd_class, 'negative_opt'):
+            negative_opt = copy(negative_opt)
+            negative_opt.update(cmd_class.negative_opt)
+
+        # Check for help_options in command class.  They have a different
+        # format (tuple of four) so we need to preprocess them here.
+        if (hasattr(cmd_class, 'help_options') and
+            type(cmd_class.help_options) is ListType):
+            help_options = fix_help_options(cmd_class.help_options)
+        else:
+            help_options = []
+
+
+        # All commands support the global options too, just by adding
+        # in 'global_options'.
+        parser.set_option_table(self.global_options +
+                                cmd_class.user_options +
+                                help_options)
+        parser.set_negative_aliases(negative_opt)
+        (args, opts) = parser.getopt(args[1:])
+        if hasattr(opts, 'help') and opts.help:
+            self._show_help(parser, display_options=0, commands=[cmd_class])
+            return
+
+        # Invoke any command-specific help callbacks (eg. --help-compiler)
+        # that appeared on the command line; if any fired, stop here.
+        if (hasattr(cmd_class, 'help_options') and
+            type(cmd_class.help_options) is ListType):
+            help_option_found=0
+            for (help_option, short, desc, func) in cmd_class.help_options:
+                if hasattr(opts, parser.get_attr_name(help_option)):
+                    help_option_found=1
+                    #print "showing help for option %s of command %s" % \
+                    #      (help_option[0],cmd_class)
+
+                    if callable(func):
+                        func()
+                    else:
+                        raise DistutilsClassError(
+                            "invalid help function %s for help option '%s': "
+                            "must be a callable object (function, etc.)"
+                            % (`func`, help_option))
+
+            if help_option_found:
+                return
+
+        # Put the options from the command-line into their official
+        # holding pen, the 'command_options' dictionary.
+        opt_dict = self.get_option_dict(command)
+        for (name, value) in vars(opts).items():
+            opt_dict[name] = ("command line", value)
+
+        return args
+
+    # _parse_command_opts ()
+
+
+    def finalize_options (self):
+        """Set final values for all the options on the Distribution
+        instance, analogous to the .finalize_options() method of Command
+        objects.
+        """
+
+        # Normalize comma-separated 'keywords' and 'platforms' strings
+        # into lists of whitespace-stripped words; values that are already
+        # non-strings (or None) are left untouched.
+        keywords = self.metadata.keywords
+        if keywords is not None:
+            if type(keywords) is StringType:
+                keywordlist = string.split(keywords, ',')
+                self.metadata.keywords = map(string.strip, keywordlist)
+
+        platforms = self.metadata.platforms
+        if platforms is not None:
+            if type(platforms) is StringType:
+                platformlist = string.split(platforms, ',')
+                self.metadata.platforms = map(string.strip, platformlist)
+
+    def _show_help (self,
+                    parser,
+                    global_options=1,
+                    display_options=1,
+                    commands=[]):
+        """Show help for the setup script command-line in the form of
+        several lists of command-line options.  'parser' should be a
+        FancyGetopt instance; do not expect it to be returned in the
+        same state, as its option table will be reset to make it
+        generate the correct help text.
+
+        If 'global_options' is true, lists the global options:
+        --verbose, --dry-run, etc.  If 'display_options' is true, lists
+        the "display-only" options: --name, --version, etc.  Finally,
+        lists per-command help for every command name or command class
+        in 'commands'.
+        """
+        # late import because of mutual dependence between these modules
+        from distutils.core import gen_usage
+        from distutils.cmd import Command
+
+        if global_options:
+            parser.set_option_table(self.global_options)
+            parser.print_help("Global options:")
+            print
+
+        if display_options:
+            parser.set_option_table(self.display_options)
+            parser.print_help(
+                "Information display options (just display " +
+                "information, ignore any commands)")
+            print
+
+        # NOTE(review): this loop iterates self.commands, not the
+        # 'commands' parameter documented above -- confirm which is meant.
+        for command in self.commands:
+            # entries may be command classes or command-name strings
+            if type(command) is ClassType and issubclass(command, Command):
+                klass = command
+            else:
+                klass = self.get_command_class(command)
+            if (hasattr(klass, 'help_options') and
+                type(klass.help_options) is ListType):
+                parser.set_option_table(klass.user_options +
+                                        fix_help_options(klass.help_options))
+            else:
+                parser.set_option_table(klass.user_options)
+            parser.print_help("Options for '%s' command:" % klass.__name__)
+            print
+
+        print gen_usage(self.script_name)
+        return
+
+    # _show_help ()
+
+
    def handle_display_options (self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        from distutils.core import gen_usage

        # User just wants a list of commands -- we'll print it out and stop
        # processing now (ie. if they ran "setup --help-commands foo bar",
        # we ignore "foo bar").
        if self.help_commands:
            self.print_commands()
            print
            print gen_usage(self.script_name)
            return 1

        # If user supplied any of the "display metadata" options, then
        # display that metadata in the order in which the user supplied the
        # metadata options.
        any_display_options = 0
        # Build a pseudo-set of display option names (first element of
        # each option tuple) for quick membership tests below.
        is_display_option = {}
        for option in self.display_options:
            is_display_option[option[0]] = 1

        for (opt, val) in option_order:
            if val and is_display_option.get(opt):
                # Long option name -> attribute name, then dispatch to the
                # corresponding metadata accessor (get_name, get_version, ...).
                opt = translate_longopt(opt)
                value = getattr(self.metadata, "get_"+opt)()
                # keywords/platforms accessors return lists; show them as
                # one comma-separated line.
                if opt in ['keywords', 'platforms']:
                    print string.join(value, ',')
                else:
                    print value
                any_display_options = 1

        return any_display_options

    # handle_display_options()
+
+    def print_command_list (self, commands, header, max_length):
+        """Print a subset of the list of all commands -- used by
+        'print_commands()'.
+        """
+
+        print header + ":"
+
+        for cmd in commands:
+            klass = self.cmdclass.get(cmd)
+            if not klass:
+                klass = self.get_command_class(cmd)
+            try:
+                description = klass.description
+            except AttributeError:
+                description = "(no description available)"
+
+            print "  %-*s  %s" % (max_length, cmd, description)
+
+    # print_command_list ()
+
+
+    def print_commands (self):
+        """Print out a help message listing all available commands with a
+        description of each.  The list is divided into "standard commands"
+        (listed in distutils.command.__all__) and "extra commands"
+        (mentioned in self.cmdclass, but not a standard command).  The
+        descriptions come from the command class attribute
+        'description'.
+        """
+
+        import distutils.command
+        std_commands = distutils.command.__all__
+        is_std = {}
+        for cmd in std_commands:
+            is_std[cmd] = 1
+
+        extra_commands = []
+        for cmd in self.cmdclass.keys():
+            if not is_std.get(cmd):
+                extra_commands.append(cmd)
+
+        max_length = 0
+        for cmd in (std_commands + extra_commands):
+            if len(cmd) > max_length:
+                max_length = len(cmd)
+
+        self.print_command_list(std_commands,
+                                "Standard commands",
+                                max_length)
+        if extra_commands:
+            print
+            self.print_command_list(extra_commands,
+                                    "Extra commands",
+                                    max_length)
+
+    # print_commands ()
+
+    def get_command_list (self):
+        """Get a list of (command, description) tuples.
+        The list is divided into "standard commands" (listed in
+        distutils.command.__all__) and "extra commands" (mentioned in
+        self.cmdclass, but not a standard command).  The descriptions come
+        from the command class attribute 'description'.
+        """
+        # Currently this is only used on Mac OS, for the Mac-only GUI
+        # Distutils interface (by Jack Jansen)
+
+        import distutils.command
+        std_commands = distutils.command.__all__
+        is_std = {}
+        for cmd in std_commands:
+            is_std[cmd] = 1
+
+        extra_commands = []
+        for cmd in self.cmdclass.keys():
+            if not is_std.get(cmd):
+                extra_commands.append(cmd)
+
+        rv = []
+        for cmd in (std_commands + extra_commands):
+            klass = self.cmdclass.get(cmd)
+            if not klass:
+                klass = self.get_command_class(cmd)
+            try:
+                description = klass.description
+            except AttributeError:
+                description = "(no description available)"
+            rv.append((cmd, description))
+        return rv
+
+    # -- Command class/object methods ----------------------------------
+
    def get_command_class (self, command):
        """Return the class that implements the Distutils command named by
        'command'.  First we check the 'cmdclass' dictionary; if the
        command is mentioned there, we fetch the class object from the
        dictionary and return it.  Otherwise we load the command module
        ("distutils.command." + command) and fetch the command class from
        the module.  The loaded class is also stored in 'cmdclass'
        to speed future calls to 'get_command_class()'.

        Raises DistutilsModuleError if the expected module could not be
        found, or if that module does not define the expected class.
        """
        # Fast path: a class registered (or previously cached) in 'cmdclass'.
        klass = self.cmdclass.get(command)
        if klass:
            return klass

        # Convention: module 'distutils.command.foo' defines class 'foo'.
        module_name = 'distutils.command.' + command
        klass_name = command

        try:
            __import__ (module_name)
            module = sys.modules[module_name]
        except ImportError:
            raise DistutilsModuleError, \
                  "invalid command '%s' (no module named '%s')" % \
                  (command, module_name)

        try:
            klass = getattr(module, klass_name)
        except AttributeError:
            raise DistutilsModuleError, \
                  "invalid command '%s' (no class '%s' in module '%s')" \
                  % (command, klass_name, module_name)

        # Cache so future lookups skip the import machinery.
        self.cmdclass[command] = klass
        return klass

    # get_command_class ()
+
    def get_command_obj (self, command, create=1):
        """Return the command object for 'command'.  Normally this object
        is cached on a previous call to 'get_command_obj()'; if no command
        object for 'command' is in the cache, then we either create and
        return it (if 'create' is true) or return None.
        """
        from distutils.core import DEBUG
        cmd_obj = self.command_obj.get(command)
        if not cmd_obj and create:
            if DEBUG:
                print "Distribution.get_command_obj(): " \
                      "creating '%s' command object" % command

            # Instantiate the command class and cache the object; also mark
            # the command as not-yet-run.
            klass = self.get_command_class(command)
            cmd_obj = self.command_obj[command] = klass(self)
            self.have_run[command] = 0

            # Set any options that were supplied in config files
            # or on the command line.  (NB. support for error
            # reporting is lame here: any errors aren't reported
            # until 'finalize_options()' is called, which means
            # we won't report the source of the error.)
            options = self.command_options.get(command)
            if options:
                self._set_command_options(cmd_obj, options)

        return cmd_obj
+
    def _set_command_options (self, command_obj, option_dict=None):
        """Set the options for 'command_obj' from 'option_dict'.  Basically
        this means copying elements of a dictionary ('option_dict') to
        attributes of an instance ('command').

        'command_obj' must be a Command instance.  If 'option_dict' is not
        supplied, uses the standard option dictionary for this command
        (from 'self.command_options').

        Raises DistutilsOptionError for an unknown option or for a value
        that 'strtobool()' rejects.
        """
        from distutils.core import DEBUG

        command_name = command_obj.get_command_name()
        if option_dict is None:
            option_dict = self.get_option_dict(command_name)

        if DEBUG: print "  setting options for '%s' command:" % command_name
        # Each entry maps option name -> (source, value), where 'source'
        # identifies where the value came from (config file, command line).
        for (option, (source, value)) in option_dict.items():
            if DEBUG: print "    %s = %s (from %s)" % (option, value, source)
            # NOTE(review): 'bool_opts' and 'neg_opt' look loop-invariant
            # (they depend only on the command class), so recomputing them
            # per option is redundant work; left as-is to preserve the
            # original code exactly.
            try:
                bool_opts = map(translate_longopt, command_obj.boolean_options)
            except AttributeError:
                bool_opts = []
            try:
                neg_opt = command_obj.negative_opt
            except AttributeError:
                neg_opt = {}

            try:
                # String values of boolean/negative-alias options are parsed
                # with 'strtobool()'; everything else is assigned verbatim,
                # but only to attributes the command object already has.
                is_string = type(value) is StringType
                if neg_opt.has_key(option) and is_string:
                    setattr(command_obj, neg_opt[option], not strtobool(value))
                elif option in bool_opts and is_string:
                    setattr(command_obj, option, strtobool(value))
                elif hasattr(command_obj, option):
                    setattr(command_obj, option, value)
                else:
                    raise DistutilsOptionError, \
                          ("error in %s: command '%s' has no such option '%s'"
                           % (source, command_name, option))
            except ValueError, msg:
                raise DistutilsOptionError, msg
+
+    def reinitialize_command (self, command, reinit_subcommands=0):
+        """Reinitializes a command to the state it was in when first
+        returned by 'get_command_obj()': ie., initialized but not yet
+        finalized.  This provides the opportunity to sneak option
+        values in programmatically, overriding or supplementing
+        user-supplied values from the config files and command line.
+        You'll have to re-finalize the command object (by calling
+        'finalize_options()' or 'ensure_finalized()') before using it for
+        real.
+
+        'command' should be a command name (string) or command object.  If
+        'reinit_subcommands' is true, also reinitializes the command's
+        sub-commands, as declared by the 'sub_commands' class attribute (if
+        it has one).  See the "install" command for an example.  Only
+        reinitializes the sub-commands that actually matter, ie. those
+        whose test predicates return true.
+
+        Returns the reinitialized command object.
+        """
+        from distutils.cmd import Command
+        if not isinstance(command, Command):
+            command_name = command
+            command = self.get_command_obj(command_name)
+        else:
+            command_name = command.get_command_name()
+
+        if not command.finalized:
+            return command
+        command.initialize_options()
+        command.finalized = 0
+        self.have_run[command_name] = 0
+        self._set_command_options(command)
+
+        if reinit_subcommands:
+            for sub in command.get_sub_commands():
+                self.reinitialize_command(sub, reinit_subcommands)
+
+        return command
+
+
+    # -- Methods that operate on the Distribution ----------------------
+
+    def announce (self, msg, level=1):
+        """Print 'msg' if 'level' is greater than or equal to the verbosity
+        level recorded in the 'verbose' attribute (which, currently, can be
+        only 0 or 1).
+        """
+        if self.verbose >= level:
+            print msg
+
+
+    def run_commands (self):
+        """Run each command that was seen on the setup script command line.
+        Uses the list of commands found and cache of command objects
+        created by 'get_command_obj()'.
+        """
+        for cmd in self.commands:
+            self.run_command(cmd)
+
+
+    # -- Methods that operate on its Commands --------------------------
+
+    def run_command (self, command):
+        """Do whatever it takes to run a command (including nothing at all,
+        if the command has already been run).  Specifically: if we have
+        already created and run the command named by 'command', return
+        silently without doing anything.  If the command named by 'command'
+        doesn't even have a command object yet, create one.  Then invoke
+        'run()' on that command object (or an existing one).
+        """
+        # Already been here, done that? then return silently.
+        if self.have_run.get(command):
+            return
+
+        self.announce("running " + command)
+        cmd_obj = self.get_command_obj(command)
+        cmd_obj.ensure_finalized()
+        cmd_obj.run()
+        self.have_run[command] = 1
+
+
+    # -- Distribution query methods ------------------------------------
+
    # NB. these predicates return "true"/"false" in the loose Python sense:
    # several of them return the attribute itself (possibly None or an
    # empty list) rather than a strict 0/1.

    def has_pure_modules (self):
        # True if any pure-Python packages or modules were declared.
        return len(self.packages or self.py_modules or []) > 0

    def has_ext_modules (self):
        return self.ext_modules and len(self.ext_modules) > 0

    def has_c_libraries (self):
        return self.libraries and len(self.libraries) > 0

    def has_modules (self):
        # Any Python code at all, pure or extension?
        return self.has_pure_modules() or self.has_ext_modules()

    def has_headers (self):
        return self.headers and len(self.headers) > 0

    def has_scripts (self):
        return self.scripts and len(self.scripts) > 0

    def has_data_files (self):
        return self.data_files and len(self.data_files) > 0

    def is_pure (self):
        # Pure == only Python modules, no compiled extensions or C libraries.
        return (self.has_pure_modules() and
                not self.has_ext_modules() and
                not self.has_c_libraries())
+
+    # -- Metadata query methods ----------------------------------------
+
+    # If you're looking for 'get_name()', 'get_version()', and so forth,
+    # they are defined in a sneaky way: the constructor binds self.get_XXX
+    # to self.metadata.get_XXX.  The actual code is in the
+    # DistributionMetadata class, below.
+
+# class Distribution
+
+
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.

    Every field defaults to None; the get_XXX() accessors below
    substitute "UNKNOWN" (or an empty/placeholder list) for missing
    values, so callers never see None.
    """

    # Basenames for which the Distribution constructor creates get_XXX
    # delegators on itself.  Note that both spellings, 'license' and the
    # British 'licence', are listed; they resolve to the same accessor.
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "licence")

    def __init__ (self):
        # All metadata starts out unset; setup() keyword arguments (or a
        # setup script) are expected to fill these in.
        self.name = None
        self.version = None
        self.author = None
        self.author_email = None
        self.maintainer = None
        self.maintainer_email = None
        self.url = None
        self.license = None
        self.description = None
        self.long_description = None
        self.keywords = None
        self.platforms = None

    def write_pkg_info (self, base_dir):
        """Write the PKG-INFO file into the release tree.

        Emits a Metadata-Version 1.0 file built entirely from the
        get_XXX() accessors, so missing fields appear as "UNKNOWN".
        """

        pkg_info = open( os.path.join(base_dir, 'PKG-INFO'), 'w')

        pkg_info.write('Metadata-Version: 1.0\n')
        pkg_info.write('Name: %s\n' % self.get_name() )
        pkg_info.write('Version: %s\n' % self.get_version() )
        pkg_info.write('Summary: %s\n' % self.get_description() )
        pkg_info.write('Home-page: %s\n' % self.get_url() )
        pkg_info.write('Author: %s\n' % self.get_contact() )
        pkg_info.write('Author-email: %s\n' % self.get_contact_email() )
        pkg_info.write('License: %s\n' % self.get_license() )

        # Multi-line description must be escaped to survive the
        # RFC 822-style header format.
        long_desc = rfc822_escape( self.get_long_description() )
        pkg_info.write('Description: %s\n' % long_desc)

        keywords = string.join( self.get_keywords(), ',')
        if keywords:
            pkg_info.write('Keywords: %s\n' % keywords )

        # One 'Platform:' header per platform.
        for platform in self.get_platforms():
            pkg_info.write('Platform: %s\n' % platform )

        pkg_info.close()

    # write_pkg_info ()

    # -- Metadata query methods ----------------------------------------
    # Each accessor substitutes a placeholder for an unset (None) field.

    def get_name (self):
        return self.name or "UNKNOWN"

    def get_version(self):
        return self.version or "0.0.0"

    def get_fullname (self):
        return "%s-%s" % (self.get_name(), self.get_version())

    def get_author(self):
        return self.author or "UNKNOWN"

    def get_author_email(self):
        return self.author_email or "UNKNOWN"

    def get_maintainer(self):
        return self.maintainer or "UNKNOWN"

    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"

    def get_contact(self):
        # Maintainer takes precedence over author.
        return (self.maintainer or
                self.author or
                "UNKNOWN")

    def get_contact_email(self):
        return (self.maintainer_email or
                self.author_email or
                "UNKNOWN")

    def get_url(self):
        return self.url or "UNKNOWN"

    def get_license(self):
        return self.license or "UNKNOWN"
    # British-spelling alias for the same accessor.
    get_licence = get_license

    def get_description(self):
        return self.description or "UNKNOWN"

    def get_long_description(self):
        return self.long_description or "UNKNOWN"

    def get_keywords(self):
        return self.keywords or []

    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]

# class DistributionMetadata
+
+
def fix_help_options (options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt, by dropping the
    fourth element of each tuple.
    """
    return [help_tuple[0:3] for help_tuple in options]
+
+
if __name__ == "__main__":
    # Minimal smoke test: constructing an empty Distribution must succeed.
    dist = Distribution()
    print "ok"
diff --git a/lib-python/2.2/distutils/errors.py b/lib-python/2.2/distutils/errors.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/errors.py
@@ -0,0 +1,99 @@
"""distutils.errors

Provides exceptions used by the Distutils modules.  Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).

This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""

# created 1999/03/03, Greg Ward

__revision__ = "$Id$"

class DistutilsError (Exception):
    """The root of all Distutils evil."""
    pass

class DistutilsModuleError (DistutilsError):
    """Unable to load an expected module, or to find an expected class
    within some module (in particular, command modules and classes)."""
    pass

class DistutilsClassError (DistutilsError):
    """Some command class (or possibly distribution class, if anyone
    feels a need to subclass Distribution) is found not to be holding
    up its end of the bargain, ie. implementing some part of the
    "command" interface."""
    pass

class DistutilsGetoptError (DistutilsError):
    """The option table provided to 'fancy_getopt()' is bogus."""
    pass

class DistutilsArgError (DistutilsError):
    """Raised by fancy_getopt in response to getopt.error -- ie. an
    error in the command line usage."""
    pass

class DistutilsFileError (DistutilsError):
    """Any problems in the filesystem: expected file not found, etc.
    Typically this is for problems that we detect before IOError or
    OSError could be raised."""
    pass

class DistutilsOptionError (DistutilsError):
    """Syntactic/semantic errors in command options, such as use of
    mutually conflicting options, or inconsistent options,
    badly-spelled values, etc.  No distinction is made between option
    values originating in the setup script, the command line, config
    files, or what-have-you -- but if we *know* something originated in
    the setup script, we'll raise DistutilsSetupError instead."""
    pass

class DistutilsSetupError (DistutilsError):
    """For errors that can be definitely blamed on the setup script,
    such as invalid keyword arguments to 'setup()'."""
    pass

class DistutilsPlatformError (DistutilsError):
    """We don't know how to do something on the current platform (but
    we do know how to do it on some platform) -- eg. trying to compile
    C files on a platform not supported by a CCompiler subclass."""
    pass

class DistutilsExecError (DistutilsError):
    """Any problems executing an external program (such as the C
    compiler, when compiling C files)."""
    pass

class DistutilsInternalError (DistutilsError):
    """Internal inconsistencies or impossibilities (obviously, this
    should never be seen if the code is working!)."""
    pass

class DistutilsTemplateError (DistutilsError):
    """Syntax error in a file list template."""


# Exception classes used by the CCompiler implementation classes
class CCompilerError (Exception):
    """Some compile/link operation failed."""

class PreprocessError (CCompilerError):
    """Failure to preprocess one or more C/C++ files."""

class CompileError (CCompilerError):
    """Failure to compile one or more C/C++ source files."""

class LibError (CCompilerError):
    """Failure to create a static library from one or more C/C++ object
    files."""

class LinkError (CCompilerError):
    """Failure to link one or more C/C++ object files into an executable
    or shared library file."""

class UnknownFileError (CCompilerError):
    """Attempt to process an unknown file type."""
diff --git a/lib-python/2.2/distutils/extension.py b/lib-python/2.2/distutils/extension.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/extension.py
@@ -0,0 +1,231 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts."""
+
+# created 2000/05/30, Greg Ward
+
+__revision__ = "$Id$"
+
+import os, string, sys
+from types import *
+
+try:
+    import warnings
+except ImportError:
+    warnings = None
+
+# This class is really only used by the "build_ext" command, so it might
+# make sense to put it in distutils.command.build_ext.  However, that
+# module is already big enough, and I want to make this class a bit more
+# complex to simplify some common cases ("foo" module in "foo.c") and do
+# better error-checking ("foo.c" actually exists).
+#
+# Also, putting this in build_ext.py means every setup script would have to
+# import that large-ish module (indirectly, through distutils.core) in
+# order to do anything.
+
class Extension:
    """Just a collection of attributes that describes an extension
    module and everything needed to build it (hopefully in a portable
    way, but there are hooks that let you be as unportable as you need).

    Instance attributes:
      name : string
        the full name of the extension, including any packages -- ie.
        *not* a filename or pathname, but Python dotted name
      sources : [string]
        list of source filenames, relative to the distribution root
        (where the setup script lives), in Unix form (slash-separated)
        for portability.  Source files may be C, C++, SWIG (.i),
        platform-specific resource files, or whatever else is recognized
        by the "build_ext" command as source for a Python extension.
      include_dirs : [string]
        list of directories to search for C/C++ header files (in Unix
        form for portability)
      define_macros : [(name : string, value : string|None)]
        list of macros to define; each macro is defined using a 2-tuple,
        where 'value' is either the string to define it to or None to
        define it without a particular value (equivalent of "#define
        FOO" in source or -DFOO on Unix C compiler command line)
      undef_macros : [string]
        list of macros to undefine explicitly
      library_dirs : [string]
        list of directories to search for C/C++ libraries at link time
      libraries : [string]
        list of library names (not filenames or paths) to link against
      runtime_library_dirs : [string]
        list of directories to search for C/C++ libraries at run time
        (for shared extensions, this is when the extension is loaded)
      extra_objects : [string]
        list of extra files to link with (eg. object files not implied
        by 'sources', static library that must be explicitly specified,
        binary resource files, etc.)
      extra_compile_args : [string]
        any extra platform- and compiler-specific information to use
        when compiling the source files in 'sources'.  For platforms and
        compilers where "command line" makes sense, this is typically a
        list of command-line arguments, but for other platforms it could
        be anything.
      extra_link_args : [string]
        any extra platform- and compiler-specific information to use
        when linking object files together to create the extension (or
        to create a new static Python interpreter).  Similar
        interpretation as for 'extra_compile_args'.
      export_symbols : [string]
        list of symbols to be exported from a shared extension.  Not
        used on all platforms, and not generally necessary for Python
        extensions, which typically export exactly one symbol: "init" +
        extension_name.
    """

    def __init__ (self, name, sources,
                  include_dirs=None,
                  define_macros=None,
                  undef_macros=None,
                  library_dirs=None,
                  libraries=None,
                  runtime_library_dirs=None,
                  extra_objects=None,
                  extra_compile_args=None,
                  extra_link_args=None,
                  export_symbols=None,
                  **kw                      # To catch unknown keywords
                 ):

        # Only 'name' and 'sources' are required; both are type-checked
        # up front since they drive everything the build commands do.
        assert type(name) is StringType, "'name' must be a string"
        assert (type(sources) is ListType and
                map(type, sources) == [StringType]*len(sources)), \
                "'sources' must be a list of strings"

        self.name = name
        self.sources = sources
        # Every optional argument defaults to an empty (fresh) list, never
        # a shared mutable default.
        self.include_dirs = include_dirs or []
        self.define_macros = define_macros or []
        self.undef_macros = undef_macros or []
        self.library_dirs = library_dirs or []
        self.libraries = libraries or []
        self.runtime_library_dirs = runtime_library_dirs or []
        self.extra_objects = extra_objects or []
        self.extra_compile_args = extra_compile_args or []
        self.extra_link_args = extra_link_args or []
        self.export_symbols = export_symbols or []

        # If there are unknown keyword options, warn about them
        # (falling back to stderr when the 'warnings' module is absent).
        if len(kw):
            L = kw.keys() ; L.sort()
            L = map(repr, L)
            msg = "Unknown Extension options: " + string.join(L, ', ')
            if warnings is not None:
                warnings.warn(msg)
            else:
                sys.stderr.write(msg + '\n')
# class Extension
+
+
def read_setup_file (filename):
    """Parse a "Setup"-style file (as historically consumed by the old
    makesetup script) named by 'filename' and return a list of Extension
    instances, one per module line found in the file.
    """
    from distutils.sysconfig import \
         parse_makefile, expand_makefile_vars, _variable_rx
    from distutils.text_file import TextFile
    from distutils.util import split_quoted

    # First pass over the file to gather "VAR = VALUE" assignments.
    vars = parse_makefile(filename)

    # Second pass to gobble up the real content: lines of the form
    #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    file = TextFile(filename,
                    strip_comments=1, skip_blanks=1, join_lines=1,
                    lstrip_ws=1, rstrip_ws=1)
    extensions = []

    while 1:
        line = file.readline()
        if line is None:                # eof
            break
        if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
            continue

        # Lines wrapped in '*...*' (makesetup directives) are not supported.
        if line[0] == line[-1] == "*":
            file.warn("'%s' lines not handled yet" % line)
            continue

        #print "original line: " + line
        line = expand_makefile_vars(line, vars)
        words = split_quoted(line)
        #print "expanded line: " + line

        # NB. this parses a slightly different syntax than the old
        # makesetup script: here, there must be exactly one extension per
        # line, and it must be the first word of the line.  I have no idea
        # why the old syntax supported multiple extensions per line, as
        # they all wind up being the same.

        module = words[0]
        ext = Extension(module, [])
        # When set, the *next* word is appended verbatim to this list
        # (used by two-word switches such as "-rpath <dir>").
        append_next_word = None

        for word in words[1:]:
            if append_next_word is not None:
                append_next_word.append(word)
                append_next_word = None
                continue

            # Classify each word by its file suffix or its 2-char switch.
            suffix = os.path.splitext(word)[1]
            switch = word[0:2] ; value = word[2:]

            if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
                # hmm, should we do something about C vs. C++ sources?
                # or leave it up to the CCompiler implementation to
                # worry about?
                ext.sources.append(word)
            elif switch == "-I":
                ext.include_dirs.append(value)
            elif switch == "-D":
                equals = string.find(value, "=")
                if equals == -1:        # bare "-DFOO" -- no value
                    ext.define_macros.append((value, None))
                else:                   # "-DFOO=blah"
                    # NOTE(review): 'value[equals+2:]' skips the '=' AND the
                    # first character of the macro value; this looks like an
                    # off-by-one (expected 'equals+1') -- confirm before
                    # relying on "-DFOO=blah" parsing.
                    ext.define_macros.append((value[0:equals],
                                              value[equals+2:]))
            elif switch == "-U":
                ext.undef_macros.append(value)
            elif switch == "-C":        # only here 'cause makesetup has it!
                ext.extra_compile_args.append(word)
            elif switch == "-l":
                ext.libraries.append(value)
            elif switch == "-L":
                ext.library_dirs.append(value)
            elif switch == "-R":
                ext.runtime_library_dirs.append(value)
            elif word == "-rpath":
                append_next_word = ext.runtime_library_dirs
            elif word == "-Xlinker":
                append_next_word = ext.extra_link_args
            elif switch == "-u":
                ext.extra_link_args.append(word)
                if not value:
                    append_next_word = ext.extra_link_args
            elif suffix in (".a", ".so", ".sl", ".o"):
                # NB. a really faithful emulation of makesetup would
                # append a .o file to extra_objects only if it
                # had a slash in it; otherwise, it would s/.o/.c/
                # and append it to sources.  Hmmmm.
                ext.extra_objects.append(word)
            else:
                file.warn("unrecognized argument '%s'" % word)

        extensions.append(ext)

        #print "module:", module
        #print "source files:", source_files
        #print "cpp args:", cpp_args
        #print "lib args:", library_args

        #extensions[module] = { 'sources': source_files,
        #                       'cpp_args': cpp_args,
        #                       'lib_args': library_args }

    return extensions

# read_setup_file ()
diff --git a/lib-python/2.2/distutils/fancy_getopt.py b/lib-python/2.2/distutils/fancy_getopt.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/fancy_getopt.py
@@ -0,0 +1,504 @@
+"""distutils.fancy_getopt
+
+Wrapper around the standard getopt module that provides the following
+additional features:
+  * short and long options are tied together
+  * options have help strings, so fancy_getopt could potentially
+    create a complete usage summary
+  * options set attributes of a passed-in object
+"""
+
+# created 1999/03/03, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, string, re
+from types import *
+import getopt
+from distutils.errors import *
+
+# Much like command_re in distutils.core, this is close to but not quite
+# the same as a Python NAME -- except, in the spirit of most GNU
+# utilities, we use '-' in place of '_'.  (The spirit of LISP lives on!)
+# The similarities to NAME are again not a coincidence...
+longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
+longopt_re = re.compile(r'^%s$' % longopt_pat)
+
+# For recognizing "negative alias" options, eg. "quiet=!verbose"
+neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
+
+# This is used to translate long options to legitimate Python identifiers
+# (for use as attributes of some object).
+longopt_xlate = string.maketrans('-', '_')
+
+# This records (option, value) pairs in the order seen on the command line;
+# it's close to what getopt.getopt() returns, but with short options
+# expanded.  (Ugh, this module should be OO-ified.)
+_option_order = None
+
+
class FancyGetopt:
    """Wrapper around the standard 'getopt()' module that provides some
    handy extra functionality:
      * short and long options are tied together
      * options have help strings, and help text can be assembled
        from them
      * options set attributes of a passed-in object
      * boolean options can have "negative aliases" -- eg. if
        --quiet is the "negative alias" of --verbose, then "--quiet"
        on the command line sets 'verbose' to false
    """

    def __init__ (self, option_table=None):
        # The option table is (currently) a list of 3-tuples:
        #   (long_option, short_option, help_string)
        # if an option takes an argument, its long_option should have '='
        # appended; short_option should just be a single character, no ':'
        # in any case.  If a long_option doesn't have a corresponding
        # short_option, short_option should be None.  All option tuples
        # must have long options.
        self.option_table = option_table

        # 'option_index' maps long option names to entries in the option
        # table (ie. those 3-tuples).
        self.option_index = {}
        if self.option_table:
            self._build_index()

        # 'alias' records (duh) alias options; {'foo': 'bar'} means
        # --foo is an alias for --bar
        self.alias = {}

        # 'negative_alias' keeps track of options that are the boolean
        # opposite of some other option
        self.negative_alias = {}

        # These keep track of the information in the option table.  We
        # don't actually populate these structures until we're ready to
        # parse the command-line, since the 'option_table' passed in here
        # isn't necessarily the final word.
        self.short_opts = []
        self.long_opts = []
        self.short2long = {}
        self.attr_name = {}
        self.takes_arg = {}

        # 'option_order' is filled up in 'getopt()'; it records the
        # original order of options (and their values) on the command-line,
        # but expands short options, converts aliases, etc.
        self.option_order = []

    def _build_index (self):
        """(Re)build 'option_index' from the current option table."""
        self.option_index.clear()
        for option in self.option_table:
            self.option_index[option[0]] = option

    def set_option_table (self, option_table):
        """Replace the option table wholesale and rebuild the index."""
        self.option_table = option_table
        self._build_index()

    def add_option (self, long_option, short_option=None, help_string=None):
        """Append one option to the table.  Raises DistutilsGetoptError
        if 'long_option' is already defined."""
        if long_option in self.option_index:
            raise DistutilsGetoptError(
                  "option conflict: already an option '%s'" % long_option)
        else:
            option = (long_option, short_option, help_string)
            self.option_table.append(option)
            self.option_index[long_option] = option

    def has_option (self, long_option):
        """Return true if the option table for this parser has an
        option with long name 'long_option'."""
        return long_option in self.option_index

    def get_attr_name (self, long_option):
        """Translate long option name 'long_option' to the form it
        has as an attribute of some object: ie., translate hyphens
        to underscores."""
        # Equivalent to the old string.translate(long_option, longopt_xlate)
        # -- the table only mapped '-' to '_'.
        return long_option.replace('-', '_')

    def _check_alias_dict (self, aliases, what):
        """Sanity-check an alias dictionary: every key and value must name
        an option already in the table.  'what' is used in error messages."""
        assert isinstance(aliases, dict)
        for (alias, opt) in aliases.items():
            if alias not in self.option_index:
                raise DistutilsGetoptError(
                      ("invalid %s '%s': "
                       "option '%s' not defined") % (what, alias, alias))
            if opt not in self.option_index:
                raise DistutilsGetoptError(
                      ("invalid %s '%s': "
                       "aliased option '%s' not defined") % (what, alias, opt))

    def set_aliases (self, alias):
        """Set the aliases for this option parser."""
        self._check_alias_dict(alias, "alias")
        self.alias = alias

    def set_negative_aliases (self, negative_alias):
        """Set the negative aliases for this option parser.
        'negative_alias' should be a dictionary mapping option names to
        option names, both the key and value must already be defined
        in the option table."""
        self._check_alias_dict(negative_alias, "negative alias")
        self.negative_alias = negative_alias

    def _grok_option_table (self):
        """Populate the various data structures that keep tabs on the
        option table.  Called by 'getopt()' before it can do anything
        worthwhile.
        """
        self.long_opts = []
        self.short_opts = []
        self.short2long.clear()

        for option in self.option_table:
            try:
                (long_opt, short_opt, help_text) = option
            except ValueError:
                raise DistutilsGetoptError("invalid option tuple " +
                                           str(option))

            # Type- and value-check the option names
            if not isinstance(long_opt, str) or len(long_opt) < 2:
                raise DistutilsGetoptError(
                      ("invalid long option '%s': "
                       "must be a string of length >= 2") % long_opt)

            if not ((short_opt is None) or
                    (isinstance(short_opt, str) and len(short_opt) == 1)):
                raise DistutilsGetoptError(
                      ("invalid short option '%s': "
                       "must be a single character or None") % short_opt)

            self.long_opts.append(long_opt)

            if long_opt[-1] == '=':         # option takes an argument?
                if short_opt:
                    short_opt = short_opt + ':'
                long_opt = long_opt[0:-1]
                self.takes_arg[long_opt] = 1
            else:
                # Is option a "negative alias" for some other option (eg.
                # "quiet" == "!verbose")?
                alias_to = self.negative_alias.get(long_opt)
                if alias_to is not None:
                    if self.takes_arg[alias_to]:
                        raise DistutilsGetoptError(
                              ("invalid negative alias '%s': "
                               "aliased option '%s' takes a value") %
                              (long_opt, alias_to))

                    self.long_opts[-1] = long_opt # XXX redundant?!
                    self.takes_arg[long_opt] = 0

                else:
                    self.takes_arg[long_opt] = 0

            # If this is an alias option, make sure its "takes arg" flag is
            # the same as the option it's aliased to.
            alias_to = self.alias.get(long_opt)
            if alias_to is not None:
                if self.takes_arg[long_opt] != self.takes_arg[alias_to]:
                    raise DistutilsGetoptError(
                          ("invalid alias '%s': inconsistent with "
                           "aliased option '%s' (one of them takes a value, "
                           "the other doesn't)") % (long_opt, alias_to))

            # Now enforce some bondage on the long option name, so we can
            # later translate it to an attribute name on some object.  Have
            # to do this a bit late to make sure we've removed any trailing
            # '='.
            if not longopt_re.match(long_opt):
                raise DistutilsGetoptError(
                      ("invalid long option name '%s' "
                       "(must be letters, numbers, hyphens only)") % long_opt)

            self.attr_name[long_opt] = self.get_attr_name(long_opt)
            if short_opt:
                self.short_opts.append(short_opt)
                self.short2long[short_opt[0]] = long_opt

    def getopt (self, args=None, object=None):
        """Parse the command-line options in 'args' and store the results
        as attributes of 'object'.  If 'args' is None or not supplied, uses
        'sys.argv[1:]'.  If 'object' is None or not supplied, creates a new
        OptionDummy object, stores option values there, and returns a tuple
        (args, object).  If 'object' is supplied, it is modified in place
        and 'getopt()' just returns 'args'; in both cases, the returned
        'args' is a modified copy of the passed-in 'args' list, which is
        left untouched.
        """
        if args is None:
            args = sys.argv[1:]
        if object is None:
            object = OptionDummy()
            created_object = 1
        else:
            created_object = 0

        self._grok_option_table()

        # getopt wants a bare "ab:c"-style spec; the old code joined with
        # spaces ("a b: c"), which happened to work but was sloppy (fixed
        # to a plain concatenation in later CPython).
        short_opts = ''.join(self.short_opts)
        try:
            (opts, args) = getopt.getopt(args, short_opts, self.long_opts)
        except getopt.error:
            # sys.exc_info() used instead of "except E, v" so this file
            # stays parseable by both old and new interpreters.
            raise DistutilsArgError(sys.exc_info()[1])

        for (opt, val) in opts:
            if len(opt) == 2 and opt[0] == '-': # it's a short option
                opt = self.short2long[opt[1]]

            elif len(opt) > 2 and opt[0:2] == '--':
                opt = opt[2:]

            else:
                raise DistutilsInternalError(
                      "this can't happen: bad option string '%s'" % opt)

            alias = self.alias.get(opt)
            if alias:
                opt = alias

            if not self.takes_arg[opt]:     # boolean option?
                if val != '':               # shouldn't have a value!
                    raise DistutilsInternalError(
                          "this can't happen: bad option value '%s'" % val)

                alias = self.negative_alias.get(opt)
                if alias:
                    opt = alias
                    val = 0
                else:
                    val = 1

            attr = self.attr_name[opt]
            setattr(object, attr, val)
            self.option_order.append((opt, val))

        if created_object:
            return (args, object)
        else:
            return args

    def get_option_order (self):
        """Returns the list of (option, value) tuples processed by the
        previous run of 'getopt()'.  Raises RuntimeError if
        'getopt()' hasn't been called yet.
        """
        if self.option_order is None:
            raise RuntimeError("'getopt()' hasn't been called yet")
        else:
            return self.option_order

    def generate_help (self, header=None):
        """Generate help text (a list of strings, one per suggested line of
        output) from the option table for this FancyGetopt object.
        """
        # Blithely assume the option table is good: probably wouldn't call
        # 'generate_help()' unless you've already called 'getopt()'.

        # First pass: determine maximum length of long option names
        max_opt = 0
        for option in self.option_table:
            long_opt = option[0]
            short_opt = option[1]
            l = len(long_opt)
            if long_opt[-1] == '=':
                l = l - 1
            if short_opt is not None:
                l = l + 5                   # " (-x)" where short == 'x'
            if l > max_opt:
                max_opt = l

        opt_width = max_opt + 2 + 2 + 2     # room for indent + dashes + gutter

        # Typical help block looks like this:
        #   --foo       controls foonabulation
        # Help block for longest option looks like this:
        #   --flimflam  set the flim-flam level
        # and with wrapped text:
        #   --flimflam  set the flim-flam level (must be between
        #               0 and 100, except on Tuesdays)
        # Options with short names will have the short name shown (but
        # it doesn't contribute to max_opt):
        #   --foo (-f)  controls foonabulation
        # If adding the short option would make the left column too wide,
        # we push the explanation off to the next line
        #   --flimflam (-l)
        #               set the flim-flam level
        # Important parameters:
        #   - 2 spaces before option block start lines
        #   - 2 dashes for each long option name
        #   - min. 2 spaces between option and explanation (gutter)
        #   - 5 characters (incl. space) for short option name

        # Now generate lines of help text.  (If 80 columns were good enough
        # for Jesus, then 78 columns are good enough for me!)
        line_width = 78
        text_width = line_width - opt_width
        big_indent = ' ' * opt_width
        if header:
            lines = [header]
        else:
            lines = ['Option summary:']

        for (long_opt, short_opt, help_text) in self.option_table:

            text = wrap_text(help_text, text_width)
            if long_opt[-1] == '=':
                long_opt = long_opt[0:-1]

            # Case 1: no short option at all (makes life easy)
            if short_opt is None:
                if text:
                    lines.append("  --%-*s  %s" % (max_opt, long_opt, text[0]))
                else:
                    lines.append("  --%-*s  " % (max_opt, long_opt))

            # Case 2: we have a short option, so we have to include it
            # just after the long option
            else:
                opt_names = "%s (-%s)" % (long_opt, short_opt)
                if text:
                    lines.append("  --%-*s  %s" %
                                 (max_opt, opt_names, text[0]))
                else:
                    # BUG FIX: "%-*s" needs a (width, value) pair; the old
                    # code passed only 'opt_names' and raised TypeError
                    # whenever an option with a short name had no help text.
                    lines.append("  --%-*s" % (max_opt, opt_names))

            for l in text[1:]:
                lines.append(big_indent + l)

        return lines

    def print_help (self, header=None, file=None):
        """Write the generated help text to 'file' (default: sys.stdout)."""
        if file is None:
            file = sys.stdout
        for line in self.generate_help(header):
            file.write(line + "\n")

# class FancyGetopt
+
+
def fancy_getopt (options, negative_opt, object, args):
    """Convenience wrapper: build a FancyGetopt from 'options', register
    'negative_opt' as its negative aliases, and parse 'args' into
    attributes of 'object' (see FancyGetopt.getopt for the return value).
    """
    wrapper = FancyGetopt(options)
    wrapper.set_negative_aliases(negative_opt)
    return wrapper.getopt(args, object)
+
+
+WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
+
def wrap_text (text, width):
    """wrap_text(text : string, width : int) -> [string]

    Split 'text' into multiple lines of no more than 'width' characters
    each, and return the list of strings that results.  A None 'text'
    yields an empty list; text that already fits is returned unchanged
    as a one-element list.
    """
    if text is None:
        return []
    if len(text) <= width:
        return [text]

    # Normalize the text: expand tabs, then collapse every whitespace
    # character to a plain space.  (The original routed through the
    # module-global WS_TRANS table and the legacy 'string' module
    # functions; the explicit loop below is equivalent and keeps this
    # function self-contained.)
    text = text.expandtabs()
    chars = []
    for ch in text:
        if ch in string.whitespace:
            chars.append(' ')
        else:
            chars.append(ch)
    text = ''.join(chars)

    # Split into wrappable chunks: runs of spaces and runs of hyphens are
    # kept as their own chunks; ' - ' produces empty strings, dropped here.
    # (A list comprehension rather than filter(), because the chunk list
    # is mutated by index below.)
    chunks = [c for c in re.split(r'( +|-+)', text) if c]
    lines = []

    while chunks:

        cur_line = []                   # list of chunks (to-be-joined)
        cur_len = 0                     # length of current line

        while chunks:
            l = len(chunks[0])
            if cur_len + l <= width:    # can squeeze (at least) this chunk in
                cur_line.append(chunks[0])
                del chunks[0]
                cur_len = cur_len + l
            else:                       # this line is full
                # drop last chunk if all space
                if cur_line and cur_line[-1][0] == ' ':
                    del cur_line[-1]
                break

        if chunks:                      # any chunks left to process?

            # if the current line is still empty, then we had a single
            # chunk that's too big too fit on a line -- so we break
            # down and break it up at the line width
            if cur_len == 0:
                cur_line.append(chunks[0][0:width])
                chunks[0] = chunks[0][width:]

            # all-whitespace chunks at the end of a line can be discarded
            # (and we know from the re.split above that if a chunk has
            # *any* whitespace, it is *all* whitespace)
            if chunks[0][0] == ' ':
                del chunks[0]

        # and store this line in the list-of-all-lines -- as a single
        # string, of course!
        lines.append(''.join(cur_line))

    return lines

# wrap_text ()
+
+
def translate_longopt (opt):
    """Convert a long option name to a valid Python identifier by
    changing "-" to "_".
    """
    # str.replace is equivalent to the original
    # string.translate(opt, longopt_xlate) -- the translation table only
    # mapped '-' to '_' -- and removes the dependence on both the
    # module-global table and the legacy 'string' module functions.
    return opt.replace('-', '_')
+
+
class OptionDummy:
    """Dummy class just used as a place to hold command-line option
    values as instance attributes."""

    def __init__ (self, options=[]):
        """Create a new OptionDummy instance.  The attributes listed in
        'options' will be initialized to None."""
        for attr_name in options:
            setattr(self, attr_name, None)

# class OptionDummy
+
+
if __name__ == "__main__":
    # Ad-hoc demo: show how the sample text wraps at a few widths.
    text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""

    for w in (10, 20, 30, 40):
        print("width: %d" % w)
        print("\n".join(wrap_text(text, w)))
        print("")
diff --git a/lib-python/2.2/distutils/file_util.py b/lib-python/2.2/distutils/file_util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/file_util.py
@@ -0,0 +1,258 @@
+"""distutils.file_util
+
+Utility functions for operating on single files.
+"""
+
+# created 2000/04/03, Greg Ward (extracted from util.py)
+
+__revision__ = "$Id$"
+
+import os
+from distutils.errors import DistutilsFileError
+
+
+# for generating verbose output in 'copy_file()'
+_copy_action = { None:   'copying',
+                 'hard': 'hard linking',
+                 'sym':  'symbolically linking' }
+
+
+def _copy_file_contents (src, dst, buffer_size=16*1024):
+    """Copy the file 'src' to 'dst'; both must be filenames.  Any error
+    opening either file, reading from 'src', or writing to 'dst', raises
+    DistutilsFileError.  Data is read/written in chunks of 'buffer_size'
+    bytes (default 16k).  No attempt is made to handle anything apart from
+    regular files.
+    """
+    # Stolen from shutil module in the standard library, but with
+    # custom error-handling added.
+
+    fsrc = None
+    fdst = None
+    try:
+        try:
+            fsrc = open(src, 'rb')
+        except os.error, (errno, errstr):
+            raise DistutilsFileError, \
+                  "could not open '%s': %s" % (src, errstr)
+
+        if os.path.exists(dst):
+            try:
+                os.unlink(dst)
+            except os.error, (errno, errstr):
+                raise DistutilsFileError, \
+                      "could not delete '%s': %s" % (dst, errstr)
+        
+        try:
+            fdst = open(dst, 'wb')
+        except os.error, (errno, errstr):
+            raise DistutilsFileError, \
+                  "could not create '%s': %s" % (dst, errstr)
+
+        while 1:
+            try:
+                buf = fsrc.read(buffer_size)
+            except os.error, (errno, errstr):
+                raise DistutilsFileError, \
+                      "could not read from '%s': %s" % (src, errstr)
+
+            if not buf:
+                break
+
+            try:
+                fdst.write(buf)
+            except os.error, (errno, errstr):
+                raise DistutilsFileError, \
+                      "could not write to '%s': %s" % (dst, errstr)
+
+    finally:
+        if fdst:
+            fdst.close()
+        if fsrc:
+            fsrc.close()
+
+# _copy_file_contents()
+
+
+def copy_file (src, dst,
+               preserve_mode=1,
+               preserve_times=1,
+               update=0,
+               link=None,
+               verbose=0,
+               dry_run=0):
+
+    """Copy a file 'src' to 'dst'.  If 'dst' is a directory, then 'src' is
+    copied there with the same name; otherwise, it must be a filename.  (If
+    the file exists, it will be ruthlessly clobbered.)  If 'preserve_mode'
+    is true (the default), the file's mode (type and permission bits, or
+    whatever is analogous on the current platform) is copied.  If
+    'preserve_times' is true (the default), the last-modified and
+    last-access times are copied as well.  If 'update' is true, 'src' will
+    only be copied if 'dst' does not exist, or if 'dst' does exist but is
+    older than 'src'.  If 'verbose' is true, then a one-line summary of the
+    copy will be printed to stdout.
+
+    'link' allows you to make hard links (os.link) or symbolic links
+    (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
+    None (the default), files are copied.  Don't set 'link' on systems that
+    don't support it: 'copy_file()' doesn't check if hard or symbolic
+    linking is available.
+
+    Under Mac OS, uses the native file copy function in macostools; on
+    other systems, uses '_copy_file_contents()' to copy file contents.
+
+    Return a tuple (dest_name, copied): 'dest_name' is the actual name of
+    the output file, and 'copied' is true if the file was copied (or would
+    have been copied, if 'dry_run' true).
+    """
+    # XXX if the destination file already exists, we clobber it if
+    # copying, but blow up if linking.  Hmmm.  And I don't know what
+    # macostools.copyfile() does.  Should definitely be consistent, and
+    # should probably blow up if destination exists and we would be
+    # changing it (ie. it's not already a hard/soft link to src OR
+    # (not update) and (src newer than dst).
+
+    from distutils.dep_util import newer
+    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
+
+    if not os.path.isfile(src):
+        raise DistutilsFileError, \
+              "can't copy '%s': doesn't exist or not a regular file" % src
+
+    # Resolve a directory destination to a full filename; 'dir' is kept
+    # only for the verbose message below.
+    if os.path.isdir(dst):
+        dir = dst
+        dst = os.path.join(dst, os.path.basename(src))
+    else:
+        dir = os.path.dirname(dst)
+
+    # Skip the copy when 'update' is requested and 'dst' is up-to-date.
+    # NOTE(review): 'newer()' comes from distutils.dep_util; presumably a
+    # missing 'dst' counts as out-of-date -- confirm before relying on it.
+    if update and not newer(src, dst):
+        if verbose:
+            print "not copying %s (output up-to-date)" % src
+        return (dst, 0)
+
+    # Map the 'link' argument to a verb for the log message; anything other
+    # than None/'hard'/'sym' is rejected here with ValueError.
+    try:
+        action = _copy_action[link]
+    except KeyError:
+        raise ValueError, \
+              "invalid value '%s' for 'link' argument" % link
+    if verbose:
+        if os.path.basename(dst) == os.path.basename(src):
+            print "%s %s -> %s" % (action, src, dir)
+        else:
+            print "%s %s -> %s" % (action, src, dst)
+
+    # In dry-run mode we still report (dst, 1) as if the copy happened.
+    if dry_run:
+        return (dst, 1)
+
+    # On Mac OS, use the native file copy routine
+    if os.name == 'mac':
+        import macostools
+        # NOTE(review): 'exc[-1]' relies on py2 exceptions being indexable
+        # (it picks the last element of exc.args).
+        try:
+            macostools.copy(src, dst, 0, preserve_times)
+        except os.error, exc:
+            raise DistutilsFileError, \
+                  "could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
+
+    # If linking (hard or symbolic), use the appropriate system call
+    # (Unix only, of course, but that's the caller's responsibility)
+    # NOTE(review): if 'dst' already exists and is the same file, the link
+    # step is skipped but (dst, 1) is still returned as if copied.
+    elif link == 'hard':
+        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+            os.link(src, dst)
+    elif link == 'sym':
+        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+            os.symlink(src, dst)
+
+    # Otherwise (non-Mac, not linking), copy the file contents and
+    # (optionally) copy the times and mode.
+    else:
+        _copy_file_contents(src, dst)
+        if preserve_mode or preserve_times:
+            st = os.stat(src)
+
+            # According to David Ascher <da at ski.org>, utime() should be done
+            # before chmod() (at least under NT).
+            if preserve_times:
+                os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
+            if preserve_mode:
+                os.chmod(dst, S_IMODE(st[ST_MODE]))
+
+    return (dst, 1)
+
+# copy_file ()
+
+
+# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
               verbose=0,
               dry_run=0):

    """Move a file 'src' to 'dst'.  If 'dst' is a directory, the file will
    be moved into it with the same name; otherwise, 'src' is just renamed
    to 'dst'.  Return the new full name of the file.

    Handles cross-device moves on Unix using 'copy_file()'.  What about
    other systems???
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno
    import sys

    if verbose:
        print("moving %s -> %s" % (src, dst))

    if dry_run:
        return dst

    if not isfile(src):
        raise DistutilsFileError(
              "can't move '%s': not a regular file" % src)

    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' already exists" %
              (src, dst))

    # BUG FIX: for a bare relative filename, dirname(dst) is '' which is
    # not a directory, so the original rejected perfectly valid moves into
    # the current directory; treat '' as os.curdir.
    if not isdir(dirname(dst) or os.curdir):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' not a valid path" %
              (src, dst))

    # First try a cheap in-place rename; fall back to copy+delete only for
    # a cross-device move (EXDEV).  The original unpacked the exception as
    # a (num, msg) 2-tuple, which itself raises and masks the real error
    # whenever the OSError doesn't carry exactly two args.
    copy_it = 0
    try:
        os.rename(src, dst)
    except (os.error, IOError):
        e = sys.exc_info()[1]
        if e.errno == errno.EXDEV:
            copy_it = 1
        else:
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s': %s" %
                  (src, dst, e.strerror or e))

    if copy_it:
        copy_file(src, dst)
        try:
            os.unlink(src)
        except (os.error, IOError):
            e = sys.exc_info()[1]
            # Best effort to undo the copy so we don't leave two copies;
            # failure to undo is deliberately ignored.
            try:
                os.unlink(dst)
            except (os.error, IOError):
                pass
            raise DistutilsFileError(
                  ("couldn't move '%s' to '%s' by copy/delete: " +
                   "delete '%s' failed: %s") %
                  (src, dst, src, e.strerror or e))

    return dst

# move_file ()
+
+
def write_file (filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it, one per line.

    The file is always closed, even if writing one of the lines raises
    (the original leaked the open file object on error).
    """
    f = open(filename, "w")
    try:
        for line in contents:
            f.write(line + "\n")
    finally:
        f.close()
diff --git a/lib-python/2.2/distutils/filelist.py b/lib-python/2.2/distutils/filelist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/filelist.py
@@ -0,0 +1,367 @@
+"""distutils.filelist
+
+Provides the FileList class, used for poking about the filesystem
+and building lists of files.
+"""
+
+# created 2000/07/17, Rene Liebscher (as template.py)
+# most parts taken from commands/sdist.py
+# renamed 2000/07/29 (to filelist.py) and officially added to
+#  the Distutils source, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+import fnmatch
+from types import *
+from glob import glob
+from distutils.util import convert_path
+from distutils.errors import DistutilsTemplateError, DistutilsInternalError
+
+class FileList:
+
+    """A list of files built by exploring the filesystem and filtered by
+    applying various patterns to what we find there.
+
+    Instance attributes:
+      dir
+        directory from which files will be taken -- only used if
+        'allfiles' not supplied to constructor
+      files
+        list of filenames currently being built/filtered/manipulated
+      allfiles
+        complete list of files under consideration (ie. without any
+        filtering applied)
+    """
+
+    def __init__(self,
+                 warn=None,
+                 debug_print=None):
+        # use standard warning and debug functions if no other given
+        self.warn = warn or self.__warn
+        self.debug_print = debug_print or self.__debug_print
+
+        # 'allfiles' is populated lazily: either via 'set_allfiles()' /
+        # 'findall()', or on first use inside 'include_pattern()'.
+        self.allfiles = None
+        self.files = []
+
+
+    def set_allfiles (self, allfiles):
+        """Supply the full candidate file list directly (skips 'findall()')."""
+        self.allfiles = allfiles
+
+    def findall (self, dir=os.curdir):
+        """Populate 'allfiles' by walking 'dir' with the module-level
+        'findall()' function."""
+        self.allfiles = findall(dir)
+
+
+    # -- Fallback warning/debug functions ------------------------------
+
+    def __warn (self, msg):
+        # Default warning sink: plain line on stderr.
+        sys.stderr.write("warning: %s\n" % msg)
+
+    def __debug_print (self, msg):
+        """Print 'msg' to stdout if the global DEBUG (taken from the
+        DISTUTILS_DEBUG environment variable) flag is true.
+        """
+        # DEBUG is imported here (not at module level) to avoid a circular
+        # import with distutils.core.
+        from distutils.core import DEBUG
+        if DEBUG:
+            print msg
+
+
+    # -- List-like methods ---------------------------------------------
+
+    def append (self, item):
+        """Append a single filename to 'files'."""
+        self.files.append(item)
+
+    def extend (self, items):
+        """Append a sequence of filenames to 'files'."""
+        self.files.extend(items)
+
+    def sort (self):
+        # Not a strict lexical sort!
+        # Sorts on (directory, basename) tuples so that files group by
+        # directory, then rejoins each tuple back into a path.
+        sortable_files = map(os.path.split, self.files)
+        sortable_files.sort()
+        self.files = []
+        for sort_tuple in sortable_files:
+            self.files.append(apply(os.path.join, sort_tuple))
+
+
+    # -- Other miscellaneous utility methods ---------------------------
+
+    def remove_duplicates (self):
+        # Assumes list has been sorted!
+        # Walks backwards so deletions don't disturb unvisited indices.
+        for i in range(len(self.files)-1, 0, -1):
+            if self.files[i] == self.files[i-1]:
+                del self.files[i]
+
+
+    # -- "File template" methods ---------------------------------------
+
+    def _parse_template_line (self, line):
+        """Split one MANIFEST.in template line into
+        (action, patterns, dir, dir_pattern); unused slots are None.
+        Raises DistutilsTemplateError for a malformed or unknown line."""
+        words = string.split(line)
+        action = words[0]
+
+        patterns = dir = dir_pattern = None
+
+        if action in ('include', 'exclude',
+                      'global-include', 'global-exclude'):
+            if len(words) < 2:
+                raise DistutilsTemplateError, \
+                      "'%s' expects <pattern1> <pattern2> ..." % action
+
+            patterns = map(convert_path, words[1:])
+
+        elif action in ('recursive-include', 'recursive-exclude'):
+            if len(words) < 3:
+                raise DistutilsTemplateError, \
+                      "'%s' expects <dir> <pattern1> <pattern2> ..." % action
+
+            dir = convert_path(words[1])
+            patterns = map(convert_path, words[2:])
+
+        elif action in ('graft', 'prune'):
+            if len(words) != 2:
+                raise DistutilsTemplateError, \
+                     "'%s' expects a single <dir_pattern>" % action
+
+            dir_pattern = convert_path(words[1])
+
+        else:
+            raise DistutilsTemplateError, "unknown action '%s'" % action
+
+        return (action, patterns, dir, dir_pattern)
+
+    # _parse_template_line ()
+
+
+    def process_template_line (self, line):
+        """Parse one template line and apply the corresponding
+        include/exclude operation to 'files', warning when a pattern
+        matches nothing."""
+
+        # Parse the line: split it up, make sure the right number of words
+        # is there, and return the relevant words.  'action' is always
+        # defined: it's the first word of the line.  Which of the other
+        # three are defined depends on the action; it'll be either
+        # patterns, (dir and patterns), or (dir_pattern).
+        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
+
+        # OK, now we know that the action is valid and we have the
+        # right number of words on the line for that action -- so we
+        # can proceed with minimal error-checking.
+        if action == 'include':
+            self.debug_print("include " + string.join(patterns))
+            for pattern in patterns:
+                if not self.include_pattern(pattern, anchor=1):
+                    self.warn("no files found matching '%s'" % pattern)
+
+        elif action == 'exclude':
+            self.debug_print("exclude " + string.join(patterns))
+            for pattern in patterns:
+                if not self.exclude_pattern(pattern, anchor=1):
+                    self.warn(
+                        "no previously-included files found matching '%s'"%
+                        pattern)
+
+        elif action == 'global-include':
+            self.debug_print("global-include " + string.join(patterns))
+            for pattern in patterns:
+                if not self.include_pattern(pattern, anchor=0):
+                    self.warn(("no files found matching '%s' " +
+                               "anywhere in distribution") %
+                              pattern)
+
+        elif action == 'global-exclude':
+            self.debug_print("global-exclude " + string.join(patterns))
+            for pattern in patterns:
+                if not self.exclude_pattern(pattern, anchor=0):
+                    self.warn(("no previously-included files matching '%s' " +
+                               "found anywhere in distribution") %
+                              pattern)
+
+        elif action == 'recursive-include':
+            self.debug_print("recursive-include %s %s" %
+                             (dir, string.join(patterns)))
+            for pattern in patterns:
+                if not self.include_pattern(pattern, prefix=dir):
+                    self.warn(("no files found matching '%s' " +
+                                "under directory '%s'") %
+                               (pattern, dir))
+
+        elif action == 'recursive-exclude':
+            self.debug_print("recursive-exclude %s %s" %
+                             (dir, string.join(patterns)))
+            for pattern in patterns:
+                if not self.exclude_pattern(pattern, prefix=dir):
+                    self.warn(("no previously-included files matching '%s' " +
+                               "found under directory '%s'") %
+                              (pattern, dir))
+
+        elif action == 'graft':
+            self.debug_print("graft " + dir_pattern)
+            # pattern=None + prefix selects everything under the directory
+            if not self.include_pattern(None, prefix=dir_pattern):
+                self.warn("no directories found matching '%s'" % dir_pattern)
+
+        elif action == 'prune':
+            self.debug_print("prune " + dir_pattern)
+            if not self.exclude_pattern(None, prefix=dir_pattern):
+                self.warn(("no previously-included directories found " +
+                           "matching '%s'") %
+                          dir_pattern)
+        else:
+            # _parse_template_line() already rejected unknown actions
+            raise DistutilsInternalError, \
+                  "this cannot happen: invalid action '%s'" % action
+
+    # process_template_line ()
+
+
+    # -- Filtering/selection methods -----------------------------------
+
+    def include_pattern (self, pattern,
+                         anchor=1, prefix=None, is_regex=0):
+        """Select strings (presumably filenames) from 'self.files' that
+        match 'pattern', a Unix-style wildcard (glob) pattern.  Patterns
+        are not quite the same as implemented by the 'fnmatch' module: '*'
+        and '?'  match non-special characters, where "special" is platform-
+        dependent: slash on Unix; colon, slash, and backslash on
+        DOS/Windows; and colon on Mac OS.
+
+        If 'anchor' is true (the default), then the pattern match is more
+        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
+        'anchor' is false, both of these will match.
+
+        If 'prefix' is supplied, then only filenames starting with 'prefix'
+        (itself a pattern) and ending with 'pattern', with anything in between
+        them, will match.  'anchor' is ignored in this case.
+
+        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+        'pattern' is assumed to be either a string containing a regex or a
+        regex object -- no translation is done, the regex is just compiled
+        and used as-is.
+
+        Selected strings will be added to self.files.
+
+        Return 1 if files are found.
+        """
+        files_found = 0
+        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+        self.debug_print("include_pattern: applying regex r'%s'" %
+                         pattern_re.pattern)
+
+        # delayed loading of allfiles list
+        if self.allfiles is None:
+            self.findall()
+
+        for name in self.allfiles:
+            if pattern_re.search(name):
+                self.debug_print(" adding " + name)
+                self.files.append(name)
+                files_found = 1
+
+        return files_found
+
+    # include_pattern ()
+
+
+    def exclude_pattern (self, pattern,
+                         anchor=1, prefix=None, is_regex=0):
+        """Remove strings (presumably filenames) from 'files' that match
+        'pattern'.  Other parameters are the same as for
+        'include_pattern()', above.
+        The list 'self.files' is modified in place.
+        Return 1 if files are found.
+        """
+        files_found = 0
+        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+        self.debug_print("exclude_pattern: applying regex r'%s'" %
+                         pattern_re.pattern)
+        # iterate backwards so in-place deletion is safe
+        for i in range(len(self.files)-1, -1, -1):
+            if pattern_re.search(self.files[i]):
+                self.debug_print(" removing " + self.files[i])
+                del self.files[i]
+                files_found = 1
+
+        return files_found
+
+    # exclude_pattern ()
+
+# class FileList
+
+
+# ----------------------------------------------------------------------
+# Utility functions
+
+def findall (dir = os.curdir):
+    """Find all files under 'dir' and return the list of full filenames
+    (relative to 'dir').
+
+    Directories are traversed iteratively with an explicit stack (no
+    recursion); only regular files are returned.
+    """
+    from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK
+
+    list = []
+    stack = [dir]
+    pop = stack.pop
+    push = stack.append
+
+    while stack:
+        dir = pop()
+        names = os.listdir(dir)
+
+        for name in names:
+            if dir != os.curdir:        # avoid the dreaded "./" syndrome
+                fullname = os.path.join(dir, name)
+            else:
+                fullname = name
+
+            # Avoid excess stat calls -- just one will do, thank you!
+            stat = os.stat(fullname)
+            mode = stat[ST_MODE]
+            # NOTE(review): os.stat() follows symlinks, so S_ISLNK(mode)
+            # looks like it can never be true here; os.lstat() would be
+            # needed to actually skip symlinked directories -- confirm
+            # intended behavior before changing vendored code.
+            if S_ISREG(mode):
+                list.append(fullname)
+            elif S_ISDIR(mode) and not S_ISLNK(mode):
+                push(fullname)
+
+    return list
+
+
+def glob_to_re (pattern):
+    """Translate a shell-like glob pattern to a regular expression; return
+    a string containing the regex.  Differs from 'fnmatch.translate()' in
+    that '*' does not match "special characters" (which are
+    platform-specific).
+    """
+    pattern_re = fnmatch.translate(pattern)
+
+    # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+    # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+    # and by extension they shouldn't match such "special characters" under
+    # any OS.  So change all non-escaped dots in the RE to match any
+    # character except the special characters.
+    # XXX currently the "special characters" are just slash -- i.e. this is
+    # Unix-only.
+    # The (^|[^\\]) group skips dots preceded by a backslash (escaped dots)
+    # and re-emits the preceding character via \1.
+    pattern_re = re.sub(r'(^|[^\\])\.', r'\1[^/]', pattern_re)
+    return pattern_re
+
+# glob_to_re ()
+
+
+def translate_pattern (pattern, anchor=1, prefix=None, is_regex=0):
+    """Translate a shell-like wildcard pattern to a compiled regular
+    expression.  Return the compiled regex.  If 'is_regex' true,
+    then 'pattern' is directly compiled to a regex (if it's a string)
+    or just returned as-is (assumes it's a regex object).
+
+    'anchor' and 'prefix' have the semantics described in
+    'FileList.include_pattern()'.
+    """
+    if is_regex:
+        if type(pattern) is StringType:
+            return re.compile(pattern)
+        else:
+            return pattern
+
+    # pattern may be None (e.g. for 'graft'/'prune'), meaning "match
+    # anything under the prefix"
+    if pattern:
+        pattern_re = glob_to_re(pattern)
+    else:
+        pattern_re = ''
+
+    if prefix is not None:
+        prefix_re = (glob_to_re(prefix))[0:-1] # ditch trailing $
+        pattern_re = "^" + os.path.join(prefix_re, ".*" + pattern_re)
+    else:                               # no prefix -- respect anchor flag
+        if anchor:
+            pattern_re = "^" + pattern_re
+
+    return re.compile(pattern_re)
+
+# translate_pattern ()
diff --git a/lib-python/2.2/distutils/msvccompiler.py b/lib-python/2.2/distutils/msvccompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/msvccompiler.py
@@ -0,0 +1,515 @@
+"""distutils.msvccompiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for the Microsoft Visual Studio."""
+
+
+# created 1999/08/19, Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+#   finding DevStudio (through the registry)
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import *
+from distutils.errors import \
+     DistutilsExecError, DistutilsPlatformError, \
+     CompileError, LibError, LinkError
+from distutils.ccompiler import \
+     CCompiler, gen_preprocess_options, gen_lib_options
+
+# Probe for a Windows-registry access module: prefer the stdlib '_winreg',
+# fall back to the win32all extensions, and degrade gracefully (registry
+# lookups simply return empty results) when neither is importable.
+_can_read_reg = 0
+try:
+    import _winreg
+
+    _can_read_reg = 1
+    hkey_mod = _winreg
+
+    # Normalize the API names so the rest of the module is backend-agnostic.
+    RegOpenKeyEx = _winreg.OpenKeyEx
+    RegEnumKey = _winreg.EnumKey
+    RegEnumValue = _winreg.EnumValue
+    RegError = _winreg.error
+
+except ImportError:
+    try:
+        import win32api
+        import win32con
+        _can_read_reg = 1
+        hkey_mod = win32con
+
+        RegOpenKeyEx = win32api.RegOpenKeyEx
+        RegEnumKey = win32api.RegEnumKey
+        RegEnumValue = win32api.RegEnumValue
+        RegError = win32api.error
+
+    except ImportError:
+        pass
+
+if _can_read_reg:
+    HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
+    HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
+    HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
+    HKEY_USERS = hkey_mod.HKEY_USERS
+
+
+
+def get_devstudio_versions ():
+    """Get list of devstudio versions from the Windows registry.  Return a
+       list of strings containing version numbers; the list will be
+       empty if we were unable to access the registry (eg. couldn't import
+       a registry-access module) or the appropriate registry keys weren't
+       found.  The list is sorted highest-version-first."""
+
+    if not _can_read_reg:
+        return []
+
+    K = 'Software\\Microsoft\\Devstudio'
+    L = []
+    # Check every registry hive; different installs register under
+    # different roots.
+    for base in (HKEY_CLASSES_ROOT,
+                 HKEY_LOCAL_MACHINE,
+                 HKEY_CURRENT_USER,
+                 HKEY_USERS):
+        try:
+            k = RegOpenKeyEx(base,K)
+            i = 0
+            while 1:
+                try:
+                    p = RegEnumKey(k,i)
+                    # keep only subkeys that look like version numbers
+                    if p[0] in '123456789' and p not in L:
+                        L.append(p)
+                except RegError:
+                    # RegEnumKey raises when the index runs past the end
+                    break
+                i = i + 1
+        except RegError:
+            pass
+    L.sort()
+    L.reverse()
+    return L
+
+# get_devstudio_versions ()
+
+
+def get_msvc_paths (path, version='6.0', platform='x86'):
+    """Get a list of devstudio directories (include, lib or path).  Return
+       a list of strings; will be empty list if unable to access the
+       registry or appropriate registry keys not found.
+
+       'path' names the directory category ('include', 'lib' or 'path');
+       the registry stores the 'lib' category as 'Library Dirs'."""
+
+    if not _can_read_reg:
+        return []
+
+    L = []
+    if path=='lib':
+        path= 'Library'
+    path = string.upper(path + ' Dirs')
+    K = ('Software\\Microsoft\\Devstudio\\%s\\' +
+         'Build System\\Components\\Platforms\\Win32 (%s)\\Directories') % \
+        (version,platform)
+    for base in (HKEY_CLASSES_ROOT,
+                 HKEY_LOCAL_MACHINE,
+                 HKEY_CURRENT_USER,
+                 HKEY_USERS):
+        try:
+            k = RegOpenKeyEx(base,K)
+            i = 0
+            while 1:
+                try:
+                    (p,v,t) = RegEnumValue(k,i)
+                    if string.upper(p) == path:
+                        # the registry value is a ';'-separated dir list
+                        V = string.split(v,';')
+                        for v in V:
+                            # convert Unicode registry values to the
+                            # Windows ANSI codepage where possible
+                            if hasattr(v, "encode"):
+                                try:
+                                    v = v.encode("mbcs")
+                                except UnicodeError:
+                                    pass
+                            if v == '' or v in L: continue
+                            L.append(v)
+                        break
+                    i = i + 1
+                except RegError:
+                    break
+        except RegError:
+            pass
+    return L
+
+# get_msvc_paths()
+
+
+def find_exe (exe, version_number):
+    """Try to find an MSVC executable program 'exe' (from version
+       'version_number' of MSVC) in several places: first, one of the MSVC
+       program search paths from the registry; next, the directories in the
+       PATH environment variable.  If any of those work, return an absolute
+       path that is known to exist.  If none of them work, just return the
+       original program name, 'exe'."""
+
+    for p in get_msvc_paths ('path', version_number):
+        fn = os.path.join (os.path.abspath(p), exe)
+        if os.path.isfile(fn):
+            return fn
+
+    # didn't find it; try existing path
+    # NOTE(review): raises KeyError if 'Path' is missing from os.environ --
+    # presumably always present on Windows, but confirm.
+    for p in string.split (os.environ['Path'],';'):
+        fn = os.path.join(os.path.abspath(p),exe)
+        if os.path.isfile(fn):
+            return fn
+
+    return exe                          # last desperate hope
+
+
+def set_path_env_var (name, version_number):
+    """Set environment variable 'name' to an MSVC path type value obtained
+       from 'get_msvc_paths()'.  This is equivalent to a SET command prior
+       to execution of spawned commands.
+
+       Does nothing if the registry lookup returns no directories."""
+
+    p = get_msvc_paths (name, version_number)
+    if p:
+        os.environ[name] = string.join (p,';')
+
+
+class MSVCCompiler (CCompiler) :
+    """Concrete class that implements an interface to Microsoft Visual C++,
+       as defined by the CCompiler abstract class."""
+
+    compiler_type = 'msvc'
+
+    # Just set this so CCompiler's constructor doesn't barf.  We currently
+    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+    # as it really isn't necessary for this sort of single-compiler class.
+    # Would be nice to have a consistent interface with UnixCCompiler,
+    # though, so it's worth thinking about.
+    executables = {}
+
+    # Private class data (need to distinguish C from C++ source for compiler)
+    _c_extensions = ['.c']
+    _cpp_extensions = ['.cc', '.cpp', '.cxx']
+    _rc_extensions = ['.rc']
+    _mc_extensions = ['.mc']
+
+    # Needed for the filename generation methods provided by the
+    # base class, CCompiler.
+    src_extensions = (_c_extensions + _cpp_extensions +
+                      _rc_extensions + _mc_extensions)
+    res_extension = '.res'
+    obj_extension = '.obj'
+    static_lib_extension = '.lib'
+    shared_lib_extension = '.dll'
+    static_lib_format = shared_lib_format = '%s%s'
+    exe_extension = '.exe'
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+        # Locate the MSVC toolchain via the registry (when readable) and
+        # extend lib/include/path environment variables accordingly; fall
+        # back to bare executable names and hope they're on PATH.
+
+        CCompiler.__init__ (self, verbose, dry_run, force)
+        versions = get_devstudio_versions ()
+
+        if versions:
+            version = versions[0]  # highest version
+
+            self.cc   = find_exe("cl.exe", version)
+            self.linker = find_exe("link.exe", version)
+            self.lib  = find_exe("lib.exe", version)
+            self.rc   = find_exe("rc.exe", version)     # resource compiler
+            self.mc   = find_exe("mc.exe", version)     # message compiler
+            set_path_env_var ('lib', version)
+            set_path_env_var ('include', version)
+            path=get_msvc_paths('path', version)
+            try:
+                # prepend MSVC dirs to the existing search path
+                for p in string.split(os.environ['path'],';'):
+                    path.append(p)
+            except KeyError:
+                pass
+            os.environ['path'] = string.join(path,';')
+        else:
+            # devstudio not found in the registry
+            self.cc = "cl.exe"
+            self.linker = "link.exe"
+            self.lib = "lib.exe"
+            self.rc = "rc.exe"
+            self.mc = "mc.exe"
+
+        self.preprocess_options = None
+        self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ]
+        self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
+                                      '/Z7', '/D_DEBUG']
+
+        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
+        self.ldflags_shared_debug = [
+            '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
+            ]
+        self.ldflags_static = [ '/nologo']
+
+
+    # -- Worker methods ------------------------------------------------
+
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        # Copied from ccompiler.py, extended to return .res as 'object'-file
+        # for .rc input file
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            (base, ext) = os.path.splitext (src_name)
+            if ext not in self.src_extensions:
+                # Better to raise an exception instead of silently continuing
+                # and later complain about sources and targets having
+                # different lengths
+                raise CompileError ("Don't know how to compile %s" % src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            # .rc and .mc sources both produce a .res; everything else
+            # produces a .obj
+            if ext in self._rc_extensions:
+                obj_names.append (os.path.join (output_dir,
+                                                base + self.res_extension))
+            elif ext in self._mc_extensions:
+                obj_names.append (os.path.join (output_dir,
+                                                base + self.res_extension))
+            else:
+                obj_names.append (os.path.join (output_dir,
+                                                base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        # Compile each source to its object file, dispatching on the file
+        # extension: C/C++ go through cl.exe, .rc through rc.exe, and .mc
+        # through mc.exe followed by rc.exe.  Raises CompileError on any
+        # spawned-tool failure; returns the list of object filenames.
+
+        (output_dir, macros, include_dirs) = \
+            self._fix_compile_args (output_dir, macros, include_dirs)
+        (objects, skip_sources) = self._prep_compile (sources, output_dir)
+
+        if extra_postargs is None:
+            extra_postargs = []
+
+        pp_opts = gen_preprocess_options (macros, include_dirs)
+        compile_opts = extra_preargs or []
+        compile_opts.append ('/c')
+        if debug:
+            compile_opts.extend (self.compile_options_debug)
+        else:
+            compile_opts.extend (self.compile_options)
+
+        for i in range (len (sources)):
+            src = sources[i] ; obj = objects[i]
+            ext = (os.path.splitext (src))[1]
+
+            if skip_sources[src]:
+                self.announce ("skipping %s (%s up-to-date)" % (src, obj))
+            else:
+                self.mkpath (os.path.dirname (obj))
+
+                if ext in self._c_extensions:
+                    input_opt = "/Tc" + src
+                elif ext in self._cpp_extensions:
+                    input_opt = "/Tp" + src
+                elif ext in self._rc_extensions:
+                    # compile .RC to .RES file
+                    # (handled entirely here; 'continue' skips the cl.exe
+                    # invocation at the bottom of the loop)
+                    input_opt = src
+                    output_opt = "/fo" + obj
+                    try:
+                        self.spawn ([self.rc] +
+                                    [output_opt] + [input_opt])
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+                    continue
+                elif ext in self._mc_extensions:
+
+                    # Compile .MC to .RC file to .RES file.
+                    #   * '-h dir' specifies the directory for the
+                    #     generated include file
+                    #   * '-r dir' specifies the target directory of the
+                    #     generated RC file and the binary message resource
+                    #     it includes
+                    #
+                    # For now (since there are no options to change this),
+                    # we use the source-directory for the include file and
+                    # the build directory for the RC file and message
+                    # resources. This works at least for win32all.
+
+                    h_dir = os.path.dirname (src)
+                    rc_dir = os.path.dirname (obj)
+                    try:
+                        # first compile .MC to .RC and .H file
+                        self.spawn ([self.mc] +
+                                    ['-h', h_dir, '-r', rc_dir] + [src])
+                        base, _ = os.path.splitext (os.path.basename (src))
+                        rc_file = os.path.join (rc_dir, base + '.rc')
+                        # then compile .RC to .RES file
+                        self.spawn ([self.rc] +
+                                    ["/fo" + obj] + [rc_file])
+
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+                    continue
+                else:
+                    # how to handle this file?
+                    raise CompileError (
+                        "Don't know how to compile %s to %s" % \
+                        (src, obj))
+
+                # common cl.exe invocation for the C/C++ branches above
+                output_opt = "/Fo" + obj
+                try:
+                    self.spawn ([self.cc] + compile_opts + pp_opts +
+                                [input_opt, output_opt] +
+                                extra_postargs)
+                except DistutilsExecError, msg:
+                    raise CompileError, msg
+
+        return objects
+
+    # compile ()
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0,
+                           extra_preargs=None,
+                           extra_postargs=None):
+        # Run lib.exe to archive 'objects' into a static library, skipping
+        # the step when the library is already up to date.  Raises LibError
+        # on failure.
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        output_filename = \
+            self.library_filename (output_libname, output_dir=output_dir)
+
+        if self._need_link (objects, output_filename):
+            lib_args = objects + ['/OUT:' + output_filename]
+            if debug:
+                pass                    # XXX what goes here?
+            if extra_preargs:
+                lib_args[:0] = extra_preargs
+            if extra_postargs:
+                lib_args.extend (extra_postargs)
+            try:
+                self.spawn ([self.lib] + lib_args)
+            except DistutilsExecError, msg:
+                raise LibError, msg
+
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # create_static_lib ()
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        # Run link.exe to produce an executable or DLL from 'objects',
+        # skipping the step when the output is already up to date.  Raises
+        # LinkError on failure.
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        (libraries, library_dirs, runtime_library_dirs) = \
+            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+        if runtime_library_dirs:
+            # MSVC has no rpath equivalent (see runtime_library_dir_option)
+            self.warn ("I don't know what to do with 'runtime_library_dirs': "
+                       + str (runtime_library_dirs))
+
+        lib_opts = gen_lib_options (self,
+                                    library_dirs, runtime_library_dirs,
+                                    libraries)
+        if output_dir is not None:
+            output_filename = os.path.join (output_dir, output_filename)
+
+        if self._need_link (objects, output_filename):
+
+            # executables reuse the shared-lib flags minus the leading /DLL
+            if target_desc == CCompiler.EXECUTABLE:
+                if debug:
+                    ldflags = self.ldflags_shared_debug[1:]
+                else:
+                    ldflags = self.ldflags_shared[1:]
+            else:
+                if debug:
+                    ldflags = self.ldflags_shared_debug
+                else:
+                    ldflags = self.ldflags_shared
+
+            export_opts = []
+            for sym in (export_symbols or []):
+                export_opts.append("/EXPORT:" + sym)
+
+            ld_args = (ldflags + lib_opts + export_opts +
+                       objects + ['/OUT:' + output_filename])
+
+            # The MSVC linker generates .lib and .exp files, which cannot be
+            # suppressed by any linker switches. The .lib files may even be
+            # needed! Make sure they are generated in the temporary build
+            # directory. Since they have different names for debug and release
+            # builds, they can go into the same directory.
+            if export_symbols is not None:
+                (dll_name, dll_ext) = os.path.splitext(
+                    os.path.basename(output_filename))
+                implib_file = os.path.join(
+                    os.path.dirname(objects[0]),
+                    self.library_filename(dll_name))
+                ld_args.append ('/IMPLIB:' + implib_file)
+
+            if extra_preargs:
+                ld_args[:0] = extra_preargs
+            if extra_postargs:
+                ld_args.extend(extra_postargs)
+
+            self.mkpath (os.path.dirname (output_filename))
+            try:
+                self.spawn ([self.linker] + ld_args)
+            except DistutilsExecError, msg:
+                raise LinkError, msg
+
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # link ()
+
+
+    # -- Miscellaneous methods -----------------------------------------
+    # These are all used by the 'gen_lib_options() function, in
+    # ccompiler.py.
+
+    def library_dir_option (self, dir):
+        return "/LIBPATH:" + dir
+
+    def runtime_library_dir_option (self, dir):
+        raise DistutilsPlatformError, \
+              "don't know how to set runtime library search path for MSVC++"
+
+    def library_option (self, lib):
+        return self.library_filename (lib)
+
+
+    def find_library_file (self, dirs, lib, debug=0):
+        # Prefer a debugging library if found (and requested), but deal
+        # with it if we don't have one.
+        if debug:
+            try_names = [lib + "_d", lib]
+        else:
+            try_names = [lib]
+        for dir in dirs:
+            for name in try_names:
+                libfile = os.path.join(dir, self.library_filename (name))
+                if os.path.exists(libfile):
+                    return libfile
+        else:
+            # Oops, didn't find it in *any* of 'dirs'
+            # (the for/else is redundant here -- the loop never breaks --
+            # but harmless: falling off the loop also returns None)
+            return None
+
+    # find_library_file ()
+
+# class MSVCCompiler
diff --git a/lib-python/2.2/distutils/mwerkscompiler.py b/lib-python/2.2/distutils/mwerkscompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/mwerkscompiler.py
@@ -0,0 +1,217 @@
+"""distutils.mwerkscompiler
+
+Contains MWerksCompiler, an implementation of the abstract CCompiler class
+for MetroWerks CodeWarrior on the Macintosh. Needs work to support CW on
+Windows."""
+
+import sys, os, string
+from types import *
+from distutils.errors import \
+     DistutilsExecError, DistutilsPlatformError, \
+     CompileError, LibError, LinkError
+from distutils.ccompiler import \
+     CCompiler, gen_preprocess_options, gen_lib_options
+import distutils.util
+import distutils.dir_util
+import mkcwproject
+
+class MWerksCompiler (CCompiler) :
+    """Concrete class that implements an interface to MetroWerks CodeWarrior,
+       as defined by the CCompiler abstract class."""
+
+    compiler_type = 'mwerks'
+
+    # Just set this so CCompiler's constructor doesn't barf.  We currently
+    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+    # as it really isn't necessary for this sort of single-compiler class.
+    # Would be nice to have a consistent interface with UnixCCompiler,
+    # though, so it's worth thinking about.
+    executables = {}
+
+    # Private class data (need to distinguish C from C++ source for compiler)
+    _c_extensions = ['.c']
+    _cpp_extensions = ['.cc', '.cpp', '.cxx']
+    _rc_extensions = ['.r']
+    _exp_extension = '.exp'
+
+    # Needed for the filename generation methods provided by the
+    # base class, CCompiler.
+    src_extensions = (_c_extensions + _cpp_extensions +
+                      _rc_extensions)
+    res_extension = '.rsrc'
+    obj_extension = '.obj' # Not used, really
+    static_lib_extension = '.lib'
+    shared_lib_extension = '.slb'
+    static_lib_format = shared_lib_format = '%s%s'
+    exe_extension = ''
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+
+        CCompiler.__init__ (self, verbose, dry_run, force)
+
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        (output_dir, macros, include_dirs) = \
+           self._fix_compile_args (output_dir, macros, include_dirs)
+        self.__sources = sources
+        self.__macros = macros
+        self.__include_dirs = include_dirs
+        # Don't need extra_preargs and extra_postargs for CW
+        return []
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        # First fixup.
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        (libraries, library_dirs, runtime_library_dirs) = \
+            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+        # First examine a couple of options for things that aren't implemented yet
+        if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
+            raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
+        if runtime_library_dirs:
+            raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
+        if extra_preargs or extra_postargs:
+            raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
+        if len(export_symbols) != 1:
+            raise DistutilsPlatformError, 'Need exactly one export symbol'
+        # Next there are various things for which we need absolute pathnames.
+        # This is because we (usually) create the project in a subdirectory of
+        # where we are now, and keeping the paths relative is too much work right
+        # now.
+        sources = map(self._filename_to_abs, self.__sources)
+        include_dirs = map(self._filename_to_abs, self.__include_dirs)
+        if objects:
+            objects = map(self._filename_to_abs, objects)
+        else:
+            objects = []
+        if build_temp:
+            build_temp = self._filename_to_abs(build_temp)
+        else:
+            build_temp = os.curdir()
+        if output_dir:
+            output_filename = os.path.join(output_dir, output_filename)
+        # The output filename needs special handling: splitting it into dir and
+        # filename part. Actually I'm not sure this is really needed, but it
+        # can't hurt.
+        output_filename = self._filename_to_abs(output_filename)
+        output_dir, output_filename = os.path.split(output_filename)
+        # Now we need the short names of a couple of things for putting them
+        # into the project.
+        if output_filename[-8:] == '.ppc.slb':
+            basename = output_filename[:-8]
+        elif output_filename[-11:] == '.carbon.slb':
+            basename = output_filename[:-11]
+        else:
+            basename = os.path.strip(output_filename)[0]
+        projectname = basename + '.mcp'
+        targetname = basename
+        xmlname = basename + '.xml'
+        exportname = basename + '.mcp.exp'
+        prefixname = 'mwerks_%s_config.h'%basename
+        # Create the directories we need
+        distutils.dir_util.mkpath(build_temp, self.verbose, self.dry_run)
+        distutils.dir_util.mkpath(output_dir, self.verbose, self.dry_run)
+        # And on to filling in the parameters for the project builder
+        settings = {}
+        settings['mac_exportname'] = exportname
+        settings['mac_outputdir'] = output_dir
+        settings['mac_dllname'] = output_filename
+        settings['mac_targetname'] = targetname
+        settings['sysprefix'] = sys.prefix
+        settings['mac_sysprefixtype'] = 'Absolute'
+        sourcefilenames = []
+        sourcefiledirs = []
+        for filename in sources + objects:
+            dirname, filename = os.path.split(filename)
+            sourcefilenames.append(filename)
+            if not dirname in sourcefiledirs:
+                sourcefiledirs.append(dirname)
+        settings['sources'] = sourcefilenames
+        settings['libraries'] = libraries
+        settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
+        if self.dry_run:
+            print 'CALLING LINKER IN', os.getcwd()
+            for key, value in settings.items():
+                print '%20.20s %s'%(key, value)
+            return
+        # Build the export file
+        exportfilename = os.path.join(build_temp, exportname)
+        if self.verbose:
+            print '\tCreate export file', exportfilename
+        fp = open(exportfilename, 'w')
+        fp.write('%s\n'%export_symbols[0])
+        fp.close()
+        # Generate the prefix file, if needed, and put it in the settings
+        if self.__macros:
+            prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
+            fp = open(prefixfilename, 'w')
+            fp.write('#include "mwerks_plugin_config.h"\n')
+            for name, value in self.__macros:
+                if value is None:
+                    fp.write('#define %s\n'%name)
+                else:
+                    fp.write('#define %s %s\n'%(name, value))
+            fp.close()
+            settings['prefixname'] = prefixname
+
+        # Build the XML file. We need the full pathname (only lateron, really)
+        # because we pass this pathname to CodeWarrior in an AppleEvent, and CW
+        # doesn't have a clue about our working directory.
+        xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
+        if self.verbose:
+            print '\tCreate XML file', xmlfilename
+        xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
+        xmlbuilder.generate()
+        xmldata = settings['tmp_projectxmldata']
+        fp = open(xmlfilename, 'w')
+        fp.write(xmldata)
+        fp.close()
+        # Generate the project. Again a full pathname.
+        projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
+        if self.verbose:
+            print '\tCreate project file', projectfilename
+        mkcwproject.makeproject(xmlfilename, projectfilename)
+        # And build it
+        if self.verbose:
+            print '\tBuild project'
+        mkcwproject.buildproject(projectfilename)
+
+    def _filename_to_abs(self, filename):
+        # Some filenames seem to be unix-like. Convert to Mac names.
+##        if '/' in filename and ':' in filename:
+##           raise DistutilsPlatformError, 'Filename may be Unix or Mac style: %s'%filename
+##        if '/' in filename:
+##           filename = macurl2path(filename)
+        filename = distutils.util.convert_path(filename)
+        if not os.path.isabs(filename):
+            curdir = os.getcwd()
+            filename = os.path.join(curdir, filename)
+        # Finally remove .. components
+        components = string.split(filename, ':')
+        for i in range(1, len(components)):
+            if components[i] == '..':
+                components[i] = ''
+        return string.join(components, ':')
diff --git a/lib-python/2.2/distutils/spawn.py b/lib-python/2.2/distutils/spawn.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/spawn.py
@@ -0,0 +1,169 @@
+"""distutils.spawn
+
+Provides the 'spawn()' function, a front-end to various platform-
+specific functions for launching another program in a sub-process.
+Also provides the 'find_executable()' to search the path for a given
+executable name.
+"""
+
+# created 1999/07/24, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from distutils.errors import *
+
+
+def spawn (cmd,
+           search_path=1,
+           verbose=0,
+           dry_run=0):
+
+    """Run another program, specified as a command list 'cmd', in a new
+    process.  'cmd' is just the argument list for the new process, ie.
+    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
+    There is no way to run a program with a name different from that of its
+    executable.
+
+    If 'search_path' is true (the default), the system's executable search
+    path will be used to find the program; otherwise, cmd[0] must be the
+    exact path to the executable.  If 'verbose' is true, a one-line summary
+    of the command will be printed before it is run.  If 'dry_run' is true,
+    the command will not actually be run.
+
+    Raise DistutilsExecError if running the program fails in any way; just
+    return on success.
+    """
+    # Dispatch on os.name; each platform helper raises DistutilsExecError
+    # itself on failure.
+    if os.name == 'posix':
+        _spawn_posix(cmd, search_path, verbose, dry_run)
+    elif os.name == 'nt':
+        _spawn_nt(cmd, search_path, verbose, dry_run)
+    else:
+        raise DistutilsPlatformError, \
+              "don't know how to spawn programs on platform '%s'" % os.name
+
+# spawn ()
+
+
+def _nt_quote_args (args):
+    """Quote command-line arguments for DOS/Windows conventions: just
+    wraps every argument which contains blanks in double quotes, and
+    returns a new argument list.
+    """
+
+    # XXX this doesn't seem very robust to me -- but if the Windows guys
+    # say it'll work, I guess I'll have to accept it.  (What if an arg
+    # contains quotes?  What other magic characters, other than spaces,
+    # have to be escaped?  Is there an escaping mechanism other than
+    # quoting?)
+
+    # NOTE(review): despite the docstring's "returns a new argument list",
+    # this modifies 'args' in place and returns the same list object.
+    for i in range(len(args)):
+        if string.find(args[i], ' ') != -1:
+            args[i] = '"%s"' % args[i]
+    return args
+
+def _spawn_nt (cmd,
+               search_path=1,
+               verbose=0,
+               dry_run=0):
+    """Run 'cmd' on Windows via os.spawnv; raise DistutilsExecError if the
+    program cannot be started or exits with a non-zero status."""
+
+    executable = cmd[0]
+    cmd = _nt_quote_args(cmd)
+    if search_path:
+        # either we find one or it stays the same
+        executable = find_executable(executable) or executable
+    if verbose:
+        print string.join([executable] + cmd[1:], ' ')
+    if not dry_run:
+        # spawn for NT requires a full path to the .exe
+        try:
+            rc = os.spawnv(os.P_WAIT, executable, cmd)
+        except OSError, exc:
+            # this seems to happen when the command isn't found
+            raise DistutilsExecError, \
+                  "command '%s' failed: %s" % (cmd[0], exc[-1])
+        if rc != 0:
+            # and this reflects the command running but failing
+            raise DistutilsExecError, \
+                  "command '%s' failed with exit status %d" % (cmd[0], rc)
+
+
+def _spawn_posix (cmd,
+                  search_path=1,
+                  verbose=0,
+                  dry_run=0):
+    """Run 'cmd' on POSIX systems via fork/exec and wait for it to finish;
+    raise DistutilsExecError if it is killed by a signal or exits with a
+    non-zero status."""
+
+    if verbose:
+        print string.join(cmd, ' ')
+    if dry_run:
+        return
+    # os.execvp searches PATH; os.execv requires an exact path in cmd[0].
+    exec_fn = search_path and os.execvp or os.execv
+
+    pid = os.fork()
+
+    if pid == 0:                        # in the child
+        try:
+            #print "cmd[0] =", cmd[0]
+            #print "cmd =", cmd
+            exec_fn(cmd[0], cmd)
+        except OSError, e:
+            sys.stderr.write("unable to execute %s: %s\n" %
+                             (cmd[0], e.strerror))
+            # _exit (not sys.exit) so the child does not run the parent's
+            # cleanup handlers / flush duplicated stdio buffers.
+            os._exit(1)
+
+        sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
+        os._exit(1)
+
+
+    else:                               # in the parent
+        # Loop until the child either exits or is terminated by a signal
+        # (ie. keep waiting if it's merely stopped)
+        while 1:
+            (pid, status) = os.waitpid(pid, 0)
+            if os.WIFSIGNALED(status):
+                raise DistutilsExecError, \
+                      "command '%s' terminated by signal %d" % \
+                      (cmd[0], os.WTERMSIG(status))
+
+            elif os.WIFEXITED(status):
+                exit_status = os.WEXITSTATUS(status)
+                if exit_status == 0:
+                    return              # hey, it succeeded!
+                else:
+                    raise DistutilsExecError, \
+                          "command '%s' failed with exit status %d" % \
+                          (cmd[0], exit_status)
+
+            elif os.WIFSTOPPED(status):
+                continue
+
+            else:
+                raise DistutilsExecError, \
+                      "unknown error executing '%s': termination status %d" % \
+                      (cmd[0], status)
+# _spawn_posix ()
+
+
+def find_executable(executable, path=None):
+    """Try to find 'executable' in the directories listed in 'path' (a
+    string listing directories separated by 'os.pathsep'; defaults to
+    os.environ['PATH']).  Returns the complete filename or None if not
+    found.
+    """
+    if path is None:
+        path = os.environ['PATH']
+    paths = string.split(path, os.pathsep)
+    (base, ext) = os.path.splitext(executable)
+    # NOTE(review): on win32 '.exe' is appended whenever the extension is
+    # anything other than '.exe' -- even if the name already has a
+    # different extension such as '.bat'; confirm this is intended.
+    if (sys.platform == 'win32') and (ext != '.exe'):
+        executable = executable + '.exe'
+    if not os.path.isfile(executable):
+        for p in paths:
+            f = os.path.join(p, executable)
+            if os.path.isfile(f):
+                # the file exists, we have a shot at spawn working
+                return f
+        return None
+    else:
+        # 'executable' already names an existing file (absolute/relative
+        # path); return it unchanged.
+        return executable
+
+# find_executable()
diff --git a/lib-python/2.2/distutils/sysconfig.py b/lib-python/2.2/distutils/sysconfig.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/sysconfig.py
@@ -0,0 +1,445 @@
+"""Provide access to Python's configuration information.  The specific
+configuration variables available depend heavily on the platform and
+configuration.  The values may be retrieved using
+get_config_var(name), and the list of variables is available via
+get_config_vars().keys().  Additional convenience functions are also
+available.
+
+Written by:   Fred L. Drake, Jr.
+Email:        <fdrake at acm.org>
+Initial date: 17-Dec-1998
+"""
+
+__revision__ = "$Id$"
+
+import os
+import re
+import string
+import sys
+
+from errors import DistutilsPlatformError
+
+# These are needed in a couple of spots, so just compute them once.
+PREFIX = os.path.normpath(sys.prefix)
+EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
+
+# python_build: (Boolean) if true, we're either building Python or
+# building an extension with an un-installed Python, so we use
+# different (hard-wired) directories.
+
+# Heuristic: we are running from a source build if "Modules/Setup" exists
+# next to the interpreter executable.
+argv0_path = os.path.dirname(os.path.abspath(sys.executable))
+landmark = os.path.join(argv0_path, "Modules", "Setup")
+
+python_build = os.path.isfile(landmark)
+
+# Remove the temporaries so they don't pollute the module namespace.
+del argv0_path, landmark
+
+# set_python_build() was present in 2.2 and 2.2.1; it's not needed
+# any more, but so 3rd party build scripts don't break, we leave
+# a do-nothing version:
+def set_python_build():
+    # Deliberate no-op: kept only so 2.2/2.2.1-era third-party build
+    # scripts that call it don't break (see comment above).
+    pass
+
+def get_python_inc(plat_specific=0, prefix=None):
+    """Return the directory containing installed Python header files.
+
+    If 'plat_specific' is false (the default), this is the path to the
+    non-platform-specific header files, i.e. Python.h and so on;
+    otherwise, this is the path to platform-specific header files
+    (namely pyconfig.h).
+
+    If 'prefix' is supplied, use it instead of sys.prefix or
+    sys.exec_prefix -- i.e., ignore 'plat_specific'.
+    """
+    if prefix is None:
+        prefix = plat_specific and EXEC_PREFIX or PREFIX
+    if os.name == "posix":
+        if python_build:
+            # Running from an uninstalled source build: headers live in
+            # the source tree, next to (or one level above) the executable.
+            base = os.path.dirname(os.path.abspath(sys.executable))
+            if plat_specific:
+                inc_dir = base
+            else:
+                inc_dir = os.path.join(base, "Include")
+                if not os.path.exists(inc_dir):
+                    inc_dir = os.path.join(os.path.dirname(base), "Include")
+            return inc_dir
+        return os.path.join(prefix, "include", "python" + sys.version[:3])
+    elif os.name == "nt":
+        return os.path.join(prefix, "include")
+    elif os.name == "mac":
+        return os.path.join(prefix, "Include")
+    else:
+        raise DistutilsPlatformError(
+            "I don't know where Python installs its C header files "
+            "on platform '%s'" % os.name)
+
+
+def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+    """Return the directory containing the Python library (standard or
+    site additions).
+
+    If 'plat_specific' is true, return the directory containing
+    platform-specific modules, i.e. any module from a non-pure-Python
+    module distribution; otherwise, return the platform-shared library
+    directory.  If 'standard_lib' is true, return the directory
+    containing standard Python library modules; otherwise, return the
+    directory for site-specific modules.
+
+    If 'prefix' is supplied, use it instead of sys.prefix or
+    sys.exec_prefix -- i.e., ignore 'plat_specific'.
+    """
+    if prefix is None:
+        prefix = plat_specific and EXEC_PREFIX or PREFIX
+
+    if os.name == "posix":
+        libpython = os.path.join(prefix,
+                                 "lib", "python" + sys.version[:3])
+        if standard_lib:
+            return libpython
+        else:
+            return os.path.join(libpython, "site-packages")
+
+    elif os.name == "nt":
+        if standard_lib:
+            return os.path.join(prefix, "Lib")
+        else:
+            if sys.version < "2.2":
+                return prefix
+            else:
+                return os.path.join(PREFIX, "Lib", "site-packages")
+
+    elif os.name == "mac":
+        if plat_specific:
+            if standard_lib:
+                return os.path.join(prefix, "Lib", "lib-dynload")
+            else:
+                return os.path.join(prefix, "Lib", "site-packages")
+        else:
+            if standard_lib:
+                return os.path.join(prefix, "Lib")
+            else:
+                return os.path.join(prefix, "Lib", "site-packages")
+    elif os.name == "java":
+        if standard_lib:
+            return os.path.join(prefix, "Lib")
+        else:
+            return os.path.join(prefix, "Lib", "site-packages")
+    else:
+        raise DistutilsPlatformError(
+            "I don't know where Python installs its library "
+            "on platform '%s'" % os.name)
+
+
+def customize_compiler(compiler):
+    """Do any platform-specific customization of a CCompiler instance.
+
+    Mainly needed on Unix, so we can plug in the information that
+    varies across Unices and is stored in Python's Makefile.
+    Compilers of any other type are left untouched.
+    """
+    if compiler.compiler_type == "unix":
+        # Pull the values recorded in Python's own build Makefile.
+        (cc, opt, ccshared, ldshared, so_ext) = \
+            get_config_vars('CC', 'OPT', 'CCSHARED', 'LDSHARED', 'SO')
+
+        cc_cmd = cc + ' ' + opt
+        compiler.set_executables(
+            preprocessor=cc + " -E",    # not always!
+            compiler=cc_cmd,
+            compiler_so=cc_cmd + ' ' + ccshared,
+            linker_so=ldshared,
+            linker_exe=cc)
+
+        compiler.shared_lib_extension = so_ext
+
+
+def get_config_h_filename():
+    """Return full pathname of installed pyconfig.h file."""
+    if python_build:
+        # Source build: pyconfig.h lives in the current directory.
+        inc_dir = os.curdir
+    else:
+        inc_dir = get_python_inc(plat_specific=1)
+    if sys.version < '2.2':
+        config_h = 'config.h'
+    else:
+        # The name of the config.h file changed in 2.2
+        config_h = 'pyconfig.h'
+    return os.path.join(inc_dir, config_h)
+
+
+def get_makefile_filename():
+    """Return full pathname of installed Makefile from the Python build."""
+    if python_build:
+        # Source build: the Makefile sits next to the executable.
+        return os.path.join(os.path.dirname(sys.executable), "Makefile")
+    lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
+    return os.path.join(lib_dir, "config", "Makefile")
+
+
+def parse_config_h(fp, g=None):
+    """Parse a config.h-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    if g is None:
+        g = {}
+    # "#define NAME value" lines...
+    define_rx = re.compile("#define ([A-Z][A-Z0-9_]+) (.*)\n")
+    # ...and "/* #undef NAME */" lines, which map NAME to 0.
+    undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
+    #
+    while 1:
+        line = fp.readline()
+        if not line:
+            break
+        m = define_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            # Store integer values as ints, everything else as strings.
+            try: v = string.atoi(v)
+            except ValueError: pass
+            g[n] = v
+        else:
+            m = undef_rx.match(line)
+            if m:
+                g[m.group(1)] = 0
+    return g
+
+
+# Regexes needed for parsing Makefile (and similar syntaxes,
+# like old-style Setup files).
+# NOTE(review): _variable_rx requires variable names at least two
+# characters long ([a-zA-Z][a-zA-Z0-9_]+); single-letter Makefile
+# variables are not matched -- confirm this is intended.
+_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+# $(VAR) and ${VAR} references, respectively.
+_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
+
+def parse_makefile(fn, g=None):
+    """Parse a Makefile-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    from distutils.text_file import TextFile
+    # TextFile handles comment stripping, blank lines and backslash
+    # continuations for us.
+    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
+
+    if g is None:
+        g = {}
+    done = {}       # fully-resolved variables
+    notdone = {}    # variables whose values still contain $(...) refs
+
+    # First pass: collect all "NAME = value" assignments.
+    while 1:
+        line = fp.readline()
+        if line is None:                # eof
+            break
+        m = _variable_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            v = string.strip(v)
+            if "$" in v:
+                notdone[n] = v
+            else:
+                try: v = string.atoi(v)
+                except ValueError: pass
+                done[n] = v
+
+    # do variable interpolation here
+    # Second pass: repeatedly substitute $(VAR)/${VAR} references until
+    # everything is resolved; references to unknown variables expand to "".
+    # (NOTE: mutating 'notdone' while iterating over .keys() is safe in
+    # Python 2 because keys() returns a list snapshot.)
+    while notdone:
+        for name in notdone.keys():
+            value = notdone[name]
+            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+            if m:
+                n = m.group(1)
+                if done.has_key(n):
+                    after = value[m.end():]
+                    value = value[:m.start()] + str(done[n]) + after
+                    if "$" in after:
+                        notdone[name] = value
+                    else:
+                        try: value = string.atoi(value)
+                        except ValueError:
+                            done[name] = string.strip(value)
+                        else:
+                            done[name] = value
+                        del notdone[name]
+                elif notdone.has_key(n):
+                    # get it on a subsequent round
+                    pass
+                else:
+                    # Unknown variable: expands to the empty string.
+                    done[n] = ""
+                    after = value[m.end():]
+                    value = value[:m.start()] + after
+                    if "$" in after:
+                        notdone[name] = value
+                    else:
+                        try: value = string.atoi(value)
+                        except ValueError:
+                            done[name] = string.strip(value)
+                        else:
+                            done[name] = value
+                        del notdone[name]
+            else:
+                # bogus variable reference; just drop it since we can't deal
+                del notdone[name]
+
+    fp.close()
+
+    # save the results in the global dictionary
+    g.update(done)
+    return g
+
+
+def expand_makefile_vars(s, vars):
+    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
+    'string' according to 'vars' (a dictionary mapping variable names to
+    values).  Variables not present in 'vars' are silently expanded to the
+    empty string.  The variable values in 'vars' should not contain further
+    variable expansions; if 'vars' is the output of 'parse_makefile()',
+    you're fine.  Returns a variable-expanded version of 's'.
+    """
+
+    # This algorithm does multiple expansion, so if vars['foo'] contains
+    # "${bar}", it will expand ${foo} to ${bar}, and then expand
+    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
+    # 'parse_makefile()', which takes care of such expansions eagerly,
+    # according to make's variable expansion semantics.
+
+    while 1:
+        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
+        if m:
+            name = m.group(1)
+            (beg, end) = m.span()
+            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
+        else:
+            break
+    return s
+
+
+_config_vars = None
+
+def _init_posix():
+    """Initialize the module as appropriate for POSIX systems."""
+    g = {}
+    # load the installed Makefile:
+    try:
+        filename = get_makefile_filename()
+        parse_makefile(filename, g)
+    except IOError, msg:
+        # ('filename' is always bound here: get_makefile_filename() does
+        # no I/O itself, so the IOError comes from parse_makefile.)
+        my_msg = "invalid Python installation: unable to open %s" % filename
+        if hasattr(msg, "strerror"):
+            my_msg = my_msg + " (%s)" % msg.strerror
+
+        raise DistutilsPlatformError(my_msg)
+
+
+    # On AIX, there are wrong paths to the linker scripts in the Makefile
+    # -- these paths are relative to the Python source, but when installed
+    # the scripts are in another directory.
+    if python_build:
+        # Source build: use the build-time link command instead.
+        g['LDSHARED'] = g['BLDSHARED']
+
+    elif sys.version < '2.1':
+        # The following two branches are for 1.5.2 compatibility.
+        if sys.platform == 'aix4':          # what about AIX 3.x ?
+            # Linker script is in the config directory, not in Modules as the
+            # Makefile says.
+            python_lib = get_python_lib(standard_lib=1)
+            ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
+            python_exp = os.path.join(python_lib, 'config', 'python.exp')
+
+            g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
+
+        elif sys.platform == 'beos':
+            # Linker script is in the config directory.  In the Makefile it is
+            # relative to the srcdir, which after installation no longer makes
+            # sense.
+            python_lib = get_python_lib(standard_lib=1)
+            linkerscript_name = os.path.basename(string.split(g['LDSHARED'])[0])
+            linkerscript = os.path.join(python_lib, 'config', linkerscript_name)
+
+            # XXX this isn't the right place to do this: adding the Python
+            # library to the link, if needed, should be in the "build_ext"
+            # command.  (It's also needed for non-MS compilers on Windows, and
+            # it's taken care of for them by the 'build_ext.get_libraries()'
+            # method.)
+            g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
+                             (linkerscript, PREFIX, sys.version[0:3]))
+
+    global _config_vars
+    _config_vars = g
+
+
+def _init_nt():
+    """Initialize the module as appropriate for NT"""
+    g = {}
+    # set basic install directories
+    g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
+    g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
+
+    # XXX hmmm.. a normal install puts include files here
+    g['INCLUDEPY'] = get_python_inc(plat_specific=0)
+
+    # Extension-module suffix and executable suffix on Windows.
+    g['SO'] = '.pyd'
+    g['EXE'] = ".exe"
+
+    global _config_vars
+    _config_vars = g
+
+
+def _init_mac():
+    """Initialize the module as appropriate for Macintosh systems"""
+    g = {}
+    # set basic install directories
+    g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
+    g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
+
+    # XXX hmmm.. a normal install puts include files here
+    g['INCLUDEPY'] = get_python_inc(plat_specific=0)
+
+    # Shared-library suffix depends on the MacOS runtime model
+    # (classic PPC vs. Carbon).
+    import MacOS
+    if not hasattr(MacOS, 'runtimemodel'):
+        g['SO'] = '.ppc.slb'
+    else:
+        g['SO'] = '.%s.slb' % MacOS.runtimemodel
+
+    # XXX are these used anywhere?
+    g['install_lib'] = os.path.join(EXEC_PREFIX, "Lib")
+    g['install_platlib'] = os.path.join(EXEC_PREFIX, "Mac", "Lib")
+
+    global _config_vars
+    _config_vars = g
+
+
+def get_config_vars(*args):
+    """With no arguments, return a dictionary of all configuration
+    variables relevant for the current platform.  Generally this includes
+    everything needed to build extensions and install both pure modules and
+    extensions.  On Unix, this means every variable defined in Python's
+    installed Makefile; on Windows and Mac OS it's a much smaller set.
+
+    With arguments, return a list of values that result from looking up
+    each argument in the configuration variable dictionary.
+    """
+    global _config_vars
+    if _config_vars is None:
+        # Lazily initialize the cache via the platform-specific helper
+        # (_init_posix, _init_nt, _init_mac, ...), if one exists.
+        func = globals().get("_init_" + os.name)
+        if func:
+            func()
+        else:
+            _config_vars = {}
+
+        # Normalized versions of prefix and exec_prefix are handy to have;
+        # in fact, these are the standard versions used most places in the
+        # Distutils.
+        _config_vars['prefix'] = PREFIX
+        _config_vars['exec_prefix'] = EXEC_PREFIX
+
+    if args:
+        vals = []
+        for name in args:
+            vals.append(_config_vars.get(name))
+        return vals
+    else:
+        # NOTE: callers get the cache itself, not a copy -- mutating the
+        # returned dict affects future calls.
+        return _config_vars
+
+def get_config_var(name):
+    """Return the value of a single variable using the dictionary
+    returned by 'get_config_vars()'.  Equivalent to
+    get_config_vars().get(name)
+    """
+    return get_config_vars().get(name)
diff --git a/lib-python/2.2/distutils/text_file.py b/lib-python/2.2/distutils/text_file.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/text_file.py
@@ -0,0 +1,384 @@
+"""text_file
+
+provides the TextFile class, which gives an interface to text files
+that (optionally) takes care of stripping comments, ignoring blank
+lines, and joining lines with backslashes."""
+
+# created 1999/01/12, Greg Ward
+
+__revision__ = "$Id$"
+
+from types import *
+import sys, os, string
+
+
+class TextFile:
+
+    """Provides a file-like object that takes care of all the things you
+       commonly want to do when processing a text file that has some
+       line-by-line syntax: strip comments (as long as "#" is your
+       comment character), skip blank lines, join adjacent lines by
+       escaping the newline (ie. backslash at end of line), strip
+       leading and/or trailing whitespace.  All of these are optional
+       and independently controllable.
+
+       Provides a 'warn()' method so you can generate warning messages that
+       report physical line number, even if the logical line in question
+       spans multiple physical lines.  Also provides 'unreadline()' for
+       implementing line-at-a-time lookahead.
+
+       Constructor is called as:
+
+           TextFile (filename=None, file=None, **options)
+
+       It bombs (RuntimeError) if both 'filename' and 'file' are None;
+       'filename' should be a string, and 'file' a file object (or
+       something that provides 'readline()' and 'close()' methods).  It is
+       recommended that you supply at least 'filename', so that TextFile
+       can include it in warning messages.  If 'file' is not supplied,
+       TextFile creates its own using the 'open()' builtin.
+
+       The options are all boolean, and affect the value returned by
+       'readline()':
+         strip_comments [default: true]
+           strip from "#" to end-of-line, as well as any whitespace
+           leading up to the "#" -- unless it is escaped by a backslash
+         lstrip_ws [default: false]
+           strip leading whitespace from each line before returning it
+         rstrip_ws [default: true]
+           strip trailing whitespace (including line terminator!) from
+           each line before returning it
+         skip_blanks [default: true]
+           skip lines that are empty *after* stripping comments and
+           whitespace.  (If both lstrip_ws and rstrip_ws are false,
+           then some lines may consist of solely whitespace: these will
+           *not* be skipped, even if 'skip_blanks' is true.)
+         join_lines [default: false]
+           if a backslash is the last non-newline character on a line
+           after stripping comments and whitespace, join the following line
+           to it to form one "logical line"; if N consecutive lines end
+           with a backslash, then N+1 physical lines will be joined to
+           form one logical line.
+         collapse_join [default: false]
+           strip leading whitespace from lines that are joined to their
+           predecessor; only matters if (join_lines and not lstrip_ws)
+
+       Note that since 'rstrip_ws' can strip the trailing newline, the
+       semantics of 'readline()' must differ from those of the builtin file
+       object's 'readline()' method!  In particular, 'readline()' returns
+       None for end-of-file: an empty string might just be a blank line (or
+       an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
+       not."""
+
+    # Fallback value for each option; every key here also becomes an
+    # instance attribute in __init__ (possibly overridden by **options).
+    default_options = { 'strip_comments': 1,
+                        'skip_blanks':    1,
+                        'lstrip_ws':      0,
+                        'rstrip_ws':      1,
+                        'join_lines':     0,
+                        'collapse_join':  0,
+                      }
+
+    def __init__ (self, filename=None, file=None, **options):
+        """Construct a new TextFile object.  At least one of 'filename'
+           (a string) and 'file' (a file-like object) must be supplied.
+           The keyword argument options are described above and affect
+           the values returned by 'readline()'."""
+
+        if filename is None and file is None:
+            raise RuntimeError, \
+                  "you must supply either or both of 'filename' and 'file'"
+
+        # set values for all options -- either from client option hash
+        # or fallback to default_options
+        for opt in self.default_options.keys():
+            if options.has_key (opt):
+                setattr (self, opt, options[opt])
+
+            else:
+                setattr (self, opt, self.default_options[opt])
+
+        # sanity check client option hash
+        for opt in options.keys():
+            if not self.default_options.has_key (opt):
+                raise KeyError, "invalid TextFile option '%s'" % opt
+
+        if file is None:
+            self.open (filename)
+        else:
+            self.filename = filename
+            self.file = file
+            self.current_line = 0       # assuming that file is at BOF!
+
+        # 'linebuf' is a stack of lines that will be emptied before we
+        # actually read from the file; it's only populated by an
+        # 'unreadline()' operation
+        self.linebuf = []
+
+
+    def open (self, filename):
+        """Open a new file named 'filename'.  This overrides both the
+           'filename' and 'file' arguments to the constructor."""
+
+        self.filename = filename
+        self.file = open (self.filename, 'r')
+        self.current_line = 0
+
+
+    def close (self):
+        """Close the current file and forget everything we know about it
+           (filename, current line number)."""
+
+        self.file.close ()
+        self.file = None
+        self.filename = None
+        self.current_line = None
+
+
+    def gen_error (self, msg, line=None):
+        # Build a "filename, line N: msg" string.  'line' may be an int
+        # (single physical line) or a [start, end] list/tuple when a
+        # logical line spans several physical lines (see readline()).
+        outmsg = []
+        if line is None:
+            line = self.current_line
+        outmsg.append(self.filename + ", ")
+        if type (line) in (ListType, TupleType):
+            outmsg.append("lines %d-%d: " % tuple (line))
+        else:
+            outmsg.append("line %d: " % line)
+        outmsg.append(str(msg))
+        return string.join(outmsg, "")
+
+
+    def error (self, msg, line=None):
+        # Like warn(), but fatal: raises ValueError with the same
+        # file/line-annotated message.
+        raise ValueError, "error: " + self.gen_error(msg, line)
+
+    def warn (self, msg, line=None):
+        """Print (to stderr) a warning message tied to the current logical
+           line in the current file.  If the current logical line in the
+           file spans multiple physical lines, the warning refers to the
+           whole range, eg. "lines 3-5".  If 'line' supplied, it overrides
+           the current line number; it may be a list or tuple to indicate a
+           range of physical lines, or an integer for a single physical
+           line."""
+        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
+
+
+    def readline (self):
+        """Read and return a single logical line from the current file (or
+           from an internal buffer if lines have previously been "unread"
+           with 'unreadline()').  If the 'join_lines' option is true, this
+           may involve reading multiple physical lines concatenated into a
+           single string.  Updates the current line number, so calling
+           'warn()' after 'readline()' emits a warning about the physical
+           line(s) just read.  Returns None on end-of-file, since the empty
+           string can occur if 'rstrip_ws' is true but 'skip_blanks' is
+           not."""
+
+        # If any "unread" lines waiting in 'linebuf', return the top
+        # one.  (We don't actually buffer read-ahead data -- lines only
+        # get put in 'linebuf' if the client explicitly does an
+        # 'unreadline()'.
+        if self.linebuf:
+            line = self.linebuf[-1]
+            del self.linebuf[-1]
+            return line
+
+        # Accumulates backslash-continued physical lines into one logical
+        # line when 'join_lines' is on; empty string means "not joining".
+        buildup_line = ''
+
+        while 1:
+            # read the line, make it None if EOF
+            line = self.file.readline()
+            if line == '': line = None
+
+            if self.strip_comments and line:
+
+                # Look for the first "#" in the line.  If none, never
+                # mind.  If we find one and it's the first character, or
+                # is not preceded by "\", then it starts a comment --
+                # strip the comment, strip whitespace before it, and
+                # carry on.  Otherwise, it's just an escaped "#", so
+                # unescape it (and any other escaped "#"'s that might be
+                # lurking in there) and otherwise leave the line alone.
+
+                pos = string.find (line, "#")
+                if pos == -1:           # no "#" -- no comments
+                    pass
+
+                # It's definitely a comment -- either "#" is the first
+                # character, or it's elsewhere and unescaped.
+                elif pos == 0 or line[pos-1] != "\\":
+                    # Have to preserve the trailing newline, because it's
+                    # the job of a later step (rstrip_ws) to remove it --
+                    # and if rstrip_ws is false, we'd better preserve it!
+                    # (NB. this means that if the final line is all comment
+                    # and has no trailing newline, we will think that it's
+                    # EOF; I think that's OK.)
+                    eol = (line[-1] == '\n') and '\n' or ''
+                    line = line[0:pos] + eol
+
+                    # If all that's left is whitespace, then skip line
+                    # *now*, before we try to join it to 'buildup_line' --
+                    # that way constructs like
+                    #   hello \\
+                    #   # comment that should be ignored
+                    #   there
+                    # result in "hello there".
+                    if string.strip(line) == "":
+                        continue
+
+                else:                   # it's an escaped "#"
+                    line = string.replace (line, "\\#", "#")
+
+
+            # did previous line end with a backslash? then accumulate
+            if self.join_lines and buildup_line:
+                # oops: end of file
+                if line is None:
+                    self.warn ("continuation line immediately precedes "
+                               "end-of-file")
+                    return buildup_line
+
+                if self.collapse_join:
+                    line = string.lstrip (line)
+                line = buildup_line + line
+
+                # careful: pay attention to line number when incrementing it
+                # (current_line turns into a [start, end] list while a
+                # logical line is being accumulated)
+                if type (self.current_line) is ListType:
+                    self.current_line[1] = self.current_line[1] + 1
+                else:
+                    self.current_line = [self.current_line,
+                                         self.current_line+1]
+            # just an ordinary line, read it as usual
+            else:
+                if line is None:        # eof
+                    return None
+
+                # still have to be careful about incrementing the line number!
+                if type (self.current_line) is ListType:
+                    self.current_line = self.current_line[1] + 1
+                else:
+                    self.current_line = self.current_line + 1
+
+
+            # strip whitespace however the client wants (leading and
+            # trailing, or one or the other, or neither)
+            if self.lstrip_ws and self.rstrip_ws:
+                line = string.strip (line)
+            elif self.lstrip_ws:
+                line = string.lstrip (line)
+            elif self.rstrip_ws:
+                line = string.rstrip (line)
+
+            # blank line (whether we rstrip'ed or not)? skip to next line
+            # if appropriate
+            if (line == '' or line == '\n') and self.skip_blanks:
+                continue
+
+            if self.join_lines:
+                # bare backslash at end (newline already stripped)
+                if line[-1] == '\\':
+                    buildup_line = line[:-1]
+                    continue
+
+                # backslash-newline: keep the newline on the buildup so
+                # physical line boundaries survive when not collapsing
+                if line[-2:] == '\\\n':
+                    buildup_line = line[0:-2] + '\n'
+                    continue
+
+            # well, I guess there's some actual content there: return it
+            return line
+
+    # readline ()
+
+
+    def readlines (self):
+        """Read and return the list of all logical lines remaining in the
+           current file."""
+
+        lines = []
+        while 1:
+            line = self.readline()
+            if line is None:
+                return lines
+            lines.append (line)
+
+
+    def unreadline (self, line):
+        """Push 'line' (a string) onto an internal buffer that will be
+           checked by future 'readline()' calls.  Handy for implementing
+           a parser with line-at-a-time lookahead."""
+
+        self.linebuf.append (line)
+
+
+if __name__ == "__main__":
+    # Self-test: write a small sample file to disk, then read it back
+    # through TextFile under the various option combinations and compare
+    # against the hand-computed expected results below.
+    test_data = """# test file
+
+line 3 \\
+# intervening comment
+  continues on next line
+"""
+    # result 1: no fancy options
+    result1 = map (lambda x: x + "\n", string.split (test_data, "\n")[0:-1])
+
+    # result 2: just strip comments
+    result2 = ["\n",
+               "line 3 \\\n",
+               "  continues on next line\n"]
+
+    # result 3: just strip blank lines
+    result3 = ["# test file\n",
+               "line 3 \\\n",
+               "# intervening comment\n",
+               "  continues on next line\n"]
+
+    # result 4: default, strip comments, blank lines, and trailing whitespace
+    result4 = ["line 3 \\",
+               "  continues on next line"]
+
+    # result 5: strip comments and blanks, plus join lines (but don't
+    # "collapse" joined lines)
+    result5 = ["line 3   continues on next line"]
+
+    # result 6: strip comments and blanks, plus join lines (and
+    # "collapse" joined lines)
+    result6 = ["line 3 continues on next line"]
+
+    def test_input (count, description, file, expected_result):
+        # Read all logical lines from 'file' and report TAP-style
+        # "ok"/"not ok" against 'expected_result'.
+        result = file.readlines ()
+        # result = string.join (result, '')
+        if result == expected_result:
+            print "ok %d (%s)" % (count, description)
+        else:
+            print "not ok %d (%s):" % (count, description)
+            print "** expected:"
+            print expected_result
+            print "** received:"
+            print result
+
+
+    filename = "test.txt"
+    out_file = open (filename, "w")
+    out_file.write (test_data)
+    out_file.close ()
+
+    in_file = TextFile (filename, strip_comments=0, skip_blanks=0,
+                        lstrip_ws=0, rstrip_ws=0)
+    test_input (1, "no processing", in_file, result1)
+
+    in_file = TextFile (filename, strip_comments=1, skip_blanks=0,
+                        lstrip_ws=0, rstrip_ws=0)
+    test_input (2, "strip comments", in_file, result2)
+
+    in_file = TextFile (filename, strip_comments=0, skip_blanks=1,
+                        lstrip_ws=0, rstrip_ws=0)
+    test_input (3, "strip blanks", in_file, result3)
+
+    in_file = TextFile (filename)
+    test_input (4, "default processing", in_file, result4)
+
+    in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
+                        join_lines=1, rstrip_ws=1)
+    test_input (5, "join lines without collapsing", in_file, result5)
+
+    in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
+                        join_lines=1, rstrip_ws=1, collapse_join=1)
+    test_input (6, "join lines with collapsing", in_file, result6)
+
+    # clean up the scratch file
+    os.remove (filename)
diff --git a/lib-python/2.2/distutils/unixccompiler.py b/lib-python/2.2/distutils/unixccompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/unixccompiler.py
@@ -0,0 +1,308 @@
+"""distutils.unixccompiler
+
+Contains the UnixCCompiler class, a subclass of CCompiler that handles
+the "typical" Unix-style command-line C compiler:
+  * macros defined with -Dname[=value]
+  * macros undefined with -Uname
+  * include search directories specified with -Idir
+  * libraries specified with -lllib
+  * library search directories specified with -Ldir
+  * compile handled by 'cc' (or similar) executable with -c option:
+    compiles .c to .o
+  * link static library handled by 'ar' command (possibly with 'ranlib')
+  * link shared library handled by 'cc -shared'
+"""
+
+# created 1999/07/05, Greg Ward
+
+__revision__ = "$Id$"
+
+import string, re, os, sys
+from types import *
+from copy import copy
+from distutils import sysconfig
+from distutils.dep_util import newer
+from distutils.ccompiler import \
+     CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.errors import \
+     DistutilsExecError, CompileError, LibError, LinkError
+
+# XXX Things not currently handled:
+#   * optimization/debug/warning flags; we just use whatever's in Python's
+#     Makefile and live with it.  Is this adequate?  If not, we might
+#     have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
+#     SunCCompiler, and I suspect down that road lies madness.
+#   * even if we don't know a warning flag from an optimization flag,
+#     we need some way for outsiders to feed preprocessor/compiler/linker
+#     flags in to us -- eg. a sysadmin might want to mandate certain flags
+#     via a site config file, or a user might want to set something for
+#     compiling this module distribution only via the setup.py command
+#     line, whatever.  As long as these options come from something on the
+#     current system, they can be as system-dependent as they like, and we
+#     should just happily stuff them into the preprocessor/compiler/linker
+#     options and carry on.
+
+
+class UnixCCompiler (CCompiler):
+
+    # Identifier used by distutils to select this compiler class.
+    compiler_type = 'unix'
+
+    # These are used by CCompiler in two places: the constructor sets
+    # instance attributes 'preprocessor', 'compiler', etc. from them, and
+    # 'set_executable()' allows any of these to be set.  The defaults here
+    # are pretty generic; they will probably have to be set by an outsider
+    # (eg. using information discovered by the sysconfig about building
+    # Python extensions).
+    executables = {'preprocessor' : None,
+                   'compiler'     : ["cc"],
+                   'compiler_so'  : ["cc"],
+                   'linker_so'    : ["cc", "-shared"],
+                   'linker_exe'   : ["cc"],
+                   'archiver'     : ["ar", "-cr"],
+                   'ranlib'       : None,
+                  }
+
+    # Mac OS X static archives need ranlib after 'ar'.
+    if sys.platform[:6] == "darwin":
+        executables['ranlib'] = ["ranlib"]
+
+    # Needed for the filename generation methods provided by the base
+    # class, CCompiler.  NB. whoever instantiates/uses a particular
+    # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
+    # reasonable common default here, but it's not necessarily used on all
+    # Unices!
+
+    src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
+    obj_extension = ".o"
+    static_lib_extension = ".a"
+    shared_lib_extension = ".so"
+    dylib_lib_extension = ".dylib"
+    static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
+
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+        # No Unix-specific state; just initialize the generic CCompiler.
+        CCompiler.__init__ (self, verbose, dry_run, force)
+
+
+    def preprocess (self,
+                    source,
+                    output_file=None,
+                    macros=None,
+                    include_dirs=None,
+                    extra_preargs=None,
+                    extra_postargs=None):
+        # Run the preprocessor over 'source'; writes to 'output_file' if
+        # given, otherwise the preprocessor's stdout.  Raises CompileError
+        # if the spawned command fails.
+
+        (_, macros, include_dirs) = \
+            self._fix_compile_args(None, macros, include_dirs)
+        pp_opts = gen_preprocess_options(macros, include_dirs)
+        pp_args = self.preprocessor + pp_opts
+        if output_file:
+            pp_args.extend(['-o', output_file])
+        if extra_preargs:
+            pp_args[:0] = extra_preargs
+        if extra_postargs:
+            pp_args.extend(extra_postargs)
+
+        # We need to preprocess: either we're being forced to, or we're
+        # generating output to stdout, or there's a target output file and
+        # the source file is newer than the target (or the target doesn't
+        # exist).
+        if self.force or output_file is None or newer(source, output_file):
+            if output_file:
+                self.mkpath(os.path.dirname(output_file))
+            try:
+                self.spawn(pp_args)
+            except DistutilsExecError, msg:
+                raise CompileError, msg
+
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        # Compile each .c/.cc/... source to a .o object with 'cc -c',
+        # skipping sources whose objects are already up-to-date.
+        # Raises CompileError on the first failed compilation.
+
+        (output_dir, macros, include_dirs) = \
+            self._fix_compile_args(output_dir, macros, include_dirs)
+        (objects, skip_sources) = self._prep_compile(sources, output_dir)
+
+        # Figure out the options for the compiler command line.
+        pp_opts = gen_preprocess_options(macros, include_dirs)
+        cc_args = pp_opts + ['-c']
+        if debug:
+            cc_args[:0] = ['-g']
+        if extra_preargs:
+            cc_args[:0] = extra_preargs
+        if extra_postargs is None:
+            extra_postargs = []
+
+        # Compile all source files that weren't eliminated by
+        # '_prep_compile()'.
+        for i in range(len(sources)):
+            src = sources[i] ; obj = objects[i]
+            if skip_sources[src]:
+                self.announce("skipping %s (%s up-to-date)" % (src, obj))
+            else:
+                self.mkpath(os.path.dirname(obj))
+                try:
+                    self.spawn(self.compiler_so + cc_args +
+                               [src, '-o', obj] +
+                               extra_postargs)
+                except DistutilsExecError, msg:
+                    raise CompileError, msg
+
+        # Return *all* object filenames, not just the ones we just built.
+        return objects
+
+    # compile ()
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0):
+        # Archive 'objects' into a static library with 'ar' (and ranlib
+        # where configured).  Raises LibError if ranlib fails.
+
+        (objects, output_dir) = self._fix_object_args(objects, output_dir)
+
+        output_filename = \
+            self.library_filename(output_libname, output_dir=output_dir)
+
+        if self._need_link(objects, output_filename):
+            self.mkpath(os.path.dirname(output_filename))
+            self.spawn(self.archiver +
+                       [output_filename] +
+                       objects + self.objects)
+
+            # Not many Unices required ranlib anymore -- SunOS 4.x is, I
+            # think the only major Unix that does.  Maybe we need some
+            # platform intelligence here to skip ranlib if it's not
+            # needed -- or maybe Python's configure script took care of
+            # it for us, hence the check for leading colon.
+            if self.ranlib:
+                try:
+                    self.spawn(self.ranlib + [output_filename])
+                except DistutilsExecError, msg:
+                    raise LibError, msg
+        else:
+            self.announce("skipping %s (up-to-date)" % output_filename)
+
+    # create_static_lib ()
+
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        # Link objects into an executable or shared object, depending on
+        # 'target_desc' (CCompiler.EXECUTABLE selects linker_exe, anything
+        # else linker_so).  Raises LinkError if the link command fails.
+
+        (objects, output_dir) = self._fix_object_args(objects, output_dir)
+        (libraries, library_dirs, runtime_library_dirs) = \
+            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
+
+        lib_opts = gen_lib_options(self,
+                                   library_dirs, runtime_library_dirs,
+                                   libraries)
+        if type(output_dir) not in (StringType, NoneType):
+            raise TypeError, "'output_dir' must be a string or None"
+        if output_dir is not None:
+            output_filename = os.path.join(output_dir, output_filename)
+
+        if self._need_link(objects, output_filename):
+            ld_args = (objects + self.objects +
+                       lib_opts + ['-o', output_filename])
+            if debug:
+                ld_args[:0] = ['-g']
+            if extra_preargs:
+                ld_args[:0] = extra_preargs
+            if extra_postargs:
+                ld_args.extend(extra_postargs)
+            self.mkpath(os.path.dirname(output_filename))
+            try:
+                if target_desc == CCompiler.EXECUTABLE:
+                    self.spawn(self.linker_exe + ld_args)
+                else:
+                    self.spawn(self.linker_so + ld_args)
+            except DistutilsExecError, msg:
+                raise LinkError, msg
+        else:
+            self.announce("skipping %s (up-to-date)" % output_filename)
+
+    # link ()
+
+
+    # -- Miscellaneous methods -----------------------------------------
+    # These are all used by the 'gen_lib_options() function, in
+    # ccompiler.py.
+
+    def library_dir_option (self, dir):
+        # Compile-time library search path.
+        return "-L" + dir
+
+    def runtime_library_dir_option (self, dir):
+        # XXX Hackish, at the very least.  See Python bug #445902:
+        # http://sourceforge.net/tracker/index.php
+        #   ?func=detail&aid=445902&group_id=5470&atid=105470
+        # Linkers on different platforms need different options to
+        # specify that directories need to be added to the list of
+        # directories searched for dependencies when a dynamic library
+        # is sought.  GCC has to be told to pass the -R option through
+        # to the linker, whereas other compilers just know this.
+        # Other compilers may need something slightly different.  At
+        # this time, there's no way to determine this information from
+        # the configuration data stored in the Python installation, so
+        # we use this hack.
+        compiler = os.path.basename(sysconfig.get_config_var("CC"))
+        if sys.platform[:6] == "darwin":
+            # MacOSX's linker doesn't understand the -R flag at all
+            return "-L" + dir
+        if compiler == "gcc" or compiler == "g++":
+            return "-Wl,-R" + dir
+        else:
+            return "-R" + dir
+
+    def library_option (self, lib):
+        # Link against library 'lib' (the linker adds the "lib" prefix).
+        return "-l" + lib
+
+
+    def find_library_file (self, dirs, lib, debug=0):
+        # Search 'dirs' for 'lib' in dylib, then shared, then static form;
+        # return the first path found, or None if the library is absent
+        # from every directory.  ('debug' is accepted for interface
+        # compatibility but unused here.)
+
+        for dir in dirs:
+            shared = os.path.join(
+                dir, self.library_filename(lib, lib_type='shared'))
+            dylib = os.path.join(
+                dir, self.library_filename(lib, lib_type='dylib'))
+            static = os.path.join(
+                dir, self.library_filename(lib, lib_type='static'))
+
+            # We're second-guessing the linker here, with not much hard
+            # data to go on: GCC seems to prefer the shared library, so I'm
+            # assuming that *all* Unix C compilers do.  And of course I'm
+            # ignoring even GCC's "-static" option.  So sue me.
+            if os.path.exists(dylib):
+                return dylib
+            elif os.path.exists(shared):
+                return shared
+            elif os.path.exists(static):
+                return static
+
+        # NOTE: this 'else' belongs to the 'for' loop (for/else); it runs
+        # only when the loop exhausts 'dirs' without an early return.
+        else:
+            # Oops, didn't find it in *any* of 'dirs'
+            return None
+
+    # find_library_file ()
+
+# class UnixCCompiler
diff --git a/lib-python/2.2/distutils/util.py b/lib-python/2.2/distutils/util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/util.py
@@ -0,0 +1,458 @@
+"""distutils.util
+
+Miscellaneous utility functions -- anything that doesn't fit into
+one of the other *util.py modules.
+"""
+
+# created 1999/03/08, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from distutils.errors import DistutilsPlatformError
+from distutils.dep_util import newer
+from distutils.spawn import spawn
+
+
+def get_platform ():
+    """Return a string that identifies the current platform.  This is used
+    mainly to distinguish platform-specific build directories and
+    platform-specific built distributions.  Typically includes the OS name
+    and version and the architecture (as supplied by 'os.uname()'),
+    although the exact information included depends on the OS; eg. for IRIX
+    the architecture isn't particularly important (IRIX only runs on SGI
+    hardware), but for Linux the kernel version isn't particularly
+    important.
+
+    Examples of returned values:
+       linux-i586
+       linux-alpha (?)
+       solaris-2.6-sun4u
+       irix-5.3
+       irix64-6.2
+
+    For non-POSIX platforms, currently just returns 'sys.platform'.
+    """
+    if os.name != "posix" or not hasattr(os, 'uname'):
+        # XXX what about the architecture? NT is Intel or Alpha,
+        # Mac OS is M68k or PPC, etc.
+        return sys.platform
+
+    # Try to distinguish various flavours of Unix
+
+    (osname, host, release, version, machine) = os.uname()
+
+    # Convert the OS name to lowercase, remove '/' characters
+    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
+    osname = string.lower(osname)
+    osname = string.replace(osname, '/', '')
+    machine = string.replace(machine, ' ', '_')
+
+    if osname[:5] == "linux":
+        # At least on Linux/Intel, 'machine' is the processor --
+        # i386, etc.
+        # XXX what about Alpha, SPARC, etc?
+        return  "%s-%s" % (osname, machine)
+    elif osname[:5] == "sunos":
+        if release[0] >= "5":           # SunOS 5 == Solaris 2
+            osname = "solaris"
+            # Map the SunOS 5.x kernel release to the Solaris 2.x
+            # marketing version (5.6 -> 2.6, etc.).
+            release = "%d.%s" % (int(release[0]) - 3, release[2:])
+        # fall through to standard osname-release-machine representation
+    elif osname[:4] == "irix":              # could be "irix64"!
+        return "%s-%s" % (osname, release)
+    elif osname[:3] == "aix":
+        return "%s-%s.%s" % (osname, version, release)
+    elif osname[:6] == "cygwin":
+        osname = "cygwin"
+        # Keep only the leading run of digits and dots of the release
+        # string, dropping any trailing non-numeric detail.
+        rel_re = re.compile (r'[\d.]+')
+        m = rel_re.match(release)
+        if m:
+            release = m.group()
+
+    return "%s-%s-%s" % (osname, release, machine)
+
+# get_platform ()
+
+
+def convert_path (pathname):
+    """Return 'pathname' as a name that will work on the native filesystem,
+    i.e. split it on '/' and put it back together again using the current
+    directory separator.  Needed because filenames in the setup script are
+    always supplied in Unix style, and have to be converted to the local
+    convention before we can actually use them in the filesystem.  Raises
+    ValueError on non-Unix-ish systems if 'pathname' either starts or
+    ends with a slash.
+    """
+    if os.sep == '/':
+        return pathname
+    if not pathname:
+        return pathname
+    if pathname[0] == '/':
+        raise ValueError, "path '%s' cannot be absolute" % pathname
+    if pathname[-1] == '/':
+        raise ValueError, "path '%s' cannot end with '/'" % pathname
+
+    paths = string.split(pathname, '/')
+    # Drop no-op '.' components so they don't clutter the joined path.
+    while '.' in paths:
+        paths.remove('.')
+    if not paths:
+        return os.curdir
+    # 'apply' is the pre-*args spelling of os.path.join(*paths).
+    return apply(os.path.join, paths)
+
+# convert_path ()
+
+
+def change_root (new_root, pathname):
+    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
+    relative, this is equivalent to "os.path.join(new_root,pathname)".
+    Otherwise, it requires making 'pathname' relative and then joining the
+    two, which is tricky on DOS/Windows and Mac OS.
+    Raises DistutilsPlatformError on platforms other than posix/nt/mac.
+    """
+    if os.name == 'posix':
+        if not os.path.isabs(pathname):
+            return os.path.join(new_root, pathname)
+        else:
+            # Strip the leading '/' so the join lands under new_root.
+            return os.path.join(new_root, pathname[1:])
+
+    elif os.name == 'nt':
+        (drive, path) = os.path.splitdrive(pathname)
+        # Drop the separator left after the drive letter, if any.
+        if path[0] == '\\':
+            path = path[1:]
+        return os.path.join(new_root, path)
+
+    elif os.name == 'mac':
+        if not os.path.isabs(pathname):
+            return os.path.join(new_root, pathname)
+        else:
+            # Chop off volume name from start of path
+            # (classic Mac OS: the volume precedes the first ':').
+            elements = string.split(pathname, ":", 1)
+            pathname = ":" + elements[1]
+            return os.path.join(new_root, pathname)
+
+    else:
+        raise DistutilsPlatformError, \
+              "nothing known about platform '%s'" % os.name
+
+
+# Module-level flag so check_environ() only does its work once per process.
+_environ_checked = 0
+def check_environ ():
+    """Ensure that 'os.environ' has all the environment variables we
+    guarantee that users can use in config files, command-line options,
+    etc.  Currently this includes:
+      HOME - user's home directory (Unix only)
+      PLAT - description of the current platform, including hardware
+             and OS (see 'get_platform()')
+    """
+    global _environ_checked
+    if _environ_checked:
+        return
+
+    if os.name == 'posix' and not os.environ.has_key('HOME'):
+        import pwd
+        # Field 5 of the pwd entry is pw_dir, the user's home directory.
+        os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+
+    if not os.environ.has_key('PLAT'):
+        os.environ['PLAT'] = get_platform()
+
+    _environ_checked = 1
+
+
+def subst_vars (s, local_vars):
+    """Perform shell/Perl-style variable substitution on 'string'.  Every
+    occurrence of '$' followed by a name is considered a variable, and
+    variable is substituted by the value found in the 'local_vars'
+    dictionary, or in 'os.environ' if it's not in 'local_vars'.
+    'os.environ' is first checked/augmented to guarantee that it contains
+    certain values: see 'check_environ()'.  Raise ValueError for any
+    variables not found in either 'local_vars' or 'os.environ'.
+    """
+    check_environ()
+    # 'local_vars=local_vars' is the pre-nested-scopes idiom for binding
+    # the enclosing variable into the callback.
+    def _subst (match, local_vars=local_vars):
+        var_name = match.group(1)
+        if local_vars.has_key(var_name):
+            return str(local_vars[var_name])
+        else:
+            return os.environ[var_name]
+
+    try:
+        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
+    except KeyError, var:
+        # Unknown variable: surface it as a ValueError per the docstring.
+        raise ValueError, "invalid variable '$%s'" % var
+
+# subst_vars ()
+
+
+def grok_environment_error (exc, prefix="error: "):
+    """Generate a useful error message from an EnvironmentError (IOError or
+    OSError) exception object.  Handles Python 1.5.1 and 1.5.2 styles, and
+    does what it can to deal with exception objects that don't have a
+    filename (which happens when the error is due to a two-file operation,
+    such as 'rename()' or 'link()'.  Returns the error message as a string
+    prefixed with 'prefix'.
+    """
+    # check for Python 1.5.2-style {IO,OS}Error exception objects
+    if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
+        if exc.filename:
+            error = prefix + "%s: %s" % (exc.filename, exc.strerror)
+        else:
+            # two-argument functions in posix module don't
+            # include the filename in the exception object!
+            error = prefix + "%s" % exc.strerror
+    else:
+        # Old-style exception: index the args tuple for the message.
+        error = prefix + str(exc[-1])
+
+    return error
+
+
+# Needed by 'split_quoted()': a run of non-special characters, a complete
+# single-quoted string, and a complete double-quoted string respectively.
+_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
+_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
+_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
+
+def split_quoted (s):
+    """Split a string up according to Unix shell-like rules for quotes and
+    backslashes.  In short: words are delimited by spaces, as long as those
+    spaces are not escaped by a backslash, or inside a quoted string.
+    Single and double quotes are equivalent, and the quote characters can
+    be backslash-escaped.  The backslash is stripped from any two-character
+    escape sequence, leaving only the escaped character.  The quote
+    characters are stripped from any quoted string.  Returns a list of
+    words.
+    """
+
+    # This is a nice algorithm for splitting up a single string, since it
+    # doesn't require character-by-character examination.  It was a little
+    # bit of a brain-bender to get it working right, though...
+
+    s = string.strip(s)
+    words = []
+    pos = 0
+
+    while s:
+        m = _wordchars_re.match(s, pos)
+        end = m.end()
+        if end == len(s):
+            words.append(s[:end])
+            break
+
+        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
+            words.append(s[:end])       # we definitely have a word delimiter
+            s = string.lstrip(s[end:])
+            pos = 0
+
+        elif s[end] == '\\':            # preserve whatever is being escaped;
+                                        # will become part of the current word
+            s = s[:end] + s[end+1:]
+            pos = end+1
+
+        else:
+            if s[end] == "'":           # slurp singly-quoted string
+                m = _squote_re.match(s, end)
+            elif s[end] == '"':         # slurp doubly-quoted string
+                m = _dquote_re.match(s, end)
+            else:
+                raise RuntimeError, \
+                      "this can't happen (bad char '%c')" % s[end]
+
+            if m is None:
+                raise ValueError, \
+                      "bad string (mismatched %s quotes?)" % s[end]
+
+            (beg, end) = m.span()
+            # Splice out the two quote characters; 'end - 2' accounts for
+            # the two characters just removed.
+            s = s[:beg] + s[beg+1:end-1] + s[end:]
+            pos = m.end() - 2
+
+        if pos >= len(s):
+            words.append(s)
+            break
+
+    return words
+
+# split_quoted ()
+
+
+def execute (func, args, msg=None, verbose=0, dry_run=0):
+    """Perform some action that affects the outside world (eg.  by writing
+    to the filesystem).  Such actions are special because they are disabled
+    by the 'dry_run' flag, and announce themselves if 'verbose' is true.
+    This method takes care of all that bureaucracy for you; all you have to
+    do is supply the function to call and an argument tuple for it (to
+    embody the "external action" being performed), and an optional message
+    to print.
+    """
+    # Generate a message if we weren't passed one
+    # (backquotes are the old spelling of repr()).
+    if msg is None:
+        msg = "%s%s" % (func.__name__, `args`)
+        if msg[-2:] == ',)':        # correct for singleton tuple
+            msg = msg[0:-2] + ')'
+
+    # Print it if verbosity level is high enough
+    if verbose:
+        print msg
+
+    # And do it, as long as we're not in dry-run mode
+    if not dry_run:
+        apply(func, args)
+
+# execute()
+
+
+def strtobool (val):
+    """Convert a string representation of truth to true (1) or false (0).
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
+    'val' is anything else.  Comparison is case-insensitive.
+    """
+    val = string.lower(val)
+    if val in ('y', 'yes', 't', 'true', 'on', '1'):
+        return 1
+    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+        return 0
+    else:
+        raise ValueError, "invalid truth value %s" % `val`
+
+
+def byte_compile (py_files,
+                  optimize=0, force=0,
+                  prefix=None, base_dir=None,
+                  verbose=1, dry_run=0,
+                  direct=None):
+    """Byte-compile a collection of Python source files to either .pyc
+    or .pyo files in the same directory.  'py_files' is a list of files
+    to compile; any files that don't end in ".py" are silently skipped.
+    'optimize' must be one of the following:
+      0 - don't optimize (generate .pyc)
+      1 - normal optimization (like "python -O")
+      2 - extra optimization (like "python -OO")
+    If 'force' is true, all files are recompiled regardless of
+    timestamps.
+
+    The source filename encoded in each bytecode file defaults to the
+    filenames listed in 'py_files'; you can modify these with 'prefix' and
+    'basedir'.  'prefix' is a string that will be stripped off of each
+    source filename, and 'base_dir' is a directory name that will be
+    prepended (after 'prefix' is stripped).  You can supply either or both
+    (or neither) of 'prefix' and 'base_dir', as you wish.
+
+    If 'verbose' is true, prints out a report of each file.  If 'dry_run'
+    is true, doesn't actually do anything that would affect the filesystem.
+
+    Byte-compilation is either done directly in this interpreter process
+    with the standard py_compile module, or indirectly by writing a
+    temporary script and executing it.  Normally, you should let
+    'byte_compile()' figure out to use direct compilation or not (see
+    the source for details).  The 'direct' flag is used by the script
+    generated in indirect mode; unless you know what you're doing, leave
+    it set to None.
+    """
+
+    # First, if the caller didn't force us into direct or indirect mode,
+    # figure out which mode we should be in.  We take a conservative
+    # approach: choose direct mode *only* if the current interpreter is
+    # in debug mode and optimize is 0.  If we're not in debug mode (-O
+    # or -OO), we don't know which level of optimization this
+    # interpreter is running with, so we can't do direct
+    # byte-compilation and be certain that it's the right thing.  Thus,
+    # always compile indirectly if the current interpreter is in either
+    # optimize mode, or if either optimization level was requested by
+    # the caller.
+    if direct is None:
+        direct = (__debug__ and optimize == 0)
+
+    # "Indirect" byte-compilation: write a temporary script and then
+    # run it with the appropriate flags.
+    if not direct:
+        # NOTE(review): mktemp only returns a name, so there is a window
+        # before the open() below in which the name could be reused --
+        # tolerable for a build step, but worth keeping in mind.
+        from tempfile import mktemp
+        script_name = mktemp(".py")
+        if verbose:
+            print "writing byte-compilation script '%s'" % script_name
+        if not dry_run:
+            script = open(script_name, "w")
+
+            script.write("""\
+from distutils.util import byte_compile
+files = [
+""")
+
+            # XXX would be nice to write absolute filenames, just for
+            # safety's sake (script should be more robust in the face of
+            # chdir'ing before running it).  But this requires abspath'ing
+            # 'prefix' as well, and that breaks the hack in build_lib's
+            # 'byte_compile()' method that carefully tacks on a trailing
+            # slash (os.sep really) to make sure the prefix here is "just
+            # right".  This whole prefix business is rather delicate -- the
+            # problem is that it's really a directory, but I'm treating it
+            # as a dumb string, so trailing slashes and so forth matter.
+
+            #py_files = map(os.path.abspath, py_files)
+            #if prefix:
+            #    prefix = os.path.abspath(prefix)
+
+            script.write(string.join(map(repr, py_files), ",\n") + "]\n")
+            script.write("""
+byte_compile(files, optimize=%s, force=%s,
+             prefix=%s, base_dir=%s,
+             verbose=%s, dry_run=0,
+             direct=1)
+""" % (`optimize`, `force`, `prefix`, `base_dir`, `verbose`))
+
+            script.close()
+
+        # Re-run this module in a child interpreter with the requested
+        # optimization flag, so the child's __debug__ matches 'optimize'.
+        cmd = [sys.executable, script_name]
+        if optimize == 1:
+            cmd.insert(1, "-O")
+        elif optimize == 2:
+            cmd.insert(1, "-OO")
+        spawn(cmd, verbose=verbose, dry_run=dry_run)
+        execute(os.remove, (script_name,), "removing %s" % script_name,
+                verbose=verbose, dry_run=dry_run)
+
+    # "Direct" byte-compilation: use the py_compile module to compile
+    # right here, right now.  Note that the script generated in indirect
+    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
+    # cross-process recursion.  Hey, it works!
+    else:
+        from py_compile import compile
+
+        for file in py_files:
+            if file[-3:] != ".py":
+                # This lets us be lazy and not filter filenames in
+                # the "install_lib" command.
+                continue
+
+            # Terminology from the py_compile module:
+            #   cfile - byte-compiled file
+            #   dfile - purported source filename (same as 'file' by default)
+            # __debug__ selects .pyc vs .pyo to match this interpreter.
+            cfile = file + (__debug__ and "c" or "o")
+            dfile = file
+            if prefix:
+                if file[:len(prefix)] != prefix:
+                    raise ValueError, \
+                          ("invalid prefix: filename %s doesn't start with %s"
+                           % (`file`, `prefix`))
+                dfile = dfile[len(prefix):]
+            if base_dir:
+                dfile = os.path.join(base_dir, dfile)
+
+            cfile_base = os.path.basename(cfile)
+            # NOTE(review): 'direct' is necessarily true in this branch,
+            # so this inner test is redundant (but harmless).
+            if direct:
+                if force or newer(file, cfile):
+                    if verbose:
+                        print "byte-compiling %s to %s" % (file, cfile_base)
+                    if not dry_run:
+                        compile(file, cfile, dfile)
+                else:
+                    if verbose:
+                        print "skipping byte-compilation of %s to %s" % \
+                              (file, cfile_base)
+
+def rfc822_escape (header):
+    """Return a version of the string escaped for inclusion in an
+    RFC-822 header, by ensuring there are 8 spaces after each newline
+    (so continuation lines are treated as part of the same header).
+    """
+    lines = string.split(header, '\n')
+    lines = map(string.strip, lines)
+    header = string.join(lines, '\n' + 8*' ')
+    return header
diff --git a/lib-python/2.2/distutils/version.py b/lib-python/2.2/distutils/version.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/version.py
@@ -0,0 +1,301 @@
+#
+# distutils/version.py
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# written by Greg Ward, 1998/12/17
+#
+# $Id$
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering).  There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+  * the 'parse' method takes a string and parses it to some internal
+    representation; if the string is an invalid version number,
+    'parse' raises a ValueError exception
+  * the class constructor takes an optional string argument which,
+    if supplied, is passed to 'parse'
+  * __str__ reconstructs the string that was passed to 'parse' (or
+    an equivalent string -- ie. one that will generate an equivalent
+    version number instance)
+  * __repr__ generates Python code to recreate the version number instance
+  * __cmp__ compares the current instance with either another instance
+    of the same class or a string (which will be parsed to an instance
+    of the same class, thus must follow the same rules)
+"""
+
+import string, re
+from types import StringType
+
+class Version:
+    """Abstract base class for version numbering classes.  Just provides
+    constructor (__init__) and reproducer (__repr__), because those
+    seem to be the same for all version numbering classes.  Subclasses
+    must supply 'parse', '__str__', and '__cmp__'.
+    """
+
+    def __init__ (self, vstring=None):
+        # Delegate to the subclass's parse(); a None/empty vstring
+        # leaves the instance unparsed.
+        if vstring:
+            self.parse(vstring)
+
+    def __repr__ (self):
+        return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+#    __init__ (string) - create and take same action as 'parse'
+#                        (string parameter is optional)
+#    parse (string)    - convert a string representation to whatever
+#                        internal representation is appropriate for
+#                        this style of version numbering
+#    __str__ (self)    - convert back to a string; should be very similar
+#                        (if not identical to) the string supplied to parse
+#    __repr__ (self)   - generate Python code to recreate
+#                        the instance
+#    __cmp__ (self, other) - compare two version numbers ('other' may
+#                        be an unparsed version string, or another
+#                        instance of your version class)
+
+
+class StrictVersion (Version):
+
+    """Version numbering for anal retentives and software idealists.
+    Implements the standard interface for version number classes as
+    described above.  A version number consists of two or three
+    dot-separated numeric components, with an optional "pre-release" tag
+    on the end.  The pre-release tag consists of the letter 'a' or 'b'
+    followed by a number.  If the numeric components of two version
+    numbers are equal, then one with a pre-release tag will always
+    be deemed earlier (lesser) than one without.
+
+    The following are valid version numbers (shown in the order that
+    would be obtained by sorting according to the supplied cmp function):
+
+        0.4       0.4.0  (these two are equivalent)
+        0.4.1
+        0.5a1
+        0.5b3
+        0.5
+        0.9.6
+        1.0
+        1.0.4a3
+        1.0.4b1
+        1.0.4
+
+    The following are examples of invalid version numbers:
+
+        1
+        2.7.2.2
+        1.3.a4
+        1.3pl1
+        1.3c4
+
+    The rationale for this version numbering system will be explained
+    in the distutils documentation.
+    """
+
+    # Groups: 1=major, 2=minor, 4=patch (optional), 5=prerelease tag
+    # ('a'/'b' plus number), 6=prerelease number.
+    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
+                            re.VERBOSE)
+
+
+    def parse (self, vstring):
+        # Populate self.version (3-tuple of ints, patch defaulting to 0)
+        # and self.prerelease (('a'|'b', int) or None); raise ValueError
+        # on anything the strict grammar rejects.
+        match = self.version_re.match(vstring)
+        if not match:
+            raise ValueError, "invalid version number '%s'" % vstring
+
+        (major, minor, patch, prerelease, prerelease_num) = \
+            match.group(1, 2, 4, 5, 6)
+
+        if patch:
+            self.version = tuple(map(string.atoi, [major, minor, patch]))
+        else:
+            self.version = tuple(map(string.atoi, [major, minor]) + [0])
+
+        if prerelease:
+            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
+        else:
+            self.prerelease = None
+
+
+    def __str__ (self):
+
+        # Omit a zero patch level, so "0.4.0" round-trips as "0.4".
+        if self.version[2] == 0:
+            vstring = string.join(map(str, self.version[0:2]), '.')
+        else:
+            vstring = string.join(map(str, self.version), '.')
+
+        if self.prerelease:
+            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+        return vstring
+
+
+    def __cmp__ (self, other):
+        # 'other' may be a raw string; parse it into a StrictVersion first.
+        if isinstance(other, StringType):
+            other = StrictVersion(other)
+
+        compare = cmp(self.version, other.version)
+        if (compare == 0):              # have to compare prerelease
+
+            # case 1: neither has prerelease; they're equal
+            # case 2: self has prerelease, other doesn't; other is greater
+            # case 3: self doesn't have prerelease, other does: self is greater
+            # case 4: both have prerelease: must compare them!
+
+            if (not self.prerelease and not other.prerelease):
+                return 0
+            elif (self.prerelease and not other.prerelease):
+                return -1
+            elif (not self.prerelease and other.prerelease):
+                return 1
+            elif (self.prerelease and other.prerelease):
+                return cmp(self.prerelease, other.prerelease)
+
+        else:                           # numeric versions don't match --
+            return compare              # prerelease stuff doesn't matter
+
+# end class StrictVersion
+
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separate by a period or by
+#    sequences of letters. If only periods, then these are compared
+#    left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+#    compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison.  This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave.  There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+#   - indicating a "pre-release" version
+#     ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+#   - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number.  The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison.  This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct.  Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind.  The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above.  This works great as long
+# as everyone can go along with bondage and discipline.  Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination.  The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings.  This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++".  I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2".  That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers.  It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception).  But I'd rather
+# have a conception that matches common notions about version numbers.
+
+class LooseVersion (Version):
+
+    """Version numbering for anarchists and software realists.
+    Implements the standard interface for version number classes as
+    described above.  A version number consists of a series of numbers,
+    separated by either periods or strings of letters.  When comparing
+    version numbers, the numeric components will be compared
+    numerically, and the alphabetic components lexically.  The following
+    are all valid version numbers, in no particular order:
+
+        1.5.1
+        1.5.2b2
+        161
+        3.10a
+        8.02
+        3.4j
+        1996.07.12
+        3.2.pl0
+        3.1.1.6
+        2g6
+        11g
+        0.960923
+        2.2beta29
+        1.13++
+        5.5.kw
+        2.0b1pl0
+
+    In fact, there is no such thing as an invalid version number under
+    this scheme; the rules for comparison are simple and predictable,
+    but may not always give the results you want (for some definition
+    of "want").
+    """
+
+    # Splits a version string into runs of digits, runs of lowercase
+    # letters, and '.' separators.
+    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+    # NOTE(review): this __init__ is identical to Version.__init__ and
+    # could be inherited.
+    def __init__ (self, vstring=None):
+        if vstring:
+            self.parse(vstring)
+
+
+    def parse (self, vstring):
+        # I've given up on thinking I can reconstruct the version string
+        # from the parsed tuple -- so I just store the string here for
+        # use by __str__
+        self.vstring = vstring
+        # Drop separators/empties, then convert numeric runs to ints so
+        # they compare numerically ("10" > "9").
+        components = filter(lambda x: x and x != '.',
+                            self.component_re.split(vstring))
+        for i in range(len(components)):
+            try:
+                components[i] = int(components[i])
+            except ValueError:
+                pass
+
+        self.version = components
+
+
+    def __str__ (self):
+        return self.vstring
+
+
+    def __repr__ (self):
+        return "LooseVersion ('%s')" % str(self)
+
+
+    def __cmp__ (self, other):
+        # 'other' may be a raw string; parse it into a LooseVersion first.
+        if isinstance(other, StringType):
+            other = LooseVersion(other)
+
+        # Plain list comparison: ints numerically, strings lexically.
+        return cmp(self.version, other.version)
+
+
+# end class LooseVersion
diff --git a/lib-python/2.2/doctest.py b/lib-python/2.2/doctest.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/doctest.py
@@ -0,0 +1,1173 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001,
+# by Tim Peters (tim.one at home.com).
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+"""Module doctest -- a framework for running examples in docstrings.
+
+NORMAL USAGE
+
+In normal use, end each module M with:
+
+def _test():
+    import doctest, M           # replace M with your module's name
+    return doctest.testmod(M)   # ditto
+
+if __name__ == "__main__":
+    _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=1" to testmod, or prohibit
+it by passing "verbose=0".  In either of those cases, sys.argv is not
+examined by testmod.
+
+In any case, testmod returns a 2-tuple of ints (f, t), where f is the
+number of docstring examples that failed and t is the total number of
+docstring examples attempted.
+
+
+WHICH DOCSTRINGS ARE EXAMINED?
+
++ M.__doc__.
+
++ f.__doc__ for all functions f in M.__dict__.values(), except those
+  with private names and those defined in other modules.
+
++ C.__doc__ for all classes C in M.__dict__.values(), except those with
+  private names and those defined in other modules.
+
++ If M.__test__ exists and "is true", it must be a dict, and
+  each entry maps a (string) name to a function object, class object, or
+  string.  Function and class object docstrings found from M.__test__
+  are searched even if the name is private, and strings are searched
+  directly as if they were docstrings.  In output, a key K in M.__test__
+  appears with name
+      <name of M>.__test__.K
+
+Any classes found are recursively searched similarly, to test docstrings in
+their contained methods and nested classes.  Private names reached from M's
+globals are skipped, but all names reached from M.__test__ are searched.
+
+By default, a name is considered to be private if it begins with an
+underscore (like "_my_func") but doesn't both begin and end with (at least)
+two underscores (like "__init__").  You can change the default by passing
+your own "isprivate" function to testmod.
+
+If you want to test docstrings in objects with private names too, stuff
+them into an M.__test__ dict, or see ADVANCED USAGE below (e.g., pass your
+own isprivate function to Tester's constructor, or call the rundoc method
+of a Tester instance).
+
+WHAT'S THE EXECUTION CONTEXT?
+
+By default, each time testmod finds a docstring to test, it uses a *copy*
+of M's globals (so that running tests on a module doesn't change the
+module's real globals, and so that one test in M can't leave behind crumbs
+that accidentally allow another test to work).  This means examples can
+freely use any names defined at top-level in M.  It also means that sloppy
+imports (see above) can cause examples in external docstrings to use
+globals inappropriate for them.
+
+You can force use of your own dict as the execution context by passing
+"globs=your_dict" to testmod instead.  Presumably this would be a copy of
+M.__dict__ merged with the globals from other imported modules.
+
+
+WHAT IF I WANT TO TEST A WHOLE PACKAGE?
+
+Piece o' cake, provided the modules do their testing from docstrings.
+Here's the test.py I use for the world's most elaborate Rational/
+floating-base-conversion pkg (which I'll distribute some day):
+
+from Rational import Cvt
+from Rational import Format
+from Rational import machprec
+from Rational import Rat
+from Rational import Round
+from Rational import utils
+
+modules = (Cvt,
+           Format,
+           machprec,
+           Rat,
+           Round,
+           utils)
+
+def _test():
+    import doctest
+    import sys
+    verbose = "-v" in sys.argv
+    for mod in modules:
+        doctest.testmod(mod, verbose=verbose, report=0)
+    doctest.master.summarize()
+
+if __name__ == "__main__":
+    _test()
+
+IOW, it just runs testmod on all the pkg modules.  testmod remembers the
+names and outcomes (# of failures, # of tries) for each item it's seen, and
+passing "report=0" prevents it from printing a summary in verbose mode.
+Instead, the summary is delayed until all modules have been tested, and
+then "doctest.master.summarize()" forces the summary at the end.
+
+So this is very nice in practice:  each module can be tested individually
+with almost no work beyond writing up docstring examples, and collections
+of modules can be tested too as a unit with no more work than the above.
+
+
+WHAT ABOUT EXCEPTIONS?
+
+No problem, as long as the only output generated by the example is the
+traceback itself.  For example:
+
+    >>> [1, 2, 3].remove(42)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    ValueError: list.remove(x): x not in list
+    >>>
+
+Note that only the exception type and value are compared (specifically,
+only the last line in the traceback).
+
+
+ADVANCED USAGE
+
+doctest.testmod() captures the testing policy I find most useful most
+often.  You may want other policies.
+
+testmod() actually creates a local instance of class doctest.Tester, runs
+appropriate methods of that class, and merges the results into global
+Tester instance doctest.master.
+
+You can create your own instances of doctest.Tester, and so build your own
+policies, or even run methods of doctest.master directly.  See
+doctest.Tester.__doc__ for details.
+
+
+SO WHAT DOES A DOCSTRING EXAMPLE LOOK LIKE ALREADY!?
+
+Oh ya.  It's easy!  In most cases a copy-and-paste of an interactive
+console session works fine -- just make sure the leading whitespace is
+rigidly consistent (you can mix tabs and spaces if you're too lazy to do it
+right, but doctest is not in the business of guessing what you think a tab
+means).
+
+    >>> # comments are ignored
+    >>> x = 12
+    >>> x
+    12
+    >>> if x == 13:
+    ...     print "yes"
+    ... else:
+    ...     print "no"
+    ...     print "NO"
+    ...     print "NO!!!"
+    ...
+    no
+    NO
+    NO!!!
+    >>>
+
+Any expected output must immediately follow the final ">>>" or "..." line
+containing the code, and the expected output (if any) extends to the next
+">>>" or all-whitespace line.  That's it.
+
+Bummers:
+
++ Expected output cannot contain an all-whitespace line, since such a line
+  is taken to signal the end of expected output.
+
++ Output to stdout is captured, but not output to stderr (exception
+  tracebacks are captured via a different means).
+
++ If you continue a line via backslashing in an interactive session, or for
+  any other reason use a backslash, you need to double the backslash in the
+  docstring version.  This is simply because you're in a string, and so the
+  backslash must be escaped for it to survive intact.  Like:
+
+>>> if "yes" == \\
+...     "y" +   \\
+...     "es":   # in the source code you'll see the doubled backslashes
+...     print 'yes'
+yes
+
+The starting column doesn't matter:
+
+>>> assert "Easy!"
+     >>> import math
+            >>> math.floor(1.9)
+            1.0
+
+and as many leading whitespace characters are stripped from the expected
+output as appeared in the initial ">>>" line that triggered it.
+
+If you execute this very file, the examples above will be found and
+executed, leading to this output in verbose mode:
+
+Running doctest.__doc__
+Trying: [1, 2, 3].remove(42)
+Expecting:
+Traceback (most recent call last):
+  File "<stdin>", line 1, in ?
+ValueError: list.remove(x): x not in list
+ok
+Trying: x = 12
+Expecting: nothing
+ok
+Trying: x
+Expecting: 12
+ok
+Trying:
+if x == 13:
+    print "yes"
+else:
+    print "no"
+    print "NO"
+    print "NO!!!"
+Expecting:
+no
+NO
+NO!!!
+ok
+... and a bunch more like that, with this summary at the end:
+
+5 items had no tests:
+    doctest.Tester.__init__
+    doctest.Tester.run__test__
+    doctest.Tester.summarize
+    doctest.run_docstring_examples
+    doctest.testmod
+12 items passed all tests:
+   8 tests in doctest
+   6 tests in doctest.Tester
+  10 tests in doctest.Tester.merge
+  14 tests in doctest.Tester.rundict
+   3 tests in doctest.Tester.rundoc
+   3 tests in doctest.Tester.runstring
+   2 tests in doctest.__test__._TestClass
+   2 tests in doctest.__test__._TestClass.__init__
+   2 tests in doctest.__test__._TestClass.get
+   1 tests in doctest.__test__._TestClass.square
+   2 tests in doctest.__test__.string
+   7 tests in doctest.is_private
+60 tests in 17 items.
+60 passed and 0 failed.
+Test passed.
+"""
+
+__all__ = [
+    'testmod',
+    'run_docstring_examples',
+    'is_private',
+    'Tester',
+]
+
+import __future__
+
+import re
+# Interactive prompt strings recognized when scanning docstrings.
+PS1 = ">>>"
+PS2 = "..."
+# Line matchers: prompt lines (group 1 captures the leading indentation),
+# all-whitespace lines, and comment-only lines.
+_isPS1 = re.compile(r"(\s*)" + re.escape(PS1)).match
+_isPS2 = re.compile(r"(\s*)" + re.escape(PS2)).match
+_isEmpty = re.compile(r"\s*$").match
+_isComment = re.compile(r"\s*#").match
+# re is only needed to build the matchers above; keep the module
+# namespace clean.
+del re
+
+from types import StringTypes as _StringTypes
+
+from inspect import isclass    as _isclass
+from inspect import isfunction as _isfunction
+from inspect import ismodule   as _ismodule
+from inspect import classify_class_attrs as _classify_class_attrs
+
+# Extract interactive examples from a string.  Return a list of triples,
+# (source, outcome, lineno).  "source" is the source code, and ends
+# with a newline iff the source spans more than one line.  "outcome" is
+# the expected output if any, else an empty string.  When not empty,
+# outcome always ends with a newline.  "lineno" is the line number,
+# 0-based wrt the start of the string, of the first source line.
+
+def _extract_examples(s):
+    """Return a list of (source, expect, lineno) triples parsed from s.
+
+    See the comment block above for the exact meaning of each field.
+    Raises ValueError on malformed examples (missing blank after the
+    prompt, or inconsistent leading whitespace).
+    """
+    # Bind the module-level matchers to locals for speed inside the loop.
+    isPS1, isPS2 = _isPS1, _isPS2
+    isEmpty, isComment = _isEmpty, _isComment
+    examples = []
+    lines = s.split("\n")
+    i, n = 0, len(lines)
+    while i < n:
+        line = lines[i]
+        i = i + 1
+        m = isPS1(line)
+        if m is None:
+            continue
+        j = m.end(0)  # beyond the prompt
+        if isEmpty(line, j) or isComment(line, j):
+            # a bare prompt or comment -- not interesting
+            continue
+        lineno = i - 1
+        if line[j] != " ":
+            raise ValueError("line " + `lineno` + " of docstring lacks "
+                "blank after " + PS1 + ": " + line)
+        j = j + 1
+        # All continuation and output lines must share this indentation.
+        blanks = m.group(1)
+        nblanks = len(blanks)
+        # suck up this and following PS2 lines
+        source = []
+        while 1:
+            source.append(line[j:])
+            line = lines[i]
+            m = isPS2(line)
+            if m:
+                if m.group(1) != blanks:
+                    raise ValueError("inconsistent leading whitespace "
+                        "in line " + `i` + " of docstring: " + line)
+                i = i + 1
+            else:
+                break
+        if len(source) == 1:
+            source = source[0]
+        else:
+            # get rid of useless null line from trailing empty "..."
+            if source[-1] == "":
+                del source[-1]
+            # Multi-line source always ends with a newline (see contract).
+            source = "\n".join(source) + "\n"
+        # suck up response
+        if isPS1(line) or isEmpty(line):
+            # No expected output: the next line is a new prompt or blank.
+            expect = ""
+        else:
+            expect = []
+            while 1:
+                if line[:nblanks] != blanks:
+                    raise ValueError("inconsistent leading whitespace "
+                        "in line " + `i` + " of docstring: " + line)
+                # Strip the common indentation from the expected output.
+                expect.append(line[nblanks:])
+                i = i + 1
+                line = lines[i]
+                if isPS1(line) or isEmpty(line):
+                    break
+            expect = "\n".join(expect) + "\n"
+        examples.append( (source, expect, lineno) )
+    return examples
+
+# Capture stdout when running examples.
+
+class _SpoofOut:
+    """A fake stdout: accumulates everything written, for later retrieval.
+
+    Substituted for sys.stdout while examples run, so their print output
+    can be compared against the expected output.
+    """
+    def __init__(self):
+        self.clear()
+    def write(self, s):
+        # Append rather than concatenate; get() joins once at the end.
+        self.buf.append(s)
+    def get(self):
+        guts = "".join(self.buf)
+        # If anything at all was written, make sure there's a trailing
+        # newline.  There's no way for the expected output to indicate
+        # that a trailing newline is missing.
+        if guts and not guts.endswith("\n"):
+            guts = guts + "\n"
+        # Prevent softspace from screwing up the next test case, in
+        # case they used print with a trailing comma in an example.
+        if hasattr(self, "softspace"):
+            del self.softspace
+        return guts
+    def clear(self):
+        # buf holds the raw written fragments since the last clear().
+        self.buf = []
+        if hasattr(self, "softspace"):
+            del self.softspace
+    def flush(self):
+        # JPython calls flush
+        pass
+
+# Display some tag-and-msg pairs nicely, keeping the tag and its msg
+# on the same line when that makes sense.
+
+def _tag_out(printer, *tag_msg_pairs):
+    """Print each (tag, msg) pair via printer, as "tag: msg".
+
+    The message goes on the same line as the tag when it is short and
+    single-line; otherwise it starts on the following line.  Output is
+    always terminated with a newline.
+    """
+    for tag, msg in tag_msg_pairs:
+        printer(tag + ":")
+        msg_has_nl = msg[-1:] == "\n"
+        # True when msg spans more than one line (a newline appears
+        # before the final character).
+        msg_has_two_nl = msg_has_nl and \
+                        msg.find("\n") < len(msg) - 1
+        if len(tag) + len(msg) < 76 and not msg_has_two_nl:
+            printer(" ")
+        else:
+            printer("\n")
+        printer(msg)
+        if not msg_has_nl:
+            printer("\n")
+
+# Run list of examples, in context globs.  "out" can be used to display
+# stuff to "the real" stdout, and fakeout is an instance of _SpoofOut
+# that captures the examples' std output.  Return (#failures, #tries).
+
+def _run_examples_inner(out, fakeout, examples, globs, verbose, name,
+                        compileflags):
+    """Execute each example in globs; report failures via out.
+
+    out is a callable writing to the real stdout; fakeout is the
+    _SpoofOut capturing the examples' output.  Returns
+    (#failures, #tries).
+    """
+    import sys, traceback
+    # Per-example outcome states.
+    OK, BOOM, FAIL = range(3)
+    NADA = "nothing"
+    stderr = _SpoofOut()
+    failures = 0
+    for source, want, lineno in examples:
+        if verbose:
+            _tag_out(out, ("Trying", source),
+                          ("Expecting", want or NADA))
+        fakeout.clear()
+        try:
+            # "single" mode so expression results are printed, exactly
+            # as in an interactive session.
+            exec compile(source, "<string>", "single",
+                         compileflags, 1) in globs
+            got = fakeout.get()
+            state = OK
+        except:
+            # See whether the exception was expected.
+            if want.find("Traceback (innermost last):\n") == 0 or \
+               want.find("Traceback (most recent call last):\n") == 0:
+                # Only compare exception type and value - the rest of
+                # the traceback isn't necessary.
+                want = want.split('\n')[-2] + '\n'
+                exc_type, exc_val = sys.exc_info()[:2]
+                got = traceback.format_exception_only(exc_type, exc_val)[-1]
+                state = OK
+            else:
+                # unexpected exception
+                stderr.clear()
+                traceback.print_exc(file=stderr)
+                state = BOOM
+
+        if state == OK:
+            if got == want:
+                if verbose:
+                    out("ok\n")
+                continue
+            # Ran cleanly but output didn't match.
+            state = FAIL
+
+        assert state in (FAIL, BOOM)
+        failures = failures + 1
+        out("*" * 65 + "\n")
+        _tag_out(out, ("Failure in example", source))
+        out("from line #" + `lineno` + " of " + name + "\n")
+        if state == FAIL:
+            _tag_out(out, ("Expected", want or NADA), ("Got", got))
+        else:
+            assert state == BOOM
+            _tag_out(out, ("Exception raised", stderr.get()))
+
+    return failures, len(examples)
+
+# Get the future-flags associated with the future features that have been
+# imported into globs.
+
+def _extract_future_flags(globs):
+    """Return the compiler flags for __future__ features imported in globs.
+
+    The result is passed to compile() so examples see the same future
+    statements as the module under test.
+    """
+    flags = 0
+    for fname in __future__.all_feature_names:
+        feature = globs.get(fname, None)
+        # Identity check: only count a genuine "from __future__ import"
+        # binding, not some unrelated name that happens to match.
+        if feature is getattr(__future__, fname):
+            flags |= feature.compiler_flag
+    return flags
+
+# Run list of examples, in a shallow copy of context (dict) globs.
+# Return (#failures, #tries).
+
+def _run_examples(examples, globs, verbose, name, compileflags):
+    """Run examples in a shallow copy of globs; return (#failures, #tries).
+
+    Temporarily replaces sys.stdout with a _SpoofOut so the examples'
+    output can be captured and compared.
+    """
+    import sys
+    saveout = sys.stdout
+    # Shallow copy so the examples can't pollute the caller's namespace.
+    globs = globs.copy()
+    try:
+        sys.stdout = fakeout = _SpoofOut()
+        x = _run_examples_inner(saveout.write, fakeout, examples,
+                                globs, verbose, name, compileflags)
+    finally:
+        sys.stdout = saveout
+        # While Python gc can clean up most cycles on its own, it doesn't
+        # chase frame objects.  This is especially irksome when running
+        # generator tests that raise exceptions, because a named generator-
+        # iterator gets an entry in globs, and the generator-iterator
+        # object's frame's traceback info points back to globs.  This is
+        # easy to break just by clearing the namespace.  This can also
+        # help to break other kinds of cycles, and even for cycles that
+        # gc can break itself it's better to break them ASAP.
+        globs.clear()
+    return x
+
+def run_docstring_examples(f, globs, verbose=0, name="NoName",
+                           compileflags=None):
+    """f, globs, verbose=0, name="NoName" -> run examples from f.__doc__.
+
+    Use (a shallow copy of) dict globs as the globals for execution.
+    Return (#failures, #tries).
+
+    If optional arg verbose is true, print stuff even if there are no
+    failures.
+    Use string name in failure msgs.
+    """
+
+    try:
+        doc = f.__doc__
+        if not doc:
+            # docstring empty or None
+            return 0, 0
+        # just in case CT invents a doc object that has to be forced
+        # to look like a string <0.9 wink>
+        doc = str(doc)
+    except:
+        # Bare except is deliberate: any failure to obtain a usable
+        # docstring is treated as "no tests here", not an error.
+        return 0, 0
+
+    e = _extract_examples(doc)
+    if not e:
+        return 0, 0
+    if compileflags is None:
+        # Inherit the __future__ statements visible in globs.
+        compileflags = _extract_future_flags(globs)
+    return _run_examples(e, globs, verbose, name, compileflags)
+
+def is_private(prefix, base):
+    """prefix, base -> true iff name prefix + "." + base is "private".
+
+    Prefix may be an empty string, and base does not contain a period.
+    Prefix is ignored (although functions you write conforming to this
+    protocol may make use of it).
+    Return true iff base begins with an (at least one) underscore, but
+    does not both begin and end with (at least) two underscores.
+
+    >>> is_private("a.b", "my_func")
+    0
+    >>> is_private("____", "_my_func")
+    1
+    >>> is_private("someclass", "__init__")
+    0
+    >>> is_private("sometypo", "__init_")
+    1
+    >>> is_private("x.y.z", "_")
+    1
+    >>> is_private("_x.y.z", "__")
+    0
+    >>> is_private("", "")  # senseless but consistent
+    0
+    """
+
+    # Leading underscore, but not a dunder like __init__ (which both
+    # begins and ends with two underscores).
+    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+# Determine if a class or function was defined in the given module.
+
+def _from_module(module, object):
+    """Return true iff object (a class or function) was defined in module."""
+    if _isfunction(object):
+        # A function's globals are its defining module's namespace.
+        return module.__dict__ is object.func_globals
+    if _isclass(object):
+        # Classes record their defining module's name.
+        return module.__name__ == object.__module__
+    raise ValueError("object must be a class or function")
+
+class Tester:
+    """Class Tester -- runs docstring examples and accumulates stats.
+
+In normal use, function doctest.testmod() hides all this from you,
+so use that if you can.  Create your own instances of Tester to do
+fancier things.
+
+Methods:
+    runstring(s, name)
+        Search string s for examples to run; use name for logging.
+        Return (#failures, #tries).
+
+    rundoc(object, name=None)
+        Search object.__doc__ for examples to run; use name (or
+        object.__name__) for logging.  Return (#failures, #tries).
+
+    rundict(d, name, module=None)
+        Search for examples in docstrings in all of d.values(); use name
+        for logging.  Exclude functions and classes not defined in module
+        if specified.  Return (#failures, #tries).
+
+    run__test__(d, name)
+        Treat dict d like module.__test__.  Return (#failures, #tries).
+
+    summarize(verbose=None)
+        Display summary of testing results, to stdout.  Return
+        (#failures, #tries).
+
+    merge(other)
+        Merge in the test results from Tester instance "other".
+
+>>> from doctest import Tester
+>>> t = Tester(globs={'x': 42}, verbose=0)
+>>> t.runstring(r'''
+...      >>> x = x * 2
+...      >>> print x
+...      42
+... ''', 'XYZ')
+*****************************************************************
+Failure in example: print x
+from line #2 of XYZ
+Expected: 42
+Got: 84
+(1, 2)
+>>> t.runstring(">>> x = x * 2\\n>>> print x\\n84\\n", 'example2')
+(0, 2)
+>>> t.summarize()
+*****************************************************************
+1 items had failures:
+   1 of   2 in XYZ
+***Test Failed*** 1 failures.
+(1, 4)
+>>> t.summarize(verbose=1)
+1 items passed all tests:
+   2 tests in example2
+*****************************************************************
+1 items had failures:
+   1 of   2 in XYZ
+4 tests in 2 items.
+3 passed and 1 failed.
+***Test Failed*** 1 failures.
+(1, 4)
+>>>
+"""
+
+    def __init__(self, mod=None, globs=None, verbose=None,
+                 isprivate=None):
+        """mod=None, globs=None, verbose=None, isprivate=None
+
+See doctest.__doc__ for an overview.
+
+Optional keyword arg "mod" is a module, whose globals are used for
+executing examples.  If not specified, globs must be specified.
+
+Optional keyword arg "globs" gives a dict to be used as the globals
+when executing examples; if not specified, use the globals from
+module mod.
+
+In either case, a copy of the dict is used for each docstring
+examined.
+
+Optional keyword arg "verbose" prints lots of stuff if true, only
+failures if false; by default, it's true iff "-v" is in sys.argv.
+
+Optional keyword arg "isprivate" specifies a function used to determine
+whether a name is private.  The default function is doctest.is_private;
+see its docs for details.
+"""
+
+        if mod is None and globs is None:
+            raise TypeError("Tester.__init__: must specify mod or globs")
+        if mod is not None and not _ismodule(mod):
+            raise TypeError("Tester.__init__: mod must be a module; " +
+                            `mod`)
+        if globs is None:
+            # Fall back to the module's namespace.
+            globs = mod.__dict__
+        self.globs = globs
+
+        if verbose is None:
+            # Default verbosity mirrors the command line.
+            import sys
+            verbose = "-v" in sys.argv
+        self.verbose = verbose
+
+        if isprivate is None:
+            isprivate = is_private
+        self.isprivate = isprivate
+
+        self.name2ft = {}   # map name to (#failures, #trials) pair
+
+        # Compile flags reflecting __future__ imports visible in globs.
+        self.compileflags = _extract_future_flags(globs)
+
+    def runstring(self, s, name):
+        """
+        s, name -> search string s for examples to run, logging as name.
+
+        Use string name as the key for logging the outcome.
+        Return (#failures, #examples).
+
+        >>> t = Tester(globs={}, verbose=1)
+        >>> test = r'''
+        ...    # just an example
+        ...    >>> x = 1 + 2
+        ...    >>> x
+        ...    3
+        ... '''
+        >>> t.runstring(test, "Example")
+        Running string Example
+        Trying: x = 1 + 2
+        Expecting: nothing
+        ok
+        Trying: x
+        Expecting: 3
+        ok
+        0 of 2 examples failed in string Example
+        (0, 2)
+        """
+
+        if self.verbose:
+            print "Running string", name
+        f = t = 0
+        e = _extract_examples(s)
+        if e:
+            f, t = _run_examples(e, self.globs, self.verbose, name,
+                                 self.compileflags)
+        if self.verbose:
+            print f, "of", t, "examples failed in string", name
+        # Accumulate the (#failures, #tries) stats under this name.
+        self.__record_outcome(name, f, t)
+        return f, t
+
+    def rundoc(self, object, name=None):
+        """
+        object, name=None -> search object.__doc__ for examples to run.
+
+        Use optional string name as the key for logging the outcome;
+        by default use object.__name__.
+        Return (#failures, #examples).
+        If object is a class object, search recursively for method
+        docstrings too.
+        object.__doc__ is examined regardless of name, but if object is
+        a class, whether private names reached from object are searched
+        depends on the constructor's "isprivate" argument.
+
+        >>> t = Tester(globs={}, verbose=0)
+        >>> def _f():
+        ...     '''Trivial docstring example.
+        ...     >>> assert 2 == 2
+        ...     '''
+        ...     return 32
+        ...
+        >>> t.rundoc(_f)  # expect 0 failures in 1 example
+        (0, 1)
+        """
+
+        if name is None:
+            try:
+                name = object.__name__
+            except AttributeError:
+                raise ValueError("Tester.rundoc: name must be given "
+                    "when object.__name__ doesn't exist; " + `object`)
+        if self.verbose:
+            print "Running", name + ".__doc__"
+        f, t = run_docstring_examples(object, self.globs, self.verbose, name,
+                                      self.compileflags)
+        if self.verbose:
+            print f, "of", t, "examples failed in", name + ".__doc__"
+        self.__record_outcome(name, f, t)
+        if _isclass(object):
+            # In 2.2, class and static methods complicate life.  Build
+            # a dict "that works", by hook or by crook.
+            d = {}
+            for tag, kind, homecls, value in _classify_class_attrs(object):
+
+                if homecls is not object:
+                    # Only look at names defined immediately by the class.
+                    continue
+
+                elif self.isprivate(name, tag):
+                    continue
+
+                elif kind == "method":
+                    # value is already a function
+                    d[tag] = value
+
+                elif kind == "static method":
+                    # value isn't a function, but getattr reveals one
+                    d[tag] = getattr(object, tag)
+
+                elif kind == "class method":
+                    # Hmm.  A classmethod object doesn't seem to reveal
+                    # enough.  But getattr turns it into a bound method,
+                    # and from there .im_func retrieves the underlying
+                    # function.
+                    d[tag] = getattr(object, tag).im_func
+
+                elif kind == "property":
+                    # The methods implementing the property have their
+                    # own docstrings -- but the property may have one too.
+                    if value.__doc__ is not None:
+                        d[tag] = str(value.__doc__)
+
+                elif kind == "data":
+                    # Grab nested classes.
+                    if _isclass(value):
+                        d[tag] = value
+
+                else:
+                    raise ValueError("teach doctest about %r" % kind)
+
+            # Recurse into the collected attributes via the __test__
+            # machinery and fold its stats into ours.
+            f2, t2 = self.run__test__(d, name)
+            f += f2
+            t += t2
+
+        return f, t
+
+    def rundict(self, d, name, module=None):
+        """
+        d, name, module=None -> search for docstring examples in d.values().
+
+        For k, v in d.items() such that v is a function or class,
+        do self.rundoc(v, name + "." + k).  Whether this includes
+        objects with private names depends on the constructor's
+        "isprivate" argument.  If module is specified, functions and
+        classes that are not defined in module are excluded.
+        Return aggregate (#failures, #examples).
+
+        Build and populate two modules with sample functions to test that
+        exclusion of external functions and classes works.
+
+        >>> import new
+        >>> m1 = new.module('_m1')
+        >>> m2 = new.module('_m2')
+        >>> test_data = \"""
+        ... def _f():
+        ...     '''>>> assert 1 == 1
+        ...     '''
+        ... def g():
+        ...    '''>>> assert 2 != 1
+        ...    '''
+        ... class H:
+        ...    '''>>> assert 2 > 1
+        ...    '''
+        ...    def bar(self):
+        ...        '''>>> assert 1 < 2
+        ...        '''
+        ... \"""
+        >>> exec test_data in m1.__dict__
+        >>> exec test_data in m2.__dict__
+        >>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})
+
+        Tests that objects outside m1 are excluded:
+
+        >>> t = Tester(globs={}, verbose=0)
+        >>> t.rundict(m1.__dict__, "rundict_test", m1)  # _f, f2 and g2 and h2 skipped
+        (0, 3)
+
+        Again, but with a custom isprivate function allowing _f:
+
+        >>> t = Tester(globs={}, verbose=0, isprivate=lambda x,y: 0)
+        >>> t.rundict(m1.__dict__, "rundict_test_pvt", m1)  # Only f2, g2 and h2 skipped
+        (0, 4)
+
+        And once more, not excluding stuff outside m1:
+
+        >>> t = Tester(globs={}, verbose=0, isprivate=lambda x,y: 0)
+        >>> t.rundict(m1.__dict__, "rundict_test_pvt")  # None are skipped.
+        (0, 8)
+
+        The exclusion of objects from outside the designated module is
+        meant to be invoked automagically by testmod.
+
+        >>> testmod(m1)
+        (0, 3)
+
+        """
+
+        if not hasattr(d, "items"):
+            raise TypeError("Tester.rundict: d must support .items(); " +
+                            `d`)
+        f = t = 0
+        # Run the tests by alpha order of names, for consistency in
+        # verbose-mode output.
+        names = d.keys()
+        names.sort()
+        for thisname in names:
+            value = d[thisname]
+            # Only functions and classes carry searchable docstrings.
+            if _isfunction(value) or _isclass(value):
+                if module and not _from_module(module, value):
+                    # Defined in some other module -- skip it.
+                    continue
+                f2, t2 = self.__runone(value, name + "." + thisname)
+                f = f + f2
+                t = t + t2
+        return f, t
+
+    def run__test__(self, d, name):
+        """d, name -> Treat dict d like module.__test__.
+
+        Return (#failures, #tries).
+        See testmod.__doc__ for details.
+        """
+
+        failures = tries = 0
+        prefix = name + "."
+        savepvt = self.isprivate
+        try:
+            # Names reached via __test__ are searched even if private,
+            # so temporarily disable the privacy filter (restored in
+            # the finally clause).
+            self.isprivate = lambda *args: 0
+            # Run the tests by alpha order of names, for consistency in
+            # verbose-mode output.
+            keys = d.keys()
+            keys.sort()
+            for k in keys:
+                v = d[k]
+                thisname = prefix + k
+                if type(v) in _StringTypes:
+                    # A bare string is searched as if it were a docstring.
+                    f, t = self.runstring(v, thisname)
+                elif _isfunction(v) or _isclass(v):
+                    f, t = self.rundoc(v, thisname)
+                else:
+                    raise TypeError("Tester.run__test__: values in "
+                            "dict must be strings, functions "
+                            "or classes; " + `v`)
+                failures = failures + f
+                tries = tries + t
+        finally:
+            self.isprivate = savepvt
+        return failures, tries
+
+    def summarize(self, verbose=None):
+        """
+        verbose=None -> summarize results, return (#failures, #tests).
+
+        Print summary of test results to stdout.
+        Optional arg 'verbose' controls how wordy this is.  By
+        default, use the verbose setting established by the
+        constructor.
+        """
+
+        if verbose is None:
+            verbose = self.verbose
+        # Partition every recorded item into no-tests / all-passed /
+        # had-failures buckets while accumulating the grand totals.
+        notests = []
+        passed = []
+        failed = []
+        totalt = totalf = 0
+        for x in self.name2ft.items():
+            name, (f, t) = x
+            assert f <= t
+            totalt = totalt + t
+            totalf = totalf + f
+            if t == 0:
+                notests.append(name)
+            elif f == 0:
+                passed.append( (name, t) )
+            else:
+                failed.append(x)
+        if verbose:
+            if notests:
+                print len(notests), "items had no tests:"
+                notests.sort()
+                for thing in notests:
+                    print "   ", thing
+            if passed:
+                print len(passed), "items passed all tests:"
+                passed.sort()
+                for thing, count in passed:
+                    print " %3d tests in %s" % (count, thing)
+        # Failures are always reported, regardless of verbosity.
+        if failed:
+            print "*" * 65
+            print len(failed), "items had failures:"
+            failed.sort()
+            for thing, (f, t) in failed:
+                print " %3d of %3d in %s" % (f, t, thing)
+        if verbose:
+            print totalt, "tests in", len(self.name2ft), "items."
+            print totalt - totalf, "passed and", totalf, "failed."
+        if totalf:
+            print "***Test Failed***", totalf, "failures."
+        elif verbose:
+            print "Test passed."
+        return totalf, totalt
+
    def merge(self, other):
        """
        other -> merge in test results from the other Tester instance.

        If self and other both have a test result for something
        with the same name, the (#failures, #tests) results are
        summed, and a warning is printed to stdout.

        >>> from doctest import Tester
        >>> t1 = Tester(globs={}, verbose=0)
        >>> t1.runstring('''
        ... >>> x = 12
        ... >>> print x
        ... 12
        ... ''', "t1example")
        (0, 2)
        >>>
        >>> t2 = Tester(globs={}, verbose=0)
        >>> t2.runstring('''
        ... >>> x = 13
        ... >>> print x
        ... 13
        ... ''', "t2example")
        (0, 2)
        >>> common = ">>> assert 1 + 2 == 3\\n"
        >>> t1.runstring(common, "common")
        (0, 1)
        >>> t2.runstring(common, "common")
        (0, 1)
        >>> t1.merge(t2)
        *** Tester.merge: 'common' in both testers; summing outcomes.
        >>> t1.summarize(1)
        3 items passed all tests:
           2 tests in common
           2 tests in t1example
           2 tests in t2example
        6 tests in 3 items.
        6 passed and 0 failed.
        Test passed.
        (0, 6)
        >>>
        """

        # Fold other's per-name (failures, tries) counts into ours,
        # summing (with a warning) when a name appears in both testers.
        d = self.name2ft
        for name, (f, t) in other.name2ft.items():
            if d.has_key(name):
                print "*** Tester.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
+
    def __record_outcome(self, name, f, t):
        """Record that name turned up f failures out of t tries.

        If the same name was recorded before, the counts are summed
        and a warning is printed to stdout.
        """
        if self.name2ft.has_key(name):
            print "*** Warning: '" + name + "' was tested before;", \
                "summing outcomes."
            f2, t2 = self.name2ft[name]
            f = f + f2
            t = t + t2
        self.name2ft[name] = f, t
+
+    def __runone(self, target, name):
+        if "." in name:
+            i = name.rindex(".")
+            prefix, base = name[:i], name[i+1:]
+        else:
+            prefix, base = "", base
+        if self.isprivate(prefix, base):
+            return 0, 0
+        return self.rundoc(target, name)
+
# Global Tester instance; each testmod() run is merged into it so that
# results accumulate across modules (see the docstring below).
master = None

def testmod(m, name=None, globs=None, verbose=None, isprivate=None,
               report=1):
    """m, name=None, globs=None, verbose=None, isprivate=None, report=1

    Test examples in docstrings in functions and classes reachable from
    module m, starting with m.__doc__.  Private names are skipped.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__dict__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See doctest.__doc__ for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private.  The default function is
    doctest.is_private; see its docs for details.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """

    global master

    if not _ismodule(m):
        raise TypeError("testmod: module required; " + `m`)
    if name is None:
        name = m.__name__
    tester = Tester(m, globs=globs, verbose=verbose, isprivate=isprivate)
    # Examples in the module docstring itself.
    failures, tries = tester.rundoc(m, name)
    # Examples in docstrings of the module's functions and classes.
    f, t = tester.rundict(m.__dict__, name, m)
    failures = failures + f
    tries = tries + t
    if hasattr(m, "__test__"):
        testdict = m.__test__
        if testdict:
            if not hasattr(testdict, "items"):
                raise TypeError("testmod: module.__test__ must support "
                                ".items(); " + `testdict`)
            # Extra tests registered in the module's __test__ mapping.
            f, t = tester.run__test__(testdict, name + ".__test__")
            failures = failures + f
            tries = tries + t
    if report:
        tester.summarize()
    # Merge this run into the module-global accumulator.
    if master is None:
        master = tester
    else:
        master.merge(tester)
    return failures, tries
+
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.

    Methods:
        square()
        get()

    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    # NOTE: the docstrings in this class are executable doctest examples
    # run by _test(); their exact text (including output) is significant.

    def __init__(self, val):
        """val -> _TestClass object with associated value val.

        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """

        self.val = val

    def square(self):
        """square() -> square TestClass's associated value

        >>> _TestClass(13).square().get()
        169
        """

        # Squares in place; returning self allows call chaining.
        self.val = self.val ** 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.

        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """

        return self.val
+
# Extra doctest targets: testmod() hands this mapping to
# Tester.run__test__; string values are searched for examples as-is,
# class values contribute their docstrings.
__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """
           }
+
def _test():
    """Self-test: run doctest's own examples (docstrings + __test__)."""
    import doctest
    return doctest.testmod(doctest)

if __name__ == "__main__":
    _test()
diff --git a/lib-python/2.2/dospath.py b/lib-python/2.2/dospath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dospath.py
@@ -0,0 +1,341 @@
+"""Common operations on DOS pathnames."""
+
+import os
+import stat
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+           "basename","dirname","commonprefix","getsize","getmtime",
+           "getatime","islink","exists","isdir","isfile","ismount",
+           "walk","expanduser","expandvars","normpath","abspath","realpath"]
+
def normcase(s):
    """Normalize the case of a pathname.

    Forward slashes become backslashes and all letters are forced to
    lower case, the canonical form on MS-DOS.  Structural cleanups such
    as collapsing '../' are left to normpath().  (An older version also
    collapsed runs of invalid characters to a single '_'; that behavior
    was removed and should perhaps become a separate function.)
    """

    backslashed = s.replace("/", "\\")
    return backslashed.lower()
+
+
def isabs(s):
    """Return whether a path is absolute.

    On DOS a path is absolute when, after any drive letter and colon
    are removed, it begins with a slash or backslash.
    """

    rest = splitdrive(s)[1]
    if rest:
        return rest[0] in '/\\'
    return 0
+
+
def join(a, *p):
    """Join two (or more) path components, inserting '\\' as needed.

    An absolute component resets the result; a component that follows a
    drive colon, slash, backslash or an empty result is appended
    directly; otherwise a backslash separator is inserted first.
    """

    result = a
    for part in p:
        if isabs(part):
            result = part
            continue
        if result == '' or result[-1:] in '/\\:':
            result = result + part
        else:
            result = result + "\\" + part
    return result
+
+
def splitdrive(p):
    """Split a path into a drive spec ('X:') and the remainder.

    Paths without a drive letter yield an empty drive spec.  It is
    always true that drivespec + pathspec == p.
    """

    if len(p) > 1 and p[1] == ':':
        return p[:2], p[2:]
    return '', p
+
+
def split(p):
    """Split a path into (head, tail) around the last slash.

    tail never contains a slash; trailing slashes are stripped from
    head unless head consists only of slashes (the root).  The drive
    spec, if any, stays attached to head, and join(head, tail) == p.
    """

    drive, rest = splitdrive(p)
    # Index just past the last slash (0 if there is none).
    cut = len(rest)
    while cut and rest[cut-1] not in '/\\':
        cut = cut - 1
    head, tail = rest[:cut], rest[cut:]
    # Drop trailing slashes from head, but keep an all-slash head intact.
    stripped = head
    while stripped and stripped[-1] in '/\\':
        stripped = stripped[:-1]
    if stripped:
        head = stripped
    return drive + head, tail
+
+
def splitext(p):
    """Split a path into (root, ext).

    The extension starts at the *first* dot of the last pathname
    component (unlike modern os.path, which uses the last dot); the
    root is everything before it.  root + ext == p always holds.
    """

    root = ext = ''
    for ch in p:
        if ch in '/\\':
            # Directory separator: any pending "extension" belonged to a
            # directory name, so fold it back into the root and restart.
            root = root + ext + ch
            ext = ''
        elif ch == '.' or ext:
            ext = ext + ch
        else:
            root = root + ch
    return root, ext
+
+
def basename(p):
    """Return the final pathname component (the tail) of p."""

    head, tail = split(p)
    return tail
+
+
def dirname(p):
    """Return everything up to the final component (the head) of p."""

    head, tail = split(p)
    return head
+
+
def commonprefix(m):
    """Return the longest string that is a prefix of every list element.

    The comparison is character-wise, not component-wise; an empty list
    yields ''.
    """

    if not m:
        return ''
    # Start from the first element and shrink it whenever another
    # element fails to start with the current candidate.
    prefix = m[0]
    for item in m:
        while prefix and item[:len(prefix)] != prefix:
            prefix = prefix[:-1]
        if not prefix:
            return ''
    return prefix
+
+
# Thin wrappers extracting single fields from an os.stat() result.

def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename)[stat.ST_SIZE]

def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename)[stat.ST_MTIME]

def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename)[stat.ST_ATIME]
+
+
def islink(path):
    """Is a path a symbolic link?

    DOS has no symbolic links (and posix.lstat is unavailable), so the
    answer is always false.
    """

    return 0
+
+
def exists(path):
    """Does a path exist?

    Reported false for anything os.stat() cannot reach (which would
    include dangling symbolic links on systems that have them).
    """

    try:
        os.stat(path)
    except os.error:
        return 0
    return 1
+
+
def isdir(path):
    """Is a path an existing directory?"""

    try:
        mode = os.stat(path)[stat.ST_MODE]
    except os.error:
        # Nonexistent or unreachable paths are simply "not a directory".
        return 0
    return stat.S_ISDIR(mode)
+
+
def isfile(path):
    """Is a path an existing regular file?"""

    try:
        mode = os.stat(path)[stat.ST_MODE]
    except os.error:
        # Nonexistent or unreachable paths are simply "not a file".
        return 0
    return stat.S_ISREG(mode)
+
+
def ismount(path):
    """Is a path a mount point?"""
    # XXX This degenerates in: 'is this the root?' on DOS

    rest = splitdrive(path)[1]
    return isabs(rest)
+
+
def walk(top, func, arg):
    """Directory tree walk with callback function.

    Calls func(arg, dirname, names) for top and for every directory
    below it, where names lists the directory's entries excluding '.'
    and '..'.  func may prune or reorder names in place (del, slice
    assignment) to control which subdirectories are visited.  arg is
    passed through to func unchanged; no other meaning is attached to
    it (a filename pattern or an accumulator object are typical, and
    None is common).  Directories that cannot be listed are skipped
    silently.
    """

    try:
        names = os.listdir(top)
    except os.error:
        return
    func(arg, top, names)
    for entry in names:
        if entry == '.' or entry == '..':
            continue
        full = join(top, entry)
        if isdir(full):
            walk(full, func, arg)
+
+
def expanduser(path):
    """Expand a path beginning with '~'.

    '~' is replaced by $HOME.  The '~user' form is not supported here,
    and when $HOME is unknown the path is returned unchanged (error
    reporting is left to whatever function is eventually called with
    the expanded path).  See module 'glob' for expansion of *, ? and
    [...] in pathnames.
    """

    if path[:1] != '~':
        return path
    # Find the end of the '~' or '~user' component.
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    if i != 1:
        # '~user': per-user home directories are not supported on DOS.
        return path
    # Idiom fix: 'key in dict' (available since Python 2.2) instead of
    # the deprecated dict.has_key().
    if 'HOME' not in os.environ:
        return path
    return os.environ['HOME'] + path[i:]
+
+
def expandvars(path):
    """Expand shell variable substitutions in path.

    The following rules apply:
        - no expansion within single quotes
        - no escape character, except for '$$' which is translated into '$'
        - ${varname} is accepted.
        - varnames can be made out of letters, digits and the character '_'
    Variables that are not set in os.environ expand to the empty string.
    """
    # XXX With COMMAND.COM you can use any characters in a variable name,
    # XXX except '^|<>='.

    if '$' not in path:
        return path
    import string
    # NOTE: '-' is also accepted in bare variable names, despite the
    # docstring above.
    varchars = string.ascii_letters + string.digits + "_-"
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # Unterminated quote: copy the rest verbatim and stop.
                res = res + path
                index = pathlen - 1
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    # Idiom fix: 'in' test instead of deprecated has_key().
                    if var in os.environ:
                        res = res + os.environ[var]
                except ValueError:
                    # No closing brace: copy the rest verbatim and stop.
                    res = res + path
                    index = pathlen - 1
            else:
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                # Idiom fix: 'in' test instead of deprecated has_key().
                if var in os.environ:
                    res = res + os.environ[var]
                if c != '':
                    res = res + c
        else:
            res = res + c
        index = index + 1
    return res
+
+
def normpath(path):
    """Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
    Also, components of the path are silently truncated to 8+3 notation."""

    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # Move any leading backslashes (the root marker) onto the prefix.
    while path[:1] == "\\":
        prefix = prefix + "\\"
        path = path[1:]
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] == '.':
            # '.' components are dropped outright.
            del comps[i]
        elif comps[i] == '..' and i > 0 and \
                      comps[i-1] not in ('', '..'):
            # A 'name\..' pair cancels out, except at the root ('') or
            # after another unresolved '..'.
            del comps[i-1:i+1]
            i = i - 1
        elif comps[i] == '' and i > 0 and comps[i-1] != '':
            # Empty components from doubled backslashes are dropped.
            del comps[i]
        elif '.' in comps[i]:
            # 8+3 truncation: keep at most 8 chars of the name and 3 of
            # the extension; anything after a second dot is discarded.
            comp = comps[i].split('.')
            comps[i] = comp[0][:8] + '.' + comp[1][:3]
            i = i + 1
        elif len(comps[i]) > 8:
            # Extensionless names are truncated to 8 characters.
            comps[i] = comps[i][:8]
            i = i + 1
        else:
            i = i + 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
+
+
+
def abspath(path):
    """Return a normalized, absolute version of path."""
    if isabs(path):
        return normpath(path)
    return normpath(join(os.getcwd(), path))

# realpath is a no-op on systems without islink support
realpath = abspath
diff --git a/lib-python/2.2/dumbdbm.py b/lib-python/2.2/dumbdbm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dumbdbm.py
@@ -0,0 +1,170 @@
+"""A dumb and slow but simple dbm clone.
+
+For database spam, spam.dir contains the index (a text file),
+spam.bak *may* contain a backup of the index (also a text file),
+while spam.dat contains the data (a binary file).
+
+XXX TO DO:
+
+- seems to contain a bug when updating...
+
+- reclaim free space (currently, space once occupied by deleted or expanded
+items is never reused)
+
+- support concurrent access (currently, if two processes take turns making
+updates, they can mess up the index)
+
+- support efficient access to large databases (currently, the whole index
+is read when the database is opened, and some updates rewrite the whole index)
+
+- support opening for read-only (flag = 'm')
+
+"""
+
import os as _os
import __builtin__

# Keep a reference to the real builtin open(); the module-level open()
# defined at the bottom of this file shadows the name.
_open = __builtin__.open

# Allocation granularity (in bytes) for values stored in the .dat file.
_BLOCKSIZE = 512

error = IOError                         # For anydbm
+
class _Database:
    """Dict-like object mapping string keys to string values.

    The index (key -> (pos, siz) offsets into the data file) lives in
    memory in self._index and is mirrored to the .dir text file; the
    values themselves live in the .dat binary file, allocated in
    _BLOCKSIZE-aligned chunks.
    """

    def __init__(self, file, mode):
        # mode: UNIX permission bits intended for newly created files.
        self._mode = mode
        # The three backing files: text index, binary data, index backup.
        self._dirfile = file + _os.extsep + 'dir'
        self._datfile = file + _os.extsep + 'dat'
        self._bakfile = file + _os.extsep + 'bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            # NOTE(review): the third argument of the builtin open() is
            # the buffer size, not a permission mode -- presumably the
            # file mode was intended here; confirm.
            f = _open(self._datfile, 'w', self._mode)
        f.close()
        self._update()

    def _update(self):
        """Reload the in-memory index from the .dir file, if present."""
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            while 1:
                line = f.readline().rstrip()
                if not line: break
                # Each line is "key, (pos, siz)" in repr form (written by
                # _commit/_addkey below); eval() parses it back.
                key, (pos, siz) = eval(line)
                self._index[key] = (pos, siz)
            f.close()

    def _commit(self):
        """Rewrite the .dir index file, keeping the old one as .bak."""
        try: _os.unlink(self._bakfile)
        except _os.error: pass
        try: _os.rename(self._dirfile, self._bakfile)
        except _os.error: pass
        # NOTE(review): third open() argument is buffer size, not mode.
        f = _open(self._dirfile, 'w', self._mode)
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __getitem__(self, key):
        pos, siz = self._index[key]     # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat

    def _addval(self, val):
        """Append val at the next block boundary; return (pos, len)."""
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = int(f.tell())
## Does not work under MW compiler
##              pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
##              f.seek(pos)
        # Pad with NULs up to the next _BLOCKSIZE boundary.
        npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
        f.write('\0'*(npos-pos))
        pos = npos

        f.write(val)
        f.close()
        return (pos, len(val))

    def _setval(self, pos, val):
        """Overwrite the value at pos in place; return (pos, len)."""
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))

    def _addkey(self, key, (pos, siz)):
        """Record a brand-new key both in memory and in the .dir file."""
        self._index[key] = (pos, siz)
        # NOTE(review): third open() argument is buffer size, not mode.
        f = _open(self._dirfile, 'a', self._mode)
        f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __setitem__(self, key, val):
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if not self._index.has_key(key):
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            # Reuse the existing slot when the new value fits in the same
            # number of blocks; otherwise append a fresh slot (the old
            # space is never reclaimed -- see the module docstring).
            # Note: '/' here is Python 2 integer division.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz

    def __delitem__(self, key):
        del self._index[key]
        # Deleting rewrites the whole index immediately.
        self._commit()

    def keys(self):
        return self._index.keys()

    def has_key(self, key):
        return self._index.has_key(key)

    def __contains__(self, key):
        return self._index.has_key(key)

    def iterkeys(self):
        return self._index.iterkeys()
    __iter__ = iterkeys

    def __len__(self):
        return len(self._index)

    def close(self):
        # Flush the index, then drop all state so further use fails fast.
        self._commit()
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None

    def __del__(self):
        if self._index is not None:
            self._commit()
+
+
# Module-level factory matching the open() signature of the other *dbm
# modules (anydbm passes flag and mode positionally).
def open(file, flag=None, mode=0666):
    """Open the database file, filename, and return corresponding object.

    The flag argument, used to control how the database is opened in the
    other DBM implementations, is ignored in the dumbdbm module; the
    database is always opened for update, and will be created if it does
    not exist.

    The optional mode argument is the UNIX mode of the file, used only when
    the database has to be created.  It defaults to octal code 0666 (and
    will be modified by the prevailing umask).

    """
    # flag, mode arguments are currently ignored
    return _Database(file, mode)
diff --git a/lib-python/2.2/email/Charset.py b/lib-python/2.2/email/Charset.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Charset.py
@@ -0,0 +1,393 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: che at debian.org (Ben Gertzfield), barry at zope.com (Barry Warsaw)
+
+from types import UnicodeType
+from email.Encoders import encode_7or8bit
+import email.base64MIME
+import email.quopriMIME
+
def _isunicode(s):
    """Return true iff s is a unicode string (a types.UnicodeType instance)."""
    return isinstance(s, UnicodeType)
+
# Python 2.2.1 and beyond has these symbols
# (older interpreters fall back to plain integer stand-ins).
try:
    True, False
except NameError:
    True = 1
    False = 0
+
+
+
# Flags for types of header encodings
QP     = 1   # Quoted-Printable
BASE64 = 2   # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers

# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7

DEFAULT_CHARSET = 'us-ascii'



# Defaults
# Maps canonical charset name -> (header encoding, body encoding, output
# conversion charset); None means no encoding / no conversion.  Extend it
# with add_charset() below.
CHARSETS = {
    # input        header enc  body enc output conv
    'iso-8859-1':  (QP,        QP,      None),
    'iso-8859-2':  (QP,        QP,      None),
    'iso-8859-3':  (QP,        QP,      None),
    'iso-8859-4':  (QP,        QP,      None),
    # iso-8859-5 is Cyrillic, and not especially used
    # iso-8859-6 is Arabic, also not particularly used
    # iso-8859-7 is Greek, QP will not make it readable
    # iso-8859-8 is Hebrew, QP will not make it readable
    'iso-8859-9':  (QP,        QP,      None),
    'iso-8859-10': (QP,        QP,      None),
    # iso-8859-11 is Thai, QP will not make it readable
    'iso-8859-13': (QP,        QP,      None),
    'iso-8859-14': (QP,        QP,      None),
    'iso-8859-15': (QP,        QP,      None),
    'windows-1252':(QP,        QP,      None),
    'viscii':      (QP,        QP,      None),
    'us-ascii':    (None,      None,    None),
    'big5':        (BASE64,    BASE64,  None),
    'gb2312':      (BASE64,    BASE64,  None),
    'euc-jp':      (BASE64,    None,    'iso-2022-jp'),
    'shift_jis':   (BASE64,    None,    'iso-2022-jp'),
    'iso-2022-jp': (BASE64,    None,    None),
    'koi8-r':      (BASE64,    BASE64,  None),
    'utf-8':       (SHORTEST,  BASE64, 'utf-8'),
    # We're making this one up to represent raw unencoded 8-bit
    '8bit':        (None,      BASE64, 'utf-8'),
    }
+
# Aliases for other commonly-used names for character sets.  Map
# them to the real ones used in email.  Consulted (lowercased) by
# Charset.__init__; extend it with add_alias() below.
ALIASES = {
    'latin_1': 'iso-8859-1',
    'latin-1': 'iso-8859-1',
    'latin_2': 'iso-8859-2',
    'latin-2': 'iso-8859-2',
    'latin_3': 'iso-8859-3',
    'latin-3': 'iso-8859-3',
    'latin_4': 'iso-8859-4',
    'latin-4': 'iso-8859-4',
    'latin_5': 'iso-8859-9',
    'latin-5': 'iso-8859-9',
    'latin_6': 'iso-8859-10',
    'latin-6': 'iso-8859-10',
    'latin_7': 'iso-8859-13',
    'latin-7': 'iso-8859-13',
    'latin_8': 'iso-8859-14',
    'latin-8': 'iso-8859-14',
    'latin_9': 'iso-8859-15',
    'latin-9': 'iso-8859-15',
    'cp949':   'ks_c_5601-1987',
    'euc_jp':  'euc-jp',
    'euc_kr':  'euc-kr',
    'ascii':   'us-ascii',
    }
+
+# Map charsets to their Unicode codec strings.  Note that Python doesn't come
+# with any Asian codecs by default.  Here's where to get them:
+#
+# Japanese -- http://www.asahi-net.or.jp/~rd6t-kjym/python
+# Korean   -- http://sf.net/projects/koco
+# Chinese  -- http://sf.net/projects/python-codecs
+#
+# Note that these codecs have their own lifecycle and may be in varying states
# of stability and usability.
+
# Maps canonical charset name -> Python Unicode codec name (None means
# "pass bytes through untouched").  Extend it with add_codec() below.
CODEC_MAP = {
    'euc-jp':      'japanese.euc-jp',
    'iso-2022-jp': 'japanese.iso-2022-jp',
    'shift_jis':   'japanese.shift_jis',
    'euc-kr':      'korean.euc-kr',
    'ks_c_5601-1987': 'korean.cp949',
    'iso-2022-kr': 'korean.iso-2022-kr',
    'johab':       'korean.johab',
    # Bug fix: this key was misspelled 'gb2132', making the entry
    # unreachable -- CHARSETS and ALIASES both use the canonical
    # name 'gb2312'.
    'gb2312':      'eucgb2312_cn',
    'big5':        'big5_tw',
    'utf-8':       'utf-8',
    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
    # Let that stuff pass through without conversion to/from Unicode.
    'us-ascii':    None,
    }
+
+
+
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Add character set properties to the global registry.

    charset is the input character set, and must be the canonical name of a
    character set.

    Optional header_enc and body_enc is either Charset.QP for
    quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
    the shortest of qp or base64 encoding, or None for no encoding.  SHORTEST
    is only valid for header_enc.  It describes how message headers and
    message bodies in the input charset are to be encoded.  Default is no
    encoding.

    Optional output_charset is the character set that the output should be
    in.  Conversions will proceed from input charset, to Unicode, to the
    output charset when the method Charset.convert() is called.  The default
    is to output in the same character set as the input.

    Both input_charset and output_charset must have Unicode codec entries in
    the module's charset-to-codec mapping; use add_codec(charset, codecname)
    to add codecs the module does not know about.  See the codecs module's
    documentation for more information.
    """
    # SHORTEST is a header-only strategy; bodies must commit to one encoding.
    if body_enc == SHORTEST:
        raise ValueError, 'SHORTEST not allowed for body_enc'
    CHARSETS[charset] = (header_enc, body_enc, output_charset)
+
+
def add_alias(alias, canonical):
    """Add a character set alias.

    alias is the alias name, e.g. latin-1
    canonical is the character set's canonical name, e.g. iso-8859-1
    """
    # Charset.__init__ consults this global table to resolve aliases.
    ALIASES[alias] = canonical
+
+
def add_codec(charset, codecname):
    """Add a codec that map characters in the given charset to/from Unicode.

    charset is the canonical name of a character set.  codecname is the name
    of a Python codec, as appropriate for the second argument to the unicode()
    built-in, or to the encode() method of a Unicode string.
    """
    # Charset.__init__ consults this global table to pick codecs.
    CODEC_MAP[charset] = codecname
+
+
+
+class Charset:
+    """Map character sets to their email properties.
+
+    This class provides information about the requirements imposed on email
+    for a specific character set.  It also provides convenience routines for
+    converting between character sets, given the availability of the
+    applicable codecs.  Given a character set, it will do its best to provide
+    information on how to use that character set in an email in an
+    RFC-compliant way.
+
+    Certain character sets must be encoded with quoted-printable or base64
+    when used in email headers or bodies.  Certain character sets must be
+    converted outright, and are not allowed in email.  Instances of this
+    module expose the following information about a character set:
+
+    input_charset: The initial character set specified.  Common aliases
+                   are converted to their `official' email names (e.g. latin_1
+                   is converted to iso-8859-1).  Defaults to 7-bit us-ascii.
+
+    header_encoding: If the character set must be encoded before it can be
+                     used in an email header, this attribute will be set to
+                     Charset.QP (for quoted-printable), Charset.BASE64 (for
+                     base64 encoding), or Charset.SHORTEST for the shortest of
+                     QP or BASE64 encoding.  Otherwise, it will be None.
+
+    body_encoding: Same as header_encoding, but describes the encoding for the
+                   mail message's body, which indeed may be different than the
+                   header encoding.  Charset.SHORTEST is not allowed for
+                   body_encoding.
+
+    output_charset: Some character sets must be converted before the can be
+                    used in email headers or bodies.  If the input_charset is
+                    one of them, this attribute will contain the name of the
+                    charset output will be converted to.  Otherwise, it will
+                    be None.
+
+    input_codec: The name of the Python codec used to convert the
+                 input_charset to Unicode.  If no conversion codec is
+                 necessary, this attribute will be None.
+
+    output_codec: The name of the Python codec used to convert Unicode
+                  to the output_charset.  If no conversion codec is necessary,
+                  this attribute will have the same value as the input_codec.
+    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        """Create a Charset description for *input_charset*.

        The name is lower-cased and normalized through the module-level
        ALIASES table; the header/body encodings, the conversion target
        charset and the input/output codec names are then derived from the
        CHARSETS and CODEC_MAP tables, with fallbacks when the charset is
        not listed there.
        """
        # RFC 2046, $4.1.2 says charsets are not case sensitive
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                            self.input_codec)
+
+    def __str__(self):
+        return self.input_charset.lower()
+
+    __repr__ = __str__
+
+    def __eq__(self, other):
+        return str(self) == str(other).lower()
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending on
        the encoding used, or it is a function in which case you should call
        the function with a single argument, the Message object being
        encoded.  The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns the encode_7or8bit function otherwise.
        NOTE(review): encode_7or8bit is not defined in this method;
        presumably it is imported from email.Encoders at module top --
        confirm.
        """
        # SHORTEST only makes sense for headers, never for bodies.
        assert self.body_encoding <> SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            return encode_7or8bit
+
+    def convert(self, s):
+        """Convert a string from the input_codec to the output_codec."""
+        if self.input_codec <> self.output_codec:
+            return unicode(s, self.input_codec).encode(self.output_codec)
+        else:
+            return s
+
+    def to_splittable(self, s):
+        """Convert a possibly multibyte string to a safely splittable format.
+
+        Uses the input_codec to try and convert the string to Unicode, so it
+        can be safely split on character boundaries (even for multibyte
+        characters).
+
+        Returns the string as-is if it isn't known how to convert it to
+        Unicode with the input_charset.
+
+        Characters that could not be converted to Unicode will be replaced
+        with the Unicode replacement character U+FFFD.
+        """
+        if _isunicode(s) or self.input_codec is None:
+            return s
+        try:
+            return unicode(s, self.input_codec, 'replace')
+        except LookupError:
+            # Input codec not installed on system, so return the original
+            # string unchanged.
+            return s
+
+    def from_splittable(self, ustr, to_output=True):
+        """Convert a splittable string back into an encoded string.
+
+        Uses the proper codec to try and convert the string from Unicode back
+        into an encoded format.  Return the string as-is if it is not Unicode,
+        or if it could not be converted from Unicode.
+
+        Characters that could not be converted from Unicode will be replaced
+        with an appropriate character (usually '?').
+
+        If to_output is True (the default), uses output_codec to convert to an
+        encoded format.  If to_output is False, uses input_codec.
+        """
+        if to_output:
+            codec = self.output_codec
+        else:
+            codec = self.input_codec
+        if not _isunicode(ustr) or codec is None:
+            return ustr
+        try:
+            return ustr.encode(codec, 'replace')
+        except LookupError:
+            # Output codec not installed
+            return ustr
+
+    def get_output_charset(self):
+        """Return the output character set.
+
+        This is self.output_charset if that is not None, otherwise it is
+        self.input_charset.
+        """
+        return self.output_charset or self.input_charset
+
    def encoded_header_len(self, s):
        """Return the length of the encoded header string.

        For QP and base64 the result includes the output charset name plus
        the fixed MISC_LEN overhead (presumably the RFC 2047 '=?...?=' chrome
        -- confirm against the module-level constant); SHORTEST takes the
        smaller of the two encodings.  Unencoded headers cost just len(s).
        """
        cset = self.get_output_charset()
        # The len(s) of a 7bit encoding is len(s)
        if self.header_encoding == BASE64:
            return email.base64MIME.base64_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == QP:
            return email.quopriMIME.header_quopri_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == SHORTEST:
            # Measure both candidate encodings and charge for the smaller.
            lenb64 = email.base64MIME.base64_len(s)
            lenqp = email.quopriMIME.header_quopri_len(s)
            return min(lenb64, lenqp) + len(cset) + MISC_LEN
        else:
            return len(s)
+
    def header_encode(self, s, convert=False):
        """Header-encode a string, optionally converting it to output_charset.

        If convert is True, the string will be converted from the input
        charset to the output charset automatically.  This is not useful for
        multibyte character sets, which have line length issues (multibyte
        characters must be split on a character, not a byte boundary); use the
        high-level Header class to deal with these issues.  convert defaults
        to False.

        The type of encoding (base64 or quoted-printable) will be based on
        self.header_encoding.
        """
        cset = self.get_output_charset()
        if convert:
            s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        if self.header_encoding == BASE64:
            return email.base64MIME.header_encode(s, cset)
        elif self.header_encoding == QP:
            return email.quopriMIME.header_encode(s, cset, maxlinelen=None)
        elif self.header_encoding == SHORTEST:
            # Measure both candidate encodings and emit whichever is smaller.
            lenb64 = email.base64MIME.base64_len(s)
            lenqp = email.quopriMIME.header_quopri_len(s)
            if lenb64 < lenqp:
                return email.base64MIME.header_encode(s, cset)
            else:
                return email.quopriMIME.header_encode(s, cset, maxlinelen=None)
        else:
            return s
+
+    def body_encode(self, s, convert=True):
+        """Body-encode a string and convert it to output_charset.
+
+        If convert is True (the default), the string will be converted from
+        the input charset to output charset automatically.  Unlike
+        header_encode(), there are no issues with byte boundaries and
+        multibyte charsets in email bodies, so this is usually pretty safe.
+
+        The type of encoding (base64 or quoted-printable) will be based on
+        self.body_encoding.
+        """
+        if convert:
+            s = self.convert(s)
+        # 7bit/8bit encodings return the string unchanged (module conversions)
+        if self.body_encoding is BASE64:
+            return email.base64MIME.body_encode(s)
+        elif self.body_encoding is QP:
+            return email.quopriMIME.body_encode(s)
+        else:
+            return s
diff --git a/lib-python/2.2/email/Encoders.py b/lib-python/2.2/email/Encoders.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Encoders.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Module containing encoding functions for Image.Image and Text.Text.
+"""
+
+import base64
+
+
+
+# Helpers
try:
    # Python 2.2+: quopri.encodestring() does almost everything we need.
    from quopri import encodestring as _encodestring

    def _qencode(s):
        # Quoted-printable-encode s, additionally encoding spaces.
        enc = _encodestring(s, quotetabs=1)
        # Must encode spaces, which quopri.encodestring() doesn't do
        return enc.replace(' ', '=20')
except ImportError:
    # Python 2.1 doesn't have quopri.encodestring()
    from cStringIO import StringIO
    import quopri as _quopri

    def _qencode(s):
        if not s:
            return s
        # Remember whether the input ended with a newline so we can strip
        # the extra one encode() may append.
        hasnewline = (s[-1] == '\n')
        infp = StringIO(s)
        outfp = StringIO()
        _quopri.encode(infp, outfp, quotetabs=1)
        # Python 2.x's encode() doesn't encode spaces even when quotetabs==1
        value = outfp.getvalue().replace(' ', '=20')
        if not hasnewline and value[-1] == '\n':
            return value[:-1]
        return value
+
+
def _bencode(s):
    """Base64-encode s, suppressing base64.encodestring()'s "courtesy
    newline" unless the input itself ended with one."""
    if not s:
        return s
    encoded = base64.encodestring(s)
    # Strip the trailing newline the library appends, but only when the
    # original string didn't already end with one.
    if s[-1] != '\n' and encoded[-1] == '\n':
        return encoded[:-1]
    return encoded
+
+
+
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    msg.set_payload(_bencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'base64'
+
+
+
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    msg.set_payload(_qencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
+
+
+
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    payload = msg.get_payload()
    if payload is None:
        # No payload at all; keep the historical default of 7bit.
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # Fast probe: if the payload encodes cleanly as ASCII it must be
    # 7bit data; any failure means we have to declare it 8bit.
    try:
        payload.encode('ascii')
    except UnicodeError:
        cte = '8bit'
    else:
        cte = '7bit'
    msg['Content-Transfer-Encoding'] = cte
+
+
+
def encode_noop(msg):
    """Deliberately leave the message's payload and headers untouched."""
diff --git a/lib-python/2.2/email/Errors.py b/lib-python/2.2/email/Errors.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Errors.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""email package exception classes.
+"""
+
+
+
# Root of the email package's exception hierarchy.
class MessageError(Exception):
    """Base class for errors in the email package."""
+
+
# Raised (or subclassed) when a message cannot be parsed.
class MessageParseError(MessageError):
    """Base class for message parsing errors."""
+
+
# Parsing failed inside a header block specifically.
class HeaderParseError(MessageParseError):
    """Error while parsing headers."""
+
+
# A multipart's terminating boundary was never found.
class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""
+
+
# Also subclasses TypeError so it can be caught by code expecting one.
class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""
diff --git a/lib-python/2.2/email/Generator.py b/lib-python/2.2/email/Generator.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Generator.py
@@ -0,0 +1,378 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Classes to generate plain text from a message object tree.
+"""
+
+import re
+import sys
+import time
+import locale
+import random
+
+from types import ListType, StringType
+from cStringIO import StringIO
+
+from email.Header import Header
+from email.Parser import NLCRE
+
+try:
+    from email._compat22 import _isstring
+except SyntaxError:
+    from email._compat21 import _isstring
+
try:
    True, False
except NameError:
    # Python 2.2.0 and older have no boolean literals; fake them with ints.
    True = 1
    False = 0

# String constants used when assembling the flattened output.
EMPTYSTRING = ''
SEMISPACE = '; '
BAR = '|'
UNDERSCORE = '_'
NL = '\n'
NLTAB = '\n\t'
SEMINLTAB = ';\n\t'
SPACE8 = ' ' * 8

# Matches "From " at the start of any line; _handle_text() uses this to
# mangle body lines that could be mistaken for mbox envelope delimiters.
fcre = re.compile(r'^From ', re.MULTILINE)
+
def _is8bitstring(s):
    # Only byte strings can carry raw 8bit data; unicode objects never do.
    if not isinstance(s, StringType):
        return False
    try:
        unicode(s, 'us-ascii')
    except UnicodeError:
        # Decoding as ASCII failed, so 8bit characters must be present.
        return True
    return False
+
+
+
class Generator:
    """Generates output from a Message object tree.

    This basic generator writes the message to the given file object as plain
    text.
    """
    #
    # Public interface
    #

    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
        """Create the generator for message flattening.

        outfp is the output file-like object for writing the message to.  It
        must have a write() method.

        Optional mangle_from_ is a flag that, when True (the default), escapes
        From_ lines in the body of the message by putting a `>' in front of
        them.

        Optional maxheaderlen specifies the longest length for a non-continued
        header.  When a header line is longer (in characters, with tabs
        expanded to 8 spaces), than maxheaderlen, the header will be broken on
        semicolons and continued as per RFC 2822.  If no semicolon is found,
        then the header is left alone.  Set to zero to disable wrapping
        headers.  Default is 78, as recommended (but not required) by RFC
        2822.
        """
        self._fp = outfp
        self._mangle_from_ = mangle_from_
        self.__maxheaderlen = maxheaderlen

    def write(self, s):
        # Just delegate to the file object
        self._fp.write(s)

    def flatten(self, msg, unixfrom=False):
        """Print the message object tree rooted at msg to the output file
        specified when the Generator instance was created.

        unixfrom is a flag that forces the printing of a Unix From_ delimiter
        before the first object in the message tree.  If the original message
        has no From_ delimiter, a `standard' one is crafted.  By default, this
        is False to inhibit the printing of any From_ delimiter.

        Note that for subobjects, no From_ line is printed.
        """
        if unixfrom:
            ufrom = msg.get_unixfrom()
            if not ufrom:
                # No envelope on the original message; craft a standard one.
                ufrom = 'From nobody ' + time.ctime(time.time())
            print >> self._fp, ufrom
        self._write(msg)

    # For backwards compatibility, but this is slower
    __call__ = flatten

    def clone(self, fp):
        """Clone this generator with the exact same options."""
        return self.__class__(fp, self._mangle_from_, self.__maxheaderlen)

    #
    # Protected interface - undocumented ;/
    #

    def _write(self, msg):
        # We can't write the headers yet because of the following scenario:
        # say a multipart message includes the boundary string somewhere in
        # its body.  We'd have to calculate the new boundary /before/ we write
        # the headers so that we can write the correct Content-Type:
        # parameter.
        #
        # The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO.  Then we write the
        # headers and the StringIO contents.  That way, subpart handlers can
        # Do The Right Thing, and can still modify the Content-Type: header if
        # necessary.
        oldfp = self._fp
        try:
            self._fp = sfp = StringIO()
            self._dispatch(msg)
        finally:
            # Always restore the real output stream, even if dispatch raised.
            self._fp = oldfp
        # Write the headers.  First we see if the message object wants to
        # handle that itself.  If not, we'll do it generically.
        meth = getattr(msg, '_write_headers', None)
        if meth is None:
            self._write_headers(msg)
        else:
            meth(self)
        self._fp.write(sfp.getvalue())

    def _dispatch(self, msg):
        # Get the Content-Type: for the message, then try to dispatch to
        # self._handle_<maintype>_<subtype>().  If there's no handler for the
        # full MIME type, then dispatch to self._handle_<maintype>().  If
        # that's missing too, then dispatch to self._writeBody().
        main = msg.get_content_maintype()
        sub = msg.get_content_subtype()
        specific = UNDERSCORE.join((main, sub)).replace('-', '_')
        meth = getattr(self, '_handle_' + specific, None)
        if meth is None:
            generic = main.replace('-', '_')
            meth = getattr(self, '_handle_' + generic, None)
            if meth is None:
                meth = self._writeBody
        meth(msg)

    #
    # Default handlers
    #

    def _write_headers(self, msg):
        for h, v in msg.items():
            # "Header: " prefix; the trailing comma keeps the value on the
            # same output line.
            print >> self._fp, '%s:' % h,
            if self.__maxheaderlen == 0:
                # Explicit no-wrapping
                print >> self._fp, v
            elif isinstance(v, Header):
                # Header instances know what to do
                print >> self._fp, v.encode()
            elif _is8bitstring(v):
                # If we have raw 8bit data in a byte string, we have no idea
                # what the encoding is.  There is no safe way to split this
                # string.  If it's ascii-subset, then we could do a normal
                # ascii split, but if it's multibyte then we could break the
                # string.  There's no way to know so the least harm seems to
                # be to not split the string and risk it being too long.
                print >> self._fp, v
            else:
                # Header's got lots of smarts, so use it.
                print >> self._fp, Header(
                    v, maxlinelen=self.__maxheaderlen,
                    header_name=h, continuation_ws='\t').encode()
        # A blank line always separates headers from body
        print >> self._fp

    #
    # Handlers for writing types and subtypes
    #

    def _handle_text(self, msg):
        payload = msg.get_payload()
        if payload is None:
            return
        cset = msg.get_charset()
        if cset is not None:
            payload = cset.body_encode(payload)
        if not _isstring(payload):
            raise TypeError, 'string payload expected: %s' % type(payload)
        if self._mangle_from_:
            # Escape body lines that would look like mbox envelope delimiters.
            payload = fcre.sub('>From ', payload)
        self._fp.write(payload)

    # Default body handler
    _writeBody = _handle_text

    def _handle_multipart(self, msg):
        # The trick here is to write out each part separately, merge them all
        # together, and then make sure that the boundary we've chosen isn't
        # present in the payload.
        msgtexts = []
        subparts = msg.get_payload()
        if subparts is None:
            # Nothing has ever been attached
            boundary = msg.get_boundary(failobj=_make_boundary())
            print >> self._fp, '--' + boundary
            print >> self._fp, '\n'
            print >> self._fp, '--' + boundary + '--'
            return
        elif _isstring(subparts):
            # e.g. a non-strict parse of a message with no starting boundary.
            self._fp.write(subparts)
            return
        elif not isinstance(subparts, ListType):
            # Scalar payload
            subparts = [subparts]
        for part in subparts:
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            msgtexts.append(s.getvalue())
        # Now make sure the boundary we've selected doesn't appear in any of
        # the message texts.
        alltext = NL.join(msgtexts)
        # BAW: What about boundaries that are wrapped in double-quotes?
        boundary = msg.get_boundary(failobj=_make_boundary(alltext))
        # If we had to calculate a new boundary because the body text
        # contained that string, set the new boundary.  We don't do it
        # unconditionally because, while set_boundary() preserves order, it
        # doesn't preserve newlines/continuations in headers.  This is no big
        # deal in practice, but turns out to be inconvenient for the unittest
        # suite.
        if msg.get_boundary() <> boundary:
            msg.set_boundary(boundary)
        # Write out any preamble
        if msg.preamble is not None:
            self._fp.write(msg.preamble)
            # If preamble is the empty string, the length of the split will be
            # 1, but the last element will be the empty string.  If it's
            # anything else but does not end in a line separator, the length
            # will be > 1 and not end in an empty string.  We need to
            # guarantee a newline after the preamble, but don't add too many.
            plines = NLCRE.split(msg.preamble)
            if plines <> [''] and plines[-1] <> '':
                self._fp.write('\n')
        # First boundary is a bit different; it doesn't have a leading extra
        # newline.
        print >> self._fp, '--' + boundary
        # Join and write the individual parts
        joiner = '\n--' + boundary + '\n'
        self._fp.write(joiner.join(msgtexts))
        print >> self._fp, '\n--' + boundary + '--',
        # Write out any epilogue
        if msg.epilogue is not None:
            if not msg.epilogue.startswith('\n'):
                print >> self._fp
            self._fp.write(msg.epilogue)

    def _handle_message_delivery_status(self, msg):
        # We can't just write the headers directly to self's file object
        # because this will leave an extra newline between the last header
        # block and the boundary.  Sigh.
        blocks = []
        for part in msg.get_payload():
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            text = s.getvalue()
            lines = text.split('\n')
            # Strip off the unnecessary trailing empty line
            if lines and lines[-1] == '':
                blocks.append(NL.join(lines[:-1]))
            else:
                blocks.append(text)
        # Now join all the blocks with an empty line.  This has the lovely
        # effect of separating each block with an empty line, but not adding
        # an extra one after the last one.
        self._fp.write(NL.join(blocks))

    def _handle_message(self, msg):
        s = StringIO()
        g = self.clone(s)
        # The payload of a message/rfc822 part should be a multipart sequence
        # of length 1.  The zeroth element of the list should be the Message
        # object for the subpart.  Extract that object, stringify it, and
        # write it out.
        g.flatten(msg.get_payload(0), unixfrom=False)
        self._fp.write(s.getvalue())
+
+
+
class DecodedGenerator(Generator):
    """Generate a text representation of a message.

    Like the Generator base class, except that non-text parts are substituted
    with a format string representing the part.
    """
    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
        """Like Generator.__init__() except that an additional optional
        argument is allowed.

        Walks through all subparts of a message.  If the subpart is of main
        type `text', then it prints the decoded payload of the subpart.

        Otherwise, fmt is a format string that is used instead of the message
        payload.  fmt is expanded with the following keywords (in
        %(keyword)s format):

        type       : Full MIME type of the non-text part
        maintype   : Main MIME type of the non-text part
        subtype    : Sub-MIME type of the non-text part
        filename   : Filename of the non-text part
        description: Description associated with the non-text part
        encoding   : Content transfer encoding of the non-text part

        The default value for fmt is None, meaning

        [Non-text (%(type)s) part of message omitted, filename %(filename)s]
        """
        Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
        if fmt is None:
            fmt = ('[Non-text (%(type)s) part of message omitted, '
                   'filename %(filename)s]')
        self._fmt = fmt

    def _dispatch(self, msg):
        # Walk the entire tree: text parts are printed decoded, multiparts
        # are mere containers and get skipped, and everything else is
        # replaced by the expanded format string.
        for part in msg.walk():
            maintype = part.get_main_type('text')
            if maintype == 'text':
                print >> self, part.get_payload(decode=True)
            elif maintype == 'multipart':
                # Just skip this
                pass
            else:
                print >> self, self._fmt % {
                    'type'       : part.get_type('[no MIME type]'),
                    'maintype'   : part.get_main_type('[no main MIME type]'),
                    'subtype'    : part.get_subtype('[no sub-MIME type]'),
                    'filename'   : part.get_filename('[no filename]'),
                    'description': part.get('Content-Description',
                                            '[no description]'),
                    'encoding'   : part.get('Content-Transfer-Encoding',
                                            '[no encoding]'),
                    }
+
+
+
+# Helper
# Digit width of the largest random token, and a zero-padded format
# string for rendering one (used by _make_boundary()).
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
+
def _make_boundary(text=None):
    """Craft a random MIME boundary.

    If text is given, ensure the chosen boundary doesn't already appear
    anywhere in it (on a line of its own, with the '--' decorations).
    """
    token = random.randint(0, sys.maxint-1)
    boundary = ('=' * 15) + (_fmt % token) + '=='
    if text is None:
        return boundary
    # Append ".0", ".1", ... until the candidate no longer collides.
    candidate = boundary
    counter = 0
    while True:
        cre = re.compile('^--' + re.escape(candidate) + '(--)?$',
                         re.MULTILINE)
        if not cre.search(text):
            return candidate
        candidate = boundary + '.' + str(counter)
        counter += 1
diff --git a/lib-python/2.2/email/Header.py b/lib-python/2.2/email/Header.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Header.py
@@ -0,0 +1,515 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: che at debian.org (Ben Gertzfield), barry at zope.com (Barry Warsaw)
+
+"""Header encoding and decoding functionality."""
+
+import re
+import binascii
+from types import StringType, UnicodeType
+
+import email.quopriMIME
+import email.base64MIME
+from email.Errors import HeaderParseError
+from email.Charset import Charset
+
+try:
+    from email._compat22 import _floordiv
+except SyntaxError:
+    # Python 2.1 spells integer division differently
+    from email._compat21 import _floordiv
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+CRLFSPACE = '\r\n '
+CRLF = '\r\n'
+NL = '\n'
+SPACE = ' '
+USPACE = u' '
+SPACE8 = ' ' * 8
+EMPTYSTRING = ''
+UEMPTYSTRING = u''
+
+MAXLINELEN = 76
+
+ENCODE = 1
+DECODE = 2
+
+USASCII = Charset('us-ascii')
+UTF8 = Charset('utf-8')
+
+# Match encoded-word strings in the form =?charset?q?Hello_World?=
+ecre = re.compile(r'''
+  =\?                   # literal =?
+  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
+  \?                    # literal ?
+  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
+  \?                    # literal ?
+  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
+  \?=                   # literal ?=
+  ''', re.VERBOSE | re.IGNORECASE)
+
+pcre = re.compile('([,;])')
+
+# Field name regexp, including trailing colon, but not separating whitespace,
+# according to RFC 2822.  Character range is from tilde to exclamation mark.
+# For use with .match()
+fcre = re.compile(r'[\041-\176]+:$')
+
+
+
+# Helpers
+_max_append = email.quopriMIME._max_append
+
+
+
+def decode_header(header):
+    """Decode a message header value without converting charset.
+
+    Returns a list of (decoded_string, charset) pairs containing each of the
+    decoded parts of the header.  Charset is None for non-encoded parts of the
+    header, otherwise a lower-case string containing the name of the character
+    set specified in the encoded string.
+
+    An email.Errors.HeaderParseError may be raised when certain decoding error
+    occurs (e.g. a base64 decoding exception).
+    """
+    # If no encoding, just return the header
+    header = str(header)
+    if not ecre.search(header):
+        return [(header, None)]
+    decoded = []
+    dec = ''
+    for line in header.splitlines():
+        # This line might not have an encoding in it
+        if not ecre.search(line):
+            decoded.append((line, None))
+            continue
+        parts = ecre.split(line)
+        while parts:
+            unenc = parts.pop(0).strip()
+            if unenc:
+                # Should we continue a long line?
+                if decoded and decoded[-1][1] is None:
+                    decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
+                else:
+                    decoded.append((unenc, None))
+            if parts:
+                charset, encoding = [s.lower() for s in parts[0:2]]
+                encoded = parts[2]
+                dec = None
+                if encoding == 'q':
+                    dec = email.quopriMIME.header_decode(encoded)
+                elif encoding == 'b':
+                    try:
+                        dec = email.base64MIME.decode(encoded)
+                    except binascii.Error:
+                        # Turn this into a higher level exception.  BAW: Right
+                        # now we throw the lower level exception away but
+                        # when/if we get exception chaining, we'll preserve it.
+                        raise HeaderParseError
+                if dec is None:
+                    dec = encoded
+
+                if decoded and decoded[-1][1] == charset:
+                    decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
+                else:
+                    decoded.append((dec, charset))
+            del parts[0:3]
+    return decoded
+
+
+
+def make_header(decoded_seq, maxlinelen=None, header_name=None,
+                continuation_ws=' '):
+    """Create a Header from a sequence of pairs as returned by decode_header()
+
+    decode_header() takes a header value string and returns a sequence of
+    pairs of the format (decoded_string, charset) where charset is the string
+    name of the character set.
+
+    This function takes one of those sequence of pairs and returns a Header
+    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
+    the Header constructor.
+    """
+    h = Header(maxlinelen=maxlinelen, header_name=header_name,
+               continuation_ws=continuation_ws)
+    for s, charset in decoded_seq:
+        # None means us-ascii but we can simply pass it on to h.append()
+        if charset is not None and not isinstance(charset, Charset):
+            charset = Charset(charset)
+        h.append(s, charset)
+    return h
+
+
+
+class Header:
+    def __init__(self, s=None, charset=None,
+                 maxlinelen=None, header_name=None,
+                 continuation_ws=' ', errors='strict'):
+        """Create a MIME-compliant header that can contain many character sets.
+
+        Optional s is the initial header value.  If None, the initial header
+        value is not set.  You can later append to the header with .append()
+        method calls.  s may be a byte string or a Unicode string, but see the
+        .append() documentation for semantics.
+
+        Optional charset serves two purposes: it has the same meaning as the
+        charset argument to the .append() method.  It also sets the default
+        character set for all subsequent .append() calls that omit the charset
+        argument.  If charset is not provided in the constructor, the us-ascii
+        charset is used both as s's initial charset and as the default for
+        subsequent .append() calls.
+
+        The maximum line length can be specified explicitly via maxlinelen.  For
+        splitting the first line to a shorter value (to account for the field
+        header which isn't included in s, e.g. `Subject') pass in the name of
+        the field in header_name.  The default maxlinelen is 76.
+
+        continuation_ws must be RFC 2822 compliant folding whitespace (usually
+        either a space or a hard tab) which will be prepended to continuation
+        lines.
+
+        errors is passed through to the .append() call.
+        """
+        if charset is None:
+            charset = USASCII
+        if not isinstance(charset, Charset):
+            charset = Charset(charset)
+        self._charset = charset
+        self._continuation_ws = continuation_ws
+        cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
+        # BAW: I believe `chunks' and `maxlinelen' should be non-public.
+        self._chunks = []
+        if s is not None:
+            self.append(s, charset, errors)
+        if maxlinelen is None:
+            maxlinelen = MAXLINELEN
+        if header_name is None:
+            # We don't know anything about the field header so the first line
+            # is the same length as subsequent lines.
+            self._firstlinelen = maxlinelen
+        else:
+            # The first line should be shorter to take into account the field
+            # header.  Also subtract off 2 extra for the colon and space.
+            self._firstlinelen = maxlinelen - len(header_name) - 2
+        # Second and subsequent lines should subtract off the length in
+        # columns of the continuation whitespace prefix.
+        self._maxlinelen = maxlinelen - cws_expanded_len
+
+    def __str__(self):
+        """A synonym for self.encode()."""
+        return self.encode()
+
+    def __unicode__(self):
+        """Helper for the built-in unicode function."""
+        uchunks = []
+        lastcs = None
+        for s, charset in self._chunks:
+            # We must preserve spaces between encoded and non-encoded word
+            # boundaries, which means for us we need to add a space when we go
+            # from a charset to None/us-ascii, or from None/us-ascii to a
+            # charset.  Only do this for the second and subsequent chunks.
+            nextcs = charset
+            if uchunks:
+                if lastcs not in (None, 'us-ascii'):
+                    if nextcs in (None, 'us-ascii'):
+                        uchunks.append(USPACE)
+                        nextcs = None
+                elif nextcs not in (None, 'us-ascii'):
+                    uchunks.append(USPACE)
+            lastcs = nextcs
+            uchunks.append(unicode(s, str(charset)))
+        return UEMPTYSTRING.join(uchunks)
+
+    # Rich comparison operators for equality only.  BAW: does it make sense to
+    # have or explicitly disable <, <=, >, >= operators?
+    def __eq__(self, other):
+        # other may be a Header or a string.  Both are fine so coerce
+        # ourselves to a string, swap the args and do another comparison.
+        return other == self.encode()
+
+    def __ne__(self, other):
+        return not self == other
+
+    def append(self, s, charset=None, errors='strict'):
+        """Append a string to the MIME header.
+
+        Optional charset, if given, should be a Charset instance or the name
+        of a character set (which will be converted to a Charset instance).  A
+        value of None (the default) means that the charset given in the
+        constructor is used.
+
+        s may be a byte string or a Unicode string.  If it is a byte string
+        (i.e. isinstance(s, StringType) is true), then charset is the encoding
+        of that byte string, and a UnicodeError will be raised if the string
+        cannot be decoded with that charset.  If s is a Unicode string, then
+        charset is a hint specifying the character set of the characters in
+        the string.  In this case, when producing an RFC 2822 compliant header
+        using RFC 2047 rules, the Unicode string will be encoded using the
+        following charsets in order: us-ascii, the charset hint, utf-8.  The
+        first character set not to provoke a UnicodeError is used.
+
+        Optional `errors' is passed as the third argument to any unicode() or
+        ustr.encode() call.
+        """
+        if charset is None:
+            charset = self._charset
+        elif not isinstance(charset, Charset):
+            charset = Charset(charset)
+        # If the charset is our faux 8bit charset, leave the string unchanged
+        if charset <> '8bit':
+            # We need to test that the string can be converted to unicode and
+            # back to a byte string, given the input and output codecs of the
+            # charset.
+            if isinstance(s, StringType):
+                # Possibly raise UnicodeError if the byte string can't be
+                # converted to a unicode with the input codec of the charset.
+                incodec = charset.input_codec or 'us-ascii'
+                ustr = unicode(s, incodec, errors)
+                # Now make sure that the unicode could be converted back to a
+                # byte string with the output codec, which may be different
+                # than the input codec.  Still, use the original byte string.
+                outcodec = charset.output_codec or 'us-ascii'
+                ustr.encode(outcodec, errors)
+            elif isinstance(s, UnicodeType):
+                # Now we have to be sure the unicode string can be converted
+                # to a byte string with a reasonable output codec.  We want to
+                # use the byte string in the chunk.
+                for charset in USASCII, charset, UTF8:
+                    try:
+                        outcodec = charset.output_codec or 'us-ascii'
+                        s = s.encode(outcodec, errors)
+                        break
+                    except UnicodeError:
+                        pass
+                else:
+                    assert False, 'utf-8 conversion failed'
+        self._chunks.append((s, charset))
+
+    def _split(self, s, charset, maxlinelen, splitchars):
+        # Split up a header safely for use with encode_chunks.
+        splittable = charset.to_splittable(s)
+        encoded = charset.from_splittable(splittable, True)
+        elen = charset.encoded_header_len(encoded)
+        # If the line's encoded length fits, just return it
+        if elen <= maxlinelen:
+            return [(encoded, charset)]
+        # If we have undetermined raw 8bit characters sitting in a byte
+        # string, we really don't know what the right thing to do is.  We
+        # can't really split it because it might be multibyte data which we
+        # could break if we split it between pairs.  The least harm seems to
+        # be to not split the header at all, but that means they could go out
+        # longer than maxlinelen.
+        if charset == '8bit':
+            return [(s, charset)]
+        # BAW: I'm not sure what the right test here is.  What we're trying to
+        # do is be faithful to RFC 2822's recommendation that ($2.2.3):
+        #
+        # "Note: Though structured field bodies are defined in such a way that
+        #  folding can take place between many of the lexical tokens (and even
+        #  within some of the lexical tokens), folding SHOULD be limited to
+        #  placing the CRLF at higher-level syntactic breaks."
+        #
+        # For now, I can only imagine doing this when the charset is us-ascii,
+        # although it's possible that other charsets may also benefit from the
+        # higher-level syntactic breaks.
+        elif charset == 'us-ascii':
+            return self._split_ascii(s, charset, maxlinelen, splitchars)
+        # BAW: should we use encoded?
+        elif elen == len(s):
+            # We can split on _maxlinelen boundaries because we know that the
+            # encoding won't change the size of the string
+            splitpnt = maxlinelen
+            first = charset.from_splittable(splittable[:splitpnt], False)
+            last = charset.from_splittable(splittable[splitpnt:], False)
+        else:
+            # Binary search for split point
+            first, last = _binsplit(splittable, charset, maxlinelen)
+        # first is of the proper length so just wrap it in the appropriate
+        # chrome.  last must be recursively split.
+        fsplittable = charset.to_splittable(first)
+        fencoded = charset.from_splittable(fsplittable, True)
+        chunk = [(fencoded, charset)]
+        return chunk + self._split(last, charset, self._maxlinelen, splitchars)
+
+    def _split_ascii(self, s, charset, firstlen, splitchars):
+        chunks = _split_ascii(s, firstlen, self._maxlinelen,
+                              self._continuation_ws, splitchars)
+        return zip(chunks, [charset]*len(chunks))
+
+    def _encode_chunks(self, newchunks, maxlinelen):
+        # MIME-encode a header with many different charsets and/or encodings.
+        #
+        # Given a list of pairs (string, charset), return a MIME-encoded
+        # string suitable for use in a header field.  Each pair may have
+        # different charsets and/or encodings, and the resulting header will
+        # accurately reflect each setting.
+        #
+        # Each encoding can be email.Utils.QP (quoted-printable, for
+        # ASCII-like character sets like iso-8859-1), email.Utils.BASE64
+        # (Base64, for non-ASCII like character sets like KOI8-R and
+        # iso-2022-jp), or None (no encoding).
+        #
+        # Each pair will be represented on a separate line; the resulting
+        # string will be in the format:
+        #
+        # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
+        #  =?charset2?b?SvxyZ2VuIEL2aW5n?="
+        chunks = []
+        for header, charset in newchunks:
+            if not header:
+                continue
+            if charset is None or charset.header_encoding is None:
+                s = header
+            else:
+                s = charset.header_encode(header)
+            # Don't add more folding whitespace than necessary
+            if chunks and chunks[-1].endswith(' '):
+                extra = ''
+            else:
+                extra = ' '
+            _max_append(chunks, s, maxlinelen, extra)
+        joiner = NL + self._continuation_ws
+        return joiner.join(chunks)
+
+    def encode(self, splitchars=';, '):
+        """Encode a message header into an RFC-compliant format.
+
+        There are many issues involved in converting a given string for use in
+        an email header.  Only certain character sets are readable in most
+        email clients, and as header strings can only contain a subset of
+        7-bit ASCII, care must be taken to properly convert and encode (with
+        Base64 or quoted-printable) header strings.  In addition, there is a
+        75-character length limit on any given encoded header field, so
+        line-wrapping must be performed, even with double-byte character sets.
+
+        This method will do its best to convert the string to the correct
+        character set used in email, and encode and line wrap it safely with
+        the appropriate scheme for that character set.
+
+        If the given charset is not known or an error occurs during
+        conversion, this function will return the header untouched.
+
+        Optional splitchars is a string containing characters to split long
+        ASCII lines on, in rough support of RFC 2822's `highest level
+        syntactic breaks'.  This doesn't affect RFC 2047 encoded lines.
+        """
+        newchunks = []
+        maxlinelen = self._firstlinelen
+        lastlen = 0
+        for s, charset in self._chunks:
+            # The first bit of the next chunk should be just long enough to
+            # fill the next line.  Don't forget the space separating the
+            # encoded words.
+            targetlen = maxlinelen - lastlen - 1
+            if targetlen < charset.encoded_header_len(''):
+                # Stick it on the next line
+                targetlen = maxlinelen
+            newchunks += self._split(s, charset, targetlen, splitchars)
+            lastchunk, lastcharset = newchunks[-1]
+            lastlen = lastcharset.encoded_header_len(lastchunk)
+        return self._encode_chunks(newchunks, maxlinelen)
+
+
+
+def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
+    lines = []
+    maxlen = firstlen
+    for line in s.splitlines():
+        # Ignore any leading whitespace (i.e. continuation whitespace) already
+        # on the line, since we'll be adding our own.
+        line = line.lstrip()
+        if len(line) < maxlen:
+            lines.append(line)
+            maxlen = restlen
+            continue
+        # Attempt to split the line at the highest-level syntactic break
+        # possible.  Note that we don't have a lot of smarts about field
+        # syntax; we just try to break on semi-colons, then commas, then
+        # whitespace.
+        for ch in splitchars:
+            if line.find(ch) >= 0:
+                break
+        else:
+            # There's nothing useful to split the line on, not even spaces, so
+            # just append this line unchanged
+            lines.append(line)
+            maxlen = restlen
+            continue
+        # Now split the line on the character plus trailing whitespace
+        cre = re.compile(r'%s\s*' % ch)
+        if ch in ';,':
+            eol = ch
+        else:
+            eol = ''
+        joiner = eol + ' '
+        joinlen = len(joiner)
+        wslen = len(continuation_ws.replace('\t', SPACE8))
+        this = []
+        linelen = 0
+        for part in cre.split(line):
+            curlen = linelen + max(0, len(this)-1) * joinlen
+            partlen = len(part)
+            onfirstline = not lines
+            # We don't want to split after the field name, if we're on the
+            # first line and the field name is present in the header string.
+            if ch == ' ' and onfirstline and \
+                   len(this) == 1 and fcre.match(this[0]):
+                this.append(part)
+                linelen += partlen
+            elif curlen + partlen > maxlen:
+                if this:
+                    lines.append(joiner.join(this) + eol)
+                # If this part is longer than maxlen and we aren't already
+                # splitting on whitespace, try to recursively split this line
+                # on whitespace.
+                if partlen > maxlen and ch <> ' ':
+                    subl = _split_ascii(part, maxlen, restlen,
+                                        continuation_ws, ' ')
+                    lines.extend(subl[:-1])
+                    this = [subl[-1]]
+                else:
+                    this = [part]
+                linelen = wslen + len(this[-1])
+                maxlen = restlen
+            else:
+                this.append(part)
+                linelen += partlen
+        # Put any left over parts on a line by themselves
+        if this:
+            lines.append(joiner.join(this))
+    return lines
+
+
+
+def _binsplit(splittable, charset, maxlinelen):
+    i = 0
+    j = len(splittable)
+    while i < j:
+        # Invariants:
+        # 1. splittable[:k] fits for all k <= i (note that we *assume*,
+        #    at the start, that splittable[:0] fits).
+        # 2. splittable[:k] does not fit for any k > j (at the start,
+        #    this means we shouldn't look at any k > len(splittable)).
+        # 3. We don't know about splittable[:k] for k in i+1..j.
+        # 4. We want to set i to the largest k that fits, with i <= k <= j.
+        #
+        m = (i+j+1) >> 1  # ceiling((i+j)/2); i < m <= j
+        chunk = charset.from_splittable(splittable[:m], True)
+        chunklen = charset.encoded_header_len(chunk)
+        if chunklen <= maxlinelen:
+            # m is acceptable, so is a new lower bound.
+            i = m
+        else:
+            # m is not acceptable, so final i must be < m.
+            j = m - 1
+    # i == j.  Invariant #1 implies that splittable[:i] fits, and
+    # invariant #2 implies that splittable[:i+1] does not fit, so i
+    # is what we're looking for.
+    first = charset.from_splittable(splittable[:i], False)
+    last  = charset.from_splittable(splittable[i:], False)
+    return first, last
diff --git a/lib-python/2.2/email/Iterators.py b/lib-python/2.2/email/Iterators.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Iterators.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Various types of useful iterators and generators.
+"""
+
+import sys
+
+try:
+    from email._compat22 import body_line_iterator, typed_subpart_iterator
+except SyntaxError:
+    # Python 2.1 doesn't have generators
+    from email._compat21 import body_line_iterator, typed_subpart_iterator
+
+
+
+def _structure(msg, fp=None, level=0):
+    """A handy debugging aid"""
+    if fp is None:
+        fp = sys.stdout
+    tab = ' ' * (level * 4)
+    print >> fp, tab + msg.get_content_type()
+    if msg.is_multipart():
+        for subpart in msg.get_payload():
+            _structure(subpart, fp, level+1)
diff --git a/lib-python/2.2/email/MIMEAudio.py b/lib-python/2.2/email/MIMEAudio.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEAudio.py
@@ -0,0 +1,71 @@
+# Author: Anthony Baxter
+
+"""Class representing audio/* type MIME documents.
+"""
+
+import sndhdr
+from cStringIO import StringIO
+
+from email import Errors
+from email import Encoders
+from email.MIMENonMultipart import MIMENonMultipart
+
+
+
+_sndhdr_MIMEmap = {'au'  : 'basic',
+                   'wav' :'x-wav',
+                   'aiff':'x-aiff',
+                   'aifc':'x-aiff',
+                   }
+
+# There are others in sndhdr that don't have MIME types. :(
+# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
+def _whatsnd(data):
+    """Try to identify a sound file type.
+
+    sndhdr.what() has a pretty cruddy interface, unfortunately.  This is why
+    we re-do it here.  It would be easier to reverse engineer the Unix 'file'
+    command and use the standard 'magic' file, as shipped with a modern Unix.
+    """
+    hdr = data[:512]
+    fakefile = StringIO(hdr)
+    for testfn in sndhdr.tests:
+        res = testfn(hdr, fakefile)
+        if res is not None:
+            return _sndhdr_MIMEmap.get(res[0])
+    return None
+
+
+
+class MIMEAudio(MIMENonMultipart):
+    """Class for generating audio/* MIME documents."""
+
+    def __init__(self, _audiodata, _subtype=None,
+                 _encoder=Encoders.encode_base64, **_params):
+        """Create an audio/* type MIME document.
+
+        _audiodata is a string containing the raw audio data.  If this data
+        can be decoded by the standard Python `sndhdr' module, then the
+        subtype will be automatically included in the Content-Type header.
+        Otherwise, you can specify the specific audio subtype via the
+        _subtype parameter.  If _subtype is not given, and no subtype can be
+        guessed, a TypeError is raised.
+
+        _encoder is a function which will perform the actual encoding for
+        transport of the image data.  It takes one argument, which is this
+        Image instance.  It should use get_payload() and set_payload() to
+        change the payload to the encoded form.  It should also add any
+        Content-Transfer-Encoding or other headers to the message as
+        necessary.  The default encoding is Base64.
+
+        Any additional keyword arguments are passed to the base class
+        constructor, which turns them into parameters on the Content-Type
+        header.
+        """
+        if _subtype is None:
+            _subtype = _whatsnd(_audiodata)
+        if _subtype is None:
+            raise TypeError, 'Could not find audio MIME subtype'
+        MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
+        self.set_payload(_audiodata)
+        _encoder(self)
diff --git a/lib-python/2.2/email/MIMEBase.py b/lib-python/2.2/email/MIMEBase.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEBase.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Base class for MIME specializations.
+"""
+
+from email import Message
+
+
+
+class MIMEBase(Message.Message):
+    """Base class for MIME specializations."""
+
+    def __init__(self, _maintype, _subtype, **_params):
+        """This constructor adds a Content-Type: and a MIME-Version: header.
+
+        The Content-Type: header is taken from the _maintype and _subtype
+        arguments.  Additional parameters for this header are taken from the
+        keyword arguments.
+        """
+        Message.Message.__init__(self)
+        ctype = '%s/%s' % (_maintype, _subtype)
+        self.add_header('Content-Type', ctype, **_params)
+        self['MIME-Version'] = '1.0'
diff --git a/lib-python/2.2/email/MIMEImage.py b/lib-python/2.2/email/MIMEImage.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEImage.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Class representing image/* type MIME documents.
+"""
+
+import imghdr
+
+from email import Errors
+from email import Encoders
+from email.MIMENonMultipart import MIMENonMultipart
+
+
+
+class MIMEImage(MIMENonMultipart):
+    """Class for generating image/* type MIME documents."""
+
+    def __init__(self, _imagedata, _subtype=None,
+                 _encoder=Encoders.encode_base64, **_params):
+        """Create an image/* type MIME document.
+
+        _imagedata is a string containing the raw image data.  If this data
+        can be decoded by the standard Python `imghdr' module, then the
+        subtype will be automatically included in the Content-Type header.
+        Otherwise, you can specify the specific image subtype via the _subtype
+        parameter.
+
+        _encoder is a function which will perform the actual encoding for
+        transport of the image data.  It takes one argument, which is this
+        Image instance.  It should use get_payload() and set_payload() to
+        change the payload to the encoded form.  It should also add any
+        Content-Transfer-Encoding or other headers to the message as
+        necessary.  The default encoding is Base64.
+
+        Any additional keyword arguments are passed to the base class
+        constructor, which turns them into parameters on the Content-Type
+        header.
+        """
+        if _subtype is None:
+            _subtype = imghdr.what(None, _imagedata)
+        if _subtype is None:
+            raise TypeError, 'Could not guess image MIME subtype'
+        MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
+        self.set_payload(_imagedata)
+        _encoder(self)
diff --git a/lib-python/2.2/email/MIMEMessage.py b/lib-python/2.2/email/MIMEMessage.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEMessage.py
@@ -0,0 +1,32 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Class representing message/* MIME documents.
+"""
+
+from email import Message
+from email.MIMENonMultipart import MIMENonMultipart
+
+
+
+class MIMEMessage(MIMENonMultipart):
+    """Class representing message/* MIME documents."""
+
+    def __init__(self, _msg, _subtype='rfc822'):
+        """Create a message/* type MIME document.
+
+        _msg is a message object and must be an instance of Message, or a
+        derived class of Message, otherwise a TypeError is raised.
+
+        Optional _subtype defines the subtype of the contained message.  The
+        default is "rfc822" (this is defined by the MIME standard, even though
+        the term "rfc822" is technically outdated by RFC 2822).
+        """
+        MIMENonMultipart.__init__(self, 'message', _subtype)
+        if not isinstance(_msg, Message.Message):
+            raise TypeError, 'Argument is not an instance of Message'
+        # It's convenient to use this base class method.  We need to do it
+        # this way or we'll get an exception
+        Message.Message.attach(self, _msg)
+        # And be sure our default type is set correctly
+        self.set_default_type('message/rfc822')
diff --git a/lib-python/2.2/email/MIMEMultipart.py b/lib-python/2.2/email/MIMEMultipart.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEMultipart.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Base class for MIME multipart/* type messages.
+"""
+
+from email import MIMEBase
+
+
+
+class MIMEMultipart(MIMEBase.MIMEBase):
+    """Base class for MIME multipart/* type messages."""
+
+    def __init__(self, _subtype='mixed', boundary=None, *_subparts, **_params):
+        """Creates a multipart/* type message.
+
+        By default, creates a multipart/mixed message, with proper
+        Content-Type and MIME-Version headers.
+
+        _subtype is the subtype of the multipart content type, defaulting to
+        `mixed'.
+
+        boundary is the multipart boundary string.  By default it is
+        calculated as needed.
+
+        _subparts is a sequence of initial subparts for the payload.  It
+        must be possible to convert this sequence to a list.  You can always
+        attach new subparts to the message by using the attach() method.
+
+        Additional parameters for the Content-Type header are taken from the
+        keyword arguments (or passed into the _params argument).
+        """
+        MIMEBase.MIMEBase.__init__(self, 'multipart', _subtype, **_params)
+        if _subparts:
+            self.attach(*list(_subparts))
+        if boundary:
+            self.set_boundary(boundary)
diff --git a/lib-python/2.2/email/MIMENonMultipart.py b/lib-python/2.2/email/MIMENonMultipart.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMENonMultipart.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Base class for MIME type messages that are not multipart.
+"""
+
+from email import Errors
+from email import MIMEBase
+
+
+
+class MIMENonMultipart(MIMEBase.MIMEBase):
+    """Base class for MIME type messages that are not multipart."""
+
+    __pychecker__ = 'unusednames=payload'
+
+    def attach(self, payload):
+        # The public API prohibits attaching multiple subparts to MIMEBase
+        # derived subtypes since none of them are, by definition, of content
+        # type multipart/*
+        raise Errors.MultipartConversionError(
+            'Cannot attach additional subparts to non-multipart/*')
+
+    del __pychecker__
diff --git a/lib-python/2.2/email/MIMEText.py b/lib-python/2.2/email/MIMEText.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/MIMEText.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Class representing text/* type MIME documents.
+"""
+
+import warnings
+from email.MIMENonMultipart import MIMENonMultipart
+from email.Encoders import encode_7or8bit
+
+
+
+class MIMEText(MIMENonMultipart):
+    """Class for generating text/* type MIME documents."""
+
+    def __init__(self, _text, _subtype='plain', _charset='us-ascii',
+                 _encoder=None):
+        """Create a text/* type MIME document.
+
+        _text is the string for this message object.
+
+        _subtype is the MIME sub content type, defaulting to "plain".
+
+        _charset is the character set parameter added to the Content-Type
+        header.  This defaults to "us-ascii".  Note that as a side-effect, the
+        Content-Transfer-Encoding header will also be set.
+
+        The use of the _encoder is deprecated.  The encoding of the payload,
+        and the setting of the character set parameter now happens implicitly
+        based on the _charset argument.  If _encoder is supplied, then a
+        DeprecationWarning is used, and the _encoder functionality may
+        override any header settings indicated by _charset.  This is probably
+        not what you want.
+        """
+        MIMENonMultipart.__init__(self, 'text', _subtype,
+                                  **{'charset': _charset})
+        self.set_payload(_text, _charset)
+        if _encoder is not None:
+            warnings.warn('_encoder argument is obsolete.',
+                          DeprecationWarning, 2)
+            # Because set_payload() with a _charset will set its own
+            # Content-Transfer-Encoding header, we need to delete the
+            # existing one or will end up with two of them. :(
+            del self['content-transfer-encoding']
+            _encoder(self)
diff --git a/lib-python/2.2/email/Message.py b/lib-python/2.2/email/Message.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Message.py
@@ -0,0 +1,837 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Basic message object for the email package object model.
+"""
+
+import re
+import uu
+import binascii
+import warnings
+from cStringIO import StringIO
+from types import ListType, TupleType, StringType
+
+# Intrapackage imports
+from email import Utils
+from email import Errors
+from email import Charset
+
+SEMISPACE = '; '
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+# Regular expression used to split header parameters.  BAW: this may be too
+# simple.  It isn't strictly RFC 2045 (section 5.1) compliant, but it catches
+# most headers found in the wild.  We may eventually need a full-fledged
+# parser.
+paramre = re.compile(r'\s*;\s*')
+# Regular expression that matches `special' characters in parameters, the
+# existence of which forces quoting of the parameter value.
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+
+
+
+# Helper functions
+def _formatparam(param, value=None, quote=True):
+    """Convenience function to format and return a key=value pair.
+
+    This will quote the value if needed or if quote is true.
+    """
+    if value is not None and len(value) > 0:
+        # TupleType is used for RFC 2231 encoded parameter values where items
+        # are (charset, language, value).  charset is a string, not a Charset
+        # instance.
+        if isinstance(value, TupleType):
+            # Encode as per RFC 2231
+            param += '*'
+            value = Utils.encode_rfc2231(value[2], value[0], value[1])
+        # BAW: Please check this.  I think that if quote is set it should
+        # force quoting even if not necessary.
+        if quote or tspecials.search(value):
+            return '%s="%s"' % (param, Utils.quote(value))
+        else:
+            return '%s=%s' % (param, value)
+    else:
+        return param
+
+def _parseparam(s):
+    plist = []
+    while s[:1] == ';':
+        s = s[1:]
+        end = s.find(';')
+        while end > 0 and s.count('"', 0, end) % 2:
+            end = s.find(';', end + 1)
+        if end < 0:
+            end = len(s)
+        f = s[:end]
+        if '=' in f:
+            i = f.index('=')
+            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
+        plist.append(f.strip())
+        s = s[end:]
+    return plist
+
+
+def _unquotevalue(value):
+    if isinstance(value, TupleType):
+        return value[0], value[1], Utils.unquote(value[2])
+    else:
+        return Utils.unquote(value)
+
+
+
+class Message:
+    """Basic message object.
+
+    A message object is defined as something that has a bunch of RFC 2822
+    headers and a payload.  It may optionally have an envelope header
+    (a.k.a. Unix-From or From_ header).  If the message is a container (i.e. a
+    multipart or a message/rfc822), then the payload is a list of Message
+    objects, otherwise it is a string.
+
+    Message objects implement part of the `mapping' interface, which assumes
+    there is exactly one occurrence of the header per message.  Some headers
+    do in fact appear multiple times (e.g. Received) and for those headers,
+    you must use the explicit API to set or get all the headers.  Not all of
+    the mapping methods are implemented.
+    """
+    def __init__(self):
+        self._headers = []
+        self._unixfrom = None
+        self._payload = None
+        self._charset = None
+        # Defaults for multipart messages
+        self.preamble = self.epilogue = None
+        # Default content type
+        self._default_type = 'text/plain'
+
+    def __str__(self):
+        """Return the entire formatted message as a string.
+        This includes the headers, body, and envelope header.
+        """
+        return self.as_string(unixfrom=True)
+
+    def as_string(self, unixfrom=False):
+        """Return the entire formatted message as a string.
+        Optional `unixfrom' when True, means include the Unix From_ envelope
+        header.
+
+        This is a convenience method and may not generate the message exactly
+        as you intend.  For more flexibility, use the flatten() method of a
+        Generator instance.
+        """
+        from email.Generator import Generator
+        fp = StringIO()
+        g = Generator(fp)
+        g.flatten(self, unixfrom=unixfrom)
+        return fp.getvalue()
+
+    def is_multipart(self):
+        """Return True if the message consists of multiple parts."""
+        if isinstance(self._payload, ListType):
+            return True
+        return False
+
+    #
+    # Unix From_ line
+    #
+    def set_unixfrom(self, unixfrom):
+        self._unixfrom = unixfrom
+
+    def get_unixfrom(self):
+        return self._unixfrom
+
+    #
+    # Payload manipulation.
+    #
+    def add_payload(self, payload):
+        """Add the given payload to the current payload.
+
+        If the current payload is empty, then the current payload will be made
+        a scalar, set to the given value.
+
+        Note: This method is deprecated.  Use .attach() instead.
+        """
+        warnings.warn('add_payload() is deprecated, use attach() instead.',
+                      DeprecationWarning, 2)
+        if self._payload is None:
+            self._payload = payload
+        elif isinstance(self._payload, ListType):
+            self._payload.append(payload)
+        elif self.get_main_type() not in (None, 'multipart'):
+            raise Errors.MultipartConversionError(
+                'Message main content type must be "multipart" or missing')
+        else:
+            self._payload = [self._payload, payload]
+
+    def attach(self, payload):
+        """Add the given payload to the current payload.
+
+        The current payload will always be a list of objects after this method
+        is called.  If you want to set the payload to a scalar object, use
+        set_payload() instead.
+        """
+        if self._payload is None:
+            self._payload = [payload]
+        else:
+            self._payload.append(payload)
+
+    def get_payload(self, i=None, decode=False):
+        """Return a reference to the payload.
+
+        The payload will either be a list object or a string.  If you mutate
+        the list object, you modify the message's payload in place.  Optional
+        i returns that index into the payload.
+
+        Optional decode is a flag indicating whether the payload should be
+        decoded or not, according to the Content-Transfer-Encoding header
+        (default is False).
+
+        When True and the message is not a multipart, the payload will be
+        decoded if this header's value is `quoted-printable' or `base64'.  If
+        some other encoding is used, or the header is missing, or if the
+        payload has bogus data (i.e. bogus base64 or uuencoded data), the
+        payload is returned as-is.
+
+        If the message is a multipart and the decode flag is True, then None
+        is returned.
+        """
+        if i is None:
+            payload = self._payload
+        elif not isinstance(self._payload, ListType):
+            raise TypeError, i
+        else:
+            payload = self._payload[i]
+        if decode:
+            if self.is_multipart():
+                return None
+            cte = self.get('content-transfer-encoding', '').lower()
+            if cte == 'quoted-printable':
+                return Utils._qdecode(payload)
+            elif cte == 'base64':
+                try:
+                    return Utils._bdecode(payload)
+                except binascii.Error:
+                    # Incorrect padding
+                    return payload
+            elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
+                sfp = StringIO()
+                try:
+                    uu.decode(StringIO(payload+'\n'), sfp)
+                    payload = sfp.getvalue()
+                except uu.Error:
+                    # Some decoding problem
+                    return payload
+        # Everything else, including encodings with 8bit or 7bit are returned
+        # unchanged.
+        return payload
+
+    def set_payload(self, payload, charset=None):
+        """Set the payload to the given value.
+
+        Optional charset sets the message's default character set.  See
+        set_charset() for details.
+        """
+        self._payload = payload
+        if charset is not None:
+            self.set_charset(charset)
+
+    def set_charset(self, charset):
+        """Set the charset of the payload to a given character set.
+
+        charset can be a Charset instance, a string naming a character set, or
+        None.  If it is a string it will be converted to a Charset instance.
+        If charset is None, the charset parameter will be removed from the
+        Content-Type field.  Anything else will generate a TypeError.
+
+        The message will be assumed to be of type text/* encoded with
+        charset.input_charset.  It will be converted to charset.output_charset
+        and encoded properly, if needed, when generating the plain text
+        representation of the message.  MIME headers (MIME-Version,
+        Content-Type, Content-Transfer-Encoding) will be added as needed.
+
+        """
+        if charset is None:
+            self.del_param('charset')
+            self._charset = None
+            return
+        if isinstance(charset, StringType):
+            charset = Charset.Charset(charset)
+        if not isinstance(charset, Charset.Charset):
+            raise TypeError, charset
+        # BAW: should we accept strings that can serve as arguments to the
+        # Charset constructor?
+        self._charset = charset
+        if not self.has_key('MIME-Version'):
+            self.add_header('MIME-Version', '1.0')
+        if not self.has_key('Content-Type'):
+            self.add_header('Content-Type', 'text/plain',
+                            charset=charset.get_output_charset())
+        else:
+            self.set_param('charset', charset.get_output_charset())
+        if not self.has_key('Content-Transfer-Encoding'):
+            cte = charset.get_body_encoding()
+            if callable(cte):
+                cte(self)
+            else:
+                self.add_header('Content-Transfer-Encoding', cte)
+
+    def get_charset(self):
+        """Return the Charset instance associated with the message's payload.
+        """
+        return self._charset
+
+    #
+    # MAPPING INTERFACE (partial)
+    #
+    def __len__(self):
+        """Return the total number of headers, including duplicates."""
+        return len(self._headers)
+
+    def __getitem__(self, name):
+        """Get a header value.
+
+        Return None if the header is missing instead of raising an exception.
+
+        Note that if the header appeared multiple times, exactly which
+        occurrence gets returned is undefined.  Use get_all() to get all
+        the values matching a header field name.
+        """
+        return self.get(name)
+
+    def __setitem__(self, name, val):
+        """Set the value of a header.
+
+        Note: this does not overwrite an existing header with the same field
+        name.  Use __delitem__() first to delete any existing headers.
+        """
+        self._headers.append((name, val))
+
+    def __delitem__(self, name):
+        """Delete all occurrences of a header, if present.
+
+        Does not raise an exception if the header is missing.
+        """
+        name = name.lower()
+        newheaders = []
+        for k, v in self._headers:
+            if k.lower() <> name:
+                newheaders.append((k, v))
+        self._headers = newheaders
+
+    def __contains__(self, name):
+        return name.lower() in [k.lower() for k, v in self._headers]
+
+    def has_key(self, name):
+        """Return true if the message contains the header."""
+        missing = []
+        return self.get(name, missing) is not missing
+
+    def keys(self):
+        """Return a list of all the message's header field names.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [k for k, v in self._headers]
+
+    def values(self):
+        """Return a list of all the message's header values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [v for k, v in self._headers]
+
+    def items(self):
+        """Get all the message's header fields and values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return self._headers[:]
+
+    def get(self, name, failobj=None):
+        """Get a header value.
+
+        Like __getitem__() but return failobj instead of None when the field
+        is missing.
+        """
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                return v
+        return failobj
+
+    #
+    # Additional useful stuff
+    #
+
+    def get_all(self, name, failobj=None):
+        """Return a list of all the values for the named field.
+
+        These will be sorted in the order they appeared in the original
+        message, and may contain duplicates.  Any fields deleted and
+        re-inserted are always appended to the header list.
+
+        If no such fields exist, failobj is returned (defaults to None).
+        """
+        values = []
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                values.append(v)
+        if not values:
+            return failobj
+        return values
+
+    def add_header(self, _name, _value, **_params):
+        """Extended header setting.
+
+        name is the header field to add.  keyword arguments can be used to set
+        additional parameters for the header field, with underscores converted
+        to dashes.  Normally the parameter will be added as key="value" unless
+        value is None, in which case only the key will be added.
+
+        Example:
+
+        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
+        """
+        parts = []
+        for k, v in _params.items():
+            if v is None:
+                parts.append(k.replace('_', '-'))
+            else:
+                parts.append(_formatparam(k.replace('_', '-'), v))
+        if _value is not None:
+            parts.insert(0, _value)
+        self._headers.append((_name, SEMISPACE.join(parts)))
+
+    def replace_header(self, _name, _value):
+        """Replace a header.
+
+        Replace the first matching header found in the message, retaining
+        header order and case.  If no matching header was found, a KeyError is
+        raised.
+        """
+        _name = _name.lower()
+        for i, (k, v) in zip(range(len(self._headers)), self._headers):
+            if k.lower() == _name:
+                self._headers[i] = (k, _value)
+                break
+        else:
+            raise KeyError, _name
+
+    #
+    # These methods are silently deprecated in favor of get_content_type() and
+    # friends (see below).  They will be noisily deprecated in email 3.0.
+    #
+
+    def get_type(self, failobj=None):
+        """Returns the message's content type.
+
+        The returned string is coerced to lowercase and returned as a single
+        string of the form `maintype/subtype'.  If there was no Content-Type
+        header in the message, failobj is returned (defaults to None).
+        """
+        missing = []
+        value = self.get('content-type', missing)
+        if value is missing:
+            return failobj
+        return paramre.split(value)[0].lower().strip()
+
+    def get_main_type(self, failobj=None):
+        """Return the message's main content type if present."""
+        missing = []
+        ctype = self.get_type(missing)
+        if ctype is missing:
+            return failobj
+        if ctype.count('/') <> 1:
+            return failobj
+        return ctype.split('/')[0]
+
+    def get_subtype(self, failobj=None):
+        """Return the message's content subtype if present."""
+        missing = []
+        ctype = self.get_type(missing)
+        if ctype is missing:
+            return failobj
+        if ctype.count('/') <> 1:
+            return failobj
+        return ctype.split('/')[1]
+
+    #
+    # Use these three methods instead of the three above.
+    #
+
+    def get_content_type(self):
+        """Return the message's content type.
+
+        The returned string is coerced to lower case of the form
+        `maintype/subtype'.  If there was no Content-Type header in the
+        message, the default type as given by get_default_type() will be
+        returned.  Since according to RFC 2045, messages always have a default
+        type this will always return a value.
+
+        RFC 2045 defines a message's default type to be text/plain unless it
+        appears inside a multipart/digest container, in which case it would be
+        message/rfc822.
+        """
+        missing = []
+        value = self.get('content-type', missing)
+        if value is missing:
+            # This should have no parameters
+            return self.get_default_type()
+        ctype = paramre.split(value)[0].lower().strip()
+        # RFC 2045, section 5.2 says if it's invalid, use text/plain
+        if ctype.count('/') <> 1:
+            return 'text/plain'
+        return ctype
+
+    def get_content_maintype(self):
+        """Return the message's main content type.
+
+        This is the `maintype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[0]
+
+    def get_content_subtype(self):
+        """Returns the message's sub-content type.
+
+        This is the `subtype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[1]
+
+    def get_default_type(self):
+        """Return the `default' content type.
+
+        Most messages have a default content type of text/plain, except for
+        messages that are subparts of multipart/digest containers.  Such
+        subparts have a default content type of message/rfc822.
+        """
+        return self._default_type
+
+    def set_default_type(self, ctype):
+        """Set the `default' content type.
+
+        ctype should be either "text/plain" or "message/rfc822", although this
+        is not enforced.  The default content type is not stored in the
+        Content-Type header.
+        """
+        self._default_type = ctype
+
+    def _get_params_preserve(self, failobj, header):
+        # Like get_params() but preserves the quoting of values.  BAW:
+        # should this be part of the public interface?
+        missing = []
+        value = self.get(header, missing)
+        if value is missing:
+            return failobj
+        params = []
+        for p in _parseparam(';' + value):
+            try:
+                name, val = p.split('=', 1)
+                name = name.strip()
+                val = val.strip()
+            except ValueError:
+                # Must have been a bare attribute
+                name = p.strip()
+                val = ''
+            params.append((name, val))
+        params = Utils.decode_params(params)
+        return params
+
+    def get_params(self, failobj=None, header='content-type', unquote=True):
+        """Return the message's Content-Type parameters, as a list.
+
+        The elements of the returned list are 2-tuples of key/value pairs, as
+        split on the `=' sign.  The left hand side of the `=' is the key,
+        while the right hand side is the value.  If there is no `=' sign in
+        the parameter the value is the empty string.  The value is as
+        described in the get_param() method.
+
+        Optional failobj is the object to return if there is no Content-Type
+        header.  Optional header is the header to search instead of
+        Content-Type.  If unquote is True, the value is unquoted.
+        """
+        missing = []
+        params = self._get_params_preserve(missing, header)
+        if params is missing:
+            return failobj
+        if unquote:
+            return [(k, _unquotevalue(v)) for k, v in params]
+        else:
+            return params
+
+    def get_param(self, param, failobj=None, header='content-type',
+                  unquote=True):
+        """Return the parameter value if found in the Content-Type header.
+
+        Optional failobj is the object to return if there is no Content-Type
+        header, or the Content-Type header has no such parameter.  Optional
+        header is the header to search instead of Content-Type.
+
+        Parameter keys are always compared case insensitively.  The return
+        value can either be a string, or a 3-tuple if the parameter was RFC
+        2231 encoded.  When it's a 3-tuple, the elements of the value are of
+        the form (CHARSET, LANGUAGE, VALUE).  Note that both CHARSET and
+        LANGUAGE can be None, in which case you should consider VALUE to be
+        encoded in the us-ascii charset.  You can usually ignore LANGUAGE.
+
+        Your application should be prepared to deal with 3-tuple return
+        values, and can convert the parameter to a Unicode string like so:
+
+            param = msg.get_param('foo')
+            if isinstance(param, tuple):
+                param = unicode(param[2], param[0] or 'us-ascii')
+
+        In any case, the parameter value (either the returned string, or the
+        VALUE item in the 3-tuple) is always unquoted, unless unquote is set
+        to False.
+        """
+        if not self.has_key(header):
+            return failobj
+        for k, v in self._get_params_preserve(failobj, header):
+            if k.lower() == param.lower():
+                if unquote:
+                    return _unquotevalue(v)
+                else:
+                    return v
+        return failobj
+
+    def set_param(self, param, value, header='Content-Type', requote=True,
+                  charset=None, language=''):
+        """Set a parameter in the Content-Type header.
+
+        If the parameter already exists in the header, its value will be
+        replaced with the new value.
+
+        If header is Content-Type and has not yet been defined for this
+        message, it will be set to "text/plain" and the new parameter and
+        value will be appended as per RFC 2045.
+
+        An alternate header can be specified in the header argument, and all
+        parameters will be quoted as necessary unless requote is False.
+
+        If charset is specified, the parameter will be encoded according to RFC
+        2231.  Optional language specifies the RFC 2231 language, defaulting
+        to the empty string.  Both charset and language should be strings.
+        """
+        if not isinstance(value, TupleType) and charset:
+            value = (charset, language, value)
+
+        if not self.has_key(header) and header.lower() == 'content-type':
+            ctype = 'text/plain'
+        else:
+            ctype = self.get(header)
+        if not self.get_param(param, header=header):
+            if not ctype:
+                ctype = _formatparam(param, value, requote)
+            else:
+                ctype = SEMISPACE.join(
+                    [ctype, _formatparam(param, value, requote)])
+        else:
+            ctype = ''
+            for old_param, old_value in self.get_params(header=header,
+                                                        unquote=requote):
+                append_param = ''
+                if old_param.lower() == param.lower():
+                    append_param = _formatparam(param, value, requote)
+                else:
+                    append_param = _formatparam(old_param, old_value, requote)
+                if not ctype:
+                    ctype = append_param
+                else:
+                    ctype = SEMISPACE.join([ctype, append_param])
+        if ctype <> self.get(header):
+            del self[header]
+            self[header] = ctype
+
+    def del_param(self, param, header='content-type', requote=True):
+        """Remove the given parameter completely from the Content-Type header.
+
+        The header will be re-written in place without the parameter or its
+        value. All values will be quoted as necessary unless requote is
+        False.  Optional header specifies an alternative to the Content-Type
+        header.
+        """
+        if not self.has_key(header):
+            return
+        new_ctype = ''
+        for p, v in self.get_params(header, unquote=requote):
+            if p.lower() <> param.lower():
+                if not new_ctype:
+                    new_ctype = _formatparam(p, v, requote)
+                else:
+                    new_ctype = SEMISPACE.join([new_ctype,
+                                                _formatparam(p, v, requote)])
+        if new_ctype <> self.get(header):
+            del self[header]
+            self[header] = new_ctype
+
+    def set_type(self, type, header='Content-Type', requote=True):
+        """Set the main type and subtype for the Content-Type header.
+
+        type must be a string in the form "maintype/subtype", otherwise a
+        ValueError is raised.
+
+        This method replaces the Content-Type header, keeping all the
+        parameters in place.  If requote is False, this leaves the existing
+        header's quoting as is.  Otherwise, the parameters will be quoted (the
+        default).
+
+        An alternative header can be specified in the header argument.  When
+        the Content-Type header is set, we'll always also add a MIME-Version
+        header.
+        """
+        # BAW: should we be strict?
+        if not type.count('/') == 1:
+            raise ValueError
+        # Set the Content-Type, you get a MIME-Version
+        if header.lower() == 'content-type':
+            del self['mime-version']
+            self['MIME-Version'] = '1.0'
+        if not self.has_key(header):
+            self[header] = type
+            return
+        params = self.get_params(header, unquote=requote)
+        del self[header]
+        self[header] = type
+        # Skip the first param; it's the old type.
+        for p, v in params[1:]:
+            self.set_param(p, v, header, requote)
+
+    def get_filename(self, failobj=None):
+        """Return the filename associated with the payload if present.
+
+        The filename is extracted from the Content-Disposition header's
+        `filename' parameter, and it is unquoted.
+        """
+        missing = []
+        filename = self.get_param('filename', missing, 'content-disposition')
+        if filename is missing:
+            return failobj
+        if isinstance(filename, TupleType):
+            # It's an RFC 2231 encoded parameter
+            newvalue = _unquotevalue(filename)
+            return unicode(newvalue[2], newvalue[0] or 'us-ascii')
+        else:
+            newvalue = _unquotevalue(filename.strip())
+            return newvalue
+
+    def get_boundary(self, failobj=None):
+        """Return the boundary associated with the payload if present.
+
+        The boundary is extracted from the Content-Type header's `boundary'
+        parameter, and it is unquoted.
+        """
+        missing = []
+        boundary = self.get_param('boundary', missing)
+        if boundary is missing:
+            return failobj
+        if isinstance(boundary, TupleType):
+            # RFC 2231 encoded, so decode.  It better end up as ascii
+            charset = boundary[0] or 'us-ascii'
+            return unicode(boundary[2], charset).encode('us-ascii')
+        return _unquotevalue(boundary.strip())
+
+    def set_boundary(self, boundary):
+        """Set the boundary parameter in Content-Type to 'boundary'.
+
+        This is subtly different than deleting the Content-Type header and
+        adding a new one with a new boundary parameter via add_header().  The
+        main difference is that using the set_boundary() method preserves the
+        order of the Content-Type header in the original message.
+
+        HeaderParseError is raised if the message has no Content-Type header.
+        """
+        missing = []
+        params = self._get_params_preserve(missing, 'content-type')
+        if params is missing:
+            # There was no Content-Type header, and we don't know what type
+            # to set it to, so raise an exception.
+            raise Errors.HeaderParseError, 'No Content-Type header found'
+        newparams = []
+        foundp = False
+        for pk, pv in params:
+            if pk.lower() == 'boundary':
+                newparams.append(('boundary', '"%s"' % boundary))
+                foundp = True
+            else:
+                newparams.append((pk, pv))
+        if not foundp:
+            # The original Content-Type header had no boundary attribute.
+            # Tack one on the end.  BAW: should we raise an exception
+            # instead???
+            newparams.append(('boundary', '"%s"' % boundary))
+        # Replace the existing Content-Type header with the new value
+        newheaders = []
+        for h, v in self._headers:
+            if h.lower() == 'content-type':
+                parts = []
+                for k, v in newparams:
+                    if v == '':
+                        parts.append(k)
+                    else:
+                        parts.append('%s=%s' % (k, v))
+                newheaders.append((h, SEMISPACE.join(parts)))
+
+            else:
+                newheaders.append((h, v))
+        self._headers = newheaders
+
+    try:
+        from email._compat22 import walk
+    except SyntaxError:
+        # Must be using Python 2.1
+        from email._compat21 import walk
+
+    def get_content_charset(self, failobj=None):
+        """Return the charset parameter of the Content-Type header.
+
+        The returned string is always coerced to lower case.  If there is no
+        Content-Type header, or if that header has no charset parameter,
+        failobj is returned.
+        """
+        missing = []
+        charset = self.get_param('charset', missing)
+        if charset is missing:
+            return failobj
+        if isinstance(charset, TupleType):
+            # RFC 2231 encoded, so decode it, and it better end up as ascii.
+            pcharset = charset[0] or 'us-ascii'
+            charset = unicode(charset[2], pcharset).encode('us-ascii')
+        # RFC 2046, $4.1.2 says charsets are not case sensitive
+        return charset.lower()
+
+    def get_charsets(self, failobj=None):
+        """Return a list containing the charset(s) used in this message.
+
+        The returned list of items describes the Content-Type headers'
+        charset parameter for this message and all the subparts in its
+        payload.
+
+        Each item will either be a string (the value of the charset parameter
+        in the Content-Type header of that part) or the value of the
+        'failobj' parameter (defaults to None), if the part does not have a
+        main MIME type of "text", or the charset is not defined.
+
+        The list will contain one string for each part of the message, plus
+        one for the container message (i.e. self), so that a non-multipart
+        message will still return a list of length 1.
+        """
+        return [part.get_content_charset(failobj) for part in self.walk()]
diff --git a/lib-python/2.2/email/Parser.py b/lib-python/2.2/email/Parser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Parser.py
@@ -0,0 +1,292 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""A parser of RFC 2822 and MIME email messages.
+"""
+
+import re
+from cStringIO import StringIO
+from types import ListType
+
+from email import Errors
+from email import Message
+
+EMPTYSTRING = ''
+NL = '\n'
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+NLCRE = re.compile('\r\n|\r|\n')
+
+
+
+class Parser:
+    def __init__(self, _class=Message.Message, strict=False):
+        """Parser of RFC 2822 and MIME email messages.
+
+        Creates an in-memory object tree representing the email message, which
+        can then be manipulated and turned over to a Generator to return the
+        textual representation of the message.
+
+        The string must be formatted as a block of RFC 2822 headers and header
+        continuation lines, optionally preceded by a `Unix-from' header.  The
+        header block is terminated either by the end of the string or by a
+        blank line.
+
+        _class is the class to instantiate for new message objects when they
+        must be created.  This class must have a constructor that can take
+        zero arguments.  Default is Message.Message.
+
+        Optional strict tells the parser to be strictly RFC compliant or to be
+        more forgiving in parsing of ill-formatted MIME documents.  When
+        non-strict mode is used, the parser will try to make up for missing or
+        erroneous boundaries and other peculiarities seen in the wild.
+        Default is non-strict parsing.
+        """
+        self._class = _class
+        self._strict = strict
+
+    def parse(self, fp, headersonly=False):
+        """Create a message structure from the data in a file.
+
+        Reads all the data from the file and returns the root of the message
+        structure.  Optional headersonly is a flag specifying whether to stop
+        parsing after reading the headers or not.  The default is False,
+        meaning it parses the entire contents of the file.
+        """
+        root = self._class()
+        firstbodyline = self._parseheaders(root, fp)
+        if not headersonly:
+            self._parsebody(root, fp, firstbodyline)
+        return root
+
+    def parsestr(self, text, headersonly=False):
+        """Create a message structure from a string.
+
+        Returns the root of the message structure.  Optional headersonly is a
+        flag specifying whether to stop parsing after reading the headers or
+        not.  The default is False, meaning it parses the entire contents of
+        the file.
+        """
+        return self.parse(StringIO(text), headersonly=headersonly)
+
+    def _parseheaders(self, container, fp):
+        # Parse the headers, returning a list of header/value pairs.  None as
+        # the header means the Unix-From header.
+        lastheader = ''
+        lastvalue = []
+        lineno = 0
+        firstbodyline = None
+        while True:
+            # Don't strip the line before we test for the end condition,
+            # because whitespace-only header lines are RFC compliant
+            # continuation lines.
+            line = fp.readline()
+            if not line:
+                break
+            line = line.splitlines()[0]
+            if not line:
+                break
+            # Ignore the trailing newline
+            lineno += 1
+            # Check for initial Unix From_ line
+            if line.startswith('From '):
+                if lineno == 1:
+                    container.set_unixfrom(line)
+                    continue
+                elif self._strict:
+                    raise Errors.HeaderParseError(
+                        'Unix-from in headers after first rfc822 header')
+                else:
+                    # ignore the weirdly placed From_ line
+                    # XXX: maybe set unixfrom anyway? or only if not already?
+                    continue
+            # Header continuation line
+            if line[0] in ' \t':
+                if not lastheader:
+                    raise Errors.HeaderParseError(
+                        'Continuation line seen before first header')
+                lastvalue.append(line)
+                continue
+            # Normal, non-continuation header.  BAW: this should check to make
+            # sure it's a legal header, e.g. doesn't contain spaces.  Also, we
+            # should expose the header matching algorithm in the API, and
+            # allow for a non-strict parsing mode (that ignores the line
+            # instead of raising the exception).
+            i = line.find(':')
+            if i < 0:
+                if self._strict:
+                    raise Errors.HeaderParseError(
+                        "Not a header, not a continuation: ``%s''" % line)
+                elif lineno == 1 and line.startswith('--'):
+                    # allow through duplicate boundary tags.
+                    continue
+                else:
+                    # There was no separating blank line as mandated by RFC
+                    # 2822, but we're in non-strict mode.  So just offer up
+                    # this current line as the first body line.
+                    firstbodyline = line
+                    break
+            if lastheader:
+                container[lastheader] = NL.join(lastvalue)
+            lastheader = line[:i]
+            lastvalue = [line[i+1:].lstrip()]
+        # Make sure we retain the last header
+        if lastheader:
+            container[lastheader] = NL.join(lastvalue)
+        return firstbodyline
+
+    def _parsebody(self, container, fp, firstbodyline=None):
+        # Parse the body, but first split the payload on the content-type
+        # boundary if present.
+        boundary = container.get_boundary()
+        isdigest = (container.get_content_type() == 'multipart/digest')
+        # If there's a boundary, split the payload text into its constituent
+        # parts and parse each separately.  Otherwise, just parse the rest of
+        # the body as a single message.  Note: any exceptions raised in the
+        # recursive parse need to have their line numbers coerced.
+        if boundary:
+            preamble = epilogue = None
+            # Split into subparts.  The first boundary we're looking for won't
+            # always have a leading newline since we're at the start of the
+            # body text, and there's not always a preamble before the first
+            # boundary.
+            separator = '--' + boundary
+            payload = fp.read()
+            if firstbodyline is not None:
+                payload = firstbodyline + '\n' + payload
+            # We use an RE here because boundaries can have trailing
+            # whitespace.
+            mo = re.search(
+                r'(?P<sep>' + re.escape(separator) + r')(?P<ws>[ \t]*)',
+                payload)
+            if not mo:
+                if self._strict:
+                    raise Errors.BoundaryError(
+                        "Couldn't find starting boundary: %s" % boundary)
+                container.set_payload(payload)
+                return
+            start = mo.start()
+            if start > 0:
+                # there's some pre-MIME boundary preamble
+                preamble = payload[0:start]
+            # Find out what kind of line endings we're using
+            start += len(mo.group('sep')) + len(mo.group('ws'))
+            mo = NLCRE.search(payload, start)
+            if mo:
+                start += len(mo.group(0))
+            # We create a compiled regexp first because we need to be able to
+            # specify the start position, and the module function doesn't
+            # support this signature. :(
+            cre = re.compile('(?P<sep>\r\n|\r|\n)' +
+                             re.escape(separator) + '--')
+            mo = cre.search(payload, start)
+            if mo:
+                terminator = mo.start()
+                linesep = mo.group('sep')
+                if mo.end() < len(payload):
+                    # There's some post-MIME boundary epilogue
+                    epilogue = payload[mo.end():]
+            elif self._strict:
+                raise Errors.BoundaryError(
+                        "Couldn't find terminating boundary: %s" % boundary)
+            else:
+                # Handle the case of no trailing boundary.  Check that it ends
+                # in a blank line.  Some cases (spamspamspam) don't even have
+                # that!
+                mo = re.search('(?P<sep>\r\n|\r|\n){2}$', payload)
+                if not mo:
+                    mo = re.search('(?P<sep>\r\n|\r|\n)$', payload)
+                    if not mo:
+                        raise Errors.BoundaryError(
+                          'No terminating boundary and no trailing empty line')
+                linesep = mo.group('sep')
+                terminator = len(payload)
+            # We split the textual payload on the boundary separator, which
+            # includes the trailing newline. If the container is a
+            # multipart/digest then the subparts are by default message/rfc822
+            # instead of text/plain.  In that case, they'll have an optional
+            # block of MIME headers, then an empty line followed by the
+            # message headers.
+            parts = re.split(
+                linesep + re.escape(separator) + r'[ \t]*' + linesep,
+                payload[start:terminator])
+            for part in parts:
+                if isdigest:
+                    if part.startswith(linesep):
+                        # There's no header block so create an empty message
+                        # object as the container, and lop off the newline so
+                        # we can parse the sub-subobject
+                        msgobj = self._class()
+                        part = part[len(linesep):]
+                    else:
+                        parthdrs, part = part.split(linesep+linesep, 1)
+                        # msgobj in this case is the "message/rfc822" container
+                        msgobj = self.parsestr(parthdrs, headersonly=1)
+                    # while submsgobj is the message itself
+                    msgobj.set_default_type('message/rfc822')
+                    maintype = msgobj.get_content_maintype()
+                    if maintype in ('message', 'multipart'):
+                        submsgobj = self.parsestr(part)
+                        msgobj.attach(submsgobj)
+                    else:
+                        msgobj.set_payload(part)
+                else:
+                    msgobj = self.parsestr(part)
+                container.preamble = preamble
+                container.epilogue = epilogue
+                container.attach(msgobj)
+        elif container.get_main_type() == 'multipart':
+            # Very bad.  A message is a multipart with no boundary!
+            raise Errors.BoundaryError(
+                'multipart message with no defined boundary')
+        elif container.get_type() == 'message/delivery-status':
+            # This special kind of type contains blocks of headers separated
+            # by a blank line.  We'll represent each header block as a
+            # separate Message object
+            blocks = []
+            while True:
+                blockmsg = self._class()
+                self._parseheaders(blockmsg, fp)
+                if not len(blockmsg):
+                    # No more header blocks left
+                    break
+                blocks.append(blockmsg)
+            container.set_payload(blocks)
+        elif container.get_main_type() == 'message':
+            # Create a container for the payload, but watch out for there not
+            # being any headers left
+            try:
+                msg = self.parse(fp)
+            except Errors.HeaderParseError:
+                msg = self._class()
+                self._parsebody(msg, fp)
+            container.attach(msg)
+        else:
+            text = fp.read()
+            if firstbodyline is not None:
+                text = firstbodyline + '\n' + text
+            container.set_payload(text)
+
+
+
+class HeaderParser(Parser):
+    """A subclass of Parser, this one only meaningfully parses message headers.
+
+    This class can be used if all you're interested in is the headers of a
+    message.  While it consumes the message body, it does not parse it, but
+    simply makes it available as a string payload.
+
+    Parsing with this subclass can be considerably faster if all you're
+    interested in is the message headers.
+    """
+    def _parsebody(self, container, fp, firstbodyline=None):
+        # Consume but do not parse, the body
+        text = fp.read()
+        if firstbodyline is not None:
+            text = firstbodyline + '\n' + text
+        container.set_payload(text)
diff --git a/lib-python/2.2/email/Utils.py b/lib-python/2.2/email/Utils.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/Utils.py
@@ -0,0 +1,340 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""Miscellaneous utilities.
+"""
+
+import time
+import socket
+import re
+import random
+import os
+import warnings
+from cStringIO import StringIO
+from types import ListType
+
+from email._parseaddr import quote
+from email._parseaddr import AddressList as _AddressList
+from email._parseaddr import mktime_tz
+
+# We need wormarounds for bugs in these methods in older Pythons (see below)
+from email._parseaddr import parsedate as _parsedate
+from email._parseaddr import parsedate_tz as _parsedate_tz
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+try:
+    from quopri import decodestring as _qdecode
+except ImportError:
+    # Python 2.1 doesn't have quopri.decodestring()
+    def _qdecode(s):
+        import quopri as _quopri
+
+        if not s:
+            return s
+        infp = StringIO(s)
+        outfp = StringIO()
+        _quopri.decode(infp, outfp)
+        value = outfp.getvalue()
+        if not s.endswith('\n') and value.endswith('\n'):
+            return value[:-1]
+        return value
+
+import base64
+
+# Intrapackage imports
+from email.Encoders import _bencode, _qencode
+
+COMMASPACE = ', '
+EMPTYSTRING = ''
+UEMPTYSTRING = u''
+CRLF = '\r\n'
+
+specialsre = re.compile(r'[][\\()<>@,:;".]')
+escapesre = re.compile(r'[][\\()"]')
+
+
+
+# Helpers
+
+def _identity(s):
+    return s
+
+
+def _bdecode(s):
+    # We can't quite use base64.encodestring() since it tacks on a "courtesy
+    # newline".  Blech!
+    if not s:
+        return s
+    value = base64.decodestring(s)
+    if not s.endswith('\n') and value.endswith('\n'):
+        return value[:-1]
+    return value
+
+
+
+def fix_eols(s):
+    """Replace all line-ending characters with \r\n."""
+    # Fix newlines with no preceding carriage return
+    s = re.sub(r'(?<!\r)\n', CRLF, s)
+    # Fix carriage returns with no following newline
+    s = re.sub(r'\r(?!\n)', CRLF, s)
+    return s
+
+
+
+def formataddr(pair):
+    """The inverse of parseaddr(), this takes a 2-tuple of the form
+    (realname, email_address) and returns the string value suitable
+    for an RFC 2822 From, To or Cc header.
+
+    If the first element of pair is false, then the second element is
+    returned unmodified.
+    """
+    name, address = pair
+    if name:
+        quotes = ''
+        if specialsre.search(name):
+            quotes = '"'
+        name = escapesre.sub(r'\\\g<0>', name)
+        return '%s%s%s <%s>' % (quotes, name, quotes, address)
+    return address
+
+# For backwards compatibility
+def dump_address_pair(pair):
+    warnings.warn('Use email.Utils.formataddr() instead',
+                  DeprecationWarning, 2)
+    return formataddr(pair)
+
+
+
+def getaddresses(fieldvalues):
+    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
+    all = COMMASPACE.join(fieldvalues)
+    a = _AddressList(all)
+    return a.addresslist
+
+
+
+ecre = re.compile(r'''
+  =\?                   # literal =?
+  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
+  \?                    # literal ?
+  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
+  \?                    # literal ?
+  (?P<atom>.*?)         # non-greedy up to the next ?= is the atom
+  \?=                   # literal ?=
+  ''', re.VERBOSE | re.IGNORECASE)
+
+
+def decode(s):
+    """Return a decoded string according to RFC 2047, as a unicode string.
+
+    NOTE: This function is deprecated.  Use Header.decode_header() instead.
+    """
+    warnings.warn('Use Header.decode_header() instead.', DeprecationWarning, 2)
+    # Intra-package import here to avoid circular import problems.
+    from email.Header import decode_header
+    L = decode_header(s)
+    if not isinstance(L, ListType):
+        # s wasn't decoded
+        return s
+
+    rtn = []
+    for atom, charset in L:
+        if charset is None:
+            rtn.append(atom)
+        else:
+            # Convert the string to Unicode using the given encoding.  Leave
+            # Unicode conversion errors to strict.
+            rtn.append(unicode(atom, charset))
+    # Now that we've decoded everything, we just need to join all the parts
+    # together into the final string.
+    return UEMPTYSTRING.join(rtn)
+
+
+
+def encode(s, charset='iso-8859-1', encoding='q'):
+    """Encode a string according to RFC 2047."""
+    warnings.warn('Use Header.Header.encode() instead.', DeprecationWarning, 2)
+    encoding = encoding.lower()
+    if encoding == 'q':
+        estr = _qencode(s)
+    elif encoding == 'b':
+        estr = _bencode(s)
+    else:
+        raise ValueError, 'Illegal encoding code: ' + encoding
+    return '=?%s?%s?%s?=' % (charset.lower(), encoding, estr)
+
+
+
+def formatdate(timeval=None, localtime=False):
+    """Returns a date string as specified by RFC 2822, e.g.:
+
+    Fri, 09 Nov 2001 01:08:47 -0000
+
+    Optional timeval if given is a floating point time value as accepted by
+    gmtime() and localtime(), otherwise the current time is used.
+
+    Optional localtime is a flag that when True, interprets timeval, and
+    returns a date relative to the local timezone instead of UTC, properly
+    taking daylight savings time into account.
+    """
+    # Note: we cannot use strftime() because that honors the locale and RFC
+    # 2822 requires that day and month names be the English abbreviations.
+    if timeval is None:
+        timeval = time.time()
+    if localtime:
+        now = time.localtime(timeval)
+        # Calculate timezone offset, based on whether the local zone has
+        # daylight savings time, and whether DST is in effect.
+        if time.daylight and now[-1]:
+            offset = time.altzone
+        else:
+            offset = time.timezone
+        hours, minutes = divmod(abs(offset), 3600)
+        # Remember offset is in seconds west of UTC, but the timezone is in
+        # minutes east of UTC, so the signs differ.
+        if offset > 0:
+            sign = '-'
+        else:
+            sign = '+'
+        zone = '%s%02d%02d' % (sign, hours, minutes / 60)
+    else:
+        now = time.gmtime(timeval)
+        # Timezone offset is always -0000
+        zone = '-0000'
+    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+        ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
+        now[2],
+        ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+         'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
+        now[0], now[3], now[4], now[5],
+        zone)
+
+
+
+def make_msgid(idstring=None):
+    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
+
+    <20020201195627.33539.96671 at nightshade.la.mastaler.com>
+
+    Optional idstring if given is a string used to strengthen the
+    uniqueness of the message id.
+    """
+    timeval = time.time()
+    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
+    pid = os.getpid()
+    randint = random.randrange(100000)
+    if idstring is None:
+        idstring = ''
+    else:
+        idstring = '.' + idstring
+    idhost = socket.getfqdn()
+    msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
+    return msgid
+
+
+
+# These functions are in the standalone mimelib version only because they've
+# subsequently been fixed in the latest Python versions.  We use this to worm
+# around broken older Pythons.
+def parsedate(data):
+    if not data:
+        return None
+    return _parsedate(data)
+
+
+def parsedate_tz(data):
+    if not data:
+        return None
+    return _parsedate_tz(data)
+
+
+def parseaddr(addr):
+    addrs = _AddressList(addr).addresslist
+    if not addrs:
+        return '', ''
+    return addrs[0]
+
+
+# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
+def unquote(str):
+    """Remove quotes from a string."""
+    if len(str) > 1:
+        if str.startswith('"') and str.endswith('"'):
+            return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
+        if str.startswith('<') and str.endswith('>'):
+            return str[1:-1]
+    return str
+
+
+
+# RFC2231-related functions - parameter encoding and decoding
+def decode_rfc2231(s):
+    """Decode string according to RFC 2231"""
+    import urllib
+    parts = s.split("'", 2)
+    if len(parts) == 1:
+        return None, None, urllib.unquote(s)
+    charset, language, s = parts
+    return charset, language, urllib.unquote(s)
+
+
+def encode_rfc2231(s, charset=None, language=None):
+    """Encode string according to RFC 2231.
+
+    If neither charset nor language is given, then s is returned as-is.  If
+    charset is given but not language, the string is encoded using the empty
+    string for language.
+    """
+    import urllib
+    s = urllib.quote(s, safe='')
+    if charset is None and language is None:
+        return s
+    if language is None:
+        language = ''
+    return "%s'%s'%s" % (charset, language, s)
+
+
+rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
+
+def decode_params(params):
+    """Decode parameters list according to RFC 2231.
+
+    params is a sequence of 2-tuples containing (content type, string value).
+    """
+    new_params = []
+    # maps parameter's name to a list of continuations
+    rfc2231_params = {}
+    # params is a sequence of 2-tuples containing (content_type, string value)
+    name, value = params[0]
+    new_params.append((name, value))
+    # Cycle through each of the rest of the parameters.
+    for name, value in params[1:]:
+        value = unquote(value)
+        mo = rfc2231_continuation.match(name)
+        if mo:
+            name, num = mo.group('name', 'num')
+            if num is not None:
+                num = int(num)
+            rfc2231_param1 = rfc2231_params.setdefault(name, [])
+            rfc2231_param1.append((num, value))
+        else:
+            new_params.append((name, '"%s"' % quote(value)))
+    if rfc2231_params:
+        for name, continuations in rfc2231_params.items():
+            value = []
+            # Sort by number
+            continuations.sort()
+            # And now append all values in num order
+            for num, continuation in continuations:
+                value.append(continuation)
+            charset, language, value = decode_rfc2231(EMPTYSTRING.join(value))
+            new_params.append(
+                (name, (charset, language, '"%s"' % quote(value))))
+    return new_params
diff --git a/lib-python/2.2/email/__init__.py b/lib-python/2.2/email/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/__init__.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
+
+"""A package for parsing, handling, and generating email messages.
+"""
+
+__version__ = '2.5.4'
+
+__all__ = [
+    'base64MIME',
+    'Charset',
+    'Encoders',
+    'Errors',
+    'Generator',
+    'Header',
+    'Iterators',
+    'Message',
+    'MIMEAudio',
+    'MIMEBase',
+    'MIMEImage',
+    'MIMEMessage',
+    'MIMEMultipart',
+    'MIMENonMultipart',
+    'MIMEText',
+    'Parser',
+    'quopriMIME',
+    'Utils',
+    'message_from_string',
+    'message_from_file',
+    ]
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+
+
+# Some convenience routines.  Don't import Parser and Message as side-effects
+# of importing email since those cascadingly import most of the rest of the
+# email package.
+def message_from_string(s, _class=None, strict=False):
+    """Parse a string into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from email.Parser import Parser
+    if _class is None:
+        from email.Message import Message
+        _class = Message
+    return Parser(_class, strict=strict).parsestr(s)
+
+def message_from_file(fp, _class=None, strict=False):
+    """Read a file and parse its contents into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from email.Parser import Parser
+    if _class is None:
+        from email.Message import Message
+        _class = Message
+    return Parser(_class, strict=strict).parse(fp)
+
+
+
+# Patch encodings.aliases to recognize 'ansi_x3.4_1968' which isn't a standard
+# alias in Python 2.1.3, but is used by the email package test suite.
+from encodings.aliases import aliases # The aliases dictionary
+if not aliases.has_key('ansi_x3.4_1968'):
+    aliases['ansi_x3.4_1968'] = 'ascii'
+del aliases # Not needed any more
diff --git a/lib-python/2.2/email/_compat21.py b/lib-python/2.2/email/_compat21.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/_compat21.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: barry at zope.com
+
+"""Module containing compatibility functions for Python 2.1.
+"""
+
+from cStringIO import StringIO
+from types import StringType, UnicodeType
+
+False = 0
+True = 1
+
+
+
+# This function will become a method of the Message class
+def walk(self):
+    """Walk over the message tree, yielding each subpart.
+
+    The walk is performed in depth-first order.  This method is a
+    generator.
+    """
+    parts = []
+    parts.append(self)
+    if self.is_multipart():
+        for subpart in self.get_payload():
+            parts.extend(subpart.walk())
+    return parts
+
+
+# Python 2.2 spells floor division //
+def _floordiv(i, j):
+    """Do a floor division, i/j."""
+    return i / j
+
+
+def _isstring(obj):
+    return isinstance(obj, StringType) or isinstance(obj, UnicodeType)
+
+
+
+# These two functions are imported into the Iterators.py interface module.
+# The Python 2.2 version uses generators for efficiency.
+def body_line_iterator(msg, decode=False):
+    """Iterate over the parts, returning string payloads line-by-line.
+
+    Optional decode (default False) is passed through to .get_payload().
+    """
+    lines = []
+    for subpart in msg.walk():
+        payload = subpart.get_payload(decode=decode)
+        if _isstring(payload):
+            for line in StringIO(payload).readlines():
+                lines.append(line)
+    return lines
+
+
+def typed_subpart_iterator(msg, maintype='text', subtype=None):
+    """Iterate over the subparts with a given MIME type.
+
+    Use `maintype' as the main MIME type to match against; this defaults to
+    "text".  Optional `subtype' is the MIME subtype to match against; if
+    omitted, only the main type is matched.
+    """
+    parts = []
+    for subpart in msg.walk():
+        if subpart.get_content_maintype() == maintype:
+            if subtype is None or subpart.get_content_subtype() == subtype:
+                parts.append(subpart)
+    return parts
diff --git a/lib-python/2.2/email/_compat22.py b/lib-python/2.2/email/_compat22.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/_compat22.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: barry at zope.com
+
+"""Module containing compatibility functions for Python 2.2.
+"""
+
+from __future__ import generators
+from __future__ import division
+from cStringIO import StringIO
+from types import StringTypes
+
+# Python 2.2.x where x < 1 lacks True/False
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+
+
+# This function will become a method of the Message class
+def walk(self):
+    """Walk over the message tree, yielding each subpart.
+
+    The walk is performed in depth-first order.  This method is a
+    generator.
+    """
+    yield self
+    if self.is_multipart():
+        for subpart in self.get_payload():
+            for subsubpart in subpart.walk():
+                yield subsubpart
+
+
+# Python 2.2 spells floor division //
+def _floordiv(i, j):
+    """Do a floor division, i/j."""
+    return i // j
+
+
+def _isstring(obj):
+    return isinstance(obj, StringTypes)
+
+
+
+# These two functions are imported into the Iterators.py interface module.
+# The Python 2.2 version uses generators for efficiency.
+def body_line_iterator(msg, decode=False):
+    """Iterate over the parts, returning string payloads line-by-line.
+
+    Optional decode (default False) is passed through to .get_payload().
+    """
+    for subpart in msg.walk():
+        payload = subpart.get_payload(decode=decode)
+        if _isstring(payload):
+            for line in StringIO(payload):
+                yield line
+
+
+def typed_subpart_iterator(msg, maintype='text', subtype=None):
+    """Iterate over the subparts with a given MIME type.
+
+    Use `maintype' as the main MIME type to match against; this defaults to
+    "text".  Optional `subtype' is the MIME subtype to match against; if
+    omitted, only the main type is matched.
+    """
+    for subpart in msg.walk():
+        if subpart.get_content_maintype() == maintype:
+            if subtype is None or subpart.get_content_subtype() == subtype:
+                yield subpart
diff --git a/lib-python/2.2/email/_parseaddr.py b/lib-python/2.2/email/_parseaddr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/_parseaddr.py
@@ -0,0 +1,480 @@
+# Copyright (C) 2002 Python Software Foundation
+
+"""Email address parsing code.
+
+Lifted directly from rfc822.py.  This should eventually be rewritten.
+"""
+
+import time
+from types import TupleType
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+# Joiner constants used throughout this module.
+SPACE = ' '
+EMPTYSTRING = ''
+COMMASPACE = ', '
+
+# Parse a date field
+# Both abbreviated and full month names are accepted; full names occupy
+# indexes 12-23, which parsedate_tz() folds back into the 1-12 range.
+_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
+               'aug', 'sep', 'oct', 'nov', 'dec',
+               'january', 'february', 'march', 'april', 'may', 'june', 'july',
+               'august', 'september', 'october', 'november', 'december']
+
+_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+
+# The timezone table does not include the military time zones defined
+# in RFC822, other than Z.  According to RFC1123, the description in
+# RFC822 gets the signs wrong, so we can't rely on any such time
+# zones.  RFC1123 recommends that numeric timezone indicators be used
+# instead of timezone names.
+
+# Values are in RFC 2822 numeric form (hours*100 + minutes), not seconds;
+# parsedate_tz() converts them to seconds, e.g. -500 -> -18000.
+_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
+              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
+              'EST': -500, 'EDT': -400,  # Eastern
+              'CST': -600, 'CDT': -500,  # Central
+              'MST': -700, 'MDT': -600,  # Mountain
+              'PST': -800, 'PDT': -700   # Pacific
+              }
+
+
+def parsedate_tz(data):
+    """Convert a date string to a time tuple.
+
+    Accounts for military timezones.
+
+    Returns a 10-tuple (the 9 standard struct_time fields plus the timezone
+    offset in seconds, or None when unknown), or None if unparseable.
+    """
+    data = data.split()
+    # The FWS after the comma after the day-of-week is optional, so search and
+    # adjust for this.
+    if data[0].endswith(',') or data[0].lower() in _daynames:
+        # There's a dayname here. Skip it
+        del data[0]
+    else:
+        i = data[0].rfind(',')
+        if i >= 0:
+            data[0] = data[0][i+1:]
+    if len(data) == 3: # RFC 850 date, deprecated
+        stuff = data[0].split('-')
+        if len(stuff) == 3:
+            data = stuff + data[1:]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            # Timezone glued onto the time field, e.g. "12:00:00+0100".
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('') # Dummy tz
+    if len(data) < 5:
+        return None
+    data = data[:5]
+    [dd, mm, yy, tm, tz] = data
+    mm = mm.lower()
+    if mm not in _monthnames:
+        # Day and month may be swapped ("Feb 25" rather than "25 Feb").
+        dd, mm = mm, dd.lower()
+        if mm not in _monthnames:
+            return None
+    mm = _monthnames.index(mm) + 1
+    if mm > 12:
+        # Full month names occupy indexes 13-24; fold back into 1-12.
+        mm -= 12
+    if dd[-1] == ',':
+        dd = dd[:-1]
+    i = yy.find(':')
+    if i > 0:
+        # A ':' in the year slot means time and year appear swapped.
+        yy, tm = tm, yy
+    if yy[-1] == ',':
+        yy = yy[:-1]
+    if not yy[0].isdigit():
+        # Non-numeric year slot: year and timezone are swapped.
+        yy, tz = tz, yy
+    if tm[-1] == ',':
+        tm = tm[:-1]
+    tm = tm.split(':')
+    if len(tm) == 2:
+        [thh, tmm] = tm
+        tss = '0'
+    elif len(tm) == 3:
+        [thh, tmm, tss] = tm
+    else:
+        return None
+    try:
+        yy = int(yy)
+        dd = int(dd)
+        thh = int(thh)
+        tmm = int(tmm)
+        tss = int(tss)
+    except ValueError:
+        return None
+    tzoffset = None
+    tz = tz.upper()
+    if _timezones.has_key(tz):
+        tzoffset = _timezones[tz]
+    else:
+        try:
+            tzoffset = int(tz)
+        except ValueError:
+            pass
+    # Convert a timezone offset into seconds ; -0500 -> -18000
+    # (A zero offset needs no conversion, so plain truthiness is fine here.)
+    if tzoffset:
+        if tzoffset < 0:
+            tzsign = -1
+            tzoffset = -tzoffset
+        else:
+            tzsign = 1
+        tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
+    # NOTE(review): the local name `tuple' shadows the builtin; harmless
+    # here since the builtin is not used afterwards in this function.
+    tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
+    return tuple
+
+
+def parsedate(data):
+    """Convert a time string to a time tuple."""
+    t = parsedate_tz(data)
+    # parsedate_tz() returns a 10-tuple or None; drop the timezone element
+    # to produce a standard 9-tuple, passing None through unchanged.
+    if isinstance(t, TupleType):
+        return t[:9]
+    else:
+        return t
+
+
+def mktime_tz(data):
+    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
+    if data[9] is None:
+        # No zone info, so localtime is better assumption than GMT
+        # (the trailing -1 lets mktime decide the DST flag).
+        return time.mktime(data[:8] + (-1,))
+    else:
+        # Interpret the tuple as local-time-with-offset: undo mktime's local
+        # conversion via time.timezone, then apply the tuple's own offset.
+        t = time.mktime(data[:8] + (0,))
+        return t - data[9] - time.timezone
+
+
+def quote(str):
+    """Add quotes around a string."""
+    # Despite the docstring, this only backslash-escapes backslashes and
+    # double quotes; it does not add surrounding quote characters.
+    return str.replace('\\', '\\\\').replace('"', '\\"')
+
+
+class AddrlistClass:
+    """Address parser class by Ben Escoto.
+
+    To understand what this class does, it helps to have a copy of RFC 2822 in
+    front of you.
+
+    Note: this class interface is deprecated and may be removed in the future.
+    Use rfc822.AddressList instead.
+    """
+
+    def __init__(self, field):
+        """Initialize a new instance.
+
+        `field' is an unparsed address header field, containing
+        one or more addresses.
+        """
+        self.specials = '()<>@,:;.\"[]'
+        self.pos = 0
+        self.LWS = ' \t'
+        self.CR = '\r\n'
+        self.atomends = self.specials + self.LWS + self.CR
+        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
+        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
+        # syntax, so allow dots in phrases.
+        self.phraseends = self.atomends.replace('.', '')
+        self.field = field
+        self.commentlist = []
+
+    def gotonext(self):
+        """Parse up to the start of the next address."""
+        # Skips whitespace/newlines and accumulates any RFC 2822 comments
+        # encountered along the way into self.commentlist.
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS + '\n\r':
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            else:
+                break
+
+    def getaddrlist(self):
+        """Parse all addresses.
+
+        Returns a list containing all of the addresses.
+        """
+        result = []
+        while self.pos < len(self.field):
+            ad = self.getaddress()
+            if ad:
+                result += ad
+            else:
+                # Unparseable chunk: record an empty pair so progress is
+                # visible to the caller.
+                result.append(('', ''))
+        return result
+
+    def getaddress(self):
+        """Parse the next address."""
+        # Returns a list of (display-name, addr-spec) pairs; usually one
+        # entry, but a group address can yield several.
+        self.commentlist = []
+        self.gotonext()
+
+        oldpos = self.pos
+        oldcl = self.commentlist
+        plist = self.getphraselist()
+
+        self.gotonext()
+        returnlist = []
+
+        if self.pos >= len(self.field):
+            # Bad email address technically, no domain.
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+
+        elif self.field[self.pos] in '.@':
+            # email address is just an addrspec
+            # this isn't very efficient since we start over
+            self.pos = oldpos
+            self.commentlist = oldcl
+            addrspec = self.getaddrspec()
+            returnlist = [(SPACE.join(self.commentlist), addrspec)]
+
+        elif self.field[self.pos] == ':':
+            # address is a group
+            returnlist = []
+
+            fieldlen = len(self.field)
+            self.pos += 1
+            while self.pos < len(self.field):
+                self.gotonext()
+                if self.pos < fieldlen and self.field[self.pos] == ';':
+                    self.pos += 1
+                    break
+                returnlist = returnlist + self.getaddress()
+
+        elif self.field[self.pos] == '<':
+            # Address is a phrase then a route addr
+            routeaddr = self.getrouteaddr()
+
+            if self.commentlist:
+                returnlist = [(SPACE.join(plist) + ' (' +
+                               ' '.join(self.commentlist) + ')', routeaddr)]
+            else:
+                returnlist = [(SPACE.join(plist), routeaddr)]
+
+        else:
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+            elif self.field[self.pos] in self.specials:
+                # Stray special character: consume it so parsing advances.
+                self.pos += 1
+
+        self.gotonext()
+        if self.pos < len(self.field) and self.field[self.pos] == ',':
+            self.pos += 1
+        return returnlist
+
+    def getrouteaddr(self):
+        """Parse a route address (Return-path value).
+
+        This method just skips all the route stuff and returns the addrspec.
+        """
+        if self.field[self.pos] != '<':
+            return
+
+        expectroute = False
+        self.pos += 1
+        self.gotonext()
+        adlist = ''
+        while self.pos < len(self.field):
+            if expectroute:
+                # '@' seen: discard the route's domain part.
+                self.getdomain()
+                expectroute = False
+            elif self.field[self.pos] == '>':
+                self.pos += 1
+                break
+            elif self.field[self.pos] == '@':
+                self.pos += 1
+                expectroute = True
+            elif self.field[self.pos] == ':':
+                self.pos += 1
+            else:
+                adlist = self.getaddrspec()
+                self.pos += 1
+                break
+            self.gotonext()
+
+        return adlist
+
+    def getaddrspec(self):
+        """Parse an RFC 2822 addr-spec."""
+        aslist = []
+
+        self.gotonext()
+        while self.pos < len(self.field):
+            if self.field[self.pos] == '.':
+                aslist.append('.')
+                self.pos += 1
+            elif self.field[self.pos] == '"':
+                aslist.append('"%s"' % self.getquote())
+            elif self.field[self.pos] in self.atomends:
+                break
+            else:
+                aslist.append(self.getatom())
+            self.gotonext()
+
+        # No '@' means a local-part-only address; return what we have.
+        if self.pos >= len(self.field) or self.field[self.pos] != '@':
+            return EMPTYSTRING.join(aslist)
+
+        aslist.append('@')
+        self.pos += 1
+        self.gotonext()
+        return EMPTYSTRING.join(aslist) + self.getdomain()
+
+    def getdomain(self):
+        """Get the complete domain name from an address."""
+        sdlist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] == '[':
+                sdlist.append(self.getdomainliteral())
+            elif self.field[self.pos] == '.':
+                self.pos += 1
+                sdlist.append('.')
+            elif self.field[self.pos] in self.atomends:
+                break
+            else:
+                sdlist.append(self.getatom())
+        return EMPTYSTRING.join(sdlist)
+
+    def getdelimited(self, beginchar, endchars, allowcomments=True):
+        """Parse a header fragment delimited by special characters.
+
+        `beginchar' is the start character for the fragment.
+        If self is not looking at an instance of `beginchar' then
+        getdelimited returns the empty string.
+
+        `endchars' is a sequence of allowable end-delimiting characters.
+        Parsing stops when one of these is encountered.
+
+        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
+        within the parsed fragment.
+        """
+        if self.field[self.pos] != beginchar:
+            return ''
+
+        slist = ['']
+        # `quote' flags that the previous character was a backslash, so the
+        # current character is taken literally (even an end delimiter).
+        quote = False
+        self.pos += 1
+        while self.pos < len(self.field):
+            if quote:
+                slist.append(self.field[self.pos])
+                quote = False
+            elif self.field[self.pos] in endchars:
+                self.pos += 1
+                break
+            elif allowcomments and self.field[self.pos] == '(':
+                slist.append(self.getcomment())
+            elif self.field[self.pos] == '\\':
+                quote = True
+            else:
+                slist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(slist)
+
+    def getquote(self):
+        """Get a quote-delimited fragment from self's field."""
+        return self.getdelimited('"', '"\r', False)
+
+    def getcomment(self):
+        """Get a parenthesis-delimited fragment from self's field."""
+        return self.getdelimited('(', ')\r', True)
+
+    def getdomainliteral(self):
+        """Parse an RFC 2822 domain-literal."""
+        return '[%s]' % self.getdelimited('[', ']\r', False)
+
+    def getatom(self, atomends=None):
+        """Parse an RFC 2822 atom.
+
+        Optional atomends specifies a different set of end token delimiters
+        (the default is to use self.atomends).  This is used e.g. in
+        getphraselist() since phrase endings must not include the `.' (which
+        is legal in phrases)."""
+        atomlist = ['']
+        if atomends is None:
+            atomends = self.atomends
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in atomends:
+                break
+            else:
+                atomlist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(atomlist)
+
+    def getphraselist(self):
+        """Parse a sequence of RFC 2822 phrases.
+
+        A phrase is a sequence of words, which are in turn either RFC 2822
+        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
+        runs of continuous whitespace into one space.
+        """
+        plist = []
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos += 1
+            elif self.field[self.pos] == '"':
+                plist.append(self.getquote())
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] in self.phraseends:
+                break
+            else:
+                plist.append(self.getatom(self.phraseends))
+
+        return plist
+
+class AddressList(AddrlistClass):
+    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
+    def __init__(self, field):
+        AddrlistClass.__init__(self, field)
+        # Parse eagerly; a false field (None/empty) yields an empty list.
+        if field:
+            self.addresslist = self.getaddrlist()
+        else:
+            self.addresslist = []
+
+    def __len__(self):
+        return len(self.addresslist)
+
+    def __str__(self):
+        # NOTE(review): dump_address_pair is not defined in this module
+        # chunk -- presumably provided elsewhere; verify before relying on
+        # str() of an AddressList.
+        return COMMASPACE.join(map(dump_address_pair, self.addresslist))
+
+    def __add__(self, other):
+        # Set union
+        newaddr = AddressList(None)
+        newaddr.addresslist = self.addresslist[:]
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __iadd__(self, other):
+        # Set union, in-place
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                self.addresslist.append(x)
+        return self
+
+    def __sub__(self, other):
+        # Set difference
+        newaddr = AddressList(None)
+        for x in self.addresslist:
+            if not x in other.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __isub__(self, other):
+        # Set difference, in-place
+        for x in other.addresslist:
+            if x in self.addresslist:
+                self.addresslist.remove(x)
+        return self
+
+    def __getitem__(self, index):
+        # Make indexing, slices, and 'in' work
+        return self.addresslist[index]
diff --git a/lib-python/2.2/email/base64MIME.py b/lib-python/2.2/email/base64MIME.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/base64MIME.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: che at debian.org (Ben Gertzfield)
+
+"""Base64 content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
+characters encoding known as Base64.
+
+It is used in the MIME standards for email to attach images, audio, and text
+using some 8-bit character sets to messages.
+
+This module provides an interface to encode and decode both headers and bodies
+with Base64 encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:, From:, Cc:, etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character conversion
+necessary for proper internationalized headers; it only does dumb encoding and
+decoding.  To deal with the various line wrapping issues, use the email.Header
+module.
+"""
+
+import re
+from binascii import b2a_base64, a2b_base64
+from email.Utils import fix_eols
+
+try:
+    from email._compat22 import _floordiv
+except SyntaxError:
+    # Python 2.1 spells integer division differently
+    from email._compat21 import _floordiv
+
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
+# See also Charset.py
+# Length of the fixed RFC 2047 chrome in '=?charset?b?...?=' minus the
+# charset name itself: '=?' + '?b?' + '?=' == 7 characters.
+MISC_LEN = 7
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+
+
+# Helpers
+def base64_len(s):
+    """Return the length of s when it is encoded with base64."""
+    groups_of_3, leftover = divmod(len(s), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    # Thanks, Tim!
+    n = groups_of_3 * 4
+    if leftover:
+        # A partial final group still encodes (with padding) to 4 bytes.
+        n += 4
+    return n
+
+
+
+def header_encode(header, charset='iso-8859-1', keep_eols=False,
+                  maxlinelen=76, eol=NL):
+    """Encode a single header line with Base64 encoding in a given charset.
+
+    Defined in RFC 2045, this Base64 encoding is identical to normal Base64
+    encoding, except that each line must be intelligently wrapped (respecting
+    the Base64 encoding), and subsequent lines must start with a space.
+
+    charset names the character set to use to encode the header.  It defaults
+    to iso-8859-1.
+
+    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
+    to the canonical email line separator \\r\\n unless the keep_eols
+    parameter is True (the default is False).
+
+    Each line of the header will be terminated in the value of eol, which
+    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
+    this function directly in email.
+
+    The resulting string will be in the form:
+
+    "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
+      =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
+
+    with each line wrapped at, at most, maxlinelen characters (defaults to 76
+    characters).
+    """
+    # Return empty headers unchanged
+    if not header:
+        return header
+
+    if not keep_eols:
+        header = fix_eols(header)
+
+    # Base64 encode each line, in encoded chunks no greater than maxlinelen in
+    # length, after the RFC chrome is added in.
+    base64ed = []
+    max_encoded = maxlinelen - len(charset) - MISC_LEN
+    # 3 raw bytes become 4 encoded bytes, so raw chunks are 3/4 the budget.
+    max_unencoded = _floordiv(max_encoded * 3, 4)
+
+    for i in range(0, len(header), max_unencoded):
+        base64ed.append(b2a_base64(header[i:i+max_unencoded]))
+
+    # Now add the RFC chrome to each encoded chunk
+    lines = []
+    for line in base64ed:
+        # Ignore the last character of each line if it is a newline
+        # (b2a_base64 always appends one).
+        if line.endswith(NL):
+            line = line[:-1]
+        # Add the chrome
+        lines.append('=?%s?b?%s?=' % (charset, line))
+    # Glue the lines together and return it.  BAW: should we be able to
+    # specify the leading whitespace in the joiner?
+    joiner = eol + ' '
+    return joiner.join(lines)
+
+
+
+def encode(s, binary=True, maxlinelen=76, eol=NL):
+    """Encode a string with base64.
+
+    Each line will be wrapped at, at most, maxlinelen characters (defaults to
+    76 characters).
+
+    If binary is False, end-of-line characters will be converted to the
+    canonical email end-of-line sequence \\r\\n.  Otherwise they will be left
+    verbatim (this is the default).
+
+    Each line of encoded text will end with eol, which defaults to "\\n".  Set
+    this to "\r\n" if you will be using the result of this function directly
+    in an email.
+    """
+    if not s:
+        return s
+
+    if not binary:
+        s = fix_eols(s)
+
+    encvec = []
+    # Encode raw chunks sized so each encoded line fits in maxlinelen.
+    max_unencoded = _floordiv(maxlinelen * 3, 4)
+    for i in range(0, len(s), max_unencoded):
+        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
+        # adding a newline to the encoded string?
+        enc = b2a_base64(s[i:i + max_unencoded])
+        # Replace b2a_base64's trailing \n with the caller's eol if different.
+        if enc.endswith(NL) and eol <> NL:
+            enc = enc[:-1] + eol
+        encvec.append(enc)
+    return EMPTYSTRING.join(encvec)
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_encode = encode
+encodestring = encode
+
+
+
+def decode(s, convert_eols=None):
+    """Decode a raw base64 string.
+
+    If convert_eols is set to a string value, all canonical email linefeeds,
+    e.g. "\\r\\n", in the decoded text will be converted to the value of
+    convert_eols.  os.linesep is a good choice for convert_eols if you are
+    decoding a text attachment.
+
+    This function does not parse a full MIME header value encoded with
+    base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
+    level email.Header class for that functionality.
+    """
+    if not s:
+        return s
+
+    dec = a2b_base64(s)
+    # Optionally normalize line endings in the decoded payload.
+    if convert_eols:
+        return dec.replace(CRLF, convert_eols)
+    return dec
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_decode = decode
+decodestring = decode
diff --git a/lib-python/2.2/email/quopriMIME.py b/lib-python/2.2/email/quopriMIME.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/quopriMIME.py
@@ -0,0 +1,323 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# Author: che at debian.org (Ben Gertzfield)
+
+"""Quoted-printable content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode US ASCII-like 8-bit data called `quoted-printable'.  It is used to
+safely encode text that is in a character set similar to the 7-bit US ASCII
+character set, but that includes some 8-bit characters that are normally not
+allowed in email bodies or headers.
+
+Quoted-printable is very space-inefficient for encoding binary files; use the
+email.base64MIME module for that instead.
+
+This module provides an interface to encode and decode both headers and bodies
+with quoted-printable encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:/From:/Cc: etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character
+conversion necessary for proper internationalized headers; it only
+does dumb encoding and decoding.  To deal with the various line
+wrapping issues, use the email.Header module.
+"""
+
+import re
+from string import hexdigits
+from email.Utils import fix_eols
+
+CRLF = '\r\n'
+NL = '\n'
+
+# See also Charset.py
+# Fixed chrome length of '=?charset?q?...?=' minus the charset name.
+MISC_LEN = 7
+
+# hqre matches characters that must be escaped in header `Q' encoding;
+# bqre matches characters that must be escaped in body quoted-printable
+# (anything outside printable ASCII/tab, including '=').
+hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
+bqre = re.compile(r'[^ !-<>-~\t]')
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+
+
+# Helpers
+def header_quopri_check(c):
+    """Return True if the character should be escaped with header quopri."""
+    # `and True' normalizes the regex match object to a boolean-ish value.
+    return hqre.match(c) and True
+
+
+def body_quopri_check(c):
+    """Return True if the character should be escaped with body quopri."""
+    # `and True' normalizes the regex match object to a boolean-ish value.
+    return bqre.match(c) and True
+
+
+def header_quopri_len(s):
+    """Return the length of str when it is encoded with header quopri."""
+    count = 0
+    for c in s:
+        # Escaped characters expand to 3 bytes ('=XX'); others stay 1 byte.
+        if hqre.match(c):
+            count += 3
+        else:
+            count += 1
+    return count
+
+
+def body_quopri_len(str):
+    """Return the length of str when it is encoded with body quopri."""
+    count = 0
+    for c in str:
+        # Escaped characters expand to 3 bytes ('=XX'); others stay 1 byte.
+        if bqre.match(c):
+            count += 3
+        else:
+            count += 1
+    return count
+
+
+def _max_append(L, s, maxlen, extra=''):
+    # Append s to the last chunk of L if it still fits within maxlen
+    # (joined by `extra'); otherwise start a new chunk.  New chunks are
+    # lstripped so continuation lines don't begin with whitespace.
+    if not L:
+        L.append(s.lstrip())
+    elif len(L[-1]) + len(s) <= maxlen:
+        L[-1] += extra + s
+    else:
+        L.append(s.lstrip())
+
+
+def unquote(s):
+    """Turn a string in the form =AB to the ASCII character with value 0xab"""
+    # s[1:3] skips the '=' and parses the two hex digits.
+    return chr(int(s[1:3], 16))
+
+
+def quote(c):
+    # Escape a single character as '=XX' (uppercase hex, per RFC 2045).
+    return "=%02X" % ord(c)
+
+
+
+def header_encode(header, charset="iso-8859-1", keep_eols=False,
+                  maxlinelen=76, eol=NL):
+    """Encode a single header line with quoted-printable (like) encoding.
+
+    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
+    used specifically for email header fields to allow charsets with mostly 7
+    bit characters (and some 8 bit) to remain more or less readable in non-RFC
+    2045 aware mail clients.
+
+    charset names the character set to use to encode the header.  It defaults
+    to iso-8859-1.
+
+    The resulting string will be in the form:
+
+    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
+      =?charset?q?Silly_=C8nglish_Kn=EEghts?="
+
+    with each line wrapped safely at, at most, maxlinelen characters (defaults
+    to 76 characters).  If maxlinelen is None, the entire string is encoded in
+    one chunk with no splitting.
+
+    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
+    to the canonical email line separator \\r\\n unless the keep_eols
+    parameter is True (the default is False).
+
+    Each line of the header will be terminated in the value of eol, which
+    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
+    this function directly in email.
+    """
+    # Return empty headers unchanged
+    if not header:
+        return header
+
+    if not keep_eols:
+        header = fix_eols(header)
+
+    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
+    # length, after the RFC chrome is added in.
+    quoted = []
+    if maxlinelen is None:
+        # An obnoxiously large number that's good enough
+        max_encoded = 100000
+    else:
+        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
+
+    for c in header:
+        # Space may be represented as _ instead of =20 for readability
+        if c == ' ':
+            _max_append(quoted, '_', max_encoded)
+        # These characters can be included verbatim
+        elif not hqre.match(c):
+            _max_append(quoted, c, max_encoded)
+        # Otherwise, replace with hex value like =E2
+        else:
+            _max_append(quoted, "=%02X" % ord(c), max_encoded)
+
+    # Now add the RFC chrome to each encoded chunk and glue the chunks
+    # together.  BAW: should we be able to specify the leading whitespace in
+    # the joiner?  Continuation lines are indented by one space.
+    joiner = eol + ' '
+    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
+
+
+
+def encode(body, binary=False, maxlinelen=76, eol=NL):
+    """Encode with quoted-printable, wrapping at maxlinelen characters.
+
+    If binary is False (the default), end-of-line characters will be converted
+    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
+    be left verbatim.
+
+    Each line of encoded text will end with eol, which defaults to "\\n".  Set
+    this to "\\r\\n" if you will be using the result of this function directly
+    in an email.
+
+    Each line will be wrapped at, at most, maxlinelen characters (defaults to
+    76 characters).  Long lines will have the `soft linefeed' quoted-printable
+    character "=" appended to them, so the decoded text will be identical to
+    the original text.
+    """
+    if not body:
+        return body
+
+    if not binary:
+        body = fix_eols(body)
+
+    # BAW: We're accumulating the body text by string concatenation.  That
+    # can't be very efficient, but I don't have time now to rewrite it.  It
+    # just feels like this algorithm could be more efficient.
+    encoded_body = ''
+    lineno = -1
+    # Preserve line endings here so we can check later to see an eol needs to
+    # be added to the output later.
+    lines = body.splitlines(1)
+    for line in lines:
+        # But strip off line-endings for processing this line.
+        if line.endswith(CRLF):
+            line = line[:-2]
+        elif line[-1] in CRLF:
+            line = line[:-1]
+
+        lineno += 1
+        encoded_line = ''
+        prev = None
+        linelen = len(line)
+        # Now we need to examine every character to see if it needs to be
+        # quopri encoded.  BAW: again, string concatenation is inefficient.
+        for j in range(linelen):
+            c = line[j]
+            prev = c
+            if bqre.match(c):
+                c = quote(c)
+            elif j+1 == linelen:
+                # Check for whitespace at end of line; special case
+                if c not in ' \t':
+                    encoded_line += c
+                prev = c
+                continue
+            # Check to see to see if the line has reached its maximum length
+            # ('=' + eol is the RFC 2045 soft line break).
+            if len(encoded_line) + len(c) >= maxlinelen:
+                encoded_body += encoded_line + '=' + eol
+                encoded_line = ''
+            encoded_line += c
+        # Now at end of line..
+        if prev and prev in ' \t':
+            # Special case for whitespace at end of file
+            if lineno + 1 == len(lines):
+                prev = quote(prev)
+                if len(encoded_line) + len(prev) > maxlinelen:
+                    encoded_body += encoded_line + '=' + eol + prev
+                else:
+                    encoded_body += encoded_line + prev
+            # Just normal whitespace at end of line
+            else:
+                encoded_body += encoded_line + prev + '=' + eol
+            encoded_line = ''
+        # Now look at the line we just finished and it has a line ending, we
+        # need to add eol to the end of the line.
+        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
+            encoded_body += encoded_line + eol
+        else:
+            encoded_body += encoded_line
+        encoded_line = ''
+    return encoded_body
+
+
+# For convenience and backwards compatibility w/ the standard quopri module
+body_encode = encode
+encodestring = encode
+
+
+
+# BAW: I'm not sure if the intent was for the signature of this function to be
+# the same as base64MIME.decode() or not...
+def decode(encoded, eol=NL):
+    """Decode a quoted-printable string.
+
+    Lines are separated with eol, which defaults to \\n.
+    """
+    if not encoded:
+        return encoded
+    # BAW: see comment in encode() above.  Again, we're building up the
+    # decoded string with string concatenation, which could be done much more
+    # efficiently.
+    decoded = ''
+
+    for line in encoded.splitlines():
+        line = line.rstrip()
+        if not line:
+            decoded += eol
+            continue
+
+        i = 0
+        n = len(line)
+        while i < n:
+            c = line[i]
+            if c <> '=':
+                decoded += c
+                i += 1
+            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
+            # a soft line break.
+            elif i+1 == n:
+                # Soft line break: swallow the '=' and emit no eol.
+                i += 1
+                continue
+            # Decode if in form =AB
+            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
+                decoded += unquote(line[i:i+3])
+                i += 3
+            # Otherwise, not in form =AB, pass literally
+            else:
+                decoded += c
+                i += 1
+
+            if i == n:
+                decoded += eol
+    # Special case if original string did not end with eol
+    if not encoded.endswith(eol) and decoded.endswith(eol):
+        decoded = decoded[:-1]
+    return decoded
+
+
+# For convenience and backwards compatibility w/ the standard quopri module
+body_decode = decode
+decodestring = decode
+
+
+
+def _unquote_match(match):
+    """Turn a match in the form =AB to the ASCII character with value 0xab"""
+    # re.sub() callback: decode the whole matched '=XX' escape.
+    s = match.group(0)
+    return unquote(s)
+
+
+# Header decoding is done a bit differently
+def header_decode(s):
+    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
+
+    This function does not parse a full MIME header value encoded with
+    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
+    the high level email.Header class for that functionality.
+    """
+    # In Q encoding, '_' stands for a space; restore those first, then
+    # decode each '=XX' hex escape.
+    s = s.replace('_', ' ')
+    return re.sub(r'=\w{2}', _unquote_match, s)
diff --git a/lib-python/2.2/email/test/__init__.py b/lib-python/2.2/email/test/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (C) 2002 Python Software Foundation
+# Author: barry at zope.com (Barry Warsaw)
diff --git a/lib-python/2.2/email/test/data/PyBanner048.gif b/lib-python/2.2/email/test/data/PyBanner048.gif
new file mode 100644
index 0000000000000000000000000000000000000000..1a5c87f647fbf33e5b46103119c9fd42afbe9e5d
GIT binary patch
literal 954
zc${<hbh9u|oW`KcaD;*3|NsAAfBbp<@yCPLU#~s+c;@cAL)TvKIRAX@$tQ~sJ(#)s
zZvU2Bt!uAUEx%l_;C$N5GYljG9S{Msi-Gn31LeMy%y}8B*5$n3SD<sgB=>&By7x8j
z{~uUT;GrYce_~0|$((r{tZl_!y0Q#RM^`;uQ6(RHpD7?imZ|-N_{uUC7WQk3?{;NH
zGBwmSM64;~s?+3SXG-Nyu3{`#=d5DO(qd+qsKHY;ql=l7x86TUlZ}C6Vbu~P?#atE
z!mD*BPwkz#Y9{O4c}n3W8Y~Pf8hs4Am3i417pt>w-Kbi1Ms*cq6L0#=1@(C;T5FqB
zI`?aIZdcj6`MlEcyPC%tUL`!Oy6(*WPKk$;osA=QZI3!%)vK*u3l>dh6n=W+r`7Uw
z87|f;mIVuzy^!H%3ug*Ry3lZtiKX(3gJD90!wDVbQ~xb(3ZIH_hKecjIdrrNEMq*+
zw2q_9fVDYU$~z!BlZkPe2`868z>JleY9_Cwc0?5}jhn}yqqg&PsRyI=95n_eg{;)6
zUEV25u at h%3nrgtVIQJB900ZOA{Gyhb0w)(u-^0G>fNh4rBnQ=ooY0O-yFUwdHXcyA
zcQ9fSL*pbiE|<mbrb;)bzx9z3j>%fBFT}IRa?h1uSC4v4gI;fuEi5h!m!cM^#(GWk
z)Zh>@cyMCD9RtCd at Fy#;tZHd4VqVvKa97jak0*L2zVCdJ5Y(_=Q)s7-#`Zma8C79D
zo;Ht8`8 at AtY}j?kT1V}M at K+x0X?v|380XI|kgij|@Ti-SU1381e@~x>;pe#a#%!0U
zXwY=(GJ4Nj6vE9Z<1CQzTvD~cRgFbh!Lm9)<52PQi2^*0N<0}(#WyA{yw8=v$ncnh
z+d+JqmhwJ{=D_5=P12nQ8`P(o{r5R^k3)Fb0*4reni+>CorqY};~^rp-QCqN<%yI2
z*O?PMZ25C$KIG7ORpFGNZL#5oqLzx1{A}5?6FH@{b6!U9EDAX8v_MD4Qm%i7#Dfb-
zsdf$w5#n=CsHs&-%;03a{`Ya%Vl9osnUM at JSCTmYaBgm3`mo}MbD!^1!{bb=HZ5YG
ztMBtklXLq~s~K|E)2dc3vF%_z&XjSEA)ROPg<XeMXvnN&a_w^2=u%|eA$i!|aP8Mp
z^-yc8<*x;{eJhCGsOd9FSI>FE&BHUVGZpCVa=fskIb!Cda_v1<Cw3q1HK^#)7ku++
QLJp6_)!*-V6&V<;0S=;nMF0Q*

diff --git a/lib-python/2.2/email/test/data/audiotest.au b/lib-python/2.2/email/test/data/audiotest.au
new file mode 100644
index 0000000000000000000000000000000000000000..1ad28ce1fdd781e4370fdf68577e8a35b4ed8a8e
GIT binary patch
literal 23493
zc${3k$&TE}mIe5_yYTA0Oi7_BMxW=269|GJ2!d!XY{7-CY{_i{-1Vk^pwV4bDWy_U
zYKjQOD8)SZ1c$@nAP53naKQyvT=E0>8P?Z4q^wuX2!-68G}?6Ux#wIny6}JfpZ~|-
z{`R-(-~Q%Y!vFuv-~Lwl-yi;eI9xZ+^E65F+;tq+fp-}kId$M&o~CIAuUzU<N?ka^
zt|Ek82Pg9^C$?={h+vyI#Gwwsa1cz8X_^*7wv8~t#74-14-A97b1<?UI0ctj7CXT%
zgKOXod(Da=ht=R<c5il#jag$HhqZ}S0e7`Av<TWo;4PFfO=Mw1l3P;jrIEk1{Nd<&
zyv&`{v~^W#HtLmntz4-#8v|9O at hlkK-(Or at -`=0yU7t^9VVvm_-&0jl#ws!~+)h+v
zMG<(}HdP(_4i5auEb>w-H<kWC)VOY6lqD-6G<0c}rEwDH&>ZTfnVVx`P)wp|5qsl_
z7sSL;T7$OK=Q+t#B|#7d-9bmtG{ZJ^Qxhe)lcq_g4Wo(?404zSDRrGNikE2^22o0F
zUFdcDg8`>18X?Hg6|UFo_Ii?TrQUopoBGQjiu^G57UTJ3G+#t%W>c8DG)bK-#Ay-{
zLNkKWEVXmjG&HH-=AcDzE6ql{B8&V$MTAa$I6UZtZW@@z;c_~=_cE+XjaI+Q>n=^B
zFf}Z(J7|i%J`9bKIEJmLy}l`GS>P|P?w1QMHk|~MbeZ~NKgbhDRt(*8qd1%{!X$A;
zlNiViGrWkQn>nxMrj>h<vy4q$*V}@r7&Ojkl0!R1UeQgsaWoDHHso4c9dIPh?PUnF
zt~7fxFCy2XM3aSXrP66P728syOi>k~4ow_rwr;zYVH!MVND`qboqJ%ynAfrG#EUST
zgApT1u}wSn7U6Ij#W__i8`-HJh07$zSdoZ<v)Io@^Vm-#%c6#phO=-u at x3^=GMX*t
zqw(<0Tg0(tr3fd{BAd+fh`0hzB$GnXSwN|&wR?O=)J)6Hz{KQ!qtk71vaXX18HUtr
zaqX^ZU at J<&0+mKXY_w(F)T7M6rdF at Zjh<p4HOdXFsEwB19_Yw1!pL$>)bGI at +IdK)
zUKTlyC}P=g;vgH&63?Zw<jN>d;q4^yDB)$-vg5$d?`Dx7r=mtJI|`QJ#V80uDk(@d
zLf>DUUC)R2KF)$H84ahyZ#VPFI0h3zX6PsWWC2F6i;^O9mP?#C#fT`~cDGyW>)_{+
z1Fal1+I*vD!RSY}tV`We6{bj*ILXy`S?-q`?N(JmI?hv5wZ(qBIcN_|1KQ>y$CR5r
zzTZc#4K6@%YRJk!7EQ;EgNYY<S*BUoLebP8-`y{RsY8jGk;L;x^PACpo`$M!BP*Kt
z<D2n3Tu|HA!AhcN9^K3Xa9KgMR5P2$)8Q=6LPM1~Q$d;Q<zu+DWw-k(r=pZNvlN at Q
z&>HllzL60hT+<@F)T&xt)yX60J|?be)Y at i8wj<ZRhnAaWsb%&>6cc+CB2!n3b*<C0
zD79t*V4z&8sNJ at KvARqwO;<`abI`Zb)S3sTq6?*x(Q2C{#>4w4N~692WuoOU8IB0n
zajS1xb~;;l^N3>85~zZ5GRENdff`diO>+x|#-)ymQ!r9W;xtIC7(fY1Q<Oy1&leG3
zJr<~q;~<Xb<3;Q{(0yG|lfa){Pl71ZI1wy{EW>Fs44h2EZ4(K(n<PO1h{d=2@<7c=
zL<6Ge9jT>ihO1CLcM(+`$BwZptCnE$!a!94F$~kS4P6DWFflfCQ;=nOfL&)f at sk+K
zoYaz3<mP^!TZW=^dz3T7Ji?CJ72AR$(QKUMu}&np*TpiO<;yhJ6r&1nFhHMwyYLfa
zt3=032j-HZKr$lP1=usFtRO+xNFL7<C)Xri<v7<!r{O#au;KEmOTzKZB$!eM>8j|^
z<Z3#G+gX_^IT~0)N|riNz%?_8%(8i&nH=g2Fz-gozzL8EMqI<P<HbwtD=?RU>zd`;
z)Hm@!ZC1^;PTkOqHA!tMoNAE~UdFB~I5plCqhPuuw}1qKB3d}T59iY~(Q&Vn^EjT&
z$UU@)M|DTU6Z;+orbtV&%u#2U24mZo^+C&(0IqN1NowS69jj?_=cP9|QWdF9y3UN=
zz=-r|N!5GAy-(6R+mj`sZ4Z)pI1GbOp$)|x;B*?$33io+CK^JzbW at n|zS>gcULK6I
zFiAO7N0OjKUV;`W)~iU;JMPS<3nyr%bs{O+JaV!$;BYA$p!yikgc)|5by?AB?qZ?`
z%CwxW$4!6h4&65H%(U4tWvv#(x9yR5yJ>Ij at 7xylQ(L%qxJu=qefj*|A}Y;{;5lD^
zTl~25&M%3Vq*kmPUMGe3OQkffBpa2B-4eflq-_NK(Tghoy0Z7nZ#}JGTyyIh_p0~d
z`P<&6rr#8c%5U0L^@I1Um`>D}iuCIF*Xr-i)8fh5R&l2MmWp#XTX&<&jmqHFLdZUM
zpBB at ORBg6@D;=+WI)1h>OXzm}aP98P^Ue0jUbFUjU42f(SI$l;jio0<mVSQ`4|@kn
z at b&StO?9ucroOh;c8*Su9^K*Ei}=u3D}CAxo^<iuENd(4Bnvw2-YdVz*LQL0>NF7w
zM_aXbyZK<_Xut_)C;9fNRN46Y&L%I@(YpHnJKnh at Tb)<?!7qgau at b&N;vX;V<?CsG
zbK`2#=uUQ{4R!Kv=L8oEFXo3ko&Kk|@~-b#+w){g)8{D_H(IY|FFNLa at A=|{i`|L*
zYw6>MfIOB5FU)uM{*Jk`-haJ!^7x6=Hjdt3)}G7j;cNP}u;xB1j&2Tu*nC?4^XuZ-
zW9OO{W(zcKo_^a<PLc6^YvuzB+*Xx$*IK2VYfa at +Ilac4J1=&w-hB7|QQuwcyiG^N
zTEW<eUmpkmd at f$!?9N|SgkGWj?(poNk9w&_kN3-qwbmD;&$ZI_wtl;$%4YTcT}5cz
zk19KNhCbhSsxLNfU!6veHs at jg<9#e^rSf3<+j0BRCWPVd-kZhtv*z9F^R*p8N(F!P
zdii8Mc>k$$zP;I;AJ6T=&e7XSY5nNS?xWf)o6`36bZy)CufxIPPIz2IpGK3;j+$?^
z{WqW7e{PevH-)Jfh}#h=nm+&aaQlTwhsP~DebM(jn#`AfTm1TBOyAD7Jb%l~x|iij
z@$;YE?ZVC8_vdMbQ>8jFUT&HHap?WsnY{DY_;>MOr+V;YtMSL~`QJ-}*YCH31)pwJ
zzjETEoma=xf4kjzHC(Gr<7 at uud$YJ!ieG(CA77*&`xW;~NqwGvfBLBS$LFKR#f{f{
zefjp9uC0Gci@#L<bN}SuxZ3N_4Y0?c{Nx>}JX at Q-J}Ir0_udEV=BXDotTXkWg`NN0
zzx+F0d%xeb&*b5H`CNbUy!`R~;q&$9Cw~mf*JIRPpDcDBb<lr&$^X5n|NdnYO}e+8
z?UU|Wv9R~{aQ*p{)3?{%O8a0|@3!?PJ at Oy>cmFon`SgA>Q at bbqwdGcUuYG)X`gDC`
z_~vL^OU=VAZqQq|%kSPDKH?tjzKz;D-;ORG4KxUO#p6HDH at 4S~-;CF+e6;_htcio_
z&ha0&<(F$W?=Cj-bLZqqoA#UH#;3QJPt?apyLm at Gx<6cNO$HSh?srGO-u>h7JwM$(
zKE8iixh|G!o$qgNp6kEdzMC``+4a%dRv?;%t>~49%1_ANX(^)Q!;5Ewtics}|L at J&
zqs8WzgTYnHKi_#yzHbYS at _$|Kt=Z4gx9huIVOM*CjX~QiyxTq8xcz(fA-s1Av%4p}
zfQ-hI{(lCaUKU?|Jf0k_JA0*aGkd9RzxZ?azPkR8^v`cs&c at C0Go4m~t>=^1qgeQN
z{^08^t(V`f*E_{BU+lj6e6(Z#divqZ#q{xe^cXd_`;UI*es7;YdAxOeW8ZI>e{46@
zp6Q6oS8xBEw*Ps2a5-$PeVc6F$LOy7%hs#S<9~m=@%<w4e|_<`wbffMmV3_Wf9!uO
zmB0P=<<s5z#YLf1X{y5Gm#^t at t@!VE?T-16r<T4^Z=uE1I{Lrgy)AXf+pm9o>qmd^
zU80es*8T8tuhsjzb4iajcg*q5`gXBX_-EynH(OhKq)$#JmGURG^Ih|&`nvxcKHGjQ
z-hJOo%EIq*&95lMj<);uC|vueaya}m-1I*5dbWSv-D<tg|9oEghdAqgf3%f<oF;3I
zPM>=JdGeKixi&rgI<A&Ztjc1I$c<m6SLvnvOj*Py&9$|p{+!>gb8dF@|GfQN-ddjS
z{(hMG at 5cDMFm$!j;pe@%_`hDxjnmSLwWPN7{PCklkDd*`Ue+G{Qc0$7ov!kQ+GxP*
zt?v81*Ne+v&W at zt8<P0-WU^VS4SM#6*MFXGJyGvY-+m|f>m=xJTWa}nuz#VH{<mVc
z_@*S4Pd;2*Ri#+q=C8k<l^(t5;-hJ=v^=%VEu&E>@c-%T$!nX{X!krSo!yP^A9Y{W
zO3l~%yJqiKE<5=cy1}=zNy)}(_2tQ*pYFB452~#%)$*o0Hj7&@#4Ymb;Ip{>WGBBr
zlFNj~^tn)xdzJUQZ)V0n%OQCubw&Bm-fXQudsf!}>twgJRwzymkCyyw=r1;;N~ON}
zY4?rC|Fh!eZ;Zg4?B4h2=Ch45eYO8l+jw4?e*d(v$D^a^POnhdDtvo=a^C*6)TSSz
z#GD+QjQE|MVqN+D&(A^q*P at wxNGTfMrkkzev+WZ7?RwXGx>><ruQMeYo#Zb!Up#qQ
zxO#JT+yBRQ3GL>t;$Pn=J1 at 5?jppIoy`l9hvbFc})Vn!5vpO5kN>%C4PhXwFuSLb$
zGbx!|nbk&Nv&08~9Df1d-1fz<mL8 at PT54`>6s6#|{f`J<_ at lj>aCCp0l-lK;vNihc
z=o8)E*ts$G18tIx!eV`MqbR1YPQIWQo7?ehZ<vjy-)0?QXRB;Y{y5sRHy*FwNxP=H
zRHt6OxVE-YY5#e6nC|@Y!l-^QjC<wgl&`(qD7MwTHwP!;FAb&rPH}F8Z{HvYuWc8E
z#p~OB>G9^KeR_*8(df==RM&PY?d<iz$7p9`J(j<a`8d2fuhgEutd!j2*H;IfwdS^Y
zV&C486K_!~Jg<~R`fm5*vi|aA*!o0ASJ~x7t-tZ2R-{+IpY97Ui<{(P`u&0)&%gC|
zcA6DA`}6(YmAJm4R`<fu4W5pWP<q}fNsG6qyR^9RR4^{QnG5v1R^8etRp{*>_q+0T
z>4hCmzm1boa$hdJESD<i<n76^x>?w+P3)_QH6)An=6bEvHP1fp50w{Nt<sG%o+t~;
ztik8ShIMp$K$|btnw4QNUs5mOx~1(_SqeWNeF{2T&xPLY-Nn6k at 9SK%#&^-=?br8<
zLHVhyo*f?FPvVIrG>ff at Hh;JOr#C39@#4kt>CB6LuHP&-tLpUa{`<LHdLoJEU(e@(
z8%k2UQYsC?cgJ6r()Ocn>*Dmr10gPQm11G1k$pJ)GO at NFwd#jQXJZR#nNZy<tQYLB
zy9aah@=>X{e|fVI&>-tHwhHUp`tke2k-ha~t8{vOI#Ee~-W`-nTU*lb?dfS;EIjSq
zobTN^IloY?PPwqd`|nP^E_yqU`_9qcWvt0)qDhr<sqP-U`+V-MKdp2R&X1>k+!>mA
zt6V6k0Zf3uz4MrNPxfvtfio^Oy#|5Dy8h$KY4&WZ)IYvCo;CH}c#^@`ReH&%y-$nU
z`kJU6os7)BVjY`lrBp6kcYhomx=(jD`op`^WnGv33#+ at cS!&YVy-!|!{V~s-o-I&M
zvyLTH*e;ay>pzYU^B0Ae&B@|)(Zu|1YWLT-3Z2EDC%bWV>yfOSA1zH)$^y0fVrR4F
z?0)((R at Wc5>&K at TQ{BM}xBsHJUcvhxzTA-QM~%|y;kS{A at Y3wRD8c7@A3tA#i`Gil
z``>PI>?G1)v$WkvK7aanLAHLa)-S&toG)mOy3Ln`?S}vU>z7+~``7Yd{ONE!m13?`
z-71v2!?y>Yr~Qpz%hK at U!E`*BNktgB3V-~^m)&J&?P=M%{Ic(jx$&U5T`X4AZ*RWr
zdG)pDWpcT at ACCFqpuAHoROI6~U%tkrXTaO1Uk<ZT>%Lap-rgw*7k?b=Mc|u2%)TBu
zx9w#Uz^_!~79UQ&ME$~}YI}ZovRrg9>Gz(O3q5P^!}}Y%@Jo4Xc6WG(v*uMrFYJ`I
z_|g0OFL|%{E8n~PeCyLr=5hK~xmYt#|JXgwD;rz2 at o4YP4jShUaJO2q<9_|}{yr=}
z=E_Gf$(A$tW-cqGLV1vUzWFe>%a7E`$@iP3X&Gm>S1y$b0@*wNJVDJzYVq=FHdEP&
zpj0Yu^ZL=@?p;=S(%rgWcr(F9=XO>u?G$+V`qRP5qP(@WGf&U&lt3H$bWqsZsrKhz
z&UV9o?Xk#*r^_Tm;f;fK%H^VnPQHA)NJ`HtJJa;$##mUx$m!R%%Ux-7aI(7)>R^16
z<7kni=+1Gvjb2$qlh0Sz>Wl5|_TA*@Ru9x at VF~3zrPj;u_IB^m;-*-=9^YNdo;C-C
z-fq-;-16{bcPUq&D81>4KhudmcU-PoEp`U}{^%&Hbf3yNJdRUa#)~|aTfM4a-+#V2
zb<3Tn3dt^AOXH1@;$E{>@0pXm`;(+9tO>SzlG=Jlj~ozx<ziKg_m`(`MOYJ!_#|@8
zzU`%f*e(^D>TGv-Ov}9|vXPvot|5}?e4=zf2bkmi^Zn(3e~Pr(w`Hs$<H{RawQ6ag
zdq;=+W3#m(b??WsWj7axBj03sU@^SdJGvr;Vy82j-_5&J7~d|Pa<SAFle<sH*Lks3
z?hoVJxMQ~#w;?X>6v{n3`EYS=mH2JzZh01(t~|L8?P77K)J3zs*{QKDZ4o*>$lRRH
zZ>D)0X0#$kC(*T0tUnV5(KpZ2Jn!V<K5Ms|0$yH^hFW!dty#(f>T$VoeKSkywfexl
z|9)_~unL>Cst>SN)Z3ot=sVCjbNc=L$?2jZcFDK#RlpN2P1NqQ7cX`iTCnfK$higo
z{$lrj7R2|*xBg&zYr87ZvA*n<+b?u3yo_ho+&dknS*O|7^84d2Clj+&Xz^Z(^g2?O
z6Gz>ybans!_#;eo%icEfY-ngCJUuua8~s|l*Au(N-iw#(YmHiVJ3aPJ&kv3+gI;5x
zc$brdC=xfD<zC_ at xUUY-$e<M<=7i3UgQEaVDv_Em1+~(zXoG5Tqtw;ntE=~4_rE4r
z)5Xam@@1N4KGqB6R!fugpzk!ccOLDmS31Q;RndpzT(5~{G9TT1y52u^PRQun_;lzH
zB!^OcbM573xh4(LT+j+-j-TBPul$?)lf$9UahmOFX4)my?fSERS4#=O0X at 0!BGJt0
zVmSLUA9-cB+Rqh)I-DYSn$)R at C=D(zj}GQDU7E(X*Yk0*@b5<{?KJv51=&MwlnY at K
z+I<%;oJDrKaP&r7z{w)JF(PY6dr|MTY(*5QI*y52?+cg)(Qz>G+G$Gy?VDJVs?3Pi
z+b*{h)v{-~CT*9?WmVItZb$L;B9|q@$<pQY;?B$a?f$?fCK5Dc`?=Ar4HVl=r{~{>
zzNK1L5T=PmT;gGHnU08Pyd2)l7g)EEKMzBjM)^3^xUy)FJd3VIUc~i#1IxB-1?6;<
z8C*p|Gz^1pcPwVeI(2MQQVE$-v9ev`O=Rd-*~IU;UBe}rYgu`Go5^Lc7*RFkiM)uo
zWlJ^fEO+fBm<3s{*;Z{gLo at f%vuK&`sy0Hhn+4;zKWJ%j9J%RaI1R*3tt at bwqT+0M
z>ltE8p?Q+I*ZKDwGN~IfazRuF;bNL~dmWj&OB!4s9e)Q)={mZt^P1u$X1%mk=?SKe
z at 27_+^BaxgAWnT_u^5fxUaQv8MP0HcBj7?>tIG3=YUuHNytKJ$d!R_NoQ=<~r$po%
znyedozPP<wsC>D>^?5-I$KP(}d9zpHTJ2uT^+)$<uT?Cy%GEaKPfjkD(O}T!2b?5(
z7vH~wxwI?Ia-}WIM+c|Vg~)Z=^;(aM&W;bSV#nzBn+;AM-RvJ+UdG4}x-CvjuTKuo
zrxB65<#MeTEDpb3p3XB at XqKuC&6|9`8c%$auWT3VYCJw4-;DiGY?TW2ft~oX$#4>y
zT)l$CbWTU($uz*-Zn-9Qb%gxoGBvo4(RQ((y5V9 at Rk_9~eU;}-mr|YUgA3qHBU!wl
zSmqSj&_zzv5z(PxSW1bA4L3=^9ym2Osik4pMY<pu at iLgBBu*EZDGNG+2_p{8DAGhR
zvM0+lh$FB%PVA~Ni88Qag4EtX3B1u|HdzL<-01aXBMhP-4*b+s20iTDPp)U<;dE;A
z{idD;qoo%HDOP)(UP>-+rZ at 9tm`k-zO$y at CJdJaMXXBjYXWzpSMV44ERvNOm94`<O
zo3&D*()Q!i>&zE=old3H at 6+!93X<3;*Q&LiFrOS=FLAd~=R2IH2Uo+f-Kdmn-GOMT
z-uc<xqTQ-gk%2Xo&F|-E(C+FcrbNq+?$3st%_~Wk>&OgmW=j|z%W{aNYq!zq9mzS#
zN?qMmlk3qq8}OV&C`JlCy#GGx==E-%=ccJd^DvUSoUA#-P<olWoME}&;$eJ=WCp=9
zA8>-emO=&YB0rn9&061ZZO!VEB=i)Cmkk at 4)<8dA98Q{gNy at _z5ye|9rkbkD$U>GT
zM at e`?x>{A9rJgIQneY3Cq_uU!qKY!MhD!xTSD4!=X=`>gUpiv1B3dL+`ACe0u_D*{
zfsr6yw_|_i2>p&0`+?FZ8^QVMvRT|2c$0gh*S4-MzPrs*S at CWsq+046o_@Hm6xW65
z at G@7M+2#2+y-=!U=f`)#c42(^@lvU6_l=W#n)I#9`OI!t`Xs)O`Pz2n;_Tb9{j at AB
z_vQen`N`>0+ITUTeLHmuFACR3Cuw!9YF`}2y=`%L@*Nj;ww$Z)w|wDw`S$BoQhnUf
zPfloeU74I5(Pn8|363XH`FTZqd-6?uT1vlOdG+Vw)zOh(UoUmN!+T?Uvpjoy5m$a4
zEWUo at S}&8&_eaKhZD55*eyv=r&)!_*?MK4(*HM|@3O`Iw++wH2g-5sDVx=;DeW4B>
zDTjym>ryp*x17WUq2I{9-3U9?B7JqNb{_Lb$Mm@@Ctns$D071%y}l%c`i}hSK<ln4
zM;H1tG0IL8#dG_OEqCUnh0<2<)z^W%X6!GO4f`S;r%LSBt2=tC#ht=-_4i##Sm%%P
zVv$^3WO3r8l?o?P8Pi&^{Qi?0m6ZkB-oeq~D4uDkvTbNs)dL6_2Y2L1_9p61!BVHE
zCr9JFQM5avW_hM1o}S|*8BY8{QS7G6$>HT4?(^JWU}lloF^-RO;w^9HJ9Slp7<PGk
z8)&j5^ES=pzBc)mn0~&Pb*r+P%qf}AV$?M)0zZ-{+OeZ!lE%7b+fiiO2~8~?VS)@p
z<s~V}RLwExu>!=w!#t%{s`Ig|Xeu}8^tmHCaO3o(Rq<5QuSM5!CX1LWX<f5h?=Swa
z2Fqo9vsO6)CfWUR)sjS&FID`W!uQmJ+4IS4Fs#3f^HLU{?Go-r7c2c#;AGs<-i7_k
zk#)OS$vVPvzJFv>f6y%nSv{B2hWe&GIlp$7YdS8Q^RwNV?2~$-i*fT#&CB_pFE19O
zu>U0N7xKx$SC>o%<%*mL;<+gnmS2ATKJ at L%)7x%ga`N at e4X1^jo`gkwF0^-M$G;qZ
zv!&9 at tL@Us;n_{NuwGWAHX?M$_qQfr|MB%ulzZ!wo$A+v>G9A}*Q8!6qWQfbZ|ZM;
zxqOEQbel at 0gZ<^nwbELz7^P+6jMY}9__|#EJ$xB*TWIIvb98djCdG~@M1gy&H-wGx
z{y*OD?HEd@$H{+&NB5&nz165&VW7^0dZGMkr}Z{kyXUu!-teP*a3 at I>siH-(K9B45
zZTsCXpWg5EPkW_-ax{zf(l)9zE5skrZ+cTKci%j^e)qh6AgyQF at YdLK22rQZ*@>D@
zI~}Psc=PDw-G=hHy=mq1JL3Z)exuv8Q!%(xM7O&AN9)Bap>!0k(LlJdjwZFSQcM)r
zw3b>&Zf~ECetA86ay(qaz8uBJ!~Sivp_ehJnMlawPW#Oh=k2=oS$a7NdS3QD<Hx=7
zjHeRuS{BEZ_C~+H3LfuykDXZy&rbsJxY@|cqHOr7;@b7<t2O at 3Gx3xB>@2Os(R?vj
z7>$`YpmL-OcE3LR{O|9N9-SUO702E7#pGu2RVvI%O{|(zH8#2Go9B%;&p0SP3`*oC
z8!MB+U@;IgA+Z#r+rB*i<@d>>{j10Bu-V at C7tT??INoWy20zrXrZwOE8ohrMe`#%;
zhK1?5;D^(qlGWXy9cq26(*FJN{N3aD&z-F+<N5KGad&DLZyRODZsk)|bcA=$?DuQw
zVSn?I7DnG%<2b2MqfGLFnY6{m_v>GNUpzZId~9D73-9&8!CgZRx70z}bbMV>-oEU-
zEq1?>wd;Im`L!3`n=<c~71QN>VbFcAzIbDmhbNCFX+i(oaSwnW<C3T#E;dDLeDe6q
z4c|Mc)Mnkz<$393tk at k~(`-XeWtDzeyFE6`$9Vgu-w3~}$%$sGW_O_4Ty9$G-LiZ>
zcZ{pP?8 at RaFozztWt9_hq*Fv>@Y3ZHlc9zn;CZ>8r at 4kSUAA2dXsBh|K(i@|Bg>?w
ziLs3>4OuuhB`wtjMbUB at tQyf7Hgc?)*g}|ECMLF_0J-B7lqpUa+ca}A5Nd21k!xTV
zBep5WuxyC3W}Z`s(wM-<1koH37h{V$I-w?U9g?!`QAwN=$3&(s8>VIu2MT1aYh%#6
zh*F$HDKLaIM>r)G2n`3Zy;BZEbj)_4xNOgriyhVigbW9}#57FPurM?f!zq%(KRAQ1
z<zmwT+GRTyvNaue9bDzumYcbnkvJCHW&vm51=UrPU=^C{8VF>LhOiAMGy(os4VT);
zb#iJYo?*Mxg7MT4g6ebICWPkDMcak`P at o`A4%9ET4a)dDadT*IiuJ_H6 at YW3pfop8
zMxiY(GN3Ck!Wps?tT}P&BuS2v$TU*K8_t6E)Mc)-9bHJx9Hd*scG$KqQ_%?2Mq=Mf
z97@@qFH>WCObF3+OW|xsks%@^Hi!+FL?WBH#crZ2D)iscbwjlD(8Mw(Mr6R4i!f6v
zf^On0^-z#xw&_?n#SW*ay2|saYr!;29<?Hv0dryFWsKud7<-xHW_Cs}m9;$0aGrzZ
zgr?~d-Laub7%Msn{1J%1FiHZ19EsC8IfNOpl??laAEz=AW{w6{<#?)=z*I{b8T2*2
z4fk|)Ofj?&Kpbb5>E?t|+r at Mo$#IefxpS2W*;waDv&*TDZCgaNEZuPpBQ*_ at x?Yy#
z5lpZptGeVW9CSX5$t*Foh!~dZN`1b|Co|KHh#5q&8>7riO&c6S6D`b#Gh;qUiOp*e
za8qmtA$6!BIksvcN!K8ZTe_7a%MWv+NeY-*o=4Hr4}sn%{xr%GFi{I7mK{b}?!YwU
z7Sj9Jz$8H=2Y*y$qzSSlNUEsyx`w10G`9w9n;W+Cg_HYP5Q9mgG$F38V+(B4)NJIW
zFzI0JF2N2+J}2_0!{y8)Bqh;2g8A^>bRGg}O=mt+0&FXxYGuTU!G%BzD^%l<&=p!;
zp`$8&NfMA~XtH7`k}%+TL6SI)?}KvDx*Xqb54!N7EC{MB%A%qIWS9o{yQ!+GpsR{X
zG(y175dcc+rnyU#G!CNB3l<9y74zw6GMh{$!|`Z7pZVTo?%mxFZ-;mAdp#TvC!@(?
zGV^8&VBo$F<+41ZZbrdzK^N#S%21mHASQ at Hf6#CD+O2M@(P}iB&04DlFIufmtJmpv
zoA9#DiGnl`1n2}e5Cuh+;1}AdseoXHZ39LS>Ja8h1QVDonD!{iqBIMWaOwMtz+VRN
zcez;3rqjuII+ at QG^W}2s!%1)IEtcNG^F1%}{J;xCC>%m(vm{}gKHZ$D4h9 at -d!=D$
znxd?#h96ZCMUcciw2T*d_L?2=hJCVnEr_Bhfue>o8gyHQ_F5)D6u2%l+i?inK+5)%
z(iOxq(9G$NBVYZ(=@m{sT<Egn=er*zSJ$$V*{XQ{uyHj@*}?ko<9PUBb&6feO090i
zwvs*^Y|pCA_NZFs!!}iR=>KwPtPekr|MBt9kA8j*ro{dl55Md%*<r1=u_BvojwP%c
z(1TTT6F6cRcsRn<5k<Zi02nU4r4JJ~U%~_hQ3{5bPv?H(%@=;=<nRf|ab4g<Ji41i
zvw1F7%6%I}!zJpMcM6q$esUXRR%7esX2<d7_ep>A#U^~59Gp9?N_lIi#CvB8qSZIn
z%WaNBqwjQci!-^t5KpoSuc at _;ovIz#8QoU7a7=q0eVJo=Gu4}b8afWoj&G#sX5{MD
z;%@0(olbJcOM)bg?oalE$=!{o>;AXl at ceWP9u1n(o4eWQ#1EHqAnx>b>IF-xsu1s}
zWA)4CVWg?4R^Dn)W=M3+=C;A{hAy@?c7!9`E^C7qJKQyu%2+5CWWER9P}nM~M<6KG
z{!ZDtG=;J;s8wQ(Gc~PT?YYT8L+x1-NA4$vAS9+izp1WA6j^o_hsRCiFHLuOHgxQp
z+huwcn#pjMC)l69)#l;#Qi+Bq8ORNfgm;#GyGQ|dgQL?v_L6~|-^{VOj6^vn{oEsb
zzonvsoD*WLD4?k&h;yYP`&@%)e6`my-Tju>v$<{EoDLLu-fZhmy-Q at F*{iC?mv&$D
zq%G5!v~_dV?pUOw0io&itMc%uG#G}ZO5z7<7DjC at F~o4 at Vu|nL%UzDU9#@*n;nJQ3
zk!nTM&hHZd87cpGXX1mnfTv?RS_}cNW6O+YOPWcNzZ*;5v0ca}85u7946#IFmPRR+
z`O%Fc-Yo}JBiG{C4GCw+0t8#^AgQCxvyMB`+bvt{<pexLlsUj68Q5H>-IE{~JDED*
z<!*<UG((UzQ4nOsGSmU!fF at h6C0mB3XxOrd17ybm5JzmgIL#dhwN6YiP4g at XgCqnv
zVmOq<z8?pE6vtthCIL9c9L|J6oWMtM>chVzVeX&0$;!ENhXPD6?{aPUwG0bELxI-V
zE9YabsOhRI1AKwI%7A8oWC{S8Br6g?8k}NSr81lY@(9PudG!bXHCNlzE!#pCuoi#}
zAb2cZL408E;D<#G$IY`8nzX{4{3raxDFda|zaLNpSFsi|3;-|!9D0BwV0z99Au!uj
zzk*5tC59V}o-vNb!arhr-JwBj|GZ)SYx5WdHmrvOK3#3AXV3;e?3ZoKe}Fl-6Eu~w
zefc)4j>X><3_=NeRmy%i1$et^5If5{NGZcv2B36>!3?9x>eq1}o*rO*AuD8M1y(&<
zUHyRI?3F`U>)4#E2<{4tSD?7M{GmBe{X<jOptENJEC$ODn5!x>iz$+<vd#0dszO{v
zK$>B4bjF*p2}TA}MPPz3YYJo1 at XBR<$yOEuMuXXAeXzwmPGB}5Tx4;WxDJK*2cM=e
zIgFO)ls&A#9yG`@u+fCN*iG0|2=E5}z^x$mQVg|PK>5L at kwGlNh8Wa_Z9!qv08 at m2
zrmmZYVyHF?2rx;8Z8*rl>?LM%suRO8v4cT<xKPewPf5TC8Kw=dFx(S{!)1?Cz^Gav
zWY~ib&@~pO?VLT30oDXWU^O31ra&XmYVv?rF?I!v4L*hufe~g}!lq!s&_!Z{)N<Ic
zu?evq#B3Z26BG0XyA8oiTY;*;$gvR^CNu_!Gh*`#?STOy2x_#Mba64<78+(EXY~{Z
zka)zbZe^MVn^eXCOi&BX$~s^UScQyZVD^{?dT1a!2etv_p(RXxIc&O^g}AH9WCehj
zz!Wnu`9U3lRDc>{e=wV4%;W{Ddu9Bq+c7U<WC=>JJ2C5E&zIOM4`Y8}09G>xM0dqf
zpxmnT%4gW)DHf}g8LvYU at E@G8T4C(r65uyFQ()8yNQUhQM5>9?7>tG00^VzyB+NV*
zE|i$MqSyds8Bi-^vIk#)w1>dlRWnvJeqcEMB6Jzi1EXiMbvYf|L~}wz>-7~km`9cb
zu%&8n4!i#SG|DX?1sXRHs5c9+1Z+`a)C*M5V(vi;v&2$(l`bY;ssk_69{7nx{UD{_
z)s7}gcsU$<jv<PQW&lS8Yju}FLN%gmf~eCmgluBS5=+Rb%;yM{O;n@<y|%LX?bOHM
zl_qmpfO;84ftzuL!YjNJPj2Ut%}WZ{A+TX^f8sA_%4xvAIYbw?v&iJZn=LltFobxz
z3}#ZMVzqCf<!G6T15gn<n at MEKT$oG)l=HD5TlVt)cBXMXPUKW3evvvDgp=`AH}B~N
zXIlQHA6tqf*%k#nc91q`k?1U%%2Eyr%1DB-Pi$G2O|U#C&cJR>v|u9$^2<OLjx$iA
z1i}D#kwa`#2MZw#!5LT~hFQ!GfpIW;%k=o_i6$mX*rYSWhW;?j(ltW`Yy!6az-<{m
zS+>C{VYX+kupNvP5CyaF1y at ke-r!jd!!gQ2CDfKPlXciLRq$~Nf*k<B&LILZ_q17S
zz!D)85Q7p76yA10W#)w44o+GYD~~}&14%Uje%M?&E7Nxv9A<!up!_WY;A3iF>&!GU
zG&lyU1=oTB2G~Ma*P&}Vbdd!_$H~wN5x}>=moba&EYw(lnU3YM5CdSbN<f%4WhoHw
zbPFlSe0VgB<yM%#LNCF1a^_2x3E+bORsh<N!(0Shi*?l^EJE5WMFHyEMKon;6c{m=
z<lxo-KaOLvd~-&A1~gWgo!Kl5z!Wl2W`JVrFg7sdDOAF|Q-iial?KrPq$oqT91)h)
zfnd*~D=|z4u_ObQl2ZmkPz<1(Er-H2>?Hhxu{dn5AYg#gYtS0TP;dquip>NAx>cZs
zv4iX3s+GiI0SeBz*rWiztPhk~2YbSeJqHJ{VHz3)a}1G!J!faK(C9FfWvL$qfG#T!
z$juOty at If47=XDDCmwpsazJP`(#!}fli``mf;$*Bi-HJ0XcU`888&JTW>Fp}6?@bS
zMgb<vW_*oB7Zz1nzycU#frp7X7O2TXPK&jQnLPtn_B0%25uU6_1%uxeAcN7-2WW+X
zW at yd=3EY|8di6jbkQj!`jBhZ*T=B)#u?kW4gN9noFIk25)wn(c6apcexepBRUuD?1
zuPDx{%ui;uy350j9vIx}_$ddm{qsWmA==pZM<xG5HU2n$D99hzKD6j3<$9oo<Y%e>
zaXdUL`Lo{t{`mj*=4TD~FQ!Qz$~ojmADn-=<EjAp$=dLbrmdQXe>TMV(cT~2^5?A|
zbnuTyE?4yLFMjsl{d*W{^7B)6gC7&K>hLNKyFX9=7>l3HbAFE0>d{d9$7KFc#6PO~
z>- at izBY!pbr;29G at _})>|I+*a9=cTl>!E26Metvf at S{<yax0ttYf2wR1T(t5nsxYb
zR&<|DyUPd;W{?l)N*T>S51q+Cl2DeFQVnLb*wQ6IB>sF6*|NwhB+Al&K;)8j0+xwD
z`{F^f=OnXPYUrS&81Mm<m>{I}ep_B9fn#U}8vso;6jOIDM+>3VRQ$+uS4f{Bt|w at q
zna*dl-E0|4XF&ja6B6m&0Z;N^cAc5Mu7+bLfEoeWvDh3KBz8|{*`UgsQwrkGwNf{i
z8UrbH+}qnk?KDwHlML7tO#-dc;h@~`TdcRMBnlwVI4SXCtx at ZfB)dP)<a*hKo*)&N
z;UZS6Z1pxdy>|MQK{O0u5CE;Fp<1bkSsEVRo6Uk8+{Cu2TBbA22IaEorvC9*Ef+O!
znCgy!4QHH5t(r=*=;}Cc)!WO3Zz7slP7r8tTZ20DvpZaF3cl}}B+G43QJT~f6+{-{
z_0VkalIH;%&lV(299<P5&e^t_A54W-!yO_{3GaPdi!)Gt5aUd(-!}FxP`&O<+`cs(
z#-fx3mS8x#th#1Hygv!rsy3x%A)3!61wyx^xu)2+F)z3WV at cvYuiG%w8PMs4BXY8<
zn}lkOeta}iY$L;!UgAv=2KuFHh{}M<1}CNGp-fYccAJ(>LG(@>MbQb6e?#SX=YH;>
zW$wr=of<h!$B7F;R#za_DvFg)a}!9VEA$~kfS8&?d;zMifVt?Z?aorm3G-YYAi@~_
zk}X at fSOL@2R7AoE86kvg*`z82fH(nsWzXXg#F(NPfki|7)fI at 1IdMX=TE{RT)T?Z@
zAs}{=C%I-ojUa~twz at z}4U`wKLDgAe6gx2yb>Q}(q7suaa}zqOGREjGkQLM16wJ)9
z9FXoBb|I!gU{z#Amh*dJL?qJ&C^CTUW>g{6k}YT*=sZE5OsO$)96`)tIf>?uhI6c#
z88z5by#l at riFu1vQ4cddoJWS7=DOg9j*N)h9+30MFwbDvQH;CZBoXw$k-5}Ig6oQP
z9#6)y{aqGC;83zRPx>^rL?uoPEqC~aV#JqvK21fzff;gUu>zd~Lr5VUQl%pbXgWvk
zjig&(7sv;vBC)Cf8HZqOa05<GzZ)=xiija56X3bfa!#TYs4fxvf?#=bJG#{sESj1T
zd6`2qjbpOW!MY%bl6_~V6JIw?AoF$@vPT2 at 3N{QNMyez#dKlTkl}T(>)RZ)dfzN9y
za5o?~EZeNokrCfSI7gZ);Vh(V7a=uO2(B=&nt~=ancQh1^juZh^X7hJQQJ_Es;HWx
z>h4^OuM-?3hAsfT#LF-?*(M-EQdLQo?U=~(8zl*;pomlv9S>AtZb<-tk|8UqGgZ^u
zg*J__rdU+cLl-n9QsDNIsw=Xy5b1S7f{3zJ4pqy6;edabtZkMiFC1ktBVnATik<5c
zn4!kf&^EAa;4&ynGSEywr&plOzB*v_<pHa(!M=$g&$A?_Y`qQm1XGeaHuV*<gHJr<
zUs<+-MGPiD8L<N7U(u$A(3%5vV&@rEv1}F2K?RH^+15%xQbt=DUB`?I0z+aX9=v$9
za$vIX#9{=PIbp(`u`s4+iOmY-I5A8XgCUsc24TF83Eg~^vj7`lD%I6p*K&+Z1M=z0
zP$7geL(23BE^>9KGzIRXV+SZ|W~3AWV*`wcEa((l^aJ%tk)fJ0wH&r=C+C>~K5M`=
z#AZ*7XBLy*80kRwp)f=>&_}M8>aOJ)>{(-rWd(o^1I4#86P*4b+Oh5+#yZ*74fc~F
zJ7 at WW+yeFu)Ev41<Q!ABl4r8j6~ah{3)~yvBun#z#t93?&<7J~rUk-ClfeQo4m_HN
zIpc0Xxxr(QuIS9zj6Aloah!r!h3JP>JGEh)bxG4iL32GRzKTek+LGothUq1SovVUu
z2%Mzf=bCq7n^__ at mCzCMvz(8DF5q{6VBl;lgcq3^MRHrQHQ5~n5WuW~<?@_msL7p>
zT*Z14DP7%CWp|oFn73qvL=FU<J4N|)s=0~LQ%zIPmdk7zJ78q003jVMV`Vwi0daaV
zXdNf^oG_-qYE?loc+m~uiclemB`~7l#-Wo&tJA6~ix6T|GYOIFQ;39?2D-w^6POSz
zORCIsvW>J9fPd}*91z2K$VI}q7z#u at 7%jm5Fv>$Op{cvdYQSi^OyjXc0|Z2uEfuA)
zAG3^@WiZ!aNft0?dzDTEGiF}`!q$Kui1ZKEh*=U2G`?vt>}Lx%EdRF>BCBl7sw$S5
z1a9TBZxvyy#_R%?-C=na at J{BYKlmlVE6)Ew%2yN>+YclR{p5M8q$<lXGKssQj*Q8%
z^ABGxg4yZSBGAK{&R;bC2U|ry==cxb?))V04;1;~@*gbtfg3YM$vpQjlDb-k|BF!n
z;Fk}q{eRH-zxe;E)QVQK<P2UB+kce|Kd*Qo(mxZknEb5chjt+Uq8u>(i_Zh~V at Woq
zvf(b!R%HD}%UKSAu&idDWe>||582y?0b at hUGEMBZHY4_IR`7#lTaCH>url?KE_?XK
zmxtsp{)<ijcWRlf<~^k3*dM0!a+VBT<*nIQy68H at ET;b+C^9HMhpy71tG-(-DME0X
zCfTYyLM)ez*)rp*0$o!T5YH(XJ^+-h+cZfjF`4hLvU0kbC(9sWn%0K6N#lsx><dQB
zRkBr)(|8`G2s3VEl3+GZu(tY^7#(gc%312Ajsa5J&=efb0 at siq(lpR+)r`Vr<RY+K
zOIMs=91uk`5QJYDOkc3FFrkJHqc15^zPR&TSwb#Lr at -yi)Prgv#1VrN#bhyxZJt*Q
zQ4nPV;;$broLmPEt?&k(++6#q2BAz+72VWjYnVVpkQ`fS_myNa^m9wp1c}pi+c5O`
zXo)CJeOqWFdNmxUnW+g}SFtoh*2sD6ir7kVx54N0yJ-v at P3>_qVR_2@`Yx3cC(66E
zejHpaL#U?T><&y*R(0F|Zfj1OyFz(Dm-pk5H_doX5E0g(%gHH4YJ{;^Y?!e(oy0T3
zb^DwGyaM9Kc|y4{iL`pt@`BsZbezfkfxx~*0?=?2pxzQCa;>CfbT}PPGJ)ecURNzJ
zrHcuX$626Mn|kUFXLlYIxL&uf$QsQ2H{Tq1c_<az7Nu|!NPr>$hFS*9``Oe{r*wfz
zEz=~Ea5{HXAn?2*vBcwSL^R)>;(A9jK&DK_f#X7S&{Qx)lwML|MM0v}6w^r3G>8c?
zL|Ib|7=9~DZ8f(2T$CZS`EJJ6S`ka=vG at fs#WhkR&MhEadB`##m;#>$wALZIDiFqW
zq9v|jXE at FBgk^ddr~;D#%at{VMu?>h8C%XWmPKS~B}->Mq)L+RnzjX0h1zaL;e5uj
z at hr$<MFVbbm?&pU4PgIHw)%D<2-<|@YoLL^`N0_-oP!=sGQ{%l4zz-h)tU)hKvwC9
zjIbPF#^N~6sg<vmB at CAAfp7&u)}pKA8fD+H!~h`!HEOd=EY`rARRls57?TYY!Dh-H
zLu at x(KsSTMZ=x9h1Qd)urEIAINL2>VlEEDTY#_RWb0j(-4<NYZuq6k_U1d|)E)EC@
ziXIsm$}szm8{1BOk^>)eK>X^!1P#?Qay<u`3PF`IvJ^Yu<r>Sp8KZCsY{(<LVYqln
zLEBg=Xa!Zr^P{EZa+xIcDM{yEqG+NlaH5s^M>D at mct;-4(D&w<#e=%wR6AOn-9|mD
ziFw%$*mrrzvd;C|UFY`tY>De`4+~c6Ez$_7y1{k2vg4hfh8B-Rtt<Pp1r9-9Dbk?H
z+ljXu(2^~<T$N0&`~(3u$b7w_FTHf1q^+!CD_lC9coRp<&Aw3Y*dYy$Q?aJ>41=fl
zV{h)7)Z}@7;6`z9y1-?x;S2_H;NjSk<UzaJYnwrq+ at 0v%T%j-+liS54uyi8v;JeF+
z_<N32ryLSm+5IS<=Rj^?{H$<6(d{KFC4HyQo5LG#l$x>uj at cdLfCrzXZY>3$=q?v`
z%ey6jwT5`rjR?8DQg)oSUTbyI)LF!VELtiOg%l|GhtQ~;InIC&{iS;C<3UdxG+I3*
z8rcWAqYkN}3PU=}j&BSg|5A at v^}d!KcFG{JE1m9=GpDB`A$I^DZATUt- at nSj)G;cu
zV6?2^T<g11tHmp#h{h-1aB`Mr8bk#>zfE<`>@=F4MzyNoBRVia`l4(&3GPk<P at 0A+
zf<zGvBcfdnNx&}&M%2rJUS{kIx{@sLrV&U!wvHkReeh%nB3ed3=SN8#v2-$Bkq!lv
z7Gp at 7$U=aD7!!k!**=pv%~^^SNUAI;Oq?+0Lmj}GGyvVgSedSaa1h!006-MMGK-Qx
z6tgIaLcjw6QZNI<lx<lR6$z*?f^e9{pf95|ibEV)F(QDsf-Zt+5>!nE!EGC^1CA0W
zVeFyMh+HD&sv#MI!FD}@$}wG*9aw-!Us_8XJ4-dSFeq<O>K5EVVlkYUECU at -$4inF
z2WA at Qj*)0o0rWA6?ylDQ7#difn^Lw^?J%NG%#?Fg25u_D5KuNI1~rlC5HP8m=s8<L
z0Mek_wr(j8yq~Q&f~$iPQ=m6m4ALWaY*6tCf)vOPtOICTZthy4lxnfe<dy9Jm>|&0
zHU;dFU_PW!bcvQxN6tWBfh;mXYeL)u)L123p$QOOGC&ppr$nWzl{w3#2xU43-H_PG
zp$-%RJ;HJ(P>TV;#kO`5ro-3^{O}cXWJP2ch9aFwQcO}eN#Z!qEyCC_V at nozwxX&g
z$<ioEUCP!%*g61PlQdNYL^35oI?o-VKP=%hNrw$Ud=1(2#Uj at v#iU?dx|K%>@OEQ5
ziA;_0IMb9QJIxG1Opfp6x|9a4so^Y2Q>1nJ3gbja!%?BfB_Xk-a2BZ+c9iZw1MnhU
zbT|~N4LcoYZ7sWXkm2S5-%@9zrP-<U)X~?8RqC6uZwPug$xOux{7eH$G at 06sVlTV9
zADL1|W8XzbArPAIB2FB>r$@6`thME2z6f(iMGDH(glSy|xHwEXz%q=I+UJZc2<DNi
zSh|(Q5f};E=E&ARfnt#uE3%~JF^n130VM#0br_l$bd070lR_>5AeAhjlOU?{h)4om
zq>iiEF^D7x=ImywtbxN>E<`Z}`?D}NUA99IOfO5Yr7CRWhAa!ZrLWebKpS`qFG)!j
zgb`hlWJ~UJL>+>Y3Q-5=X`Ux}>c-wYfezCwPQepxtSUSwYG{>Oz?f!f0__g{#d4+9
zfC%V<!2FgQh#ICgVJrLCr2vb$lX=T1wLp5q9Ti!HP|tNF4Ga&6vE>4Eq+n{vbTUm{
z1qX5L>I%|L34#u<>87kBCK*zH4t4o{=wRMpJ6yn=^BfqdVp%y+*(Np3$eGge$BPW}
zz|$b8DZpTe)YsUe8$@=teUJDdWl!AMLb-3anI(%r$FZcEEJ*<>5rn6NoH&V>m%c04
zbm%LfS)74hmE4Ra!GXQ2;y||D)D7-jSE(DTZ3gKiv2{_$U at SC(&}~W_r+~H7TTsyr
zL9qZTqUqAMWCeT&YG8s=mqkqnzA#Fl-nIz at +m1(9voM9m8aPi92Q+iP-RTRm9bYF%
z)$|OokN809veacOuTc_)K?2n&ydas*oUn|QE%XFE16`Z_e@&b*jvFxyMUS+A5d=nH
z7??r at 3sow0fVpIv6kE7(<<bYxF%-T}N?rumAe*r)QzA$F|9PG-KW7y$(%%ms&8rWm
z)>=HWzT2+XYk^{Ue*gXFJ?<DMXO-*a^7VE3<)`t2FExVK2|9<yNU#eHG6c%yH%Abj
zKsl2qq)!f9=XQAWgEOcu*iVEnudf_;z&|~mkZ-2EG84<=Z;0A79&Z}z7Y1NB5nfML
z!*#=XKHD76$MZGdR!Ap*`R(xkx(Nufu<3x1-t#$S&0&5T#Y_e(hBA#KO?dG`A_x%k
zhG6oO8{AZbK#D*3kJNJCAqZJCtk~ms{oV`anHKj=%%f+TjXvbz;C}j`No%z5U{KC(
z=$f9ZL(ZZf^kkL?StP?K93_55JMUT4g<kwY=v6NtoirV?9w>STlgE|ykBh@?B|GKa
z=C^c>6octQX3t-08g#Z>Zp#+h08SVTwE<1f>NWr^oizT5_4FQ4TxWP)ZT(ozo(%SI
z#{3Q!x(05x9*b&%lhi=VMmxf&^ySd^0g!ci8dDjW#l93AQ_k7XF at dWr)re~Qy{lkW
zKd<muR2l=c%G%QelhnbFB3$FHvp8B_N7TXQ?UpDQ0X1IP)-91MeWMIE57W1npc*e|
zmUvF0v_#hr4wK!`6hY={sl&)+)d%;Tt_AXZLKKR(4^2q!91T&iK=}@oiou~IM)6Sg
z at S)e!VTDfRpx&53anpBzWz<-yGqzf~o#N4$+B`?x|8di$nCz{z0b$QJR%XyRcfp3Z
z-z4mU?czdI64KqziYEa8IO?1{(uoc2haE<w$YBr7<e9oE4-tV)=A}>-0^1$zqQB>Z
zhO$Qpa?kbcV=g%kXu@~OBeggN4t|kdFFu)UatNgZ-R?EHfhj(WyRGy3GYH-uA2_0r
zmju1XUa400XS21mNQx*<KJd^Jf+;>lVxA(GwsT1UTb0f~-Af-XG^Lnijno$z5p(ub
z0;$m~giY3x3ZbPw*B6PSi at m6vdt~j^_VI$VwyfA0(%pLp1Nq9dV3t;MMlRSe=E2}p
z7zRG at X-nj4`vfl*rI6$kX1%W*C{b57mR>E;&qG%OQF`GguM4*YfpS4`?5i^*e#-T2
zoL5K6SSX7*bj;((dGi$?FSPSywW071WqatZeQ*32+^C>~M;vQ!Fq;JwP{mUI%z57-
zE)gDPEFOyz*j?6T%d#hh`aa(8Htad4FubXv8);SBCwSs2ebM{?CA5(Z!4dE5_7kJX
zrOBLZQ?=?;0<@l#!74G9JGCS_k-BDU at mJ072P`NyuTS|_y0OY{-oI7_UZFU|S6RMS
XY at 3gEoUH7s+P27(s<La%7GUxpA}0A(

diff --git a/lib-python/2.2/email/test/data/msg_01.txt b/lib-python/2.2/email/test/data/msg_01.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_01.txt
@@ -0,0 +1,19 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/email/test/data/msg_02.txt b/lib-python/2.2/email/test/data/msg_02.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_02.txt
@@ -0,0 +1,135 @@
+MIME-version: 1.0
+From: ppp-request at zzz.org
+Sender: ppp-admin at zzz.org
+To: ppp at zzz.org
+Subject: Ppp digest, Vol 1 #2 - 5 msgs
+Date: Fri, 20 Apr 2001 20:18:00 -0400 (EDT)
+X-Mailer: Mailman v2.0.4
+X-Mailman-Version: 2.0.4
+Content-Type: multipart/mixed; boundary="192.168.1.2.889.32614.987812255.500.21814"
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Masthead (Ppp digest, Vol 1 #2)
+
+Send Ppp mailing list submissions to
+	ppp at zzz.org
+
+To subscribe or unsubscribe via the World Wide Web, visit
+	http://www.zzz.org/mailman/listinfo/ppp
+or, via email, send a message with subject or body 'help' to
+	ppp-request at zzz.org
+
+You can reach the person managing the list at
+	ppp-admin at zzz.org
+
+When replying, please edit your Subject line so it is more specific
+than "Re: Contents of Ppp digest..."
+
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Today's Topics (5 msgs)
+
+Today's Topics:
+
+   1. testing #1 (Barry A. Warsaw)
+   2. testing #2 (Barry A. Warsaw)
+   3. testing #3 (Barry A. Warsaw)
+   4. testing #4 (Barry A. Warsaw)
+   5. testing #5 (Barry A. Warsaw)
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-Type: multipart/digest; boundary="__--__--"
+
+--__--__--
+
+Message: 1
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Date: Fri, 20 Apr 2001 20:16:13 -0400
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #1
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 2
+Date: Fri, 20 Apr 2001 20:16:21 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 3
+Date: Fri, 20 Apr 2001 20:16:25 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #3
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 4
+Date: Fri, 20 Apr 2001 20:16:28 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #4
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 5
+Date: Fri, 20 Apr 2001 20:16:32 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #5
+Precedence: bulk
+
+
+hello
+
+
+
+
+--__--__----
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Digest Footer
+
+_______________________________________________
+Ppp mailing list
+Ppp at zzz.org
+http://www.zzz.org/mailman/listinfo/ppp
+
+
+--192.168.1.2.889.32614.987812255.500.21814--
+
+End of Ppp Digest
+
diff --git a/lib-python/2.2/email/test/data/msg_03.txt b/lib-python/2.2/email/test/data/msg_03.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_03.txt
@@ -0,0 +1,16 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/email/test/data/msg_04.txt b/lib-python/2.2/email/test/data/msg_04.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_04.txt
@@ -0,0 +1,37 @@
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+Received: by mail.python.org (Postfix, from userid 889)
+	id C2BF0D37C6; Tue, 11 Sep 2001 00:05:05 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="h90VIIIKmx"
+Content-Transfer-Encoding: 7bit
+Message-ID: <15261.36209.358846.118674 at anthem.python.org>
+From: barry at python.org (Barry A. Warsaw)
+To: barry at python.org
+Subject: a simple multipart
+Date: Tue, 11 Sep 2001 00:05:05 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Make a door into a window
+
+
+--h90VIIIKmx
+Content-Type: text/plain
+Content-Disposition: inline;
+	filename="msg.txt"
+Content-Transfer-Encoding: 7bit
+
+a simple kind of mirror
+to reflect upon our own
+
+--h90VIIIKmx
+Content-Type: text/plain
+Content-Disposition: inline;
+	filename="msg.txt"
+Content-Transfer-Encoding: 7bit
+
+a simple kind of mirror
+to reflect upon our own
+
+--h90VIIIKmx--
+
diff --git a/lib-python/2.2/email/test/data/msg_05.txt b/lib-python/2.2/email/test/data/msg_05.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_05.txt
@@ -0,0 +1,28 @@
+From: foo
+Subject: bar
+To: baz
+MIME-Version: 1.0
+Content-Type: multipart/report; report-type=delivery-status;
+	boundary="D1690A7AC1.996856090/mail.example.com"
+Message-Id: <20010803162810.0CA8AA7ACC at mail.example.com>
+
+This is a MIME-encapsulated message.
+
+--D1690A7AC1.996856090/mail.example.com
+Content-Type: text/plain
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com
+Content-Type: message/rfc822
+
+From: nobody at python.org
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com--
+
diff --git a/lib-python/2.2/email/test/data/msg_06.txt b/lib-python/2.2/email/test/data/msg_06.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_06.txt
@@ -0,0 +1,33 @@
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+MIME-Version: 1.0
+Content-Type: message/rfc822
+Content-Description: forwarded message
+Content-Transfer-Encoding: 7bit
+Message-ID: <15265.9482.641338.555352 at python.org>
+From: barry at zope.com (Barry A. Warsaw)
+Sender: barry at python.org
+To: barry at python.org
+Subject: forwarded message from Barry A. Warsaw
+Date: Thu, 13 Sep 2001 17:28:42 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Be dirty
+X-Url: http://barry.wooz.org
+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+Message-ID: <15265.9468.713530.98441 at python.org>
+From: barry at zope.com (Barry A. Warsaw)
+Sender: barry at python.org
+To: barry at python.org
+Subject: testing
+Date: Thu, 13 Sep 2001 17:28:28 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Spectrum analysis
+X-Url: http://barry.wooz.org
+
+
diff --git a/lib-python/2.2/email/test/data/msg_07.txt b/lib-python/2.2/email/test/data/msg_07.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_07.txt
@@ -0,0 +1,83 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+Hi there,
+
+This is the dingus fish.
+
+--BOUNDARY
+Content-Type: image/gif; name="dingusfish.gif"
+Content-Transfer-Encoding: base64
+content-disposition: attachment; filename="dingusfish.gif"
+
+R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
+7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
+qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
+oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
+RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
+/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
+YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
+6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
+M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
+B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
+z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
+JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
+ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
+n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
+eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
+qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
+wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
+Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
+doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
+lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
+E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
+Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
+isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
+ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
+Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
+I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
+BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
+pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
+B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
+1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
+YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
+FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
+ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
+zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
+ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
+oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
+cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
+7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
+GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
+FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
+pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
+6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
+kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
+UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
+9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
+xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
+fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
+SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
+VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
+fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
+J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
+l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
+I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
+yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
+qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
+MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
+uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
+WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
+YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
+MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
+cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
+BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_08.txt b/lib-python/2.2/email/test/data/msg_08.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_08.txt
@@ -0,0 +1,24 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-2"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_09.txt b/lib-python/2.2/email/test/data/msg_09.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_09.txt
@@ -0,0 +1,24 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: text/plain
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_10.txt b/lib-python/2.2/email/test/data/msg_10.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_10.txt
@@ -0,0 +1,32 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+
+This is a 7bit encoded message.
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+Content-Transfer-Encoding: Quoted-Printable
+
+=A1This is a Quoted Printable encoded message!
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-1"
+Content-Transfer-Encoding: Base64
+
+VGhpcyBpcyBhIEJhc2U2NCBlbmNvZGVkIG1lc3NhZ2Uu
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-1"
+
+This has no Content-Transfer-Encoding: header.
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_11.txt b/lib-python/2.2/email/test/data/msg_11.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_11.txt
@@ -0,0 +1,7 @@
+Content-Type: message/rfc822
+MIME-Version: 1.0
+Subject: The enclosing message
+
+Subject: An enclosed message
+
+Here is the body of the message.
diff --git a/lib-python/2.2/email/test/data/msg_12.txt b/lib-python/2.2/email/test/data/msg_12.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_12.txt
@@ -0,0 +1,36 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: multipart/mixed; boundary="ANOTHER"
+
+--ANOTHER
+Content-Type: text/plain; charset="iso-8859-2"
+
+
+--ANOTHER
+Content-Type: text/plain; charset="iso-8859-3"
+
+--ANOTHER--
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_13.txt b/lib-python/2.2/email/test/data/msg_13.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_13.txt
@@ -0,0 +1,94 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="OUTER"
+
+--OUTER
+Content-Type: text/plain; charset="us-ascii"
+
+A text/plain part
+
+--OUTER
+Content-Type: multipart/mixed; boundary=BOUNDARY
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+Hi there,
+
+This is the dingus fish.
+
+--BOUNDARY
+Content-Type: image/gif; name="dingusfish.gif"
+Content-Transfer-Encoding: base64
+content-disposition: attachment; filename="dingusfish.gif"
+
+R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
+7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
+qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
+oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
+RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
+/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
+YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
+6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
+M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
+B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
+z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
+JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
+ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
+n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
+eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
+qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
+wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
+Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
+doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
+lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
+E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
+Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
+isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
+ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
+Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
+I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
+BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
+pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
+B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
+1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
+YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
+FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
+ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
+zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
+ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
+oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
+cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
+7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
+GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
+FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
+pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
+6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
+kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
+UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
+9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
+xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
+fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
+SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
+VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
+fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
+J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
+l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
+I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
+yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
+qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
+MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
+uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
+WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
+YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
+MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
+cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
+BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
+
+--BOUNDARY--
+
+--OUTER--
diff --git a/lib-python/2.2/email/test/data/msg_14.txt b/lib-python/2.2/email/test/data/msg_14.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_14.txt
@@ -0,0 +1,23 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+I'm sorry but I'm using a drainbread ISP, which although big and
+wealthy can't seem to generate standard compliant email. :(
+
+This message has a Content-Type: header with no subtype.  I hope you
+can still read it.
+
+-Me
diff --git a/lib-python/2.2/email/test/data/msg_15.txt b/lib-python/2.2/email/test/data/msg_15.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_15.txt
@@ -0,0 +1,52 @@
+Return-Path: <xx at xx.dk>
+Received: from fepD.post.tele.dk (195.41.46.149) by mail.groupcare.dk (LSMTP for Windows NT v1.1b) with SMTP id <0.0014F8A2 at mail.groupcare.dk>; Mon, 30 Apr 2001 12:17:50 +0200
+User-Agent: Microsoft-Outlook-Express-Macintosh-Edition/5.02.2106
+Subject: XX
+From: xx at xx.dk
+To: XX
+Message-ID: <xxxx>
+Mime-version: 1.0
+Content-type: multipart/mixed;
+   boundary="MS_Mac_OE_3071477847_720252_MIME_Part"
+
+> Denne meddelelse er i MIME-format. Da dit postl¾sningsprogram ikke forstŒr dette format, kan del af eller hele meddelelsen v¾re ul¾selig.
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: multipart/alternative;
+   boundary="MS_Mac_OE_3071477847_720252_MIME_Part"
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: text/plain; charset="ISO-8859-1"
+Content-transfer-encoding: quoted-printable
+
+Some removed test. 
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: text/html; charset="ISO-8859-1"
+Content-transfer-encoding: quoted-printable
+
+<HTML>
+<HEAD>
+<TITLE>Some removed HTML</TITLE>
+</HEAD>
+<BODY>
+Some removed text.
+</BODY>
+</HTML>
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part--
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: image/gif; name="xx.gif";
+ x-mac-creator="6F676C65";
+ x-mac-type="47494666"
+Content-disposition: attachment
+Content-transfer-encoding: base64
+
+Some removed base64 encoded chars.
+
+--MS_Mac_OE_3071477847_720252_MIME_Part--
+
diff --git a/lib-python/2.2/email/test/data/msg_16.txt b/lib-python/2.2/email/test/data/msg_16.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_16.txt
@@ -0,0 +1,123 @@
+Return-Path: <>
+Delivered-To: scr-admin at socal-raves.org
+Received: from cougar.noc.ucla.edu (cougar.noc.ucla.edu [169.232.10.18])
+	by babylon.socal-raves.org (Postfix) with ESMTP id CCC2C51B84
+	for <scr-admin at socal-raves.org>; Sun, 23 Sep 2001 20:13:54 -0700 (PDT)
+Received: from sims-ms-daemon by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8Y at cougar.noc.ucla.edu> for scr-admin at socal-raves.org; Sun,
+ 23 Sep 2001 20:14:35 -0700 (PDT)
+Received: from cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8X at cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+Date: Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+From: Internet Mail Delivery <postmaster at ucla.edu>
+Subject: Delivery Notification: Delivery has failed
+To: scr-admin at socal-raves.org
+Message-id: <0GK500B04D0B8X at cougar.noc.ucla.edu>
+MIME-version: 1.0
+Sender: scr-owner at socal-raves.org
+Errors-To: scr-owner at socal-raves.org
+X-BeenThere: scr at socal-raves.org
+X-Mailman-Version: 2.1a3
+Precedence: bulk
+List-Help: <mailto:scr-request at socal-raves.org?subject=help>
+List-Post: <mailto:scr at socal-raves.org>
+List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+	<mailto:scr-request at socal-raves.org?subject=subscribe>
+List-Id: SoCal-Raves <scr.socal-raves.org>
+List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+	<mailto:scr-request at socal-raves.org?subject=unsubscribe>
+List-Archive: <http://socal-raves.org/mailman/private/scr/>
+Content-Type: multipart/report; boundary="Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)"
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: text/plain; charset=ISO-8859-1
+
+This report relates to a message you sent with the following header fields:
+
+  Message-id: <002001c144a6$8752e060$56104586 at oxy.edu>
+  Date: Sun, 23 Sep 2001 20:10:55 -0700
+  From: "Ian T. Henry" <henryi at oxy.edu>
+  To: SoCal Raves <scr at socal-raves.org>
+  Subject: [scr] yeah for Ians!!
+
+Your message cannot be delivered to the following recipients:
+
+  Recipient address: jangel1 at cougar.noc.ucla.edu
+  Reason: recipient reached disk quota
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: message/DELIVERY-STATUS
+
+Original-envelope-id: 0GK500B4HD0888 at cougar.noc.ucla.edu
+Reporting-MTA: dns; cougar.noc.ucla.edu
+
+Action: failed
+Status: 5.0.0 (recipient reached disk quota)
+Original-recipient: rfc822;jangel1 at cougar.noc.ucla.edu
+Final-recipient: rfc822;jangel1 at cougar.noc.ucla.edu
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: MESSAGE/RFC822
+
+Return-path: scr-admin at socal-raves.org
+Received: from sims-ms-daemon by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8X at cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+Received: from panther.noc.ucla.edu by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ with ESMTP id <0GK500B4GD0888 at cougar.noc.ucla.edu> for jangel1 at sims-ms-daemon;
+ Sun, 23 Sep 2001 20:14:33 -0700 (PDT)
+Received: from babylon.socal-raves.org
+ (ip-209-85-222-117.dreamhost.com [209.85.222.117])
+ by panther.noc.ucla.edu (8.9.1a/8.9.1) with ESMTP id UAA09793 for
+ <jangel1 at ucla.edu>; Sun, 23 Sep 2001 20:14:32 -0700 (PDT)
+Received: from babylon (localhost [127.0.0.1]) by babylon.socal-raves.org
+ (Postfix) with ESMTP id D3B2951B70; Sun, 23 Sep 2001 20:13:47 -0700 (PDT)
+Received: by babylon.socal-raves.org (Postfix, from userid 60001)
+ id A611F51B82; Sun, 23 Sep 2001 20:13:46 -0700 (PDT)
+Received: from tiger.cc.oxy.edu (tiger.cc.oxy.edu [134.69.3.112])
+ by babylon.socal-raves.org (Postfix) with ESMTP id ADA7351B70 for
+ <scr at socal-raves.org>; Sun, 23 Sep 2001 20:13:44 -0700 (PDT)
+Received: from ent (n16h86.dhcp.oxy.edu [134.69.16.86])
+ by tiger.cc.oxy.edu (8.8.8/8.8.8) with SMTP id UAA08100 for
+ <scr at socal-raves.org>; Sun, 23 Sep 2001 20:14:24 -0700 (PDT)
+Date: Sun, 23 Sep 2001 20:10:55 -0700
+From: "Ian T. Henry" <henryi at oxy.edu>
+Subject: [scr] yeah for Ians!!
+Sender: scr-admin at socal-raves.org
+To: SoCal Raves <scr at socal-raves.org>
+Errors-to: scr-admin at socal-raves.org
+Message-id: <002001c144a6$8752e060$56104586 at oxy.edu>
+MIME-version: 1.0
+X-Mailer: Microsoft Outlook Express 5.50.4522.1200
+Content-type: text/plain; charset=us-ascii
+Precedence: bulk
+Delivered-to: scr-post at babylon.socal-raves.org
+Delivered-to: scr at socal-raves.org
+X-Converted-To-Plain-Text: from multipart/alternative by demime 0.98e
+X-Converted-To-Plain-Text: Alternative section used was text/plain
+X-BeenThere: scr at socal-raves.org
+X-Mailman-Version: 2.1a3
+List-Help: <mailto:scr-request at socal-raves.org?subject=help>
+List-Post: <mailto:scr at socal-raves.org>
+List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+ <mailto:scr-request at socal-raves.org?subject=subscribe>
+List-Id: SoCal-Raves <scr.socal-raves.org>
+List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+ <mailto:scr-request at socal-raves.org?subject=unsubscribe>
+List-Archive: <http://socal-raves.org/mailman/private/scr/>
+
+I always love to find more Ian's that are over 3 years old!!
+
+Ian
+_______________________________________________
+For event info, list questions, or to unsubscribe, see http://www.socal-raves.org/
+
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)--
+
diff --git a/lib-python/2.2/email/test/data/msg_17.txt b/lib-python/2.2/email/test/data/msg_17.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_17.txt
@@ -0,0 +1,12 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+Hi there,
+
+This is the dingus fish.
+
+[Non-text (image/gif) part of message omitted, filename dingusfish.gif]
diff --git a/lib-python/2.2/email/test/data/msg_18.txt b/lib-python/2.2/email/test/data/msg_18.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_18.txt
@@ -0,0 +1,6 @@
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
+	spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
+
diff --git a/lib-python/2.2/email/test/data/msg_19.txt b/lib-python/2.2/email/test/data/msg_19.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_19.txt
@@ -0,0 +1,43 @@
+Send Ppp mailing list submissions to
+	ppp at zzz.org
+
+To subscribe or unsubscribe via the World Wide Web, visit
+	http://www.zzz.org/mailman/listinfo/ppp
+or, via email, send a message with subject or body 'help' to
+	ppp-request at zzz.org
+
+You can reach the person managing the list at
+	ppp-admin at zzz.org
+
+When replying, please edit your Subject line so it is more specific
+than "Re: Contents of Ppp digest..."
+
+Today's Topics:
+
+   1. testing #1 (Barry A. Warsaw)
+   2. testing #2 (Barry A. Warsaw)
+   3. testing #3 (Barry A. Warsaw)
+   4. testing #4 (Barry A. Warsaw)
+   5. testing #5 (Barry A. Warsaw)
+
+hello
+
+
+hello
+
+
+hello
+
+
+hello
+
+
+hello
+
+
+
+_______________________________________________
+Ppp mailing list
+Ppp at zzz.org
+http://www.zzz.org/mailman/listinfo/ppp
+
diff --git a/lib-python/2.2/email/test/data/msg_20.txt b/lib-python/2.2/email/test/data/msg_20.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_20.txt
@@ -0,0 +1,22 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Cc: ccc at zzz.org
+CC: ddd at zzz.org
+cc: eee at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/email/test/data/msg_21.txt b/lib-python/2.2/email/test/data/msg_21.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_21.txt
@@ -0,0 +1,20 @@
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: Test
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+MIME message
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+One
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+Two
+--BOUNDARY--
+End of MIME message
diff --git a/lib-python/2.2/email/test/data/msg_22.txt b/lib-python/2.2/email/test/data/msg_22.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_22.txt
@@ -0,0 +1,46 @@
+Mime-Version: 1.0
+Message-Id: <a05001902b7f1c33773e9@[134.84.183.138]>
+Date: Tue, 16 Oct 2001 13:59:25 +0300
+To: a at example.com
+From: b at example.com
+Content-Type: multipart/mixed; boundary="============_-1208892523==_============"
+
+--============_-1208892523==_============
+Content-Type: text/plain; charset="us-ascii" ; format="flowed"
+
+Text text text.
+--============_-1208892523==_============
+Content-Id: <a05001902b7f1c33773e9@[134.84.183.138].0.0>
+Content-Type: image/jpeg; name="wibble.JPG"
+ ; x-mac-type="4A504547"
+ ; x-mac-creator="474B4F4E"
+Content-Disposition: attachment; filename="wibble.JPG"
+Content-Transfer-Encoding: base64
+
+/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB
+AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAALCAXABIEBAREA
+g6bCjjw/pIZSjO6FWFpldjySOmCNrO7DBZibUXhTwtCixw+GtAijVdqxxaPp0aKvmGXa
+qrbBQvms0mAMeYS/3iTV1dG0hHaRNK01XblnWxtVdjkHLMIgTyqnk9VB7CrP2KzIINpa
+4O7I+zxYO9WV8jZg71Zlb+8rMDkEirAVQFAUAKAFAAAUAYAUDgADgY6DjpRtXj5RxjHA
+4wQRj0wQCMdCAewpaKKK/9k=
+--============_-1208892523==_============
+Content-Id: <a05001902b7f1c33773e9@[134.84.183.138].0.1>
+Content-Type: image/jpeg; name="wibble2.JPG"
+ ; x-mac-type="4A504547"
+ ; x-mac-creator="474B4F4E"
+Content-Disposition: attachment; filename="wibble2.JPG"
+Content-Transfer-Encoding: base64
+
+/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB
+AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAALCAXABJ0BAREA
+/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQA
+W6NFJJBEkU10kKGTcWMDwxuU+0JHvk8qAtOpNwqSR0n8c3BlDyXHlqsUltHEiTvdXLxR
+7vMiGDNJAJWkAMk8ZkCFp5G2oo5W++INrbQtNfTQxJAuXlupz9oS4d5Y1W+E2XlWZJJE
+Y7LWYQxTLE1zuMbfBPxw8X2fibVdIbSbI6nLZxX635t9TjtYreWR7WGKJTLJFFKSlozO
+0ShxIXM43uC3/9k=
+--============_-1208892523==_============
+Content-Type: text/plain; charset="us-ascii" ; format="flowed"
+
+Text text text.
+--============_-1208892523==_============--
+
diff --git a/lib-python/2.2/email/test/data/msg_23.txt b/lib-python/2.2/email/test/data/msg_23.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_23.txt
@@ -0,0 +1,8 @@
+From: aperson at dom.ain
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain
+
+A message part
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_24.txt b/lib-python/2.2/email/test/data/msg_24.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_24.txt
@@ -0,0 +1,10 @@
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson at dom.ain
+From: bperson at dom.ain
+
+--BOUNDARY
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_25.txt b/lib-python/2.2/email/test/data/msg_25.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_25.txt
@@ -0,0 +1,117 @@
+From MAILER-DAEMON Fri Apr 06 16:46:09 2001
+Received: from [204.245.199.98] (helo=zinfandel.lacita.com)
+	by www.linux.org.uk with esmtp (Exim 3.13 #1)
+	id 14lYR6-0008Iv-00
+	for linuxuser-admin at www.linux.org.uk; Fri, 06 Apr 2001 16:46:09 +0100
+Received: from localhost (localhost) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with internal id JAB03225; Fri, 6 Apr 2001 09:23:06 -0800 (GMT-0800)
+Date: Fri, 6 Apr 2001 09:23:06 -0800 (GMT-0800)
+From: Mail Delivery Subsystem <MAILER-DAEMON at zinfandel.lacita.com>
+Subject: Returned mail: Too many hops 19 (17 max): from <linuxuser-admin at www.linux.org.uk> via [199.164.235.226], to <scoffman at wellpartner.com>
+Message-Id: <200104061723.JAB03225 at zinfandel.lacita.com>
+To: <linuxuser-admin at www.linux.org.uk>
+To: postmaster at zinfandel.lacita.com
+MIME-Version: 1.0
+Content-Type: multipart/report; report-type=delivery-status;
+	bo
+Auto-Submitted: auto-generated (failure)
+
+This is a MIME-encapsulated message
+
+--JAB03225.986577786/zinfandel.lacita.com
+
+The original message was received at Fri, 6 Apr 2001 09:23:03 -0800 (GMT-0800)
+from [199.164.235.226]
+
+   ----- The following addresses have delivery notifications -----
+<scoffman at wellpartner.com>  (unrecoverable error)
+
+   ----- Transcript of session follows -----
+554 Too many hops 19 (17 max): from <linuxuser-admin at www.linux.org.uk> via [199.164.235.226], to <scoffman at wellpartner.com>
+
+--JAB03225.986577786/zinfandel.lacita.com
+Content-Type: message/delivery-status
+
+Reporting-MTA: dns; zinfandel.lacita.com
+Received-From-MTA: dns; [199.164.235.226]
+Arrival-Date: Fri, 6 Apr 2001 09:23:03 -0800 (GMT-0800)
+
+Final-Recipient: rfc822; scoffman at wellpartner.com
+Action: failed
+Status: 5.4.6
+Last-Attempt-Date: Fri, 6 Apr 2001 09:23:06 -0800 (GMT-0800)
+
+--JAB03225.986577786/zinfandel.lacita.com
+Content-Type: text/rfc822-headers
+
+Return-Path: linuxuser-admin at www.linux.org.uk
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03225 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:23:03 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03221 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:22:18 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03217 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:21:37 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03213 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:20:56 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03209 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:20:15 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03205 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:19:33 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03201 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:18:52 -0800 (GMT-0800)
+Received: from zinfandel.lacita.com ([204.245.199.98])
+	by
+	fo
+Received: from ns1.wellpartner.net ([199.164.235.226]) by zinfandel.lacita.com (8.7.3/8.6.10-MT4.00) with ESMTP id JAA03197 for <scoffman at wellpartner.com>; Fri, 6 Apr 2001 09:17:54 -0800 (GMT-0800)
+Received: from www.linux.org.uk (parcelfarce.linux.theplanet.co.uk [195.92.249.252])
+	by
+	fo
+Received: from localhost.localdomain
+	([
+	by
+	id
+Received: from [212.1.130.11] (helo=s1.uklinux.net ident=root)
+	by
+	id
+	fo
+Received: from server (ppp-2-22.cvx4.telinco.net [212.1.149.22])
+	by
+	fo
+From: Daniel James <daniel at linuxuser.co.uk>
+Organization: LinuxUser
+To: linuxuser at www.linux.org.uk
+X-Mailer: KMail [version 1.1.99]
+Content-Type: text/plain;
+  c
+MIME-Version: 1.0
+Message-Id: <01040616033903.00962 at server>
+Content-Transfer-Encoding: 8bit
+Subject: [LinuxUser] bulletin no. 45
+Sender: linuxuser-admin at www.linux.org.uk
+Errors-To: linuxuser-admin at www.linux.org.uk
+X-BeenThere: linuxuser at www.linux.org.uk
+X-Mailman-Version: 2.0.3
+Precedence: bulk
+List-Help: <mailto:linuxuser-request at www.linux.org.uk?subject=help>
+List-Post: <mailto:linuxuser at www.linux.org.uk>
+List-Subscribe: <http://www.linux.org.uk/mailman/listinfo/linuxuser>,
+	<m
+List-Id: bulletins from LinuxUser magazine <linuxuser.www.linux.org.uk>
+List-Unsubscribe: <http://www.linux.org.uk/mailman/listinfo/linuxuser>,
+	<m
+List-Archive: <http://www.linux.org.uk/pipermail/linuxuser/>
+Date: Fri, 6 Apr 2001 16:03:39 +0100
+
+--JAB03225.986577786/zinfandel.lacita.com--
+
+
diff --git a/lib-python/2.2/email/test/data/msg_26.txt b/lib-python/2.2/email/test/data/msg_26.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_26.txt
@@ -0,0 +1,45 @@
+Received: from xcar [192.168.0.2] by jeeves.wooster.local
+  (SMTPD32-7.07 EVAL) id AFF92F0214; Sun, 12 May 2002 08:55:37 +0100
+Date: Sun, 12 May 2002 08:56:15 +0100
+From: Father Time <father.time at xcar.wooster.local>
+To: timbo at jeeves.wooster.local
+Subject: IMAP file test
+Message-ID: <6df65d354b.father.time at rpc.wooster.local>
+X-Organization: Home
+User-Agent: Messenger-Pro/2.50a (MsgServe/1.50) (RISC-OS/4.02) POPstar/2.03
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="1618492860--2051301190--113853680"
+Status: R
+X-UIDL: 319998302
+
+This message is in MIME format which your mailer apparently does not support.
+You either require a newer version of your software which supports MIME, or
+a separate MIME decoding utility.  Alternatively, ask the sender of this
+message to resend it in a different format.
+
+--1618492860--2051301190--113853680
+Content-Type: text/plain; charset=us-ascii
+
+Simple email with attachment.
+
+
+--1618492860--2051301190--113853680
+Content-Type: application/riscos; name="clock.bmp,69c"; type=BMP; load=&fff69c4b; exec=&355dd4d1; access=&03
+Content-Disposition: attachment; filename="clock.bmp"
+Content-Transfer-Encoding: base64
+
+Qk12AgAAAAAAAHYAAAAoAAAAIAAAACAAAAABAAQAAAAAAAAAAADXDQAA1w0AAAAAAAAA
+AAAAAAAAAAAAiAAAiAAAAIiIAIgAAACIAIgAiIgAALu7uwCIiIgAERHdACLuIgAz//8A
+zAAAAN0R3QDu7iIA////AAAAAAAAAAAAAAAAAAAAAAAAAAi3AAAAAAAAADeAAAAAAAAA
+C3ADMzMzMANwAAAAAAAAAAAHMAAAAANwAAAAAAAAAACAMAd3zPfwAwgAAAAAAAAIAwd/
+f8x/f3AwgAAAAAAAgDB0x/f3//zPAwgAAAAAAAcHfM9////8z/AwAAAAAAiwd/f3////
+////A4AAAAAAcEx/f///////zAMAAAAAiwfM9////3///8zwOAAAAAcHf3////B/////
+8DAAAAALB/f3///wd3d3//AwAAAABwTPf//wCQAAD/zAMAAAAAsEx/f///B////8wDAA
+AAAHB39////wf/////AwAAAACwf39///8H/////wMAAAAIcHfM9///B////M8DgAAAAA
+sHTH///wf///xAMAAAAACHB3f3//8H////cDgAAAAAALB3zH//D//M9wMAAAAAAAgLB0
+z39///xHAwgAAAAAAAgLB3d3RHd3cDCAAAAAAAAAgLAHd0R3cAMIAAAAAAAAgAgLcAAA
+AAMwgAgAAAAACDAAAAu7t7cwAAgDgAAAAABzcIAAAAAAAAgDMwAAAAAAN7uwgAAAAAgH
+MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA
+AAAAAAAIAAAAAAAAAIAAAAAA
+
+--1618492860--2051301190--113853680--
diff --git a/lib-python/2.2/email/test/data/msg_27.txt b/lib-python/2.2/email/test/data/msg_27.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_27.txt
@@ -0,0 +1,15 @@
+Return-Path: <aperson at dom.ain>
+Received: by mail.dom.ain (Postfix, from userid 889)
+	id B9D0AD35DB; Tue,  4 Jun 2002 21:46:59 -0400 (EDT)
+Message-ID: <15613.28051.707126.569693 at dom.ain>
+Date: Tue, 4 Jun 2002 21:46:59 -0400
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Subject: bug demonstration
+	12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+	more text
+From: aperson at dom.ain (Anne P. Erson)
+To: bperson at dom.ain (Barney P. Erson)
+
+test
diff --git a/lib-python/2.2/email/test/data/msg_28.txt b/lib-python/2.2/email/test/data/msg_28.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_28.txt
@@ -0,0 +1,25 @@
+From: aperson at dom.ain
+MIME-Version: 1.0
+Content-Type: multipart/digest; boundary=BOUNDARY
+
+--BOUNDARY
+Content-Type: message/rfc822
+
+Content-Type: text/plain; charset=us-ascii
+To: aa at bb.org
+From: cc at dd.org
+Subject: ee
+
+message 1
+
+--BOUNDARY
+Content-Type: message/rfc822
+
+Content-Type: text/plain; charset=us-ascii
+To: aa at bb.org
+From: cc at dd.org
+Subject: ee
+
+message 2
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_29.txt b/lib-python/2.2/email/test/data/msg_29.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_29.txt
@@ -0,0 +1,22 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii;
+     title*0*="us-ascii'en'This%20is%20even%20more%20";
+     title*1*="%2A%2A%2Afun%2A%2A%2A%20";
+     title*2="isn't it!"
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/email/test/data/msg_30.txt b/lib-python/2.2/email/test/data/msg_30.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_30.txt
@@ -0,0 +1,23 @@
+From: aperson at dom.ain
+MIME-Version: 1.0
+Content-Type: multipart/digest; boundary=BOUNDARY
+
+--BOUNDARY
+
+Content-Type: text/plain; charset=us-ascii
+To: aa at bb.org
+From: cc at dd.org
+Subject: ee
+
+message 1
+
+--BOUNDARY
+
+Content-Type: text/plain; charset=us-ascii
+To: aa at bb.org
+From: cc at dd.org
+Subject: ee
+
+message 2
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_31.txt b/lib-python/2.2/email/test/data/msg_31.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_31.txt
@@ -0,0 +1,15 @@
+From: aperson at dom.ain
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary=BOUNDARY_
+
+--BOUNDARY
+Content-Type: text/plain
+
+message 1
+
+--BOUNDARY
+Content-Type: text/plain
+
+message 2
+
+--BOUNDARY--
diff --git a/lib-python/2.2/email/test/data/msg_32.txt b/lib-python/2.2/email/test/data/msg_32.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_32.txt
@@ -0,0 +1,14 @@
+Delivered-To: freebsd-isp at freebsd.org
+Date: Tue, 26 Sep 2000 12:23:03 -0500
+From: Anne Person <aperson at example.com>
+To: Barney Dude <bdude at example.com>
+Subject: Re: Limiting Perl CPU Utilization...
+Mime-Version: 1.0
+Content-Type: text/plain; charset*=ansi-x3.4-1968''us-ascii
+Content-Disposition: inline
+User-Agent: Mutt/1.3.8i
+Sender: owner-freebsd-isp at FreeBSD.ORG
+Precedence: bulk
+X-Loop: FreeBSD.org
+
+Some message.
diff --git a/lib-python/2.2/email/test/data/msg_33.txt b/lib-python/2.2/email/test/data/msg_33.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_33.txt
@@ -0,0 +1,29 @@
+Delivered-To: freebsd-isp at freebsd.org
+Date: Wed, 27 Sep 2000 11:11:09 -0500
+From: Anne Person <aperson at example.com>
+To: Barney Dude <bdude at example.com>
+Subject: Re: Limiting Perl CPU Utilization...
+Mime-Version: 1.0
+Content-Type: multipart/signed; micalg*=ansi-x3.4-1968''pgp-md5;
+	protocol*=ansi-x3.4-1968''application%2Fpgp-signature;
+	boundary*="ansi-x3.4-1968''EeQfGwPcQSOJBaQU"
+Content-Disposition: inline
+Sender: owner-freebsd-isp at FreeBSD.ORG
+Precedence: bulk
+X-Loop: FreeBSD.org
+
+
+--EeQfGwPcQSOJBaQU
+Content-Type: text/plain; charset*=ansi-x3.4-1968''us-ascii
+Content-Disposition: inline
+Content-Transfer-Encoding: quoted-printable
+
+part 1
+
+--EeQfGwPcQSOJBaQU
+Content-Type: text/plain
+Content-Disposition: inline
+
+part 2
+
+--EeQfGwPcQSOJBaQU--
diff --git a/lib-python/2.2/email/test/data/msg_34.txt b/lib-python/2.2/email/test/data/msg_34.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_34.txt
@@ -0,0 +1,19 @@
+From: aperson at dom.ain
+To: bperson at dom.ain
+Content-Type: multipart/digest; boundary=XYZ
+
+--XYZ
+Content-Type: text/plain
+
+
+This is a text plain part that is counter to recommended practice in
+RFC 2046, $5.1.5, but is not illegal
+
+--XYZ
+
+From: cperson at dom.ain
+To: dperson at dom.ain
+
+A submessage
+
+--XYZ--
diff --git a/lib-python/2.2/email/test/data/msg_35.txt b/lib-python/2.2/email/test/data/msg_35.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/data/msg_35.txt
@@ -0,0 +1,4 @@
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: here's something interesting
+counter to RFC 2822, there's no separating newline here
diff --git a/lib-python/2.2/email/test/test_email.py b/lib-python/2.2/email/test/test_email.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/test_email.py
@@ -0,0 +1,2718 @@
+# Copyright (C) 2001,2002,2003 Python Software Foundation
+# email package unit tests
+
+import os
+import sys
+import time
+import base64
+import difflib
+import unittest
+import warnings
+from cStringIO import StringIO
+from types import StringType, ListType
+
+import email
+
+from email.Charset import Charset
+from email.Header import Header, decode_header, make_header
+from email.Parser import Parser, HeaderParser
+from email.Generator import Generator, DecodedGenerator
+from email.Message import Message
+from email.MIMEAudio import MIMEAudio
+from email.MIMEText import MIMEText
+from email.MIMEImage import MIMEImage
+from email.MIMEBase import MIMEBase
+from email.MIMEMessage import MIMEMessage
+from email.MIMEMultipart import MIMEMultipart
+from email import Utils
+from email import Errors
+from email import Encoders
+from email import Iterators
+from email import base64MIME
+from email import quopriMIME
+
+from test.test_support import findfile, run_unittest
+from email.test import __file__ as landmark
+
+
+NL = '\n'
+EMPTYSTRING = ''
+SPACE = ' '
+
+# We don't care about DeprecationWarnings
+warnings.filterwarnings('ignore', '', DeprecationWarning, __name__)
+
+try:
+    True, False
+except NameError:
+    True = 1
+    False = 0
+
+
+
+def openfile(filename, mode='r'):
+    path = os.path.join(os.path.dirname(landmark), 'data', filename)
+    return open(path, mode)
+
+
+
+# Base test class
+class TestEmailBase(unittest.TestCase):
+    if hasattr(difflib, 'ndiff'):
+        # Python 2.2 and beyond
+        def ndiffAssertEqual(self, first, second):
+            """Like failUnlessEqual except use ndiff for readable output."""
+            if first <> second:
+                sfirst = str(first)
+                ssecond = str(second)
+                diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
+                fp = StringIO()
+                print >> fp, NL, NL.join(diff)
+                raise self.failureException, fp.getvalue()
+    else:
+        # Python 2.1
+        ndiffAssertEqual = unittest.TestCase.assertEqual
+
+    def _msgobj(self, filename, strict=False):
+        fp = openfile(findfile(filename))
+        try:
+            msg = email.message_from_file(fp, strict=strict)
+        finally:
+            fp.close()
+        return msg
+
+
+
+# Test various aspects of the Message class's API
+class TestMessageAPI(TestEmailBase):
+    def test_get_all(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_20.txt')
+        eq(msg.get_all('cc'), ['ccc at zzz.org', 'ddd at zzz.org', 'eee at zzz.org'])
+        eq(msg.get_all('xx', 'n/a'), 'n/a')
+
+    def test_getset_charset(self):
+        eq = self.assertEqual
+        msg = Message()
+        eq(msg.get_charset(), None)
+        charset = Charset('iso-8859-1')
+        msg.set_charset(charset)
+        eq(msg['mime-version'], '1.0')
+        eq(msg.get_type(), 'text/plain')
+        eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
+        eq(msg.get_param('charset'), 'iso-8859-1')
+        eq(msg['content-transfer-encoding'], 'quoted-printable')
+        eq(msg.get_charset().input_charset, 'iso-8859-1')
+        # Remove the charset
+        msg.set_charset(None)
+        eq(msg.get_charset(), None)
+        eq(msg['content-type'], 'text/plain')
+        # Try adding a charset when there's already MIME headers present
+        msg = Message()
+        msg['MIME-Version'] = '2.0'
+        msg['Content-Type'] = 'text/x-weird'
+        msg['Content-Transfer-Encoding'] = 'quinted-puntable'
+        msg.set_charset(charset)
+        eq(msg['mime-version'], '2.0')
+        eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
+        eq(msg['content-transfer-encoding'], 'quinted-puntable')
+
+    def test_set_charset_from_string(self):
+        eq = self.assertEqual
+        msg = Message()
+        msg.set_charset('us-ascii')
+        eq(msg.get_charset().input_charset, 'us-ascii')
+        eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+
+    def test_set_payload_with_charset(self):
+        msg = Message()
+        charset = Charset('iso-8859-1')
+        msg.set_payload('This is a string payload', charset)
+        self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
+
+    def test_get_charsets(self):
+        eq = self.assertEqual
+
+        msg = self._msgobj('msg_08.txt')
+        charsets = msg.get_charsets()
+        eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
+
+        msg = self._msgobj('msg_09.txt')
+        charsets = msg.get_charsets('dingbat')
+        eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
+                      'koi8-r'])
+
+        msg = self._msgobj('msg_12.txt')
+        charsets = msg.get_charsets()
+        eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
+                      'iso-8859-3', 'us-ascii', 'koi8-r'])
+
+    def test_get_filename(self):
+        eq = self.assertEqual
+
+        msg = self._msgobj('msg_04.txt')
+        filenames = [p.get_filename() for p in msg.get_payload()]
+        eq(filenames, ['msg.txt', 'msg.txt'])
+
+        msg = self._msgobj('msg_07.txt')
+        subpart = msg.get_payload(1)
+        eq(subpart.get_filename(), 'dingusfish.gif')
+
+    def test_get_boundary(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_07.txt')
+        # No quotes!
+        eq(msg.get_boundary(), 'BOUNDARY')
+
+    def test_set_boundary(self):
+        eq = self.assertEqual
+        # This one has no existing boundary parameter, but the Content-Type:
+        # header appears fifth.
+        msg = self._msgobj('msg_01.txt')
+        msg.set_boundary('BOUNDARY')
+        header, value = msg.items()[4]
+        eq(header.lower(), 'content-type')
+        eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
+        # This one has a Content-Type: header, with a boundary, stuck in the
+        # middle of its headers.  Make sure the order is preserved; it should
+        # be fifth.
+        msg = self._msgobj('msg_04.txt')
+        msg.set_boundary('BOUNDARY')
+        header, value = msg.items()[4]
+        eq(header.lower(), 'content-type')
+        eq(value, 'multipart/mixed; boundary="BOUNDARY"')
+        # And this one has no Content-Type: header at all.
+        msg = self._msgobj('msg_03.txt')
+        self.assertRaises(Errors.HeaderParseError,
+                          msg.set_boundary, 'BOUNDARY')
+
+    def test_get_decoded_payload(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_10.txt')
+        # The outer message is a multipart
+        eq(msg.get_payload(decode=True), None)
+        # Subpart 1 is 7bit encoded
+        eq(msg.get_payload(0).get_payload(decode=True),
+           'This is a 7bit encoded message.\n')
+        # Subpart 2 is quopri
+        eq(msg.get_payload(1).get_payload(decode=True),
+           '\xa1This is a Quoted Printable encoded message!\n')
+        # Subpart 3 is base64
+        eq(msg.get_payload(2).get_payload(decode=True),
+           'This is a Base64 encoded message.')
+        # Subpart 4 has no Content-Transfer-Encoding: header.
+        eq(msg.get_payload(3).get_payload(decode=True),
+           'This has no Content-Transfer-Encoding: header.\n')
+
+    def test_get_decoded_uu_payload(self):
+        eq = self.assertEqual
+        msg = Message()
+        msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
+        for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
+            msg['content-transfer-encoding'] = cte
+            eq(msg.get_payload(decode=True), 'hello world')
+        # Now try some bogus data
+        msg.set_payload('foo')
+        eq(msg.get_payload(decode=True), 'foo')
+
+    def test_decoded_generator(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_07.txt')
+        fp = openfile('msg_17.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        s = StringIO()
+        g = DecodedGenerator(s)
+        g.flatten(msg)
+        eq(s.getvalue(), text)
+
+    def test__contains__(self):
+        msg = Message()
+        msg['From'] = 'Me'
+        msg['to'] = 'You'
+        # Check for case insensitivity
+        self.failUnless('from' in msg)
+        self.failUnless('From' in msg)
+        self.failUnless('FROM' in msg)
+        self.failUnless('to' in msg)
+        self.failUnless('To' in msg)
+        self.failUnless('TO' in msg)
+
+    def test_as_string(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_01.txt')
+        fp = openfile('msg_01.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        eq(text, msg.as_string())
+        fullrepr = str(msg)
+        lines = fullrepr.split('\n')
+        self.failUnless(lines[0].startswith('From '))
+        eq(text, NL.join(lines[1:]))
+
+    def test_bad_param(self):
+        msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
+        self.assertEqual(msg.get_param('baz'), '')
+
+    def test_missing_filename(self):
+        msg = email.message_from_string("From: foo\n")
+        self.assertEqual(msg.get_filename(), None)
+
+    def test_bogus_filename(self):
+        msg = email.message_from_string(
+        "Content-Disposition: blarg; filename\n")
+        self.assertEqual(msg.get_filename(), '')
+
+    def test_missing_boundary(self):
+        msg = email.message_from_string("From: foo\n")
+        self.assertEqual(msg.get_boundary(), None)
+
+    def test_get_params(self):
+        eq = self.assertEqual
+        msg = email.message_from_string(
+            'X-Header: foo=one; bar=two; baz=three\n')
+        eq(msg.get_params(header='x-header'),
+           [('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
+        msg = email.message_from_string(
+            'X-Header: foo; bar=one; baz=two\n')
+        eq(msg.get_params(header='x-header'),
+           [('foo', ''), ('bar', 'one'), ('baz', 'two')])
+        eq(msg.get_params(), None)
+        msg = email.message_from_string(
+            'X-Header: foo; bar="one"; baz=two\n')
+        eq(msg.get_params(header='x-header'),
+           [('foo', ''), ('bar', 'one'), ('baz', 'two')])
+
+    def test_get_param_liberal(self):
+        msg = Message()
+        msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
+        self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
+
+    def test_get_param(self):
+        eq = self.assertEqual
+        msg = email.message_from_string(
+            "X-Header: foo=one; bar=two; baz=three\n")
+        eq(msg.get_param('bar', header='x-header'), 'two')
+        eq(msg.get_param('quuz', header='x-header'), None)
+        eq(msg.get_param('quuz'), None)
+        msg = email.message_from_string(
+            'X-Header: foo; bar="one"; baz=two\n')
+        eq(msg.get_param('foo', header='x-header'), '')
+        eq(msg.get_param('bar', header='x-header'), 'one')
+        eq(msg.get_param('baz', header='x-header'), 'two')
+        # XXX: We are not RFC-2045 compliant!  We cannot parse:
+        # msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
+        # msg.get_param("weird")
+        # yet.
+
+    def test_get_param_funky_continuation_lines(self):
+        msg = self._msgobj('msg_22.txt')
+        self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
+
+    def test_get_param_with_semis_in_quotes(self):
+        msg = email.message_from_string(
+            'Content-Type: image/pjpeg; name="Jim&amp;&amp;Jill"\n')
+        self.assertEqual(msg.get_param('name'), 'Jim&amp;&amp;Jill')
+        self.assertEqual(msg.get_param('name', unquote=False),
+                         '"Jim&amp;&amp;Jill"')
+
+    def test_has_key(self):
+        msg = email.message_from_string('Header: exists')
+        self.failUnless(msg.has_key('header'))
+        self.failUnless(msg.has_key('Header'))
+        self.failUnless(msg.has_key('HEADER'))
+        self.failIf(msg.has_key('headeri'))
+
+    def test_set_param(self):
+        eq = self.assertEqual
+        msg = Message()
+        msg.set_param('charset', 'iso-2022-jp')
+        eq(msg.get_param('charset'), 'iso-2022-jp')
+        msg.set_param('importance', 'high value')
+        eq(msg.get_param('importance'), 'high value')
+        eq(msg.get_param('importance', unquote=False), '"high value"')
+        eq(msg.get_params(), [('text/plain', ''),
+                              ('charset', 'iso-2022-jp'),
+                              ('importance', 'high value')])
+        eq(msg.get_params(unquote=False), [('text/plain', ''),
+                                       ('charset', '"iso-2022-jp"'),
+                                       ('importance', '"high value"')])
+        msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
+        eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
+
+    def test_del_param(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_05.txt')
+        eq(msg.get_params(),
+           [('multipart/report', ''), ('report-type', 'delivery-status'),
+            ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
+        old_val = msg.get_param("report-type")
+        msg.del_param("report-type")
+        eq(msg.get_params(),
+           [('multipart/report', ''),
+            ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
+        msg.set_param("report-type", old_val)
+        eq(msg.get_params(),
+           [('multipart/report', ''),
+            ('boundary', 'D1690A7AC1.996856090/mail.example.com'),
+            ('report-type', old_val)])
+
+    def test_set_type(self):
+        eq = self.assertEqual
+        msg = Message()
+        self.assertRaises(ValueError, msg.set_type, 'text')
+        msg.set_type('text/plain')
+        eq(msg['content-type'], 'text/plain')
+        msg.set_param('charset', 'us-ascii')
+        eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+        msg.set_type('text/html')
+        eq(msg['content-type'], 'text/html; charset="us-ascii"')
+
+    def test_get_content_type_missing(self):
+        msg = Message()
+        self.assertEqual(msg.get_content_type(), 'text/plain')
+
+    def test_get_content_type_missing_with_default_type(self):
+        msg = Message()
+        msg.set_default_type('message/rfc822')
+        self.assertEqual(msg.get_content_type(), 'message/rfc822')
+
+    def test_get_content_type_from_message_implicit(self):
+        msg = self._msgobj('msg_30.txt')
+        self.assertEqual(msg.get_payload(0).get_content_type(),
+                         'message/rfc822')
+
+    def test_get_content_type_from_message_explicit(self):
+        msg = self._msgobj('msg_28.txt')
+        self.assertEqual(msg.get_payload(0).get_content_type(),
+                         'message/rfc822')
+
+    def test_get_content_type_from_message_text_plain_implicit(self):
+        msg = self._msgobj('msg_03.txt')
+        self.assertEqual(msg.get_content_type(), 'text/plain')
+
+    def test_get_content_type_from_message_text_plain_explicit(self):
+        msg = self._msgobj('msg_01.txt')
+        self.assertEqual(msg.get_content_type(), 'text/plain')
+
+    def test_get_content_maintype_missing(self):
+        msg = Message()
+        self.assertEqual(msg.get_content_maintype(), 'text')
+
+    def test_get_content_maintype_missing_with_default_type(self):
+        msg = Message()
+        msg.set_default_type('message/rfc822')
+        self.assertEqual(msg.get_content_maintype(), 'message')
+
+    def test_get_content_maintype_from_message_implicit(self):
+        msg = self._msgobj('msg_30.txt')
+        self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
+
+    def test_get_content_maintype_from_message_explicit(self):
+        msg = self._msgobj('msg_28.txt')
+        self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
+
+    def test_get_content_maintype_from_message_text_plain_implicit(self):
+        msg = self._msgobj('msg_03.txt')
+        self.assertEqual(msg.get_content_maintype(), 'text')
+
+    def test_get_content_maintype_from_message_text_plain_explicit(self):
+        msg = self._msgobj('msg_01.txt')
+        self.assertEqual(msg.get_content_maintype(), 'text')
+
+    def test_get_content_subtype_missing(self):
+        msg = Message()
+        self.assertEqual(msg.get_content_subtype(), 'plain')
+
+    def test_get_content_subtype_missing_with_default_type(self):
+        msg = Message()
+        msg.set_default_type('message/rfc822')
+        self.assertEqual(msg.get_content_subtype(), 'rfc822')
+
+    def test_get_content_subtype_from_message_implicit(self):
+        msg = self._msgobj('msg_30.txt')
+        self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
+
+    def test_get_content_subtype_from_message_explicit(self):
+        msg = self._msgobj('msg_28.txt')
+        self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
+
+    def test_get_content_subtype_from_message_text_plain_implicit(self):
+        msg = self._msgobj('msg_03.txt')
+        self.assertEqual(msg.get_content_subtype(), 'plain')
+
+    def test_get_content_subtype_from_message_text_plain_explicit(self):
+        msg = self._msgobj('msg_01.txt')
+        self.assertEqual(msg.get_content_subtype(), 'plain')
+
+    def test_get_content_maintype_error(self):
+        msg = Message()
+        msg['Content-Type'] = 'no-slash-in-this-string'
+        self.assertEqual(msg.get_content_maintype(), 'text')
+
+    def test_get_content_subtype_error(self):
+        msg = Message()
+        msg['Content-Type'] = 'no-slash-in-this-string'
+        self.assertEqual(msg.get_content_subtype(), 'plain')
+
+    def test_replace_header(self):
+        # replace_header() replaces the *first* matching header in place,
+        # preserving header order, and raises KeyError when no header of
+        # that name exists.
+        eq = self.assertEqual
+        msg = Message()
+        msg.add_header('First', 'One')
+        msg.add_header('Second', 'Two')
+        msg.add_header('Third', 'Three')
+        eq(msg.keys(), ['First', 'Second', 'Third'])
+        eq(msg.values(), ['One', 'Two', 'Three'])
+        msg.replace_header('Second', 'Twenty')
+        eq(msg.keys(), ['First', 'Second', 'Third'])
+        eq(msg.values(), ['One', 'Twenty', 'Three'])
+        msg.add_header('First', 'Eleven')
+        msg.replace_header('First', 'One Hundred')
+        eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
+        eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
+        self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
+
+    def test_broken_base64_payload(self):
+        # The trailing '=9' makes this invalid base64; get_payload(decode=True)
+        # must return the raw payload unchanged rather than raising.
+        x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
+        msg = Message()
+        msg['content-type'] = 'audio/x-midi'
+        msg['content-transfer-encoding'] = 'base64'
+        msg.set_payload(x)
+        self.assertEqual(msg.get_payload(decode=True), x)
+
+
+
+# Test the email.Encoders module
+class TestEncoders(unittest.TestCase):
+    def test_encode_noop(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello world', _encoder=Encoders.encode_noop)
+        eq(msg.get_payload(), 'hello world')
+
+    def test_encode_7bit(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello world', _encoder=Encoders.encode_7or8bit)
+        eq(msg.get_payload(), 'hello world')
+        eq(msg['content-transfer-encoding'], '7bit')
+        msg = MIMEText('hello \x7f world', _encoder=Encoders.encode_7or8bit)
+        eq(msg.get_payload(), 'hello \x7f world')
+        eq(msg['content-transfer-encoding'], '7bit')
+
+    def test_encode_8bit(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello \x80 world', _encoder=Encoders.encode_7or8bit)
+        eq(msg.get_payload(), 'hello \x80 world')
+        eq(msg['content-transfer-encoding'], '8bit')
+
+    def test_encode_empty_payload(self):
+        eq = self.assertEqual
+        msg = Message()
+        msg.set_charset('us-ascii')
+        eq(msg['content-transfer-encoding'], '7bit')
+
+    def test_encode_base64(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello world', _encoder=Encoders.encode_base64)
+        eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=')
+        eq(msg['content-transfer-encoding'], 'base64')
+
+    def test_encode_quoted_printable(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello world', _encoder=Encoders.encode_quopri)
+        eq(msg.get_payload(), 'hello=20world')
+        eq(msg['content-transfer-encoding'], 'quoted-printable')
+
+    def test_default_cte(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello world')
+        eq(msg['content-transfer-encoding'], '7bit')
+
+    def test_default_cte(self):
+        eq = self.assertEqual
+        # With no explicit _charset its us-ascii, and all are 7-bit
+        msg = MIMEText('hello world')
+        eq(msg['content-transfer-encoding'], '7bit')
+        # Similar, but with 8-bit data
+        msg = MIMEText('hello \xf8 world')
+        eq(msg['content-transfer-encoding'], '8bit')
+        # And now with a different charset
+        msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
+        eq(msg['content-transfer-encoding'], 'quoted-printable')
+
+
+
+# Test long header wrapping
+class TestLongHeaders(TestEmailBase):
+    """Tests for wrapping/splitting of long headers by Header and Generator.
+
+    Most of these are golden-output tests: the expected strings encode the
+    exact folding behavior (continuation whitespace, RFC 2047 encoded-word
+    splitting) and must not be reformatted.
+    """
+
+    def test_split_long_continuation(self):
+        eq = self.ndiffAssertEqual
+        msg = email.message_from_string("""\
+Subject: bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text
+
+test
+""")
+        sfp = StringIO()
+        g = Generator(sfp)
+        g.flatten(msg)
+        eq(sfp.getvalue(), """\
+Subject: bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text
+
+test
+""")
+
+    def test_another_long_almost_unsplittable_header(self):
+        eq = self.ndiffAssertEqual
+        hstr = """\
+bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text"""
+        h = Header(hstr, continuation_ws='\t')
+        eq(h.encode(), """\
+bug demonstration
+\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+\tmore text""")
+        h = Header(hstr)
+        eq(h.encode(), """\
+bug demonstration
+ 12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
+ more text""")
+
+    def test_long_nonstring(self):
+        # Multi-charset header: each append() carries its own charset and
+        # the encoder must switch encoded-word charsets at the boundaries.
+        eq = self.ndiffAssertEqual
+        g = Charset("iso-8859-1")
+        cz = Charset("iso-8859-2")
+        utf8 = Charset("utf-8")
+        g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+        cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
+        utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
+        h = Header(g_head, g, header_name='Subject')
+        h.append(cz_head, cz)
+        h.append(utf8_head, utf8)
+        msg = Message()
+        msg['Subject'] = h
+        sfp = StringIO()
+        g = Generator(sfp)
+        g.flatten(msg)
+        eq(sfp.getvalue(), """\
+Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
+ =?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
+ =?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
+ =?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
+ =?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
+ =?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
+ =?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
+ =?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
+ =?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
+ =?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
+ =?utf-8?b?44Gm44GE44G+44GZ44CC?=
+
+""")
+        eq(h.encode(), """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
+ =?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
+ =?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
+ =?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
+ =?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
+ =?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
+ =?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
+ =?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
+ =?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
+ =?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
+ =?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
+
+    def test_long_header_encode(self):
+        eq = self.ndiffAssertEqual
+        h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
+                   'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
+                   header_name='X-Foobar-Spoink-Defrobnit')
+        eq(h.encode(), '''\
+wasnipoop; giraffes="very-long-necked-animals";
+ spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
+
+    def test_long_header_encode_with_tab_continuation(self):
+        eq = self.ndiffAssertEqual
+        h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
+                   'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
+                   header_name='X-Foobar-Spoink-Defrobnit',
+                   continuation_ws='\t')
+        eq(h.encode(), '''\
+wasnipoop; giraffes="very-long-necked-animals";
+\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
+
+    def test_header_splitter(self):
+        eq = self.ndiffAssertEqual
+        msg = MIMEText('')
+        # It'd be great if we could use add_header() here, but that doesn't
+        # guarantee an order of the parameters.
+        msg['X-Foobar-Spoink-Defrobnit'] = (
+            'wasnipoop; giraffes="very-long-necked-animals"; '
+            'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
+        sfp = StringIO()
+        g = Generator(sfp)
+        g.flatten(msg)
+        eq(sfp.getvalue(), '''\
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
+\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
+
+''')
+
+    def test_no_semis_header_splitter(self):
+        # Headers without semicolons are split on whitespace instead.
+        eq = self.ndiffAssertEqual
+        msg = Message()
+        msg['From'] = 'test at dom.ain'
+        msg['References'] = SPACE.join(['<%d at dom.ain>' % i for i in range(10)])
+        msg.set_payload('Test')
+        sfp = StringIO()
+        g = Generator(sfp)
+        g.flatten(msg)
+        eq(sfp.getvalue(), """\
+From: test at dom.ain
+References: <0 at dom.ain> <1 at dom.ain> <2 at dom.ain> <3 at dom.ain> <4 at dom.ain>
+\t<5 at dom.ain> <6 at dom.ain> <7 at dom.ain> <8 at dom.ain> <9 at dom.ain>
+
+Test""")
+
+    def test_no_split_long_header(self):
+        # A single unbreakable run of characters is left on one overlong line.
+        eq = self.ndiffAssertEqual
+        hstr = 'References: ' + 'x' * 80
+        h = Header(hstr, continuation_ws='\t')
+        eq(h.encode(), """\
+References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
+
+    def test_splitting_multiple_long_lines(self):
+        eq = self.ndiffAssertEqual
+        hstr = """\
+from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin at babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin at babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin at babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
+"""
+        h = Header(hstr, continuation_ws='\t')
+        eq(h.encode(), """\
+from babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin at babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin at babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
+\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
+\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
+\tfor <mailman-admin at babylon.socal-raves.org>;
+\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
+
+    def test_splitting_first_line_only_is_long(self):
+        eq = self.ndiffAssertEqual
+        hstr = """\
+from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
+\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
+\tid 17k4h5-00034i-00
+\tfor test at mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
+        h = Header(hstr, maxlinelen=78, header_name='Received',
+                   continuation_ws='\t')
+        eq(h.encode(), """\
+from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
+\thelo=cthulhu.gerg.ca)
+\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
+\tid 17k4h5-00034i-00
+\tfor test at mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
+
+    def test_long_8bit_header(self):
+        eq = self.ndiffAssertEqual
+        msg = Message()
+        h = Header('Britische Regierung gibt', 'iso-8859-1',
+                    header_name='Subject')
+        h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
+        msg['Subject'] = h
+        eq(msg.as_string(), """\
+Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
+ =?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
+
+""")
+
+    def test_long_8bit_header_no_charset(self):
+        # Without a charset the 8-bit header is passed through unfolded.
+        eq = self.ndiffAssertEqual
+        msg = Message()
+        msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address at example.com>'
+        eq(msg.as_string(), """\
+Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <a-very-long-address at example.com>
+
+""")
+
+    def test_long_to_header(self):
+        eq = self.ndiffAssertEqual
+        to = '"Someone Test #A" <someone at eecs.umich.edu>,<someone at eecs.umich.edu>,"Someone Test #B" <someone at umich.edu>, "Someone Test #C" <someone at eecs.umich.edu>, "Someone Test #D" <someone at eecs.umich.edu>'
+        msg = Message()
+        msg['To'] = to
+        eq(msg.as_string(0), '''\
+To: "Someone Test #A" <someone at eecs.umich.edu>, <someone at eecs.umich.edu>,
+\t"Someone Test #B" <someone at umich.edu>,
+\t"Someone Test #C" <someone at eecs.umich.edu>,
+\t"Someone Test #D" <someone at eecs.umich.edu>
+
+''')
+
+    def test_long_line_after_append(self):
+        eq = self.ndiffAssertEqual
+        s = 'This is an example of string which has almost the limit of header length.'
+        h = Header(s)
+        h.append('Add another line.')
+        eq(h.encode(), """\
+This is an example of string which has almost the limit of header length.
+ Add another line.""")
+
+    def test_shorter_line_with_append(self):
+        eq = self.ndiffAssertEqual
+        s = 'This is a shorter line.'
+        h = Header(s)
+        h.append('Add another sentence. (Surprise?)')
+        eq(h.encode(),
+           'This is a shorter line. Add another sentence. (Surprise?)')
+
+    def test_long_field_name(self):
+        eq = self.ndiffAssertEqual
+        fn = 'X-Very-Very-Very-Long-Header-Name'
+        gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+        h = Header(gs, 'iso-8859-1', header_name=fn)
+        # BAW: this seems broken because the first line is too long
+        eq(h.encode(), """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_?=
+ =?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
+ =?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
+ =?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
+
+    def test_long_received_header(self):
+        h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
+        msg = Message()
+        msg['Received-1'] = Header(h, continuation_ws='\t')
+        msg['Received-2'] = h
+        self.assertEqual(msg.as_string(), """\
+Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
+\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
+\tWed, 05 Mar 2003 18:10:18 -0700
+Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
+\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
+\tWed, 05 Mar 2003 18:10:18 -0700
+
+""")
+
+    def test_string_headerinst_eq(self):
+        # Setting a Header instance and a plain string must fold the same way.
+        h = '<15975.17901.207240.414604 at sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
+        msg = Message()
+        msg['Received-1'] = Header(h, header_name='Received-1',
+                                   continuation_ws='\t')
+        msg['Received-2'] = h
+        self.assertEqual(msg.as_string(), """\
+Received-1: <15975.17901.207240.414604 at sgigritzmann1.mathematik.tu-muenchen.de>
+\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
+Received-2: <15975.17901.207240.414604 at sgigritzmann1.mathematik.tu-muenchen.de>
+\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
+
+""")
+
+    def test_long_unbreakable_lines_with_continuation(self):
+        eq = self.ndiffAssertEqual
+        msg = Message()
+        t = """\
+ iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+ locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
+        msg['Face-1'] = t
+        msg['Face-2'] = Header(t, header_name='Face-2')
+        eq(msg.as_string(), """\
+Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+\tlocQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
+Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
+ locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
+
+""")
+
+    def test_another_long_multiline_header(self):
+        eq = self.ndiffAssertEqual
+        m = '''\
+Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
+\tWed, 16 Oct 2002 07:41:11 -0700'''
+        msg = email.message_from_string(m)
+        eq(msg.as_string(), '''\
+Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
+\tMicrosoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
+
+''')
+
+    def test_long_lines_with_different_header(self):
+        eq = self.ndiffAssertEqual
+        h = """\
+List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+        <mailto:spamassassin-talk-request at lists.sourceforge.net?subject=unsubscribe>"""
+        msg = Message()
+        msg['List'] = h
+        msg['List'] = Header(h, header_name='List')
+        eq(msg.as_string(), """\
+List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+\t<mailto:spamassassin-talk-request at lists.sourceforge.net?subject=unsubscribe>
+List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
+ <mailto:spamassassin-talk-request at lists.sourceforge.net?subject=unsubscribe>
+
+""")
+
+
+
+# Test mangling of "From " lines in the body of a message
+class TestFromMangling(unittest.TestCase):
+    """Tests for mangling of "From " lines in the body of a message.
+
+    Unix mbox format uses body lines starting with "From " as message
+    separators; the Generator can escape them as ">From ".
+    """
+
+    def setUp(self):
+        # Payload deliberately starts with "From " to trigger mangling.
+        self.msg = Message()
+        self.msg['From'] = 'aaa at bbb.org'
+        self.msg.set_payload("""\
+From the desk of A.A.A.:
+Blah blah blah
+""")
+
+    def test_mangled_from(self):
+        s = StringIO()
+        g = Generator(s, mangle_from_=True)
+        g.flatten(self.msg)
+        self.assertEqual(s.getvalue(), """\
+From: aaa at bbb.org
+
+>From the desk of A.A.A.:
+Blah blah blah
+""")
+
+    def test_dont_mangle_from(self):
+        s = StringIO()
+        g = Generator(s, mangle_from_=False)
+        g.flatten(self.msg)
+        self.assertEqual(s.getvalue(), """\
+From: aaa at bbb.org
+
+From the desk of A.A.A.:
+Blah blah blah
+""")
+
+
+
+# Test the basic MIMEAudio class
+class TestMIMEAudio(unittest.TestCase):
+    def setUp(self):
+        # Make sure we pick up the audiotest.au that lives in email/test/data.
+        # In Python, there's an audiotest.au living in Lib/test but that isn't
+        # included in some binary distros that don't include the test
+        # package.  The trailing empty string on the .join() is significant
+        # since findfile() will do a dirname().
+        datadir = os.path.join(os.path.dirname(landmark), 'data', '')
+        fp = open(findfile('audiotest.au', datadir), 'rb')
+        try:
+            self._audiodata = fp.read()
+        finally:
+            fp.close()
+        self._au = MIMEAudio(self._audiodata)
+
+    def test_guess_minor_type(self):
+        self.assertEqual(self._au.get_type(), 'audio/basic')
+
+    def test_encoding(self):
+        payload = self._au.get_payload()
+        self.assertEqual(base64.decodestring(payload), self._audiodata)
+
+    def checkSetMinor(self):
+        au = MIMEAudio(self._audiodata, 'fish')
+        self.assertEqual(im.get_type(), 'audio/fish')
+
+    def test_custom_encoder(self):
+        eq = self.assertEqual
+        def encoder(msg):
+            orig = msg.get_payload()
+            msg.set_payload(0)
+            msg['Content-Transfer-Encoding'] = 'broken64'
+        au = MIMEAudio(self._audiodata, _encoder=encoder)
+        eq(au.get_payload(), 0)
+        eq(au['content-transfer-encoding'], 'broken64')
+
+    def test_add_header(self):
+        eq = self.assertEqual
+        unless = self.failUnless
+        self._au.add_header('Content-Disposition', 'attachment',
+                            filename='audiotest.au')
+        eq(self._au['content-disposition'],
+           'attachment; filename="audiotest.au"')
+        eq(self._au.get_params(header='content-disposition'),
+           [('attachment', ''), ('filename', 'audiotest.au')])
+        eq(self._au.get_param('filename', header='content-disposition'),
+           'audiotest.au')
+        missing = []
+        eq(self._au.get_param('attachment', header='content-disposition'), '')
+        unless(self._au.get_param('foo', failobj=missing,
+                                  header='content-disposition') is missing)
+        # Try some missing stuff
+        unless(self._au.get_param('foobar', missing) is missing)
+        unless(self._au.get_param('attachment', missing,
+                                  header='foobar') is missing)
+
+
+
+# Test the basic MIMEImage class
+class TestMIMEImage(unittest.TestCase):
+    """Tests for the basic MIMEImage class."""
+
+    def setUp(self):
+        self._imgdata = None
+        fp = openfile('PyBanner048.gif')
+        try:
+            self._imgdata = fp.read()
+        finally:
+            fp.close()
+        self._im = MIMEImage(self._imgdata)
+
+    def test_guess_minor_type(self):
+        self.assertEqual(self._im.get_type(), 'image/gif')
+
+    def test_encoding(self):
+        # Payload is base64 by default; decoding must round-trip the data.
+        payload = self._im.get_payload()
+        self.assertEqual(base64.decodestring(payload), self._imgdata)
+
+    def checkSetMinor(self):
+        # NOTE(review): no test_ prefix, so unittest never runs this method.
+        im = MIMEImage(self._imgdata, 'fish')
+        self.assertEqual(im.get_type(), 'image/fish')
+
+    def test_custom_encoder(self):
+        eq = self.assertEqual
+        def encoder(msg):
+            orig = msg.get_payload()
+            msg.set_payload(0)
+            msg['Content-Transfer-Encoding'] = 'broken64'
+        im = MIMEImage(self._imgdata, _encoder=encoder)
+        eq(im.get_payload(), 0)
+        eq(im['content-transfer-encoding'], 'broken64')
+
+    def test_add_header(self):
+        eq = self.assertEqual
+        unless = self.failUnless
+        self._im.add_header('Content-Disposition', 'attachment',
+                            filename='dingusfish.gif')
+        eq(self._im['content-disposition'],
+           'attachment; filename="dingusfish.gif"')
+        eq(self._im.get_params(header='content-disposition'),
+           [('attachment', ''), ('filename', 'dingusfish.gif')])
+        eq(self._im.get_param('filename', header='content-disposition'),
+           'dingusfish.gif')
+        missing = []
+        eq(self._im.get_param('attachment', header='content-disposition'), '')
+        unless(self._im.get_param('foo', failobj=missing,
+                                  header='content-disposition') is missing)
+        # Try some missing stuff
+        unless(self._im.get_param('foobar', missing) is missing)
+        unless(self._im.get_param('attachment', missing,
+                                  header='foobar') is missing)
+
+
+
+# Test the basic MIMEText class
+class TestMIMEText(unittest.TestCase):
+    """Tests for the basic MIMEText class."""
+
+    def setUp(self):
+        self._msg = MIMEText('hello there')
+
+    def test_types(self):
+        # Default type is text/plain with a us-ascii charset parameter.
+        eq = self.assertEqual
+        unless = self.failUnless
+        eq(self._msg.get_type(), 'text/plain')
+        eq(self._msg.get_param('charset'), 'us-ascii')
+        missing = []
+        unless(self._msg.get_param('foobar', missing) is missing)
+        unless(self._msg.get_param('charset', missing, header='foobar')
+               is missing)
+
+    def test_payload(self):
+        self.assertEqual(self._msg.get_payload(), 'hello there')
+        self.failUnless(not self._msg.is_multipart())
+
+    def test_charset(self):
+        eq = self.assertEqual
+        msg = MIMEText('hello there', _charset='us-ascii')
+        eq(msg.get_charset().input_charset, 'us-ascii')
+        eq(msg['content-type'], 'text/plain; charset="us-ascii"')
+
+
+
+# Test a more complicated multipart/mixed type message
+class TestMultipartMixed(TestEmailBase):
+    """Tests for a more complicated multipart/mixed type message."""
+
+    def setUp(self):
+        # Build a two-part multipart/mixed: a text intro plus a GIF
+        # attachment, with a fixed boundary and a reproducible Date header.
+        fp = openfile('PyBanner048.gif')
+        try:
+            data = fp.read()
+        finally:
+            fp.close()
+
+        container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
+        image = MIMEImage(data, name='dingusfish.gif')
+        image.add_header('content-disposition', 'attachment',
+                         filename='dingusfish.gif')
+        intro = MIMEText('''\
+Hi there,
+
+This is the dingus fish.
+''')
+        container.attach(intro)
+        container.attach(image)
+        container['From'] = 'Barry <barry at digicool.com>'
+        container['To'] = 'Dingus Lovers <cravindogs at cravindogs.com>'
+        container['Subject'] = 'Here is your dingus fish'
+
+        now = 987809702.54848599
+        timetuple = time.localtime(now)
+        if timetuple[-1] == 0:
+            tzsecs = time.timezone
+        else:
+            tzsecs = time.altzone
+        if tzsecs > 0:
+            sign = '-'
+        else:
+            sign = '+'
+        # seconds/36 == hours*100 + extra minutes scaled to the HHMM format
+        tzoffset = ' %s%04d' % (sign, tzsecs / 36)
+        container['Date'] = time.strftime(
+            '%a, %d %b %Y %H:%M:%S',
+            time.localtime(now)) + tzoffset
+        self._msg = container
+        self._im = image
+        self._txt = intro
+
+    def test_hierarchy(self):
+        # convenience
+        eq = self.assertEqual
+        unless = self.failUnless
+        raises = self.assertRaises
+        # tests
+        m = self._msg
+        unless(m.is_multipart())
+        eq(m.get_type(), 'multipart/mixed')
+        eq(len(m.get_payload()), 2)
+        raises(IndexError, m.get_payload, 2)
+        m0 = m.get_payload(0)
+        m1 = m.get_payload(1)
+        unless(m0 is self._txt)
+        unless(m1 is self._im)
+        eq(m.get_payload(), [m0, m1])
+        unless(not m0.is_multipart())
+        unless(not m1.is_multipart())
+
+    def test_no_parts_in_a_multipart(self):
+        outer = MIMEBase('multipart', 'mixed')
+        outer['Subject'] = 'A subject'
+        outer['To'] = 'aperson at dom.ain'
+        outer['From'] = 'bperson at dom.ain'
+        outer.preamble = ''
+        outer.epilogue = ''
+        outer.set_boundary('BOUNDARY')
+        # msg is deliberately created but *not* attached: the test checks
+        # the serialization of a multipart with zero parts.
+        msg = MIMEText('hello world')
+        self.assertEqual(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson at dom.ain
+From: bperson at dom.ain
+
+--BOUNDARY
+
+
+--BOUNDARY--
+''')
+
+    def test_one_part_in_a_multipart(self):
+        eq = self.ndiffAssertEqual
+        outer = MIMEBase('multipart', 'mixed')
+        outer['Subject'] = 'A subject'
+        outer['To'] = 'aperson at dom.ain'
+        outer['From'] = 'bperson at dom.ain'
+        outer.preamble = ''
+        outer.epilogue = ''
+        outer.set_boundary('BOUNDARY')
+        msg = MIMEText('hello world')
+        outer.attach(msg)
+        eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson at dom.ain
+From: bperson at dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--
+''')
+
+    def test_seq_parts_in_a_multipart(self):
+        # Same as above, but set_boundary() is called *after* attach();
+        # the output must be identical either way.
+        eq = self.ndiffAssertEqual
+        outer = MIMEBase('multipart', 'mixed')
+        outer['Subject'] = 'A subject'
+        outer['To'] = 'aperson at dom.ain'
+        outer['From'] = 'bperson at dom.ain'
+        outer.preamble = ''
+        outer.epilogue = ''
+        msg = MIMEText('hello world')
+        outer.attach(msg)
+        outer.set_boundary('BOUNDARY')
+        eq(outer.as_string(), '''\
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+MIME-Version: 1.0
+Subject: A subject
+To: aperson at dom.ain
+From: bperson at dom.ain
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+hello world
+--BOUNDARY--
+''')
+
+
+
+# Test some badly formatted messages
+class TestNonConformant(TestEmailBase):
+    """Tests for parsing of badly formatted (non-RFC-conformant) messages."""
+
+    def test_parse_missing_minor_type(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_14.txt')
+        eq(msg.get_type(), 'text')
+        eq(msg.get_main_type(), None)
+        eq(msg.get_subtype(), None)
+
+    def test_bogus_boundary(self):
+        fp = openfile(findfile('msg_15.txt'))
+        try:
+            data = fp.read()
+        finally:
+            fp.close()
+        p = Parser(strict=True)
+        # Note, under a future non-strict parsing mode, this would parse the
+        # message into the intended message tree.
+        self.assertRaises(Errors.BoundaryError, p.parsestr, data)
+
+    def test_multipart_no_boundary(self):
+        fp = openfile(findfile('msg_25.txt'))
+        try:
+            self.assertRaises(Errors.BoundaryError,
+                              email.message_from_file, fp)
+        finally:
+            fp.close()
+
+    def test_invalid_content_type(self):
+        eq = self.assertEqual
+        neq = self.ndiffAssertEqual
+        msg = Message()
+        # RFC 2045, $5.2 says invalid yields text/plain
+        msg['Content-Type'] = 'text'
+        eq(msg.get_content_maintype(), 'text')
+        eq(msg.get_content_subtype(), 'plain')
+        eq(msg.get_content_type(), 'text/plain')
+        # Clear the old value and try something /really/ invalid
+        del msg['content-type']
+        msg['Content-Type'] = 'foo'
+        eq(msg.get_content_maintype(), 'text')
+        eq(msg.get_content_subtype(), 'plain')
+        eq(msg.get_content_type(), 'text/plain')
+        # Still, make sure that the message is idempotently generated
+        s = StringIO()
+        g = Generator(s)
+        g.flatten(msg)
+        neq(s.getvalue(), 'Content-Type: foo\n\n')
+
+    def test_no_start_boundary(self):
+        # If the start boundary never appears, the whole body stays one
+        # undecoded string payload.
+        eq = self.ndiffAssertEqual
+        msg = self._msgobj('msg_31.txt')
+        eq(msg.get_payload(), """\
+--BOUNDARY
+Content-Type: text/plain
+
+message 1
+
+--BOUNDARY
+Content-Type: text/plain
+
+message 2
+
+--BOUNDARY--
+""")
+
+    def test_no_separating_blank_line(self):
+        eq = self.ndiffAssertEqual
+        msg = self._msgobj('msg_35.txt')
+        eq(msg.as_string(), """\
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: here's something interesting
+
+counter to RFC 2822, there's no separating newline here
+""")
+        # strict=True should raise an exception
+        self.assertRaises(Errors.HeaderParseError,
+                          self._msgobj, 'msg_35.txt', True)
+
+
+
+# Test RFC 2047 header encoding and decoding
+class TestRFC2047(unittest.TestCase):
+    """Tests for RFC 2047 header encoding and decoding."""
+
+    def test_iso_8859_1(self):
+        eq = self.assertEqual
+        s = '=?iso-8859-1?q?this=20is=20some=20text?='
+        eq(Utils.decode(s), 'this is some text')
+        s = '=?ISO-8859-1?Q?Keld_J=F8rn_Simonsen?='
+        eq(Utils.decode(s), u'Keld J\xf8rn Simonsen')
+        s = '=?ISO-8859-1?B?SWYgeW91IGNhbiByZWFkIHRoaXMgeW8=?=' \
+            '=?ISO-8859-2?B?dSB1bmRlcnN0YW5kIHRoZSBleGFtcGxlLg==?='
+        eq(Utils.decode(s), 'If you can read this you understand the example.')
+        s = '=?iso-8859-8?b?7eXs+SDv4SDp7Oj08A==?='
+        eq(Utils.decode(s),
+           u'\u05dd\u05d5\u05dc\u05e9 \u05df\u05d1 \u05d9\u05dc\u05d8\u05e4\u05e0')
+        # RFC 2047 says whitespace *between* adjacent encoded words is
+        # eaten, hence 'this issome text' below.
+        s = '=?iso-8859-1?q?this=20is?= =?iso-8859-1?q?some=20text?='
+        eq(Utils.decode(s), u'this issome text')
+        s = '=?iso-8859-1?q?this=20is_?= =?iso-8859-1?q?some=20text?='
+        eq(Utils.decode(s), u'this is some text')
+
+    def test_encode_header(self):
+        eq = self.assertEqual
+        s = 'this is some text'
+        eq(Utils.encode(s), '=?iso-8859-1?q?this=20is=20some=20text?=')
+        s = 'Keld_J\xf8rn_Simonsen'
+        eq(Utils.encode(s), '=?iso-8859-1?q?Keld_J=F8rn_Simonsen?=')
+        s1 = 'If you can read this yo'
+        s2 = 'u understand the example.'
+        eq(Utils.encode(s1, encoding='b'),
+           '=?iso-8859-1?b?SWYgeW91IGNhbiByZWFkIHRoaXMgeW8=?=')
+        eq(Utils.encode(s2, charset='iso-8859-2', encoding='b'),
+           '=?iso-8859-2?b?dSB1bmRlcnN0YW5kIHRoZSBleGFtcGxlLg==?=')
+
+    def test_rfc2047_multiline(self):
+        eq = self.assertEqual
+        s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
+ foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
+        dh = decode_header(s)
+        eq(dh, [
+            ('Re:', None),
+            ('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
+            ('baz foo bar', None),
+            ('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
+        eq(str(make_header(dh)),
+           """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
+ =?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
+
+    def test_whitespace_eater_unicode(self):
+        eq = self.assertEqual
+        s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard at dom.ain>'
+        dh = decode_header(s)
+        eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <pirard at dom.ain>', None)])
+        # Python 2.1's unicode() builtin doesn't call the object's
+        # __unicode__() method.  Use the following alternative instead.
+        #hu = unicode(make_header(dh)).encode('latin-1')
+        hu = make_header(dh).__unicode__().encode('latin-1')
+        eq(hu, 'Andr\xe9 Pirard <pirard at dom.ain>')
+
+
+
+# Test the MIMEMessage class
+class TestMIMEMessage(TestEmailBase):
+    """Tests for the MIMEMessage class (message/rfc822 containers)."""
+
+    def setUp(self):
+        # Cache the raw text of a message/rfc822 sample for the tests.
+        fp = openfile('msg_11.txt')
+        try:
+            self._text = fp.read()
+        finally:
+            fp.close()
+
+    def test_type_error(self):
+        # MIMEMessage only accepts Message instances, not plain strings.
+        self.assertRaises(TypeError, MIMEMessage, 'a plain string')
+
+    def test_valid_argument(self):
+        # Wrapping a Message yields a message/rfc822 part whose payload is
+        # a one-element list containing the very same Message object.
+        eq = self.assertEqual
+        unless = self.failUnless
+        subject = 'A sub-message'
+        m = Message()
+        m['Subject'] = subject
+        r = MIMEMessage(m)
+        eq(r.get_type(), 'message/rfc822')
+        payload = r.get_payload()
+        # NOTE(review): type(payload) is always truthy, so this assertion is
+        # vacuous; upstream presumably meant isinstance(payload, ListType).
+        # Kept byte-identical to the CPython 2.2 original.
+        unless(type(payload), ListType)
+        eq(len(payload), 1)
+        subpart = payload[0]
+        unless(subpart is m)
+        eq(subpart['subject'], subject)
+
+    def test_bad_multipart(self):
+        # A message/rfc822 part holds exactly one sub-message; attaching a
+        # second one must raise MultipartConversionError.
+        eq = self.assertEqual
+        msg1 = Message()
+        msg1['Subject'] = 'subpart 1'
+        msg2 = Message()
+        msg2['Subject'] = 'subpart 2'
+        r = MIMEMessage(msg1)
+        self.assertRaises(Errors.MultipartConversionError, r.attach, msg2)
+
+    def test_generate(self):
+        # Flattening a MIMEMessage emits the outer headers followed by the
+        # enclosed message verbatim.
+        # First craft the message to be encapsulated
+        m = Message()
+        m['Subject'] = 'An enclosed message'
+        m.set_payload('Here is the body of the message.\n')
+        r = MIMEMessage(m)
+        r['Subject'] = 'The enclosing message'
+        s = StringIO()
+        g = Generator(s)
+        g.flatten(r)
+        self.assertEqual(s.getvalue(), """\
+Content-Type: message/rfc822
+MIME-Version: 1.0
+Subject: The enclosing message
+
+Subject: An enclosed message
+
+Here is the body of the message.
+""")
+
+    def test_parse_message_rfc822(self):
+        # Parsing a message/rfc822 document produces a one-element list
+        # payload holding the enclosed Message.
+        eq = self.assertEqual
+        unless = self.failUnless
+        msg = self._msgobj('msg_11.txt')
+        eq(msg.get_type(), 'message/rfc822')
+        payload = msg.get_payload()
+        unless(isinstance(payload, ListType))
+        eq(len(payload), 1)
+        submsg = payload[0]
+        self.failUnless(isinstance(submsg, Message))
+        eq(submsg['subject'], 'An enclosed message')
+        eq(submsg.get_payload(), 'Here is the body of the message.\n')
+
+    def test_dsn(self):
+        eq = self.assertEqual
+        unless = self.failUnless
+        # msg 16 is a Delivery Status Notification, see RFC 1894
+        msg = self._msgobj('msg_16.txt')
+        eq(msg.get_type(), 'multipart/report')
+        unless(msg.is_multipart())
+        eq(len(msg.get_payload()), 3)
+        # Subpart 1 is a text/plain, human readable section
+        subpart = msg.get_payload(0)
+        eq(subpart.get_type(), 'text/plain')
+        eq(subpart.get_payload(), """\
+This report relates to a message you sent with the following header fields:
+
+  Message-id: <002001c144a6$8752e060$56104586 at oxy.edu>
+  Date: Sun, 23 Sep 2001 20:10:55 -0700
+  From: "Ian T. Henry" <henryi at oxy.edu>
+  To: SoCal Raves <scr at socal-raves.org>
+  Subject: [scr] yeah for Ians!!
+
+Your message cannot be delivered to the following recipients:
+
+  Recipient address: jangel1 at cougar.noc.ucla.edu
+  Reason: recipient reached disk quota
+
+""")
+        # Subpart 2 contains the machine parsable DSN information.  It
+        # consists of two blocks of headers, represented by two nested Message
+        # objects.
+        subpart = msg.get_payload(1)
+        eq(subpart.get_type(), 'message/delivery-status')
+        eq(len(subpart.get_payload()), 2)
+        # message/delivery-status should treat each block as a bunch of
+        # headers, i.e. a bunch of Message objects.
+        dsn1 = subpart.get_payload(0)
+        unless(isinstance(dsn1, Message))
+        eq(dsn1['original-envelope-id'], '0GK500B4HD0888 at cougar.noc.ucla.edu')
+        eq(dsn1.get_param('dns', header='reporting-mta'), '')
+        # Try a missing one <wink>
+        eq(dsn1.get_param('nsd', header='reporting-mta'), None)
+        dsn2 = subpart.get_payload(1)
+        unless(isinstance(dsn2, Message))
+        eq(dsn2['action'], 'failed')
+        eq(dsn2.get_params(header='original-recipient'),
+           [('rfc822', ''), ('jangel1 at cougar.noc.ucla.edu', '')])
+        eq(dsn2.get_param('rfc822', header='final-recipient'), '')
+        # Subpart 3 is the original message
+        subpart = msg.get_payload(2)
+        eq(subpart.get_type(), 'message/rfc822')
+        payload = subpart.get_payload()
+        unless(isinstance(payload, ListType))
+        eq(len(payload), 1)
+        subsubpart = payload[0]
+        unless(isinstance(subsubpart, Message))
+        eq(subsubpart.get_type(), 'text/plain')
+        eq(subsubpart['message-id'],
+           '<002001c144a6$8752e060$56104586 at oxy.edu>')
+
+    def test_epilogue(self):
+        # Building a multipart by hand with preamble/epilogue set must
+        # flatten to exactly the reference text in msg_21.txt.
+        eq = self.ndiffAssertEqual
+        fp = openfile('msg_21.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        msg = Message()
+        msg['From'] = 'aperson at dom.ain'
+        msg['To'] = 'bperson at dom.ain'
+        msg['Subject'] = 'Test'
+        msg.preamble = 'MIME message\n'
+        msg.epilogue = 'End of MIME message\n'
+        msg1 = MIMEText('One')
+        msg2 = MIMEText('Two')
+        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
+        msg.attach(msg1)
+        msg.attach(msg2)
+        sfp = StringIO()
+        g = Generator(sfp)
+        g.flatten(msg)
+        eq(sfp.getvalue(), text)
+
+    def test_no_nl_preamble(self):
+        # A preamble without a trailing newline still renders correctly
+        # before the first boundary.
+        eq = self.ndiffAssertEqual
+        msg = Message()
+        msg['From'] = 'aperson at dom.ain'
+        msg['To'] = 'bperson at dom.ain'
+        msg['Subject'] = 'Test'
+        msg.preamble = 'MIME message'
+        msg.epilogue = ''
+        msg1 = MIMEText('One')
+        msg2 = MIMEText('Two')
+        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
+        msg.attach(msg1)
+        msg.attach(msg2)
+        eq(msg.as_string(), """\
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: Test
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+MIME message
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+One
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+Two
+--BOUNDARY--
+""")
+
+    def test_default_type(self):
+        # Inside a multipart/digest, subparts with no Content-Type header
+        # default to message/rfc822 rather than text/plain.
+        eq = self.assertEqual
+        fp = openfile('msg_30.txt')
+        try:
+            msg = email.message_from_file(fp)
+        finally:
+            fp.close()
+        container1 = msg.get_payload(0)
+        eq(container1.get_default_type(), 'message/rfc822')
+        eq(container1.get_type(), None)
+        container2 = msg.get_payload(1)
+        eq(container2.get_default_type(), 'message/rfc822')
+        eq(container2.get_type(), None)
+        container1a = container1.get_payload(0)
+        eq(container1a.get_default_type(), 'text/plain')
+        eq(container1a.get_type(), 'text/plain')
+        container2a = container2.get_payload(0)
+        eq(container2a.get_default_type(), 'text/plain')
+        eq(container2a.get_type(), 'text/plain')
+
+    def test_default_type_with_explicit_container_type(self):
+        # Same structure as above, but the subparts carry an explicit
+        # Content-Type: message/rfc822 header; default and explicit agree.
+        eq = self.assertEqual
+        fp = openfile('msg_28.txt')
+        try:
+            msg = email.message_from_file(fp)
+        finally:
+            fp.close()
+        container1 = msg.get_payload(0)
+        eq(container1.get_default_type(), 'message/rfc822')
+        eq(container1.get_type(), 'message/rfc822')
+        container2 = msg.get_payload(1)
+        eq(container2.get_default_type(), 'message/rfc822')
+        eq(container2.get_type(), 'message/rfc822')
+        container1a = container1.get_payload(0)
+        eq(container1a.get_default_type(), 'text/plain')
+        eq(container1a.get_type(), 'text/plain')
+        container2a = container2.get_payload(0)
+        eq(container2a.get_default_type(), 'text/plain')
+        eq(container2a.get_type(), 'text/plain')
+
+    def test_default_type_non_parsed(self):
+        # Build the digest programmatically (not via the parser), then
+        # verify the generated text both with and without the subparts'
+        # explicit Content-Type/MIME-Version headers.
+        eq = self.assertEqual
+        neq = self.ndiffAssertEqual
+        # Set up container
+        container = MIMEMultipart('digest', 'BOUNDARY')
+        container.epilogue = '\n'
+        # Set up subparts
+        subpart1a = MIMEText('message 1\n')
+        subpart2a = MIMEText('message 2\n')
+        subpart1 = MIMEMessage(subpart1a)
+        subpart2 = MIMEMessage(subpart2a)
+        container.attach(subpart1)
+        container.attach(subpart2)
+        eq(subpart1.get_type(), 'message/rfc822')
+        eq(subpart1.get_default_type(), 'message/rfc822')
+        eq(subpart2.get_type(), 'message/rfc822')
+        eq(subpart2.get_default_type(), 'message/rfc822')
+        neq(container.as_string(0), '''\
+Content-Type: multipart/digest; boundary="BOUNDARY"
+MIME-Version: 1.0
+
+--BOUNDARY
+Content-Type: message/rfc822
+MIME-Version: 1.0
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 1
+
+--BOUNDARY
+Content-Type: message/rfc822
+MIME-Version: 1.0
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 2
+
+--BOUNDARY--
+''')
+        del subpart1['content-type']
+        del subpart1['mime-version']
+        del subpart2['content-type']
+        del subpart2['mime-version']
+        eq(subpart1.get_type(), None)
+        eq(subpart1.get_default_type(), 'message/rfc822')
+        eq(subpart2.get_type(), None)
+        eq(subpart2.get_default_type(), 'message/rfc822')
+        neq(container.as_string(0), '''\
+Content-Type: multipart/digest; boundary="BOUNDARY"
+MIME-Version: 1.0
+
+--BOUNDARY
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 1
+
+--BOUNDARY
+
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+message 2
+
+--BOUNDARY--
+''')
+
+
+
+# A general test of parser->model->generator idempotency.  IOW, read a message
+# in, parse it into a message object tree, then without touching the tree,
+# regenerate the plain text.  The original text and the transformed text
+# should be identical.  Note: that we ignore the Unix-From since that may
+# contain a changed date.
+class TestIdempotent(TestEmailBase):
+    """Parser -> model -> generator round-trip tests: regenerating an
+    unmodified message tree must reproduce the original text exactly."""
+
+    def _msgobj(self, filename):
+        # Return (parsed message, raw text) for a sample file.
+        fp = openfile(filename)
+        try:
+            data = fp.read()
+        finally:
+            fp.close()
+        msg = email.message_from_string(data)
+        return msg, data
+
+    def _idempotent(self, msg, text):
+        # Flatten without header wrapping (maxheaderlen=0) so the output
+        # can be compared byte-for-byte against the original text.
+        eq = self.ndiffAssertEqual
+        s = StringIO()
+        g = Generator(s, maxheaderlen=0)
+        g.flatten(msg)
+        eq(text, s.getvalue())
+
+    def test_parse_text_message(self):
+        eq = self.assertEquals
+        msg, text = self._msgobj('msg_01.txt')
+        eq(msg.get_type(), 'text/plain')
+        eq(msg.get_main_type(), 'text')
+        eq(msg.get_subtype(), 'plain')
+        eq(msg.get_params()[1], ('charset', 'us-ascii'))
+        eq(msg.get_param('charset'), 'us-ascii')
+        eq(msg.preamble, None)
+        eq(msg.epilogue, None)
+        self._idempotent(msg, text)
+
+    def test_parse_untyped_message(self):
+        # No Content-Type header at all: type/params come back as None.
+        eq = self.assertEquals
+        msg, text = self._msgobj('msg_03.txt')
+        eq(msg.get_type(), None)
+        eq(msg.get_params(), None)
+        eq(msg.get_param('charset'), None)
+        self._idempotent(msg, text)
+
+    # The following tests simply round-trip a variety of message shapes
+    # (multiparts, digests, DSNs, RFC 2231 parameters, edge cases).
+
+    def test_simple_multipart(self):
+        msg, text = self._msgobj('msg_04.txt')
+        self._idempotent(msg, text)
+
+    def test_MIME_digest(self):
+        msg, text = self._msgobj('msg_02.txt')
+        self._idempotent(msg, text)
+
+    def test_long_header(self):
+        msg, text = self._msgobj('msg_27.txt')
+        self._idempotent(msg, text)
+
+    def test_MIME_digest_with_part_headers(self):
+        msg, text = self._msgobj('msg_28.txt')
+        self._idempotent(msg, text)
+
+    def test_mixed_with_image(self):
+        msg, text = self._msgobj('msg_06.txt')
+        self._idempotent(msg, text)
+
+    def test_multipart_report(self):
+        msg, text = self._msgobj('msg_05.txt')
+        self._idempotent(msg, text)
+
+    def test_dsn(self):
+        msg, text = self._msgobj('msg_16.txt')
+        self._idempotent(msg, text)
+
+    def test_preamble_epilogue(self):
+        msg, text = self._msgobj('msg_21.txt')
+        self._idempotent(msg, text)
+
+    def test_multipart_one_part(self):
+        msg, text = self._msgobj('msg_23.txt')
+        self._idempotent(msg, text)
+
+    def test_multipart_no_parts(self):
+        msg, text = self._msgobj('msg_24.txt')
+        self._idempotent(msg, text)
+
+    def test_no_start_boundary(self):
+        msg, text = self._msgobj('msg_31.txt')
+        self._idempotent(msg, text)
+
+    def test_rfc2231_charset(self):
+        msg, text = self._msgobj('msg_32.txt')
+        self._idempotent(msg, text)
+
+    def test_more_rfc2231_parameters(self):
+        msg, text = self._msgobj('msg_33.txt')
+        self._idempotent(msg, text)
+
+    def test_text_plain_in_a_multipart_digest(self):
+        msg, text = self._msgobj('msg_34.txt')
+        self._idempotent(msg, text)
+
+    def test_content_type(self):
+        # Deep inspection of a multipart/report structure and its
+        # Content-Type parameters, preamble and epilogue.
+        eq = self.assertEquals
+        unless = self.failUnless
+        # Get a message object and reset the seek pointer for other tests
+        msg, text = self._msgobj('msg_05.txt')
+        eq(msg.get_type(), 'multipart/report')
+        # Test the Content-Type: parameters
+        params = {}
+        for pk, pv in msg.get_params():
+            params[pk] = pv
+        eq(params['report-type'], 'delivery-status')
+        eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
+        eq(msg.preamble, 'This is a MIME-encapsulated message.\n\n')
+        eq(msg.epilogue, '\n\n')
+        eq(len(msg.get_payload()), 3)
+        # Make sure the subparts are what we expect
+        msg1 = msg.get_payload(0)
+        eq(msg1.get_type(), 'text/plain')
+        eq(msg1.get_payload(), 'Yadda yadda yadda\n')
+        msg2 = msg.get_payload(1)
+        eq(msg2.get_type(), None)
+        eq(msg2.get_payload(), 'Yadda yadda yadda\n')
+        msg3 = msg.get_payload(2)
+        eq(msg3.get_type(), 'message/rfc822')
+        self.failUnless(isinstance(msg3, Message))
+        payload = msg3.get_payload()
+        unless(isinstance(payload, ListType))
+        eq(len(payload), 1)
+        msg4 = payload[0]
+        unless(isinstance(msg4, Message))
+        eq(msg4.get_payload(), 'Yadda yadda yadda\n')
+
+    def test_parser(self):
+        eq = self.assertEquals
+        unless = self.failUnless
+        msg, text = self._msgobj('msg_06.txt')
+        # Check some of the outer headers
+        eq(msg.get_type(), 'message/rfc822')
+        # Make sure the payload is a list of exactly one sub-Message, and that
+        # that submessage has a type of text/plain
+        payload = msg.get_payload()
+        unless(isinstance(payload, ListType))
+        eq(len(payload), 1)
+        msg1 = payload[0]
+        self.failUnless(isinstance(msg1, Message))
+        eq(msg1.get_type(), 'text/plain')
+        self.failUnless(isinstance(msg1.get_payload(), StringType))
+        eq(msg1.get_payload(), '\n')
+
+
+
+# Test various other bits of the package's functionality
+class TestMiscellaneous(unittest.TestCase):
+    """Grab-bag tests: module factories, Utils helpers, Charset."""
+
+    def test_message_from_string(self):
+        fp = openfile('msg_01.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        msg = email.message_from_string(text)
+        s = StringIO()
+        # Don't wrap/continue long headers since we're trying to test
+        # idempotency.
+        g = Generator(s, maxheaderlen=0)
+        g.flatten(msg)
+        self.assertEqual(text, s.getvalue())
+
+    def test_message_from_file(self):
+        fp = openfile('msg_01.txt')
+        try:
+            text = fp.read()
+            fp.seek(0)
+            msg = email.message_from_file(fp)
+            s = StringIO()
+            # Don't wrap/continue long headers since we're trying to test
+            # idempotency.
+            g = Generator(s, maxheaderlen=0)
+            g.flatten(msg)
+            self.assertEqual(text, s.getvalue())
+        finally:
+            fp.close()
+
+    def test_message_from_string_with_class(self):
+        # The optional _class argument makes the factory build instances
+        # of a Message subclass, recursively for all subparts.
+        unless = self.failUnless
+        fp = openfile('msg_01.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        # Create a subclass
+        class MyMessage(Message):
+            pass
+
+        msg = email.message_from_string(text, MyMessage)
+        unless(isinstance(msg, MyMessage))
+        # Try something more complicated
+        fp = openfile('msg_02.txt')
+        try:
+            text = fp.read()
+        finally:
+            fp.close()
+        msg = email.message_from_string(text, MyMessage)
+        for subpart in msg.walk():
+            unless(isinstance(subpart, MyMessage))
+
+    def test_message_from_file_with_class(self):
+        unless = self.failUnless
+        # Create a subclass
+        class MyMessage(Message):
+            pass
+
+        fp = openfile('msg_01.txt')
+        try:
+            msg = email.message_from_file(fp, MyMessage)
+        finally:
+            fp.close()
+        unless(isinstance(msg, MyMessage))
+        # Try something more complicated
+        fp = openfile('msg_02.txt')
+        try:
+            msg = email.message_from_file(fp, MyMessage)
+        finally:
+            fp.close()
+        for subpart in msg.walk():
+            unless(isinstance(subpart, MyMessage))
+
+    def test__all__(self):
+        # Pin the package's public API surface.
+        module = __import__('email')
+        all = module.__all__
+        all.sort()
+        self.assertEqual(all, ['Charset', 'Encoders', 'Errors', 'Generator',
+                               'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
+                               'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
+                               'MIMENonMultipart', 'MIMEText', 'Message',
+                               'Parser', 'Utils', 'base64MIME',
+                               'message_from_file', 'message_from_string',
+                               'quopriMIME'])
+
+    def test_formatdate(self):
+        # formatdate()/parsedate() round-trip in UTC by default.
+        now = time.time()
+        self.assertEqual(Utils.parsedate(Utils.formatdate(now))[:6],
+                         time.gmtime(now)[:6])
+
+    def test_formatdate_localtime(self):
+        now = time.time()
+        self.assertEqual(
+            Utils.parsedate(Utils.formatdate(now, localtime=True))[:6],
+            time.localtime(now)[:6])
+
+    def test_parsedate_none(self):
+        self.assertEqual(Utils.parsedate(''), None)
+
+    def test_parsedate_compact(self):
+        # The FWS after the comma is optional
+        self.assertEqual(Utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
+                         Utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
+
+    def test_parsedate_no_dayofweek(self):
+        eq = self.assertEqual
+        eq(Utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
+           (2003, 2, 25, 13, 47, 26, 0, 0, 0, -28800))
+
+    def test_parsedate_compact_no_dayofweek(self):
+        eq = self.assertEqual
+        eq(Utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
+           (2003, 2, 5, 13, 47, 26, 0, 0, 0, -28800))
+
+    def test_parseaddr_empty(self):
+        self.assertEqual(Utils.parseaddr('<>'), ('', ''))
+        self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+
+    def test_noquote_dump(self):
+        self.assertEqual(
+            Utils.formataddr(('A Silly Person', 'person at dom.ain')),
+            'A Silly Person <person at dom.ain>')
+
+    def test_escape_dump(self):
+        # Parentheses in a display name get quoted and backslash-escaped.
+        self.assertEqual(
+            Utils.formataddr(('A (Very) Silly Person', 'person at dom.ain')),
+            r'"A \(Very\) Silly Person" <person at dom.ain>')
+        a = r'A \(Special\) Person'
+        b = 'person at dom.ain'
+        self.assertEqual(Utils.parseaddr(Utils.formataddr((a, b))), (a, b))
+
+    def test_escape_backslashes(self):
+        # NOTE(review): the name below is a non-raw string containing '\B'
+        # and '\ '; Python leaves unrecognized escapes intact so it equals
+        # the raw form.  Kept byte-identical to upstream.
+        self.assertEqual(
+            Utils.formataddr(('Arthur \Backslash\ Foobar', 'person at dom.ain')),
+            r'"Arthur \\Backslash\\ Foobar" <person at dom.ain>')
+        a = r'Arthur \Backslash\ Foobar'
+        b = 'person at dom.ain'
+        self.assertEqual(Utils.parseaddr(Utils.formataddr((a, b))), (a, b))
+
+    def test_name_with_dot(self):
+        x = 'John X. Doe <jxd at example.com>'
+        y = '"John X. Doe" <jxd at example.com>'
+        a, b = ('John X. Doe', 'jxd at example.com')
+        self.assertEqual(Utils.parseaddr(x), (a, b))
+        self.assertEqual(Utils.parseaddr(y), (a, b))
+        # formataddr() quotes the name if there's a dot in it
+        self.assertEqual(Utils.formataddr((a, b)), y)
+
+    def test_quote_dump(self):
+        self.assertEqual(
+            Utils.formataddr(('A Silly; Person', 'person at dom.ain')),
+            r'"A Silly; Person" <person at dom.ain>')
+
+    def test_fix_eols(self):
+        # fix_eols() normalizes lone \n and \r line endings to \r\n.
+        eq = self.assertEqual
+        eq(Utils.fix_eols('hello'), 'hello')
+        eq(Utils.fix_eols('hello\n'), 'hello\r\n')
+        eq(Utils.fix_eols('hello\r'), 'hello\r\n')
+        eq(Utils.fix_eols('hello\r\n'), 'hello\r\n')
+        eq(Utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
+
+    def test_charset_richcomparisons(self):
+        # Charset compares case-insensitively against strings and other
+        # Charset instances.
+        eq = self.assertEqual
+        ne = self.failIfEqual
+        cset1 = Charset()
+        cset2 = Charset()
+        eq(cset1, 'us-ascii')
+        eq(cset1, 'US-ASCII')
+        eq(cset1, 'Us-AsCiI')
+        eq('us-ascii', cset1)
+        eq('US-ASCII', cset1)
+        eq('Us-AsCiI', cset1)
+        ne(cset1, 'usascii')
+        ne(cset1, 'USASCII')
+        ne(cset1, 'UsAsCiI')
+        ne('usascii', cset1)
+        ne('USASCII', cset1)
+        ne('UsAsCiI', cset1)
+        eq(cset1, cset2)
+        eq(cset2, cset1)
+
+    def test_getaddresses(self):
+        eq = self.assertEqual
+        eq(Utils.getaddresses(['aperson at dom.ain (Al Person)',
+                               'Bud Person <bperson at dom.ain>']),
+           [('Al Person', 'aperson at dom.ain'),
+            ('Bud Person', 'bperson at dom.ain')])
+
+    def test_getaddresses_nasty(self):
+        # Malformed address lists must not crash the parser.
+        eq = self.assertEqual
+        eq(Utils.getaddresses(['foo: ;']), [('', '')])
+        eq(Utils.getaddresses(
+           ['[]*-- =~$']),
+           [('', ''), ('', ''), ('', '*--')])
+        eq(Utils.getaddresses(
+           ['foo: ;', '"Jason R. Mastaler" <jason at dom.ain>']),
+           [('', ''), ('Jason R. Mastaler', 'jason at dom.ain')])
+
+    def test_utils_quote_unquote(self):
+        # Backslashes and quotes in a parameter value survive the
+        # quote/unquote round trip.
+        eq = self.assertEqual
+        msg = Message()
+        msg.add_header('content-disposition', 'attachment',
+                       filename='foo\\wacky"name')
+        eq(msg.get_filename(), 'foo\\wacky"name')
+
+    def test_get_body_encoding_with_bogus_charset(self):
+        charset = Charset('not a charset')
+        self.assertEqual(charset.get_body_encoding(), 'base64')
+
+    def test_get_body_encoding_with_uppercase_charset(self):
+        # Charset lookup is case-insensitive; UTF-8 maps to base64 while
+        # US-ASCII maps to 7or8bit.
+        eq = self.assertEqual
+        msg = Message()
+        msg['Content-Type'] = 'text/plain; charset=UTF-8'
+        eq(msg['content-type'], 'text/plain; charset=UTF-8')
+        charsets = msg.get_charsets()
+        eq(len(charsets), 1)
+        eq(charsets[0], 'utf-8')
+        charset = Charset(charsets[0])
+        eq(charset.get_body_encoding(), 'base64')
+        msg.set_payload('hello world', charset=charset)
+        eq(msg.get_payload(), 'hello world')
+        eq(msg['content-transfer-encoding'], 'base64')
+        # Try another one
+        msg = Message()
+        msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
+        charsets = msg.get_charsets()
+        eq(len(charsets), 1)
+        eq(charsets[0], 'us-ascii')
+        charset = Charset(charsets[0])
+        eq(charset.get_body_encoding(), Encoders.encode_7or8bit)
+        msg.set_payload('hello world', charset=charset)
+        eq(msg.get_payload(), 'hello world')
+        eq(msg['content-transfer-encoding'], '7bit')
+
+    def test_charsets_case_insensitive(self):
+        lc = Charset('us-ascii')
+        uc = Charset('US-ASCII')
+        self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
+
+
+
+# Test the iterator/generators
+class TestIterators(TestEmailBase):
+    """Tests for the email.Iterators helper generators."""
+
+    def test_body_line_iterator(self):
+        # body_line_iterator yields every body line across all subparts;
+        # joined, they reconstruct the full (sub)payload text.
+        eq = self.assertEqual
+        # First a simple non-multipart message
+        msg = self._msgobj('msg_01.txt')
+        it = Iterators.body_line_iterator(msg)
+        lines = list(it)
+        eq(len(lines), 6)
+        eq(EMPTYSTRING.join(lines), msg.get_payload())
+        # Now a more complicated multipart
+        msg = self._msgobj('msg_02.txt')
+        it = Iterators.body_line_iterator(msg)
+        lines = list(it)
+        eq(len(lines), 43)
+        # msg_19.txt holds the expected flattened body of msg_02.txt
+        fp = openfile('msg_19.txt')
+        try:
+            eq(EMPTYSTRING.join(lines), fp.read())
+        finally:
+            fp.close()
+
+    def test_typed_subpart_iterator(self):
+        # Iterating by main type 'text' visits both text subparts.
+        eq = self.assertEqual
+        msg = self._msgobj('msg_04.txt')
+        it = Iterators.typed_subpart_iterator(msg, 'text')
+        lines = []
+        subparts = 0
+        for subpart in it:
+            subparts += 1
+            lines.append(subpart.get_payload())
+        eq(subparts, 2)
+        eq(EMPTYSTRING.join(lines), """\
+a simple kind of mirror
+to reflect upon our own
+a simple kind of mirror
+to reflect upon our own
+""")
+
+    def test_typed_subpart_iterator_default_type(self):
+        # A message with no explicit Content-Type still matches its
+        # default text/plain type.
+        eq = self.assertEqual
+        msg = self._msgobj('msg_03.txt')
+        it = Iterators.typed_subpart_iterator(msg, 'text', 'plain')
+        lines = []
+        subparts = 0
+        for subpart in it:
+            subparts += 1
+            lines.append(subpart.get_payload())
+        eq(subparts, 1)
+        eq(EMPTYSTRING.join(lines), """\
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+
+
+class TestParsers(TestEmailBase):
+    """Tests for Parser/HeaderParser behavior on edge-case input."""
+
+    def test_header_parser(self):
+        # HeaderParser leaves the body unparsed: even a multipart document
+        # comes back as a non-multipart message with a string payload.
+        eq = self.assertEqual
+        # Parse only the headers of a complex multipart MIME document
+        fp = openfile('msg_02.txt')
+        try:
+            msg = HeaderParser().parse(fp)
+        finally:
+            fp.close()
+        eq(msg['from'], 'ppp-request at zzz.org')
+        eq(msg['to'], 'ppp at zzz.org')
+        eq(msg.get_type(), 'multipart/mixed')
+        eq(msg.is_multipart(), 0)
+        self.failUnless(isinstance(msg.get_payload(), StringType))
+
+    def test_whitespace_continuaton(self):
+        eq = self.assertEqual
+        # This message contains a line after the Subject: header that has only
+        # whitespace, but it is not empty!
+        msg = email.message_from_string("""\
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: the next line has a space on it
+\x20
+Date: Mon, 8 Apr 2002 15:09:19 -0400
+Message-ID: spam
+
+Here's the message body
+""")
+        eq(msg['subject'], 'the next line has a space on it\n ')
+        eq(msg['message-id'], 'spam')
+        eq(msg.get_payload(), "Here's the message body\n")
+
+    def test_crlf_separation(self):
+        # A CRLF-delimited message (opened binary so \r survives) still
+        # splits into its two subparts.
+        eq = self.assertEqual
+        fp = openfile('msg_26.txt', mode='rb')
+        try:
+            msg = Parser().parse(fp)
+        finally:
+            fp.close()
+        eq(len(msg.get_payload()), 2)
+        part1 = msg.get_payload(0)
+        eq(part1.get_type(), 'text/plain')
+        eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
+        part2 = msg.get_payload(1)
+        eq(part2.get_type(), 'application/riscos')
+
+    def test_multipart_digest_with_extra_mime_headers(self):
+        eq = self.assertEqual
+        neq = self.ndiffAssertEqual
+        fp = openfile('msg_28.txt')
+        try:
+            msg = email.message_from_file(fp)
+        finally:
+            fp.close()
+        # Structure is:
+        # multipart/digest
+        #   message/rfc822
+        #     text/plain
+        #   message/rfc822
+        #     text/plain
+        eq(msg.is_multipart(), 1)
+        eq(len(msg.get_payload()), 2)
+        part1 = msg.get_payload(0)
+        eq(part1.get_type(), 'message/rfc822')
+        eq(part1.is_multipart(), 1)
+        eq(len(part1.get_payload()), 1)
+        part1a = part1.get_payload(0)
+        eq(part1a.is_multipart(), 0)
+        eq(part1a.get_type(), 'text/plain')
+        neq(part1a.get_payload(), 'message 1\n')
+        # next message/rfc822
+        part2 = msg.get_payload(1)
+        eq(part2.get_type(), 'message/rfc822')
+        eq(part2.is_multipart(), 1)
+        eq(len(part2.get_payload()), 1)
+        part2a = part2.get_payload(0)
+        eq(part2a.is_multipart(), 0)
+        eq(part2a.get_type(), 'text/plain')
+        neq(part2a.get_payload(), 'message 2\n')
+
+    def test_three_lines(self):
+        # A bug report by Andrew McNamara
+        # Headers-only input with no body and no trailing newline must
+        # still parse cleanly.
+        lines = ['From: Andrew Person <aperson at dom.ain',
+                 'Subject: Test',
+                 'Date: Tue, 20 Aug 2002 16:43:45 +1000']
+        msg = email.message_from_string(NL.join(lines))
+        self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
+
+
+
+class TestBase64(unittest.TestCase):
+    def test_len(self):
+        # base64_len() must predict the exact encoded length: base64 maps
+        # every 3 input bytes (or fraction thereof) to 4 output chars.
+        eq = self.assertEqual
+        eq(base64MIME.base64_len('hello'),
+           len(base64MIME.encode('hello', eol='')))
+        for size in range(15):
+            if   size == 0 : bsize = 0
+            elif size <= 3 : bsize = 4
+            elif size <= 6 : bsize = 8
+            elif size <= 9 : bsize = 12
+            elif size <= 12: bsize = 16
+            else           : bsize = 20
+            eq(base64MIME.base64_len('x'*size), bsize)
+
+    def test_decode(self):
+        # The second argument replaces RFC 2045 CRLF line breaks in the
+        # decoded output (here 'X' substitutes for '\r\n').
+        eq = self.assertEqual
+        eq(base64MIME.decode(''), '')
+        eq(base64MIME.decode('aGVsbG8='), 'hello')
+        eq(base64MIME.decode('aGVsbG8=', 'X'), 'hello')
+        eq(base64MIME.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
+
+    def test_encode(self):
+        eq = self.assertEqual
+        eq(base64MIME.encode(''), '')
+        eq(base64MIME.encode('hello'), 'aGVsbG8=\n')
+        # Test the binary flag
+        eq(base64MIME.encode('hello\n'), 'aGVsbG8K\n')
+        eq(base64MIME.encode('hello\n', 0), 'aGVsbG8NCg==\n')
+        # Test the maxlinelen arg
+        eq(base64MIME.encode('xxxx ' * 20, maxlinelen=40), """\
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
+eHh4eCB4eHh4IA==
+""")
+        # Test the eol argument
+        eq(base64MIME.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
+eHh4eCB4eHh4IA==\r
+""")
+
+    def test_header_encode(self):
+        eq = self.assertEqual
+        he = base64MIME.header_encode
+        eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
+        eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
+        # Test the charset option
+        eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
+        # Test the keep_eols flag
+        eq(he('hello\nworld', keep_eols=True),
+           '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
+        # Test the maxlinelen argument
+        eq(he('xxxx ' * 20, maxlinelen=40), """\
+=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
+ =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
+ =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
+ =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
+ =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
+ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
+        # Test the eol argument
+        eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
+ =?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
+ =?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
+ =?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
+ =?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
+ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
+
+
+
+class TestQuopri(unittest.TestCase):
+    """Tests for the email.quopriMIME quoted-printable helpers."""
+    def setUp(self):
+        # hlit: characters that may appear literally (unencoded) in a
+        # quoted-printable *header*; hnon: everything else must be quoted.
+        self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
+                    [chr(x) for x in range(ord('A'), ord('Z')+1)] + \
+                    [chr(x) for x in range(ord('0'), ord('9')+1)] + \
+                    ['!', '*', '+', '-', '/', ' ']
+        self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
+        assert len(self.hlit) + len(self.hnon) == 256
+        # blit/bnon: same partition for message *bodies*, where all
+        # printable ASCII plus tab is literal -- except '=' which starts
+        # a quoted sequence.
+        self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
+        self.blit.remove('=')
+        self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
+        assert len(self.blit) + len(self.bnon) == 256
+
+    def test_header_quopri_check(self):
+        # header_quopri_check() is true exactly for chars needing quoting.
+        for c in self.hlit:
+            self.failIf(quopriMIME.header_quopri_check(c))
+        for c in self.hnon:
+            self.failUnless(quopriMIME.header_quopri_check(c))
+
+    def test_body_quopri_check(self):
+        for c in self.blit:
+            self.failIf(quopriMIME.body_quopri_check(c))
+        for c in self.bnon:
+            self.failUnless(quopriMIME.body_quopri_check(c))
+
+    def test_header_quopri_len(self):
+        eq = self.assertEqual
+        hql = quopriMIME.header_quopri_len
+        enc = quopriMIME.header_encode
+        for s in ('hello', 'h at e@l at l@o@'):
+            # Empty charset and no line-endings.  7 == RFC chrome
+            eq(hql(s), len(enc(s, charset='', eol=''))-7)
+        # Literal chars cost 1 byte; quoted chars cost 3 ('=XX').
+        for c in self.hlit:
+            eq(hql(c), 1)
+        for c in self.hnon:
+            eq(hql(c), 3)
+
+    def test_body_quopri_len(self):
+        eq = self.assertEqual
+        bql = quopriMIME.body_quopri_len
+        for c in self.blit:
+            eq(bql(c), 1)
+        for c in self.bnon:
+            eq(bql(c), 3)
+
+    def test_quote_unquote_idempotent(self):
+        # unquote(quote(c)) must round-trip every possible byte.
+        for x in range(256):
+            c = chr(x)
+            self.assertEqual(quopriMIME.unquote(quopriMIME.quote(c)), c)
+
+    def test_header_encode(self):
+        eq = self.assertEqual
+        he = quopriMIME.header_encode
+        # RFC 2047 encoded words: =?charset?q?...?=
+        eq(he('hello'), '=?iso-8859-1?q?hello?=')
+        eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
+        # Test the charset option
+        eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
+        # Test the keep_eols flag
+        eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
+        # Test a non-ASCII character
+        eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
+        # Test the maxlinelen argument
+        eq(he('xxxx ' * 20, maxlinelen=40), """\
+=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
+ =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
+ =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
+ =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
+ =?iso-8859-1?q?x_xxxx_xxxx_?=""")
+        # Test the eol argument
+        eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
+ =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
+ =?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
+ =?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
+ =?iso-8859-1?q?x_xxxx_xxxx_?=""")
+
+    def test_decode(self):
+        eq = self.assertEqual
+        eq(quopriMIME.decode(''), '')
+        eq(quopriMIME.decode('hello'), 'hello')
+        eq(quopriMIME.decode('hello', 'X'), 'hello')
+        eq(quopriMIME.decode('hello\nworld', 'X'), 'helloXworld')
+
+    def test_encode(self):
+        eq = self.assertEqual
+        eq(quopriMIME.encode(''), '')
+        eq(quopriMIME.encode('hello'), 'hello')
+        # Test the binary flag
+        eq(quopriMIME.encode('hello\r\nworld'), 'hello\nworld')
+        eq(quopriMIME.encode('hello\r\nworld', 0), 'hello\nworld')
+        # Test the maxlinelen arg
+        eq(quopriMIME.encode('xxxx ' * 20, maxlinelen=40), """\
+xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
+ xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
+x xxxx xxxx xxxx xxxx=20""")
+        # Test the eol argument
+        eq(quopriMIME.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
+xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
+ xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
+x xxxx xxxx xxxx xxxx=20""")
+        eq(quopriMIME.encode("""\
+one line
+
+two line"""), """\
+one line
+
+two line""")
+
+
+
+# Test the Charset class
+class TestCharset(unittest.TestCase):
+    """Tests for the email.Charset class (splittable conversion and
+    body encoding selection)."""
+    def tearDown(self):
+        # test_body_encode registers a 'fake' charset; remove it so the
+        # global CHARSETS registry is not polluted for other tests.
+        from email import Charset as CharsetModule
+        try:
+            del CharsetModule.CHARSETS['fake']
+        except KeyError:
+            pass
+
+    def test_idempotent(self):
+        eq = self.assertEqual
+        # Make sure us-ascii = no Unicode conversion
+        c = Charset('us-ascii')
+        s = 'Hello World!'
+        sp = c.to_splittable(s)
+        eq(s, c.from_splittable(sp))
+        # test 8-bit idempotency with us-ascii
+        s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
+        sp = c.to_splittable(s)
+        eq(s, c.from_splittable(sp))
+
+    def test_body_encode(self):
+        eq = self.assertEqual
+        # Try a charset with QP body encoding
+        c = Charset('iso-8859-1')
+        eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
+        # Try a charset with Base64 body encoding
+        c = Charset('utf-8')
+        eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
+        # Try a charset with None body encoding
+        c = Charset('us-ascii')
+        eq('hello world', c.body_encode('hello world'))
+        # Try the convert argument, where input codec <> output codec
+        c = Charset('euc-jp')
+        # With apologies to Tokio Kikuchi ;)
+        try:
+            eq('\x1b$B5FCO;~IW\x1b(B',
+               c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
+            eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
+               c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
+        except LookupError:
+            # We probably don't have the Japanese codecs installed
+            pass
+        # Testing SF bug #625509, which we have to fake, since there are no
+        # built-in encodings where the header encoding is QP but the body
+        # encoding is not.
+        from email import Charset as CharsetModule
+        CharsetModule.add_charset('fake', CharsetModule.QP, None)
+        c = Charset('fake')
+        eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
+
+
+
+# Test multilingual MIME headers.
+class TestHeader(TestEmailBase):
+    """Tests for email.Header: multilingual (RFC 2047) header encoding,
+    decoding, folding, and round-tripping via make_header()."""
+    def test_simple(self):
+        eq = self.ndiffAssertEqual
+        h = Header('Hello World!')
+        eq(h.encode(), 'Hello World!')
+        h.append(' Goodbye World!')
+        eq(h.encode(), 'Hello World!  Goodbye World!')
+
+    def test_simple_surprise(self):
+        eq = self.ndiffAssertEqual
+        h = Header('Hello World!')
+        eq(h.encode(), 'Hello World!')
+        # append() inserts a joining space between chunks.
+        h.append('Goodbye World!')
+        eq(h.encode(), 'Hello World! Goodbye World!')
+
+    def test_header_needs_no_decoding(self):
+        h = 'no decoding needed'
+        self.assertEqual(decode_header(h), [(h, None)])
+
+    def test_long(self):
+        h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
+                   maxlinelen=76)
+        # Splitting on spaces must keep every folded line within the limit.
+        for l in h.encode(splitchars=' ').split('\n '):
+            self.failUnless(len(l) <= 76)
+
+    def test_multilingual(self):
+        eq = self.ndiffAssertEqual
+        g = Charset("iso-8859-1")
+        cz = Charset("iso-8859-2")
+        utf8 = Charset("utf-8")
+        g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
+        cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
+        utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
+        h = Header(g_head, g)
+        h.append(cz_head, cz)
+        h.append(utf8_head, utf8)
+        enc = h.encode()
+        eq(enc, """\
+=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
+ =?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
+ =?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
+ =?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
+ =?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
+ =?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
+ =?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
+ =?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
+ =?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
+ =?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
+ =?utf-8?b?44CC?=""")
+        # Decoding must recover each chunk with its original charset.
+        eq(decode_header(enc),
+           [(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
+            (utf8_head, "utf-8")])
+        # Test for conversion to unicode.  BAW: Python 2.1 doesn't support the
+        # __unicode__() protocol, so do things this way for compatibility.
+        ustr = h.__unicode__()
+        # For Python 2.2 and beyond
+        #ustr = unicode(h)
+        eq(ustr.encode('utf-8'),
+           'Die Mieter treten hier ein werden mit einem Foerderband '
+           'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
+           'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
+           'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
+           'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
+           '\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
+           '\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
+           '\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
+           '\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
+           '\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
+           '\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
+           '\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
+           '\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
+           'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
+           'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
+           '\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
+        # Test make_header()
+        newh = make_header(decode_header(enc))
+        eq(newh, enc)
+
+    def test_header_ctor_default_args(self):
+        eq = self.ndiffAssertEqual
+        h = Header()
+        eq(h, '')
+        h.append('foo', Charset('iso-8859-1'))
+        eq(h, '=?iso-8859-1?q?foo?=')
+
+    def test_explicit_maxlinelen(self):
+        eq = self.ndiffAssertEqual
+        hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
+        h = Header(hstr)
+        eq(h.encode(), '''\
+A very long line that must get split to something other than at the 76th
+ character boundary to test the non-default behavior''')
+        # Giving a header_name shortens the first line by len('Subject: ').
+        h = Header(hstr, header_name='Subject')
+        eq(h.encode(), '''\
+A very long line that must get split to something other than at the
+ 76th character boundary to test the non-default behavior''')
+        h = Header(hstr, maxlinelen=1024, header_name='Subject')
+        eq(h.encode(), hstr)
+
+    def test_us_ascii_header(self):
+        eq = self.assertEqual
+        s = 'hello'
+        x = decode_header(s)
+        eq(x, [('hello', None)])
+        h = make_header(x)
+        eq(s, h.encode())
+
+    def test_string_charset(self):
+        eq = self.assertEqual
+        h = Header()
+        # append() accepts a charset *name* as well as a Charset instance.
+        h.append('hello', 'iso-8859-1')
+        eq(h, '=?iso-8859-1?q?hello?=')
+
+##    def test_unicode_error(self):
+##        raises = self.assertRaises
+##        raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
+##        raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
+##        h = Header()
+##        raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
+##        raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
+##        raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
+
+    def test_utf8_shortest(self):
+        eq = self.assertEqual
+        # The shorter of QP vs Base64 encoding must be chosen per chunk.
+        h = Header(u'p\xf6stal', 'utf-8')
+        eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
+        h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
+        eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
+
+    def test_bad_8bit_header(self):
+        raises = self.assertRaises
+        eq = self.assertEqual
+        # 8-bit data with no charset raises unless errors='replace'.
+        x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
+        raises(UnicodeError, Header, x)
+        h = Header()
+        raises(UnicodeError, h.append, x)
+        eq(str(Header(x, errors='replace')), x)
+        h.append(x, errors='replace')
+        eq(str(h), x)
+
+    def test_encoded_adjacent_nonencoded(self):
+        eq = self.assertEqual
+        h = Header()
+        h.append('hello', 'iso-8859-1')
+        h.append('world')
+        s = h.encode()
+        eq(s, '=?iso-8859-1?q?hello?= world')
+        h = make_header(decode_header(s))
+        eq(h.encode(), s)
+
+    def test_whitespace_eater(self):
+        eq = self.assertEqual
+        # Whitespace between adjacent encoded words must be eaten on decode.
+        s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
+        parts = decode_header(s)
+        eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
+        hdr = make_header(parts)
+        eq(hdr.encode(),
+           'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
+
+    def test_broken_base64_header(self):
+        raises = self.assertRaises
+        # Malformed base64 payload must raise HeaderParseError, not crash.
+        s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ?='
+        raises(Errors.HeaderParseError, decode_header, s)
+
+
+
+# Test RFC 2231 header parameters (en/de)coding
+class TestRFC2231(TestEmailBase):
+    """Tests for RFC 2231 extended MIME parameter encoding/decoding
+    (charset/language-tagged and continuation-split parameter values)."""
+    def test_get_param(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_29.txt')
+        # RFC 2231 params come back as a (charset, language, value) triple.
+        eq(msg.get_param('title'),
+           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
+        eq(msg.get_param('title', unquote=False),
+           ('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
+
+    def test_set_param(self):
+        eq = self.assertEqual
+        msg = Message()
+        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+                      charset='us-ascii')
+        # Omitting language yields an empty language field in the triple.
+        eq(msg.get_param('title'),
+           ('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
+        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+                      charset='us-ascii', language='en')
+        eq(msg.get_param('title'),
+           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
+        msg = self._msgobj('msg_01.txt')
+        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+                      charset='us-ascii', language='en')
+        eq(msg.as_string(), """\
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+\tid 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+Content-Type: text/plain; charset=us-ascii;
+\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
+
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+    def test_del_param(self):
+        eq = self.ndiffAssertEqual
+        msg = self._msgobj('msg_01.txt')
+        msg.set_param('foo', 'bar', charset='us-ascii', language='en')
+        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
+            charset='us-ascii', language='en')
+        # Deleting one param must leave the remaining ones intact.
+        msg.del_param('foo', header='Content-Type')
+        eq(msg.as_string(), """\
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+\tid 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+Content-Type: text/plain; charset="us-ascii";
+\ttitle*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
+
+
+Hi,
+
+Do you like this message?
+
+-Me
+""")
+
+    def test_rfc2231_get_content_charset(self):
+        eq = self.assertEqual
+        msg = self._msgobj('msg_32.txt')
+        eq(msg.get_content_charset(), 'us-ascii')
+
+    def test_rfc2231_no_language_or_charset(self):
+        # Continuation segments (NAME*0, NAME*1) with no charset/language
+        # must be joined and returned with None for both fields.
+        m = '''\
+Content-Transfer-Encoding: 8bit
+Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
+Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
+
+'''
+        msg = email.message_from_string(m)
+        self.assertEqual(msg.get_param('NAME'),
+                         (None, None, 'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm'))
+
+    def test_rfc2231_no_language_or_charset_in_filename(self):
+        m = '''\
+Content-Disposition: inline;
+\tfilename*0="This%20is%20even%20more%20";
+\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tfilename*2="is it not.pdf"
+
+'''
+        msg = email.message_from_string(m)
+        self.assertEqual(msg.get_filename(),
+                         'This is even more ***fun*** is it not.pdf')
+
+    def test_rfc2231_no_language_or_charset_in_boundary(self):
+        m = '''\
+Content-Type: multipart/alternative;
+\tboundary*0="This%20is%20even%20more%20";
+\tboundary*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tboundary*2="is it not.pdf"
+
+'''
+        msg = email.message_from_string(m)
+        self.assertEqual(msg.get_boundary(),
+                         'This is even more ***fun*** is it not.pdf')
+
+    def test_rfc2231_no_language_or_charset_in_charset(self):
+        # This is a nonsensical charset value, but tests the code anyway
+        m = '''\
+Content-Type: text/plain;
+\tcharset*0="This%20is%20even%20more%20";
+\tcharset*1="%2A%2A%2Afun%2A%2A%2A%20";
+\tcharset*2="is it not.pdf"
+
+'''
+        msg = email.message_from_string(m)
+        # Note: get_content_charset() lower-cases the result.
+        self.assertEqual(msg.get_content_charset(),
+                         'this is even more ***fun*** is it not.pdf')
+
+
+
+def _testclasses():
+    # Collect every class in this module whose name starts with 'Test',
+    # so new test classes are picked up automatically.
+    mod = sys.modules[__name__]
+    return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
+
+
+def suite():
+    # Build a TestSuite covering all Test* classes (used by the
+    # defaultTest='suite' entry point below).
+    suite = unittest.TestSuite()
+    for testclass in _testclasses():
+        suite.addTest(unittest.makeSuite(testclass))
+    return suite
+
+
+def test_main():
+    # Entry point for Python's regression-test driver (regrtest).
+    for testclass in _testclasses():
+        run_unittest(testclass)
+
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='suite')
diff --git a/lib-python/2.2/email/test/test_email_codecs.py b/lib-python/2.2/email/test/test_email_codecs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/test_email_codecs.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2002 Python Software Foundation
+# email package unit tests for (optional) Asian codecs
+
+import unittest
+from test.test_support import run_unittest
+
+from email.test.test_email import TestEmailBase
+from email.Charset import Charset
+from email.Header import Header, decode_header
+
+# See if we have the Japanese codecs package installed
+try:
+    unicode('foo', 'japanese.iso-2022-jp')
+except LookupError:
+    # Different in Python 2.3
+    # NOTE(review): this imports from a top-level `test_support` (not
+    # `test.test_support` as above) -- presumably the standalone email
+    # package layout; verify the name resolves in both layouts.
+    from test_support import TestSkipped
+    raise TestSkipped, 'Optional Japanese codecs not installed'
+
+
+
+class TestEmailAsianCodecs(TestEmailBase):
+    """Header encoding tests requiring the optional Japanese codecs."""
+    def test_japanese_codecs(self):
+        eq = self.ndiffAssertEqual
+        j = Charset("euc-jp")
+        g = Charset("iso-8859-1")
+        h = Header("Hello World!")
+        # jhello is EUC-JP encoded Japanese text; ghello is Latin-1 German.
+        jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
+        ghello = 'Gr\xfc\xdf Gott!'
+        h.append(jhello, j)
+        h.append(ghello, g)
+        # BAW: This used to -- and maybe should -- fold the two iso-8859-1
+        # chunks into a single encoded word.  However it doesn't violate the
+        # standard to have them as two encoded chunks and maybe it's
+        # reasonable <wink> for each .append() call to result in a separate
+        # encoded word.
+        eq(h.encode(), """\
+Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
+ =?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""")
+        eq(decode_header(h.encode()),
+           [('Hello World!', None),
+            ('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
+            ('Gr\xfc\xdf Gott!', 'iso-8859-1')])
+        long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9'
+        h = Header(long, j, header_name="Subject")
+        # test a very long header
+        enc = h.encode()
+        # TK: splitting point may differ by codec design and/or Header encoding
+        eq(enc , """\
+=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
+ =?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
+        # TK: full decode comparison
+        eq(h.__unicode__().encode('euc-jp'), long)
+
+
+
+def suite():
+    # Single-class suite for the defaultTest='suite' entry point.
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
+    return suite
+
+
+def test_main():
+    # Entry point for Python's regression-test driver (regrtest).
+    run_unittest(TestEmailAsianCodecs)
+
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='suite')
diff --git a/lib-python/2.2/email/test/test_email_torture.py b/lib-python/2.2/email/test/test_email_torture.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/email/test/test_email_torture.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2002 Python Software Foundation
+#
+# A torture test of the email package.  This should not be run as part of the
+# standard Python test suite since it requires several meg of email messages
+# collected in the wild.  These source messages are not checked into the
+# Python distro, but are available as part of the standalone email package at
+# http://sf.net/projects/mimelib
+
+import sys
+import os
+import unittest
+from cStringIO import StringIO
+from types import ListType
+
+from email.test.test_email import TestEmailBase
+from test.test_support import TestSkipped
+
+import email
+from email import __file__ as testfile
+from email.Iterators import _structure
+
+def openfile(filename):
+    # Open a torture-test data file from the 'moredata' directory that
+    # sits next to the email package (these files are not shipped with
+    # the Python distro -- see the module comment above).
+    from os.path import join, dirname, abspath
+    path = abspath(join(dirname(testfile), os.pardir, 'moredata', filename))
+    return open(path, 'r')
+
+# Prevent this test from running in the Python distro
+# (IOError here means the 'moredata' directory is absent, i.e. we are in
+# a stock Python checkout rather than the standalone email package).
+try:
+    openfile('crispin-torture.txt')
+except IOError:
+    raise TestSkipped
+
+
+
+class TortureBase(TestEmailBase):
+    """Base class overriding _msgobj() to load messages from the
+    'moredata' directory instead of the standard test data."""
+    def _msgobj(self, filename):
+        fp = openfile(filename)
+        try:
+            msg = email.message_from_file(fp)
+        finally:
+            # Always close the file, even if parsing raises.
+            fp.close()
+        return msg
+
+
+
+class TestCrispinTorture(TortureBase):
+    # Mark Crispin's torture test from the SquirrelMail project
+    def test_mondo_message(self):
+        eq = self.assertEqual
+        neq = self.ndiffAssertEqual
+        msg = self._msgobj('crispin-torture.txt')
+        payload = msg.get_payload()
+        eq(type(payload), ListType)
+        eq(len(payload), 12)
+        eq(msg.preamble, None)
+        eq(msg.epilogue, '\n\n')
+        # Probably the best way to verify the message is parsed correctly is to
+        # dump its structure and compare it against the known structure.
+        fp = StringIO()
+        _structure(msg, fp=fp)
+        neq(fp.getvalue(), """\
+multipart/mixed
+    text/plain
+    message/rfc822
+        multipart/alternative
+            text/plain
+            multipart/mixed
+                text/richtext
+            application/andrew-inset
+    message/rfc822
+        audio/basic
+    audio/basic
+    image/pbm
+    message/rfc822
+        multipart/mixed
+            multipart/mixed
+                text/plain
+                audio/x-sun
+            multipart/mixed
+                image/gif
+                image/gif
+                application/x-be2
+                application/atomicmail
+            audio/x-sun
+    message/rfc822
+        multipart/mixed
+            text/plain
+            image/pgm
+            text/plain
+    message/rfc822
+        multipart/mixed
+            text/plain
+            image/pbm
+    message/rfc822
+        application/postscript
+    image/gif
+    message/rfc822
+        multipart/mixed
+            audio/basic
+            audio/basic
+    message/rfc822
+        multipart/mixed
+            application/postscript
+            text/plain
+            message/rfc822
+                multipart/mixed
+                    text/plain
+                    multipart/parallel
+                        image/gif
+                        audio/basic
+                    application/atomicmail
+                    message/rfc822
+                        audio/x-sun
+""")
+
+
+def _testclasses():
+    # Collect every class in this module whose name starts with 'Test'.
+    mod = sys.modules[__name__]
+    return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
+
+
+def suite():
+    # Build a TestSuite covering all Test* classes in this module.
+    suite = unittest.TestSuite()
+    for testclass in _testclasses():
+        suite.addTest(unittest.makeSuite(testclass))
+    return suite
+
+
+def test_main():
+    for testclass in _testclasses():
+        test_support.run_unittest(testclass)
+
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='suite')
diff --git a/lib-python/2.2/encodings/__init__.py b/lib-python/2.2/encodings/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/__init__.py
@@ -0,0 +1,97 @@
+""" Standard "encodings" Package
+
+    Standard Python encoding modules are stored in this package
+    directory.
+
+    Codec modules must have names corresponding to standard lower-case
+    encoding names with hyphens mapped to underscores, e.g. 'utf-8' is
+    implemented by the module 'utf_8.py'.
+
+    Each codec module must export the following interface:
+
+    * getregentry() -> (encoder, decoder, stream_reader, stream_writer)
+    The getregentry() API must return callable objects which adhere to
+    the Python Codec Interface Standard.
+
+    In addition, a module may optionally also define the following
+    APIs which are then used by the package's codec search function:
+
+    * getaliases() -> sequence of encoding name strings to use as aliases
+
+    Alias names returned by getaliases() must be standard encoding
+    names as defined above (lower-case, hyphens converted to
+    underscores).
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import codecs,aliases,exceptions
+
+_cache = {}  # maps encoding name -> registry entry tuple, or None if lookup failed
+_unknown = '--unknown--'  # sentinel: distinguishes "never looked up" from a cached None
+
+# Raised when a codec module is found but exports a malformed registry
+# entry.  Inherits from both LookupError (callers catching codec lookup
+# failures) and SystemError (this indicates a broken installation).
+class CodecRegistryError(exceptions.LookupError,
+                         exceptions.SystemError):
+    pass
+
+def search_function(encoding):
+    # Codec search function registered with the codecs module below.
+    # Returns a 4-tuple (encoder, decoder, stream_reader, stream_writer)
+    # for `encoding`, or None if no codec module implements it.
+    
+    # Cache lookup
+    entry = _cache.get(encoding,_unknown)
+    if entry is not _unknown:
+        return entry
+
+    # Import the module: hyphens become underscores, then the aliases
+    # table may redirect to the canonical module name.
+    modname = encoding.replace('-', '_')
+    modname = aliases.aliases.get(modname,modname)
+    try:
+        mod = __import__(modname,globals(),locals(),'*')
+    except ImportError,why:
+        # cache misses
+        # NOTE(review): `why` is bound but never used.
+        _cache[encoding] = None
+        return None
+
+    try:
+        getregentry = mod.getregentry
+    except AttributeError:
+        # Not a codec module
+        _cache[encoding] = None
+        return None
+    
+    # Now ask the module for the registry entry
+    try:
+        entry = tuple(getregentry())
+    except AttributeError:
+        entry = ()
+    # A well-formed entry is exactly 4 callables; anything else means the
+    # codec module is broken, which is an installation error, not a miss.
+    if len(entry) != 4:
+        raise CodecRegistryError,\
+              'module "%s" (%s) failed to register' % \
+              (mod.__name__, mod.__file__)
+    for obj in entry:
+        if not callable(obj):
+            raise CodecRegistryError,\
+                  'incompatible codecs in module "%s" (%s)' % \
+                  (mod.__name__, mod.__file__)
+
+    # Cache the codec registry entry
+    _cache[encoding] = entry
+
+    # Register its aliases (without overwriting previously registered
+    # aliases)
+    try:
+        codecaliases = mod.getaliases()
+    except AttributeError:
+        pass
+    else:
+        for alias in codecaliases:
+            if not aliases.aliases.has_key(alias):
+                aliases.aliases[alias] = modname
+
+    # Return the registry entry
+    return entry
+
+# Register the search_function in the Python codec registry
+codecs.register(search_function)
diff --git a/lib-python/2.2/encodings/aliases.py b/lib-python/2.2/encodings/aliases.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/aliases.py
@@ -0,0 +1,115 @@
+""" Encoding Aliases Support
+
+    This module is used by the encodings package search function to
+    map encodings names to module names.
+
+    Note that the search function converts the encoding names to lower
+    case and replaces hyphens with underscores *before* performing the
+    lookup.
+
+"""
+aliases = {
+
+    # Latin-1
+    'latin': 'latin_1',
+    'latin1': 'latin_1',
+    
+    # UTF-7
+    'utf7': 'utf_7',
+    'u7': 'utf_7',
+    
+    # UTF-8
+    'utf': 'utf_8',
+    'utf8': 'utf_8',
+    'u8': 'utf_8',
+    'utf8 at ucs2': 'utf_8',
+    'utf8 at ucs4': 'utf_8',
+    
+    # UTF-16
+    'utf16': 'utf_16',
+    'u16': 'utf_16',
+    'utf_16be': 'utf_16_be',
+    'utf_16le': 'utf_16_le',
+    'unicodebigunmarked': 'utf_16_be',
+    'unicodelittleunmarked': 'utf_16_le',
+
+    # ASCII
+    'us_ascii': 'ascii',
+    'ansi_x3.4_1968': 'ascii', # used on Linux
+    'ansi_x3_4_1968': 'ascii', # used on BSD?
+    '646': 'ascii',            # used on Solaris
+
+    # EBCDIC
+    'ebcdic_cp_us': 'cp037',
+    'ibm039': 'cp037',
+    'ibm1140': 'cp1140',
+    
+    # ISO
+    '8859': 'latin_1',
+    'iso8859': 'latin_1',
+    'iso8859_1': 'latin_1',
+    'iso_8859_1': 'latin_1',
+    'iso_8859_10': 'iso8859_10',
+    'iso_8859_13': 'iso8859_13',
+    'iso_8859_14': 'iso8859_14',
+    'iso_8859_15': 'iso8859_15',
+    'iso_8859_2': 'iso8859_2',
+    'iso_8859_3': 'iso8859_3',
+    'iso_8859_4': 'iso8859_4',
+    'iso_8859_5': 'iso8859_5',
+    'iso_8859_6': 'iso8859_6',
+    'iso_8859_7': 'iso8859_7',
+    'iso_8859_8': 'iso8859_8',
+    'iso_8859_9': 'iso8859_9',
+
+    # Mac
+    'maclatin2': 'mac_latin2',
+    'maccentraleurope': 'mac_latin2',
+    'maccyrillic': 'mac_cyrillic',
+    'macgreek': 'mac_greek',
+    'maciceland': 'mac_iceland',
+    'macroman': 'mac_roman',
+    'macturkish': 'mac_turkish',
+
+    # Windows
+    'windows_1251': 'cp1251',
+    'windows_1252': 'cp1252',
+    'windows_1254': 'cp1254',
+    'windows_1255': 'cp1255',
+    'windows_1256': 'cp1256',
+    'windows_1257': 'cp1257',
+    'windows_1258': 'cp1258',
+
+    # MBCS
+    'dbcs': 'mbcs',
+
+    # Code pages
+    '437': 'cp437',
+
+    # CJK
+    #
+    # The codecs for these encodings are not distributed with the
+    # Python core, but are included here for reference, since the
+    # locale module relies on having these aliases available.
+    #
+    'jis_7': 'jis_7',
+    'iso_2022_jp': 'jis_7',
+    'ujis': 'euc_jp',
+    'ajec': 'euc_jp',
+    'eucjp': 'euc_jp',
+    'tis260': 'tactis',
+    'sjis': 'shift_jis',
+
+    # Content transfer/compression encodings
+    'rot13': 'rot_13',
+    'base64': 'base64_codec',
+    'base_64': 'base64_codec',
+    'zlib': 'zlib_codec',
+    'zip': 'zlib_codec',
+    'hex': 'hex_codec',
+    'uu': 'uu_codec',
+    'quopri': 'quopri_codec',
+    'quotedprintable': 'quopri_codec',
+    'quoted_printable': 'quopri_codec',
+
+}
diff --git a/lib-python/2.2/encodings/ascii.py b/lib-python/2.2/encodings/ascii.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/ascii.py
@@ -0,0 +1,35 @@
+""" Python 'ascii' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.ascii_encode
+    decode = codecs.ascii_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+class StreamConverter(StreamWriter,StreamReader):
+
+    # Deliberately swapped relative to Codec: a converter stream
+    # translates in the opposite direction, decoding on write and
+    # encoding on read.
+    encode = codecs.ascii_decode
+    decode = codecs.ascii_encode
+
+### encodings module API
+
+def getregentry():
+    # Registry entry consumed by encodings.search_function: a 4-tuple
+    # of (encoder, decoder, StreamReader, StreamWriter).
+
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/base64_codec.py b/lib-python/2.2/encodings/base64_codec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/base64_codec.py
@@ -0,0 +1,62 @@
+""" Python 'base64_codec' Codec - base64 content transfer encoding
+
+    Unlike most of the other codecs which target Unicode, this codec
+    will return Python string objects for both encode and decode.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+"""
+import codecs, base64
+
+### Codec APIs
+
+def base64_encode(input,errors='strict'):
+
+    """ Encodes the object input and returns a tuple (output
+        object, length consumed).
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    # Unlike most codecs in this package, output is a plain (base64)
+    # string, not Unicode; length consumed is always the full input.
+    output = base64.encodestring(input)
+    return (output, len(input))
+
+def base64_decode(input,errors='strict'):
+
+    """ Decodes the object input and returns a tuple (output
+        object, length consumed).
+
+        input must be an object which provides the bf_getreadbuf
+        buffer slot. Python strings, buffer objects and memory
+        mapped files are examples of objects providing this slot.
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    output = base64.decodestring(input)
+    return (output, len(input))
+
+class Codec(codecs.Codec):
+    # Class-based codec interface; delegates to the module-level
+    # helpers above.
+
+    def encode(self, input,errors='strict'):
+        return base64_encode(input,errors)
+    def decode(self, input,errors='strict'):
+        return base64_decode(input,errors)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+    # Registry entry consumed by encodings.search_function.
+
+    return (base64_encode,base64_decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/charmap.py b/lib-python/2.2/encodings/charmap.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/charmap.py
@@ -0,0 +1,51 @@
+""" Generic Python Character Mapping Codec.
+
+    Use this codec directly rather than through the automatic
+    conversion mechanisms supplied by unicode() and .encode().
+    
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.charmap_encode
+    decode = codecs.charmap_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+
+    def __init__(self,stream,errors='strict',mapping=None):
+
+        codecs.StreamWriter.__init__(self,stream,errors)
+        self.mapping = mapping
+
+    def encode(self,input,errors='strict'):
+
+        return Codec.encode(input,errors,self.mapping)
+        
+class StreamReader(Codec,codecs.StreamReader):
+
+    def __init__(self,stream,errors='strict',mapping=None):
+
+        codecs.StreamReader.__init__(self,strict,errors)
+        self.mapping = mapping
+
+    def decode(self,input,errors='strict'):
+
+        return Codec.decode(input,errors,self.mapping)
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/cp037.py b/lib-python/2.2/encodings/cp037.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp037.py
@@ -0,0 +1,280 @@
+""" Python Character Mapping Codec generated from 'CP037.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+    # Thin wrappers around the C charmap codec, bound to this code
+    # page's module-level encoding_map/decoding_map (defined below).
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+    # Bound methods of fresh Codec instances; harmless since Codec
+    # carries no per-instance state.
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0004: 0x009c,	# CONTROL
+	0x0005: 0x0009,	# HORIZONTAL TABULATION
+	0x0006: 0x0086,	# CONTROL
+	0x0007: 0x007f,	# DELETE
+	0x0008: 0x0097,	# CONTROL
+	0x0009: 0x008d,	# CONTROL
+	0x000a: 0x008e,	# CONTROL
+	0x0014: 0x009d,	# CONTROL
+	0x0015: 0x0085,	# CONTROL
+	0x0016: 0x0008,	# BACKSPACE
+	0x0017: 0x0087,	# CONTROL
+	0x001a: 0x0092,	# CONTROL
+	0x001b: 0x008f,	# CONTROL
+	0x0020: 0x0080,	# CONTROL
+	0x0021: 0x0081,	# CONTROL
+	0x0022: 0x0082,	# CONTROL
+	0x0023: 0x0083,	# CONTROL
+	0x0024: 0x0084,	# CONTROL
+	0x0025: 0x000a,	# LINE FEED
+	0x0026: 0x0017,	# END OF TRANSMISSION BLOCK
+	0x0027: 0x001b,	# ESCAPE
+	0x0028: 0x0088,	# CONTROL
+	0x0029: 0x0089,	# CONTROL
+	0x002a: 0x008a,	# CONTROL
+	0x002b: 0x008b,	# CONTROL
+	0x002c: 0x008c,	# CONTROL
+	0x002d: 0x0005,	# ENQUIRY
+	0x002e: 0x0006,	# ACKNOWLEDGE
+	0x002f: 0x0007,	# BELL
+	0x0030: 0x0090,	# CONTROL
+	0x0031: 0x0091,	# CONTROL
+	0x0032: 0x0016,	# SYNCHRONOUS IDLE
+	0x0033: 0x0093,	# CONTROL
+	0x0034: 0x0094,	# CONTROL
+	0x0035: 0x0095,	# CONTROL
+	0x0036: 0x0096,	# CONTROL
+	0x0037: 0x0004,	# END OF TRANSMISSION
+	0x0038: 0x0098,	# CONTROL
+	0x0039: 0x0099,	# CONTROL
+	0x003a: 0x009a,	# CONTROL
+	0x003b: 0x009b,	# CONTROL
+	0x003c: 0x0014,	# DEVICE CONTROL FOUR
+	0x003d: 0x0015,	# NEGATIVE ACKNOWLEDGE
+	0x003e: 0x009e,	# CONTROL
+	0x003f: 0x001a,	# SUBSTITUTE
+	0x0040: 0x0020,	# SPACE
+	0x0041: 0x00a0,	# NO-BREAK SPACE
+	0x0042: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0043: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0044: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0045: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0046: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x0047: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0048: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0049: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x004a: 0x00a2,	# CENT SIGN
+	0x004b: 0x002e,	# FULL STOP
+	0x004c: 0x003c,	# LESS-THAN SIGN
+	0x004d: 0x0028,	# LEFT PARENTHESIS
+	0x004e: 0x002b,	# PLUS SIGN
+	0x004f: 0x007c,	# VERTICAL LINE
+	0x0050: 0x0026,	# AMPERSAND
+	0x0051: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0052: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0053: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0054: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0055: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0056: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0057: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0058: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0059: 0x00df,	# LATIN SMALL LETTER SHARP S (GERMAN)
+	0x005a: 0x0021,	# EXCLAMATION MARK
+	0x005b: 0x0024,	# DOLLAR SIGN
+	0x005c: 0x002a,	# ASTERISK
+	0x005d: 0x0029,	# RIGHT PARENTHESIS
+	0x005e: 0x003b,	# SEMICOLON
+	0x005f: 0x00ac,	# NOT SIGN
+	0x0060: 0x002d,	# HYPHEN-MINUS
+	0x0061: 0x002f,	# SOLIDUS
+	0x0062: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x0063: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0064: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x0065: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x0066: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x0067: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0068: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0069: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x006a: 0x00a6,	# BROKEN BAR
+	0x006b: 0x002c,	# COMMA
+	0x006c: 0x0025,	# PERCENT SIGN
+	0x006d: 0x005f,	# LOW LINE
+	0x006e: 0x003e,	# GREATER-THAN SIGN
+	0x006f: 0x003f,	# QUESTION MARK
+	0x0070: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x0071: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0072: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x0073: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x0074: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x0075: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x0076: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x0077: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x0078: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x0079: 0x0060,	# GRAVE ACCENT
+	0x007a: 0x003a,	# COLON
+	0x007b: 0x0023,	# NUMBER SIGN
+	0x007c: 0x0040,	# COMMERCIAL AT
+	0x007d: 0x0027,	# APOSTROPHE
+	0x007e: 0x003d,	# EQUALS SIGN
+	0x007f: 0x0022,	# QUOTATION MARK
+	0x0080: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x0081: 0x0061,	# LATIN SMALL LETTER A
+	0x0082: 0x0062,	# LATIN SMALL LETTER B
+	0x0083: 0x0063,	# LATIN SMALL LETTER C
+	0x0084: 0x0064,	# LATIN SMALL LETTER D
+	0x0085: 0x0065,	# LATIN SMALL LETTER E
+	0x0086: 0x0066,	# LATIN SMALL LETTER F
+	0x0087: 0x0067,	# LATIN SMALL LETTER G
+	0x0088: 0x0068,	# LATIN SMALL LETTER H
+	0x0089: 0x0069,	# LATIN SMALL LETTER I
+	0x008a: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008b: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008c: 0x00f0,	# LATIN SMALL LETTER ETH (ICELANDIC)
+	0x008d: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x008e: 0x00fe,	# LATIN SMALL LETTER THORN (ICELANDIC)
+	0x008f: 0x00b1,	# PLUS-MINUS SIGN
+	0x0090: 0x00b0,	# DEGREE SIGN
+	0x0091: 0x006a,	# LATIN SMALL LETTER J
+	0x0092: 0x006b,	# LATIN SMALL LETTER K
+	0x0093: 0x006c,	# LATIN SMALL LETTER L
+	0x0094: 0x006d,	# LATIN SMALL LETTER M
+	0x0095: 0x006e,	# LATIN SMALL LETTER N
+	0x0096: 0x006f,	# LATIN SMALL LETTER O
+	0x0097: 0x0070,	# LATIN SMALL LETTER P
+	0x0098: 0x0071,	# LATIN SMALL LETTER Q
+	0x0099: 0x0072,	# LATIN SMALL LETTER R
+	0x009a: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x009b: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x009c: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x009d: 0x00b8,	# CEDILLA
+	0x009e: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x009f: 0x00a4,	# CURRENCY SIGN
+	0x00a0: 0x00b5,	# MICRO SIGN
+	0x00a1: 0x007e,	# TILDE
+	0x00a2: 0x0073,	# LATIN SMALL LETTER S
+	0x00a3: 0x0074,	# LATIN SMALL LETTER T
+	0x00a4: 0x0075,	# LATIN SMALL LETTER U
+	0x00a5: 0x0076,	# LATIN SMALL LETTER V
+	0x00a6: 0x0077,	# LATIN SMALL LETTER W
+	0x00a7: 0x0078,	# LATIN SMALL LETTER X
+	0x00a8: 0x0079,	# LATIN SMALL LETTER Y
+	0x00a9: 0x007a,	# LATIN SMALL LETTER Z
+	0x00aa: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ab: 0x00bf,	# INVERTED QUESTION MARK
+	0x00ac: 0x00d0,	# LATIN CAPITAL LETTER ETH (ICELANDIC)
+	0x00ad: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00ae: 0x00de,	# LATIN CAPITAL LETTER THORN (ICELANDIC)
+	0x00af: 0x00ae,	# REGISTERED SIGN
+	0x00b0: 0x005e,	# CIRCUMFLEX ACCENT
+	0x00b1: 0x00a3,	# POUND SIGN
+	0x00b2: 0x00a5,	# YEN SIGN
+	0x00b3: 0x00b7,	# MIDDLE DOT
+	0x00b4: 0x00a9,	# COPYRIGHT SIGN
+	0x00b5: 0x00a7,	# SECTION SIGN
+	0x00b7: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00b8: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00b9: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00ba: 0x005b,	# LEFT SQUARE BRACKET
+	0x00bb: 0x005d,	# RIGHT SQUARE BRACKET
+	0x00bc: 0x00af,	# MACRON
+	0x00bd: 0x00a8,	# DIAERESIS
+	0x00be: 0x00b4,	# ACUTE ACCENT
+	0x00bf: 0x00d7,	# MULTIPLICATION SIGN
+	0x00c0: 0x007b,	# LEFT CURLY BRACKET
+	0x00c1: 0x0041,	# LATIN CAPITAL LETTER A
+	0x00c2: 0x0042,	# LATIN CAPITAL LETTER B
+	0x00c3: 0x0043,	# LATIN CAPITAL LETTER C
+	0x00c4: 0x0044,	# LATIN CAPITAL LETTER D
+	0x00c5: 0x0045,	# LATIN CAPITAL LETTER E
+	0x00c6: 0x0046,	# LATIN CAPITAL LETTER F
+	0x00c7: 0x0047,	# LATIN CAPITAL LETTER G
+	0x00c8: 0x0048,	# LATIN CAPITAL LETTER H
+	0x00c9: 0x0049,	# LATIN CAPITAL LETTER I
+	0x00ca: 0x00ad,	# SOFT HYPHEN
+	0x00cb: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x00cc: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x00cd: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x00ce: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00cf: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00d0: 0x007d,	# RIGHT CURLY BRACKET
+	0x00d1: 0x004a,	# LATIN CAPITAL LETTER J
+	0x00d2: 0x004b,	# LATIN CAPITAL LETTER K
+	0x00d3: 0x004c,	# LATIN CAPITAL LETTER L
+	0x00d4: 0x004d,	# LATIN CAPITAL LETTER M
+	0x00d5: 0x004e,	# LATIN CAPITAL LETTER N
+	0x00d6: 0x004f,	# LATIN CAPITAL LETTER O
+	0x00d7: 0x0050,	# LATIN CAPITAL LETTER P
+	0x00d8: 0x0051,	# LATIN CAPITAL LETTER Q
+	0x00d9: 0x0052,	# LATIN CAPITAL LETTER R
+	0x00da: 0x00b9,	# SUPERSCRIPT ONE
+	0x00db: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x00dc: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00dd: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x00de: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00df: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00e0: 0x005c,	# REVERSE SOLIDUS
+	0x00e1: 0x00f7,	# DIVISION SIGN
+	0x00e2: 0x0053,	# LATIN CAPITAL LETTER S
+	0x00e3: 0x0054,	# LATIN CAPITAL LETTER T
+	0x00e4: 0x0055,	# LATIN CAPITAL LETTER U
+	0x00e5: 0x0056,	# LATIN CAPITAL LETTER V
+	0x00e6: 0x0057,	# LATIN CAPITAL LETTER W
+	0x00e7: 0x0058,	# LATIN CAPITAL LETTER X
+	0x00e8: 0x0059,	# LATIN CAPITAL LETTER Y
+	0x00e9: 0x005a,	# LATIN CAPITAL LETTER Z
+	0x00ea: 0x00b2,	# SUPERSCRIPT TWO
+	0x00eb: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00ec: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x00ed: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00f0: 0x0030,	# DIGIT ZERO
+	0x00f1: 0x0031,	# DIGIT ONE
+	0x00f2: 0x0032,	# DIGIT TWO
+	0x00f3: 0x0033,	# DIGIT THREE
+	0x00f4: 0x0034,	# DIGIT FOUR
+	0x00f5: 0x0035,	# DIGIT FIVE
+	0x00f6: 0x0036,	# DIGIT SIX
+	0x00f7: 0x0037,	# DIGIT SEVEN
+	0x00f8: 0x0038,	# DIGIT EIGHT
+	0x00f9: 0x0039,	# DIGIT NINE
+	0x00fa: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fb: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00fc: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x00fd: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00fe: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ff: 0x009f,	# CONTROL
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1006.py b/lib-python/2.2/encodings/cp1006.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1006.py
@@ -0,0 +1,138 @@
+""" Python Character Mapping Codec generated from 'CP1006.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+    # Standard gencodec.py boilerplate: charmap codec driven by the
+    # encoding_map/decoding_map tables defined later in this module.
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+    # Registry entry consumed by encodings.search_function.
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x06f0,	# 	EXTENDED ARABIC-INDIC DIGIT ZERO
+	0x00a2: 0x06f1,	# 	EXTENDED ARABIC-INDIC DIGIT ONE
+	0x00a3: 0x06f2,	# 	EXTENDED ARABIC-INDIC DIGIT TWO
+	0x00a4: 0x06f3,	# 	EXTENDED ARABIC-INDIC DIGIT THREE
+	0x00a5: 0x06f4,	# 	EXTENDED ARABIC-INDIC DIGIT FOUR
+	0x00a6: 0x06f5,	# 	EXTENDED ARABIC-INDIC DIGIT FIVE
+	0x00a7: 0x06f6,	# 	EXTENDED ARABIC-INDIC DIGIT SIX
+	0x00a8: 0x06f7,	# 	EXTENDED ARABIC-INDIC DIGIT SEVEN
+	0x00a9: 0x06f8,	# 	EXTENDED ARABIC-INDIC DIGIT EIGHT
+	0x00aa: 0x06f9,	# 	EXTENDED ARABIC-INDIC DIGIT NINE
+	0x00ab: 0x060c,	# 	ARABIC COMMA
+	0x00ac: 0x061b,	# 	ARABIC SEMICOLON
+	0x00ae: 0x061f,	# 	ARABIC QUESTION MARK
+	0x00af: 0xfe81,	# 	ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+	0x00b0: 0xfe8d,	# 	ARABIC LETTER ALEF ISOLATED FORM
+	0x00b1: 0xfe8e,	# 	ARABIC LETTER ALEF FINAL FORM
+	0x00b2: 0xfe8e,	# 	ARABIC LETTER ALEF FINAL FORM
+	0x00b3: 0xfe8f,	# 	ARABIC LETTER BEH ISOLATED FORM
+	0x00b4: 0xfe91,	# 	ARABIC LETTER BEH INITIAL FORM
+	0x00b5: 0xfb56,	# 	ARABIC LETTER PEH ISOLATED FORM
+	0x00b6: 0xfb58,	# 	ARABIC LETTER PEH INITIAL FORM
+	0x00b7: 0xfe93,	# 	ARABIC LETTER TEH MARBUTA ISOLATED FORM
+	0x00b8: 0xfe95,	# 	ARABIC LETTER TEH ISOLATED FORM
+	0x00b9: 0xfe97,	# 	ARABIC LETTER TEH INITIAL FORM
+	0x00ba: 0xfb66,	# 	ARABIC LETTER TTEH ISOLATED FORM
+	0x00bb: 0xfb68,	# 	ARABIC LETTER TTEH INITIAL FORM
+	0x00bc: 0xfe99,	# 	ARABIC LETTER THEH ISOLATED FORM
+	0x00bd: 0xfe9b,	# 	ARABIC LETTER THEH INITIAL FORM
+	0x00be: 0xfe9d,	# 	ARABIC LETTER JEEM ISOLATED FORM
+	0x00bf: 0xfe9f,	# 	ARABIC LETTER JEEM INITIAL FORM
+	0x00c0: 0xfb7a,	# 	ARABIC LETTER TCHEH ISOLATED FORM
+	0x00c1: 0xfb7c,	# 	ARABIC LETTER TCHEH INITIAL FORM
+	0x00c2: 0xfea1,	# 	ARABIC LETTER HAH ISOLATED FORM
+	0x00c3: 0xfea3,	# 	ARABIC LETTER HAH INITIAL FORM
+	0x00c4: 0xfea5,	# 	ARABIC LETTER KHAH ISOLATED FORM
+	0x00c5: 0xfea7,	# 	ARABIC LETTER KHAH INITIAL FORM
+	0x00c6: 0xfea9,	# 	ARABIC LETTER DAL ISOLATED FORM
+	0x00c7: 0xfb84,	# 	ARABIC LETTER DAHAL ISOLATED FORMN
+	0x00c8: 0xfeab,	# 	ARABIC LETTER THAL ISOLATED FORM
+	0x00c9: 0xfead,	# 	ARABIC LETTER REH ISOLATED FORM
+	0x00ca: 0xfb8c,	# 	ARABIC LETTER RREH ISOLATED FORM
+	0x00cb: 0xfeaf,	# 	ARABIC LETTER ZAIN ISOLATED FORM
+	0x00cc: 0xfb8a,	# 	ARABIC LETTER JEH ISOLATED FORM
+	0x00cd: 0xfeb1,	# 	ARABIC LETTER SEEN ISOLATED FORM
+	0x00ce: 0xfeb3,	# 	ARABIC LETTER SEEN INITIAL FORM
+	0x00cf: 0xfeb5,	# 	ARABIC LETTER SHEEN ISOLATED FORM
+	0x00d0: 0xfeb7,	# 	ARABIC LETTER SHEEN INITIAL FORM
+	0x00d1: 0xfeb9,	# 	ARABIC LETTER SAD ISOLATED FORM
+	0x00d2: 0xfebb,	# 	ARABIC LETTER SAD INITIAL FORM
+	0x00d3: 0xfebd,	# 	ARABIC LETTER DAD ISOLATED FORM
+	0x00d4: 0xfebf,	# 	ARABIC LETTER DAD INITIAL FORM
+	0x00d5: 0xfec1,	# 	ARABIC LETTER TAH ISOLATED FORM
+	0x00d6: 0xfec5,	# 	ARABIC LETTER ZAH ISOLATED FORM
+	0x00d7: 0xfec9,	# 	ARABIC LETTER AIN ISOLATED FORM
+	0x00d8: 0xfeca,	# 	ARABIC LETTER AIN FINAL FORM
+	0x00d9: 0xfecb,	# 	ARABIC LETTER AIN INITIAL FORM
+	0x00da: 0xfecc,	# 	ARABIC LETTER AIN MEDIAL FORM
+	0x00db: 0xfecd,	# 	ARABIC LETTER GHAIN ISOLATED FORM
+	0x00dc: 0xfece,	# 	ARABIC LETTER GHAIN FINAL FORM
+	0x00dd: 0xfecf,	# 	ARABIC LETTER GHAIN INITIAL FORM
+	0x00de: 0xfed0,	# 	ARABIC LETTER GHAIN MEDIAL FORM
+	0x00df: 0xfed1,	# 	ARABIC LETTER FEH ISOLATED FORM
+	0x00e0: 0xfed3,	# 	ARABIC LETTER FEH INITIAL FORM
+	0x00e1: 0xfed5,	# 	ARABIC LETTER QAF ISOLATED FORM
+	0x00e2: 0xfed7,	# 	ARABIC LETTER QAF INITIAL FORM
+	0x00e3: 0xfed9,	# 	ARABIC LETTER KAF ISOLATED FORM
+	0x00e4: 0xfedb,	# 	ARABIC LETTER KAF INITIAL FORM
+	0x00e5: 0xfb92,	# 	ARABIC LETTER GAF ISOLATED FORM
+	0x00e6: 0xfb94,	# 	ARABIC LETTER GAF INITIAL FORM
+	0x00e7: 0xfedd,	# 	ARABIC LETTER LAM ISOLATED FORM
+	0x00e8: 0xfedf,	# 	ARABIC LETTER LAM INITIAL FORM
+	0x00e9: 0xfee0,	# 	ARABIC LETTER LAM MEDIAL FORM
+	0x00ea: 0xfee1,	# 	ARABIC LETTER MEEM ISOLATED FORM
+	0x00eb: 0xfee3,	# 	ARABIC LETTER MEEM INITIAL FORM
+	0x00ec: 0xfb9e,	# 	ARABIC LETTER NOON GHUNNA ISOLATED FORM
+	0x00ed: 0xfee5,	# 	ARABIC LETTER NOON ISOLATED FORM
+	0x00ee: 0xfee7,	# 	ARABIC LETTER NOON INITIAL FORM
+	0x00ef: 0xfe85,	# 	ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+	0x00f0: 0xfeed,	# 	ARABIC LETTER WAW ISOLATED FORM
+	0x00f1: 0xfba6,	# 	ARABIC LETTER HEH GOAL ISOLATED FORM
+	0x00f2: 0xfba8,	# 	ARABIC LETTER HEH GOAL INITIAL FORM
+	0x00f3: 0xfba9,	# 	ARABIC LETTER HEH GOAL MEDIAL FORM
+	0x00f4: 0xfbaa,	# 	ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
+	0x00f5: 0xfe80,	# 	ARABIC LETTER HAMZA ISOLATED FORM
+	0x00f6: 0xfe89,	# 	ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
+	0x00f7: 0xfe8a,	# 	ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
+	0x00f8: 0xfe8b,	# 	ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+	0x00f9: 0xfef1,	# 	ARABIC LETTER YEH ISOLATED FORM
+	0x00fa: 0xfef2,	# 	ARABIC LETTER YEH FINAL FORM
+	0x00fb: 0xfef3,	# 	ARABIC LETTER YEH INITIAL FORM
+	0x00fc: 0xfbb0,	# 	ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
+	0x00fd: 0xfbae,	# 	ARABIC LETTER YEH BARREE ISOLATED FORM
+	0x00fe: 0xfe7c,	# 	ARABIC SHADDA ISOLATED FORM
+	0x00ff: 0xfe7d,	# 	ARABIC SHADDA MEDIAL FORM
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1026.py b/lib-python/2.2/encodings/cp1026.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1026.py
@@ -0,0 +1,280 @@
+""" Python Character Mapping Codec generated from 'CP1026.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+    # Standard gencodec.py boilerplate: charmap codec driven by the
+    # encoding_map/decoding_map tables defined later in this module.
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+    # Registry entry consumed by encodings.search_function.
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0004: 0x009c,	# CONTROL
+	0x0005: 0x0009,	# HORIZONTAL TABULATION
+	0x0006: 0x0086,	# CONTROL
+	0x0007: 0x007f,	# DELETE
+	0x0008: 0x0097,	# CONTROL
+	0x0009: 0x008d,	# CONTROL
+	0x000a: 0x008e,	# CONTROL
+	0x0014: 0x009d,	# CONTROL
+	0x0015: 0x0085,	# CONTROL
+	0x0016: 0x0008,	# BACKSPACE
+	0x0017: 0x0087,	# CONTROL
+	0x001a: 0x0092,	# CONTROL
+	0x001b: 0x008f,	# CONTROL
+	0x0020: 0x0080,	# CONTROL
+	0x0021: 0x0081,	# CONTROL
+	0x0022: 0x0082,	# CONTROL
+	0x0023: 0x0083,	# CONTROL
+	0x0024: 0x0084,	# CONTROL
+	0x0025: 0x000a,	# LINE FEED
+	0x0026: 0x0017,	# END OF TRANSMISSION BLOCK
+	0x0027: 0x001b,	# ESCAPE
+	0x0028: 0x0088,	# CONTROL
+	0x0029: 0x0089,	# CONTROL
+	0x002a: 0x008a,	# CONTROL
+	0x002b: 0x008b,	# CONTROL
+	0x002c: 0x008c,	# CONTROL
+	0x002d: 0x0005,	# ENQUIRY
+	0x002e: 0x0006,	# ACKNOWLEDGE
+	0x002f: 0x0007,	# BELL
+	0x0030: 0x0090,	# CONTROL
+	0x0031: 0x0091,	# CONTROL
+	0x0032: 0x0016,	# SYNCHRONOUS IDLE
+	0x0033: 0x0093,	# CONTROL
+	0x0034: 0x0094,	# CONTROL
+	0x0035: 0x0095,	# CONTROL
+	0x0036: 0x0096,	# CONTROL
+	0x0037: 0x0004,	# END OF TRANSMISSION
+	0x0038: 0x0098,	# CONTROL
+	0x0039: 0x0099,	# CONTROL
+	0x003a: 0x009a,	# CONTROL
+	0x003b: 0x009b,	# CONTROL
+	0x003c: 0x0014,	# DEVICE CONTROL FOUR
+	0x003d: 0x0015,	# NEGATIVE ACKNOWLEDGE
+	0x003e: 0x009e,	# CONTROL
+	0x003f: 0x001a,	# SUBSTITUTE
+	0x0040: 0x0020,	# SPACE
+	0x0041: 0x00a0,	# NO-BREAK SPACE
+	0x0042: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0043: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0044: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0045: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0046: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x0047: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0048: 0x007b,	# LEFT CURLY BRACKET
+	0x0049: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x004a: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x004b: 0x002e,	# FULL STOP
+	0x004c: 0x003c,	# LESS-THAN SIGN
+	0x004d: 0x0028,	# LEFT PARENTHESIS
+	0x004e: 0x002b,	# PLUS SIGN
+	0x004f: 0x0021,	# EXCLAMATION MARK
+	0x0050: 0x0026,	# AMPERSAND
+	0x0051: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0052: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0053: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0054: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0055: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0056: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0057: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0058: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0059: 0x00df,	# LATIN SMALL LETTER SHARP S (GERMAN)
+	0x005a: 0x011e,	# LATIN CAPITAL LETTER G WITH BREVE
+	0x005b: 0x0130,	# LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x005c: 0x002a,	# ASTERISK
+	0x005d: 0x0029,	# RIGHT PARENTHESIS
+	0x005e: 0x003b,	# SEMICOLON
+	0x005f: 0x005e,	# CIRCUMFLEX ACCENT
+	0x0060: 0x002d,	# HYPHEN-MINUS
+	0x0061: 0x002f,	# SOLIDUS
+	0x0062: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x0063: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0064: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x0065: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x0066: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x0067: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0068: 0x005b,	# LEFT SQUARE BRACKET
+	0x0069: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x006a: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+	0x006b: 0x002c,	# COMMA
+	0x006c: 0x0025,	# PERCENT SIGN
+	0x006d: 0x005f,	# LOW LINE
+	0x006e: 0x003e,	# GREATER-THAN SIGN
+	0x006f: 0x003f,	# QUESTION MARK
+	0x0070: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x0071: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0072: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x0073: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x0074: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x0075: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x0076: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x0077: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x0078: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x0079: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x007a: 0x003a,	# COLON
+	0x007b: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x007c: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x007d: 0x0027,	# APOSTROPHE
+	0x007e: 0x003d,	# EQUALS SIGN
+	0x007f: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0080: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x0081: 0x0061,	# LATIN SMALL LETTER A
+	0x0082: 0x0062,	# LATIN SMALL LETTER B
+	0x0083: 0x0063,	# LATIN SMALL LETTER C
+	0x0084: 0x0064,	# LATIN SMALL LETTER D
+	0x0085: 0x0065,	# LATIN SMALL LETTER E
+	0x0086: 0x0066,	# LATIN SMALL LETTER F
+	0x0087: 0x0067,	# LATIN SMALL LETTER G
+	0x0088: 0x0068,	# LATIN SMALL LETTER H
+	0x0089: 0x0069,	# LATIN SMALL LETTER I
+	0x008a: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008b: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008c: 0x007d,	# RIGHT CURLY BRACKET
+	0x008d: 0x0060,	# GRAVE ACCENT
+	0x008e: 0x00a6,	# BROKEN BAR
+	0x008f: 0x00b1,	# PLUS-MINUS SIGN
+	0x0090: 0x00b0,	# DEGREE SIGN
+	0x0091: 0x006a,	# LATIN SMALL LETTER J
+	0x0092: 0x006b,	# LATIN SMALL LETTER K
+	0x0093: 0x006c,	# LATIN SMALL LETTER L
+	0x0094: 0x006d,	# LATIN SMALL LETTER M
+	0x0095: 0x006e,	# LATIN SMALL LETTER N
+	0x0096: 0x006f,	# LATIN SMALL LETTER O
+	0x0097: 0x0070,	# LATIN SMALL LETTER P
+	0x0098: 0x0071,	# LATIN SMALL LETTER Q
+	0x0099: 0x0072,	# LATIN SMALL LETTER R
+	0x009a: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x009b: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x009c: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x009d: 0x00b8,	# CEDILLA
+	0x009e: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x009f: 0x00a4,	# CURRENCY SIGN
+	0x00a0: 0x00b5,	# MICRO SIGN
+	0x00a1: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x00a2: 0x0073,	# LATIN SMALL LETTER S
+	0x00a3: 0x0074,	# LATIN SMALL LETTER T
+	0x00a4: 0x0075,	# LATIN SMALL LETTER U
+	0x00a5: 0x0076,	# LATIN SMALL LETTER V
+	0x00a6: 0x0077,	# LATIN SMALL LETTER W
+	0x00a7: 0x0078,	# LATIN SMALL LETTER X
+	0x00a8: 0x0079,	# LATIN SMALL LETTER Y
+	0x00a9: 0x007a,	# LATIN SMALL LETTER Z
+	0x00aa: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ab: 0x00bf,	# INVERTED QUESTION MARK
+	0x00ac: 0x005d,	# RIGHT SQUARE BRACKET
+	0x00ad: 0x0024,	# DOLLAR SIGN
+	0x00ae: 0x0040,	# COMMERCIAL AT
+	0x00af: 0x00ae,	# REGISTERED SIGN
+	0x00b0: 0x00a2,	# CENT SIGN
+	0x00b1: 0x00a3,	# POUND SIGN
+	0x00b2: 0x00a5,	# YEN SIGN
+	0x00b3: 0x00b7,	# MIDDLE DOT
+	0x00b4: 0x00a9,	# COPYRIGHT SIGN
+	0x00b5: 0x00a7,	# SECTION SIGN
+	0x00b7: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00b8: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00b9: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00ba: 0x00ac,	# NOT SIGN
+	0x00bb: 0x007c,	# VERTICAL LINE
+	0x00bc: 0x00af,	# MACRON
+	0x00bd: 0x00a8,	# DIAERESIS
+	0x00be: 0x00b4,	# ACUTE ACCENT
+	0x00bf: 0x00d7,	# MULTIPLICATION SIGN
+	0x00c0: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x00c1: 0x0041,	# LATIN CAPITAL LETTER A
+	0x00c2: 0x0042,	# LATIN CAPITAL LETTER B
+	0x00c3: 0x0043,	# LATIN CAPITAL LETTER C
+	0x00c4: 0x0044,	# LATIN CAPITAL LETTER D
+	0x00c5: 0x0045,	# LATIN CAPITAL LETTER E
+	0x00c6: 0x0046,	# LATIN CAPITAL LETTER F
+	0x00c7: 0x0047,	# LATIN CAPITAL LETTER G
+	0x00c8: 0x0048,	# LATIN CAPITAL LETTER H
+	0x00c9: 0x0049,	# LATIN CAPITAL LETTER I
+	0x00ca: 0x00ad,	# SOFT HYPHEN
+	0x00cb: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x00cc: 0x007e,	# TILDE
+	0x00cd: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x00ce: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00cf: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00d0: 0x011f,	# LATIN SMALL LETTER G WITH BREVE
+	0x00d1: 0x004a,	# LATIN CAPITAL LETTER J
+	0x00d2: 0x004b,	# LATIN CAPITAL LETTER K
+	0x00d3: 0x004c,	# LATIN CAPITAL LETTER L
+	0x00d4: 0x004d,	# LATIN CAPITAL LETTER M
+	0x00d5: 0x004e,	# LATIN CAPITAL LETTER N
+	0x00d6: 0x004f,	# LATIN CAPITAL LETTER O
+	0x00d7: 0x0050,	# LATIN CAPITAL LETTER P
+	0x00d8: 0x0051,	# LATIN CAPITAL LETTER Q
+	0x00d9: 0x0052,	# LATIN CAPITAL LETTER R
+	0x00da: 0x00b9,	# SUPERSCRIPT ONE
+	0x00db: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x00dc: 0x005c,	# REVERSE SOLIDUS
+	0x00dd: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x00de: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00df: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00e0: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00e1: 0x00f7,	# DIVISION SIGN
+	0x00e2: 0x0053,	# LATIN CAPITAL LETTER S
+	0x00e3: 0x0054,	# LATIN CAPITAL LETTER T
+	0x00e4: 0x0055,	# LATIN CAPITAL LETTER U
+	0x00e5: 0x0056,	# LATIN CAPITAL LETTER V
+	0x00e6: 0x0057,	# LATIN CAPITAL LETTER W
+	0x00e7: 0x0058,	# LATIN CAPITAL LETTER X
+	0x00e8: 0x0059,	# LATIN CAPITAL LETTER Y
+	0x00e9: 0x005a,	# LATIN CAPITAL LETTER Z
+	0x00ea: 0x00b2,	# SUPERSCRIPT TWO
+	0x00eb: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00ec: 0x0023,	# NUMBER SIGN
+	0x00ed: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00f0: 0x0030,	# DIGIT ZERO
+	0x00f1: 0x0031,	# DIGIT ONE
+	0x00f2: 0x0032,	# DIGIT TWO
+	0x00f3: 0x0033,	# DIGIT THREE
+	0x00f4: 0x0034,	# DIGIT FOUR
+	0x00f5: 0x0035,	# DIGIT FIVE
+	0x00f6: 0x0036,	# DIGIT SIX
+	0x00f7: 0x0037,	# DIGIT SEVEN
+	0x00f8: 0x0038,	# DIGIT EIGHT
+	0x00f9: 0x0039,	# DIGIT NINE
+	0x00fa: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fb: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00fc: 0x0022,	# QUOTATION MARK
+	0x00fd: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00fe: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ff: 0x009f,	# CONTROL
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1140.py b/lib-python/2.2/encodings/cp1140.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1140.py
@@ -0,0 +1,45 @@
+""" Python Character Mapping Codec for cp1140
+
+Written by Brian Quinlan(brian at sweetapp.com). NO WARRANTY.
+"""
+
+import codecs
+import copy
+import cp037
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = copy.copy(cp037.decoding_map)
+
+decoding_map.update({
+ 	0x009f: 0x20ac # EURO SIGN
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
+
diff --git a/lib-python/2.2/encodings/cp1250.py b/lib-python/2.2/encodings/cp1250.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1250.py
@@ -0,0 +1,123 @@
+""" Python Character Mapping Codec generated from 'CP1250.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: None,	# UNDEFINED
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: None,	# UNDEFINED
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x015a,	# LATIN CAPITAL LETTER S WITH ACUTE
+	0x008d: 0x0164,	# LATIN CAPITAL LETTER T WITH CARON
+	0x008e: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x008f: 0x0179,	# LATIN CAPITAL LETTER Z WITH ACUTE
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: None,	# UNDEFINED
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x015b,	# LATIN SMALL LETTER S WITH ACUTE
+	0x009d: 0x0165,	# LATIN SMALL LETTER T WITH CARON
+	0x009e: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x009f: 0x017a,	# LATIN SMALL LETTER Z WITH ACUTE
+	0x00a1: 0x02c7,	# CARON
+	0x00a2: 0x02d8,	# BREVE
+	0x00a3: 0x0141,	# LATIN CAPITAL LETTER L WITH STROKE
+	0x00a5: 0x0104,	# LATIN CAPITAL LETTER A WITH OGONEK
+	0x00aa: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00af: 0x017b,	# LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00b2: 0x02db,	# OGONEK
+	0x00b3: 0x0142,	# LATIN SMALL LETTER L WITH STROKE
+	0x00b9: 0x0105,	# LATIN SMALL LETTER A WITH OGONEK
+	0x00ba: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+	0x00bc: 0x013d,	# LATIN CAPITAL LETTER L WITH CARON
+	0x00bd: 0x02dd,	# DOUBLE ACUTE ACCENT
+	0x00be: 0x013e,	# LATIN SMALL LETTER L WITH CARON
+	0x00bf: 0x017c,	# LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00c0: 0x0154,	# LATIN CAPITAL LETTER R WITH ACUTE
+	0x00c3: 0x0102,	# LATIN CAPITAL LETTER A WITH BREVE
+	0x00c5: 0x0139,	# LATIN CAPITAL LETTER L WITH ACUTE
+	0x00c6: 0x0106,	# LATIN CAPITAL LETTER C WITH ACUTE
+	0x00c8: 0x010c,	# LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0118,	# LATIN CAPITAL LETTER E WITH OGONEK
+	0x00cc: 0x011a,	# LATIN CAPITAL LETTER E WITH CARON
+	0x00cf: 0x010e,	# LATIN CAPITAL LETTER D WITH CARON
+	0x00d0: 0x0110,	# LATIN CAPITAL LETTER D WITH STROKE
+	0x00d1: 0x0143,	# LATIN CAPITAL LETTER N WITH ACUTE
+	0x00d2: 0x0147,	# LATIN CAPITAL LETTER N WITH CARON
+	0x00d5: 0x0150,	# LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+	0x00d8: 0x0158,	# LATIN CAPITAL LETTER R WITH CARON
+	0x00d9: 0x016e,	# LATIN CAPITAL LETTER U WITH RING ABOVE
+	0x00db: 0x0170,	# LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+	0x00de: 0x0162,	# LATIN CAPITAL LETTER T WITH CEDILLA
+	0x00e0: 0x0155,	# LATIN SMALL LETTER R WITH ACUTE
+	0x00e3: 0x0103,	# LATIN SMALL LETTER A WITH BREVE
+	0x00e5: 0x013a,	# LATIN SMALL LETTER L WITH ACUTE
+	0x00e6: 0x0107,	# LATIN SMALL LETTER C WITH ACUTE
+	0x00e8: 0x010d,	# LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x0119,	# LATIN SMALL LETTER E WITH OGONEK
+	0x00ec: 0x011b,	# LATIN SMALL LETTER E WITH CARON
+	0x00ef: 0x010f,	# LATIN SMALL LETTER D WITH CARON
+	0x00f0: 0x0111,	# LATIN SMALL LETTER D WITH STROKE
+	0x00f1: 0x0144,	# LATIN SMALL LETTER N WITH ACUTE
+	0x00f2: 0x0148,	# LATIN SMALL LETTER N WITH CARON
+	0x00f5: 0x0151,	# LATIN SMALL LETTER O WITH DOUBLE ACUTE
+	0x00f8: 0x0159,	# LATIN SMALL LETTER R WITH CARON
+	0x00f9: 0x016f,	# LATIN SMALL LETTER U WITH RING ABOVE
+	0x00fb: 0x0171,	# LATIN SMALL LETTER U WITH DOUBLE ACUTE
+	0x00fe: 0x0163,	# LATIN SMALL LETTER T WITH CEDILLA
+	0x00ff: 0x02d9,	# DOT ABOVE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1251.py b/lib-python/2.2/encodings/cp1251.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1251.py
@@ -0,0 +1,157 @@
+""" Python Character Mapping Codec generated from 'CP1251.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0402,	# CYRILLIC CAPITAL LETTER DJE
+	0x0081: 0x0403,	# CYRILLIC CAPITAL LETTER GJE
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0453,	# CYRILLIC SMALL LETTER GJE
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x20ac,	# EURO SIGN
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: 0x0409,	# CYRILLIC CAPITAL LETTER LJE
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x040a,	# CYRILLIC CAPITAL LETTER NJE
+	0x008d: 0x040c,	# CYRILLIC CAPITAL LETTER KJE
+	0x008e: 0x040b,	# CYRILLIC CAPITAL LETTER TSHE
+	0x008f: 0x040f,	# CYRILLIC CAPITAL LETTER DZHE
+	0x0090: 0x0452,	# CYRILLIC SMALL LETTER DJE
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: None,	# UNDEFINED
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: 0x0459,	# CYRILLIC SMALL LETTER LJE
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x045a,	# CYRILLIC SMALL LETTER NJE
+	0x009d: 0x045c,	# CYRILLIC SMALL LETTER KJE
+	0x009e: 0x045b,	# CYRILLIC SMALL LETTER TSHE
+	0x009f: 0x045f,	# CYRILLIC SMALL LETTER DZHE
+	0x00a1: 0x040e,	# CYRILLIC CAPITAL LETTER SHORT U
+	0x00a2: 0x045e,	# CYRILLIC SMALL LETTER SHORT U
+	0x00a3: 0x0408,	# CYRILLIC CAPITAL LETTER JE
+	0x00a5: 0x0490,	# CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+	0x00a8: 0x0401,	# CYRILLIC CAPITAL LETTER IO
+	0x00aa: 0x0404,	# CYRILLIC CAPITAL LETTER UKRAINIAN IE
+	0x00af: 0x0407,	# CYRILLIC CAPITAL LETTER YI
+	0x00b2: 0x0406,	# CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00b3: 0x0456,	# CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00b4: 0x0491,	# CYRILLIC SMALL LETTER GHE WITH UPTURN
+	0x00b8: 0x0451,	# CYRILLIC SMALL LETTER IO
+	0x00b9: 0x2116,	# NUMERO SIGN
+	0x00ba: 0x0454,	# CYRILLIC SMALL LETTER UKRAINIAN IE
+	0x00bc: 0x0458,	# CYRILLIC SMALL LETTER JE
+	0x00bd: 0x0405,	# CYRILLIC CAPITAL LETTER DZE
+	0x00be: 0x0455,	# CYRILLIC SMALL LETTER DZE
+	0x00bf: 0x0457,	# CYRILLIC SMALL LETTER YI
+	0x00c0: 0x0410,	# CYRILLIC CAPITAL LETTER A
+	0x00c1: 0x0411,	# CYRILLIC CAPITAL LETTER BE
+	0x00c2: 0x0412,	# CYRILLIC CAPITAL LETTER VE
+	0x00c3: 0x0413,	# CYRILLIC CAPITAL LETTER GHE
+	0x00c4: 0x0414,	# CYRILLIC CAPITAL LETTER DE
+	0x00c5: 0x0415,	# CYRILLIC CAPITAL LETTER IE
+	0x00c6: 0x0416,	# CYRILLIC CAPITAL LETTER ZHE
+	0x00c7: 0x0417,	# CYRILLIC CAPITAL LETTER ZE
+	0x00c8: 0x0418,	# CYRILLIC CAPITAL LETTER I
+	0x00c9: 0x0419,	# CYRILLIC CAPITAL LETTER SHORT I
+	0x00ca: 0x041a,	# CYRILLIC CAPITAL LETTER KA
+	0x00cb: 0x041b,	# CYRILLIC CAPITAL LETTER EL
+	0x00cc: 0x041c,	# CYRILLIC CAPITAL LETTER EM
+	0x00cd: 0x041d,	# CYRILLIC CAPITAL LETTER EN
+	0x00ce: 0x041e,	# CYRILLIC CAPITAL LETTER O
+	0x00cf: 0x041f,	# CYRILLIC CAPITAL LETTER PE
+	0x00d0: 0x0420,	# CYRILLIC CAPITAL LETTER ER
+	0x00d1: 0x0421,	# CYRILLIC CAPITAL LETTER ES
+	0x00d2: 0x0422,	# CYRILLIC CAPITAL LETTER TE
+	0x00d3: 0x0423,	# CYRILLIC CAPITAL LETTER U
+	0x00d4: 0x0424,	# CYRILLIC CAPITAL LETTER EF
+	0x00d5: 0x0425,	# CYRILLIC CAPITAL LETTER HA
+	0x00d6: 0x0426,	# CYRILLIC CAPITAL LETTER TSE
+	0x00d7: 0x0427,	# CYRILLIC CAPITAL LETTER CHE
+	0x00d8: 0x0428,	# CYRILLIC CAPITAL LETTER SHA
+	0x00d9: 0x0429,	# CYRILLIC CAPITAL LETTER SHCHA
+	0x00da: 0x042a,	# CYRILLIC CAPITAL LETTER HARD SIGN
+	0x00db: 0x042b,	# CYRILLIC CAPITAL LETTER YERU
+	0x00dc: 0x042c,	# CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x00dd: 0x042d,	# CYRILLIC CAPITAL LETTER E
+	0x00de: 0x042e,	# CYRILLIC CAPITAL LETTER YU
+	0x00df: 0x042f,	# CYRILLIC CAPITAL LETTER YA
+	0x00e0: 0x0430,	# CYRILLIC SMALL LETTER A
+	0x00e1: 0x0431,	# CYRILLIC SMALL LETTER BE
+	0x00e2: 0x0432,	# CYRILLIC SMALL LETTER VE
+	0x00e3: 0x0433,	# CYRILLIC SMALL LETTER GHE
+	0x00e4: 0x0434,	# CYRILLIC SMALL LETTER DE
+	0x00e5: 0x0435,	# CYRILLIC SMALL LETTER IE
+	0x00e6: 0x0436,	# CYRILLIC SMALL LETTER ZHE
+	0x00e7: 0x0437,	# CYRILLIC SMALL LETTER ZE
+	0x00e8: 0x0438,	# CYRILLIC SMALL LETTER I
+	0x00e9: 0x0439,	# CYRILLIC SMALL LETTER SHORT I
+	0x00ea: 0x043a,	# CYRILLIC SMALL LETTER KA
+	0x00eb: 0x043b,	# CYRILLIC SMALL LETTER EL
+	0x00ec: 0x043c,	# CYRILLIC SMALL LETTER EM
+	0x00ed: 0x043d,	# CYRILLIC SMALL LETTER EN
+	0x00ee: 0x043e,	# CYRILLIC SMALL LETTER O
+	0x00ef: 0x043f,	# CYRILLIC SMALL LETTER PE
+	0x00f0: 0x0440,	# CYRILLIC SMALL LETTER ER
+	0x00f1: 0x0441,	# CYRILLIC SMALL LETTER ES
+	0x00f2: 0x0442,	# CYRILLIC SMALL LETTER TE
+	0x00f3: 0x0443,	# CYRILLIC SMALL LETTER U
+	0x00f4: 0x0444,	# CYRILLIC SMALL LETTER EF
+	0x00f5: 0x0445,	# CYRILLIC SMALL LETTER HA
+	0x00f6: 0x0446,	# CYRILLIC SMALL LETTER TSE
+	0x00f7: 0x0447,	# CYRILLIC SMALL LETTER CHE
+	0x00f8: 0x0448,	# CYRILLIC SMALL LETTER SHA
+	0x00f9: 0x0449,	# CYRILLIC SMALL LETTER SHCHA
+	0x00fa: 0x044a,	# CYRILLIC SMALL LETTER HARD SIGN
+	0x00fb: 0x044b,	# CYRILLIC SMALL LETTER YERU
+	0x00fc: 0x044c,	# CYRILLIC SMALL LETTER SOFT SIGN
+	0x00fd: 0x044d,	# CYRILLIC SMALL LETTER E
+	0x00fe: 0x044e,	# CYRILLIC SMALL LETTER YU
+	0x00ff: 0x044f,	# CYRILLIC SMALL LETTER YA
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1252.py b/lib-python/2.2/encodings/cp1252.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1252.py
@@ -0,0 +1,76 @@
+""" Python Character Mapping Codec generated from 'CP1252.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x008d: None,	# UNDEFINED
+	0x008e: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: 0x02dc,	# SMALL TILDE
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x009d: None,	# UNDEFINED
+	0x009e: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x009f: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1253.py b/lib-python/2.2/encodings/cp1253.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1253.py
@@ -0,0 +1,151 @@
+""" Python Character Mapping Codec generated from 'CP1253.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: None,	# UNDEFINED
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: None,	# UNDEFINED
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: None,	# UNDEFINED
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: None,	# UNDEFINED
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: None,	# UNDEFINED
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: None,	# UNDEFINED
+	0x009d: None,	# UNDEFINED
+	0x009e: None,	# UNDEFINED
+	0x009f: None,	# UNDEFINED
+	0x00a1: 0x0385,	# GREEK DIALYTIKA TONOS
+	0x00a2: 0x0386,	# GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x00aa: None,	# UNDEFINED
+	0x00af: 0x2015,	# HORIZONTAL BAR
+	0x00b4: 0x0384,	# GREEK TONOS
+	0x00b8: 0x0388,	# GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x00b9: 0x0389,	# GREEK CAPITAL LETTER ETA WITH TONOS
+	0x00ba: 0x038a,	# GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x00bc: 0x038c,	# GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x00be: 0x038e,	# GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x00bf: 0x038f,	# GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x00c0: 0x0390,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+	0x00c1: 0x0391,	# GREEK CAPITAL LETTER ALPHA
+	0x00c2: 0x0392,	# GREEK CAPITAL LETTER BETA
+	0x00c3: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00c4: 0x0394,	# GREEK CAPITAL LETTER DELTA
+	0x00c5: 0x0395,	# GREEK CAPITAL LETTER EPSILON
+	0x00c6: 0x0396,	# GREEK CAPITAL LETTER ZETA
+	0x00c7: 0x0397,	# GREEK CAPITAL LETTER ETA
+	0x00c8: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00c9: 0x0399,	# GREEK CAPITAL LETTER IOTA
+	0x00ca: 0x039a,	# GREEK CAPITAL LETTER KAPPA
+	0x00cb: 0x039b,	# GREEK CAPITAL LETTER LAMDA
+	0x00cc: 0x039c,	# GREEK CAPITAL LETTER MU
+	0x00cd: 0x039d,	# GREEK CAPITAL LETTER NU
+	0x00ce: 0x039e,	# GREEK CAPITAL LETTER XI
+	0x00cf: 0x039f,	# GREEK CAPITAL LETTER OMICRON
+	0x00d0: 0x03a0,	# GREEK CAPITAL LETTER PI
+	0x00d1: 0x03a1,	# GREEK CAPITAL LETTER RHO
+	0x00d2: None,	# UNDEFINED
+	0x00d3: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00d4: 0x03a4,	# GREEK CAPITAL LETTER TAU
+	0x00d5: 0x03a5,	# GREEK CAPITAL LETTER UPSILON
+	0x00d6: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00d7: 0x03a7,	# GREEK CAPITAL LETTER CHI
+	0x00d8: 0x03a8,	# GREEK CAPITAL LETTER PSI
+	0x00d9: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00da: 0x03aa,	# GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x00db: 0x03ab,	# GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x00dc: 0x03ac,	# GREEK SMALL LETTER ALPHA WITH TONOS
+	0x00dd: 0x03ad,	# GREEK SMALL LETTER EPSILON WITH TONOS
+	0x00de: 0x03ae,	# GREEK SMALL LETTER ETA WITH TONOS
+	0x00df: 0x03af,	# GREEK SMALL LETTER IOTA WITH TONOS
+	0x00e0: 0x03b0,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+	0x00e1: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e2: 0x03b2,	# GREEK SMALL LETTER BETA
+	0x00e3: 0x03b3,	# GREEK SMALL LETTER GAMMA
+	0x00e4: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00e5: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00e6: 0x03b6,	# GREEK SMALL LETTER ZETA
+	0x00e7: 0x03b7,	# GREEK SMALL LETTER ETA
+	0x00e8: 0x03b8,	# GREEK SMALL LETTER THETA
+	0x00e9: 0x03b9,	# GREEK SMALL LETTER IOTA
+	0x00ea: 0x03ba,	# GREEK SMALL LETTER KAPPA
+	0x00eb: 0x03bb,	# GREEK SMALL LETTER LAMDA
+	0x00ec: 0x03bc,	# GREEK SMALL LETTER MU
+	0x00ed: 0x03bd,	# GREEK SMALL LETTER NU
+	0x00ee: 0x03be,	# GREEK SMALL LETTER XI
+	0x00ef: 0x03bf,	# GREEK SMALL LETTER OMICRON
+	0x00f0: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00f1: 0x03c1,	# GREEK SMALL LETTER RHO
+	0x00f2: 0x03c2,	# GREEK SMALL LETTER FINAL SIGMA
+	0x00f3: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00f4: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00f5: 0x03c5,	# GREEK SMALL LETTER UPSILON
+	0x00f6: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00f7: 0x03c7,	# GREEK SMALL LETTER CHI
+	0x00f8: 0x03c8,	# GREEK SMALL LETTER PSI
+	0x00f9: 0x03c9,	# GREEK SMALL LETTER OMEGA
+	0x00fa: 0x03ca,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00fb: 0x03cb,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00fc: 0x03cc,	# GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00fd: 0x03cd,	# GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00fe: 0x03ce,	# GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00ff: None,	# UNDEFINED
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1254.py b/lib-python/2.2/encodings/cp1254.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1254.py
@@ -0,0 +1,82 @@
+""" Python Character Mapping Codec generated from 'CP1254.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: 0x02dc,	# SMALL TILDE
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x009d: None,	# UNDEFINED
+	0x009e: None,	# UNDEFINED
+	0x009f: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00d0: 0x011e,	# LATIN CAPITAL LETTER G WITH BREVE
+	0x00dd: 0x0130,	# LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x00de: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00f0: 0x011f,	# LATIN SMALL LETTER G WITH BREVE
+	0x00fd: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x00fe: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1255.py b/lib-python/2.2/encodings/cp1255.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1255.py
@@ -0,0 +1,143 @@
+""" Python Character Mapping Codec generated from 'CP1255.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: None,	# UNDEFINED
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: None,	# UNDEFINED
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: 0x02dc,	# SMALL TILDE
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: None,	# UNDEFINED
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: None,	# UNDEFINED
+	0x009d: None,	# UNDEFINED
+	0x009e: None,	# UNDEFINED
+	0x009f: None,	# UNDEFINED
+	0x00a4: 0x20aa,	# NEW SHEQEL SIGN
+	0x00aa: 0x00d7,	# MULTIPLICATION SIGN
+	0x00ba: 0x00f7,	# DIVISION SIGN
+	0x00c0: 0x05b0,	# HEBREW POINT SHEVA
+	0x00c1: 0x05b1,	# HEBREW POINT HATAF SEGOL
+	0x00c2: 0x05b2,	# HEBREW POINT HATAF PATAH
+	0x00c3: 0x05b3,	# HEBREW POINT HATAF QAMATS
+	0x00c4: 0x05b4,	# HEBREW POINT HIRIQ
+	0x00c5: 0x05b5,	# HEBREW POINT TSERE
+	0x00c6: 0x05b6,	# HEBREW POINT SEGOL
+	0x00c7: 0x05b7,	# HEBREW POINT PATAH
+	0x00c8: 0x05b8,	# HEBREW POINT QAMATS
+	0x00c9: 0x05b9,	# HEBREW POINT HOLAM
+	0x00ca: None,	# UNDEFINED
+	0x00cb: 0x05bb,	# HEBREW POINT QUBUTS
+	0x00cc: 0x05bc,	# HEBREW POINT DAGESH OR MAPIQ
+	0x00cd: 0x05bd,	# HEBREW POINT METEG
+	0x00ce: 0x05be,	# HEBREW PUNCTUATION MAQAF
+	0x00cf: 0x05bf,	# HEBREW POINT RAFE
+	0x00d0: 0x05c0,	# HEBREW PUNCTUATION PASEQ
+	0x00d1: 0x05c1,	# HEBREW POINT SHIN DOT
+	0x00d2: 0x05c2,	# HEBREW POINT SIN DOT
+	0x00d3: 0x05c3,	# HEBREW PUNCTUATION SOF PASUQ
+	0x00d4: 0x05f0,	# HEBREW LIGATURE YIDDISH DOUBLE VAV
+	0x00d5: 0x05f1,	# HEBREW LIGATURE YIDDISH VAV YOD
+	0x00d6: 0x05f2,	# HEBREW LIGATURE YIDDISH DOUBLE YOD
+	0x00d7: 0x05f3,	# HEBREW PUNCTUATION GERESH
+	0x00d8: 0x05f4,	# HEBREW PUNCTUATION GERSHAYIM
+	0x00d9: None,	# UNDEFINED
+	0x00da: None,	# UNDEFINED
+	0x00db: None,	# UNDEFINED
+	0x00dc: None,	# UNDEFINED
+	0x00dd: None,	# UNDEFINED
+	0x00de: None,	# UNDEFINED
+	0x00df: None,	# UNDEFINED
+	0x00e0: 0x05d0,	# HEBREW LETTER ALEF
+	0x00e1: 0x05d1,	# HEBREW LETTER BET
+	0x00e2: 0x05d2,	# HEBREW LETTER GIMEL
+	0x00e3: 0x05d3,	# HEBREW LETTER DALET
+	0x00e4: 0x05d4,	# HEBREW LETTER HE
+	0x00e5: 0x05d5,	# HEBREW LETTER VAV
+	0x00e6: 0x05d6,	# HEBREW LETTER ZAYIN
+	0x00e7: 0x05d7,	# HEBREW LETTER HET
+	0x00e8: 0x05d8,	# HEBREW LETTER TET
+	0x00e9: 0x05d9,	# HEBREW LETTER YOD
+	0x00ea: 0x05da,	# HEBREW LETTER FINAL KAF
+	0x00eb: 0x05db,	# HEBREW LETTER KAF
+	0x00ec: 0x05dc,	# HEBREW LETTER LAMED
+	0x00ed: 0x05dd,	# HEBREW LETTER FINAL MEM
+	0x00ee: 0x05de,	# HEBREW LETTER MEM
+	0x00ef: 0x05df,	# HEBREW LETTER FINAL NUN
+	0x00f0: 0x05e0,	# HEBREW LETTER NUN
+	0x00f1: 0x05e1,	# HEBREW LETTER SAMEKH
+	0x00f2: 0x05e2,	# HEBREW LETTER AYIN
+	0x00f3: 0x05e3,	# HEBREW LETTER FINAL PE
+	0x00f4: 0x05e4,	# HEBREW LETTER PE
+	0x00f5: 0x05e5,	# HEBREW LETTER FINAL TSADI
+	0x00f6: 0x05e6,	# HEBREW LETTER TSADI
+	0x00f7: 0x05e7,	# HEBREW LETTER QOF
+	0x00f8: 0x05e8,	# HEBREW LETTER RESH
+	0x00f9: 0x05e9,	# HEBREW LETTER SHIN
+	0x00fa: 0x05ea,	# HEBREW LETTER TAV
+	0x00fb: None,	# UNDEFINED
+	0x00fc: None,	# UNDEFINED
+	0x00fd: 0x200e,	# LEFT-TO-RIGHT MARK
+	0x00fe: 0x200f,	# RIGHT-TO-LEFT MARK
+	0x00ff: None,	# UNDEFINED
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1256.py b/lib-python/2.2/encodings/cp1256.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1256.py
@@ -0,0 +1,129 @@
+""" Python Character Mapping Codec generated from 'CP1256.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: 0x067e,	# ARABIC LETTER PEH
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: 0x0679,	# ARABIC LETTER TTEH
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x008d: 0x0686,	# ARABIC LETTER TCHEH
+	0x008e: 0x0698,	# ARABIC LETTER JEH
+	0x008f: 0x0688,	# ARABIC LETTER DDAL
+	0x0090: 0x06af,	# ARABIC LETTER GAF
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: 0x06a9,	# ARABIC LETTER KEHEH
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: 0x0691,	# ARABIC LETTER RREH
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x009d: 0x200c,	# ZERO WIDTH NON-JOINER
+	0x009e: 0x200d,	# ZERO WIDTH JOINER
+	0x009f: 0x06ba,	# ARABIC LETTER NOON GHUNNA
+	0x00a1: 0x060c,	# ARABIC COMMA
+	0x00aa: 0x06be,	# ARABIC LETTER HEH DOACHASHMEE
+	0x00ba: 0x061b,	# ARABIC SEMICOLON
+	0x00bf: 0x061f,	# ARABIC QUESTION MARK
+	0x00c0: 0x06c1,	# ARABIC LETTER HEH GOAL
+	0x00c1: 0x0621,	# ARABIC LETTER HAMZA
+	0x00c2: 0x0622,	# ARABIC LETTER ALEF WITH MADDA ABOVE
+	0x00c3: 0x0623,	# ARABIC LETTER ALEF WITH HAMZA ABOVE
+	0x00c4: 0x0624,	# ARABIC LETTER WAW WITH HAMZA ABOVE
+	0x00c5: 0x0625,	# ARABIC LETTER ALEF WITH HAMZA BELOW
+	0x00c6: 0x0626,	# ARABIC LETTER YEH WITH HAMZA ABOVE
+	0x00c7: 0x0627,	# ARABIC LETTER ALEF
+	0x00c8: 0x0628,	# ARABIC LETTER BEH
+	0x00c9: 0x0629,	# ARABIC LETTER TEH MARBUTA
+	0x00ca: 0x062a,	# ARABIC LETTER TEH
+	0x00cb: 0x062b,	# ARABIC LETTER THEH
+	0x00cc: 0x062c,	# ARABIC LETTER JEEM
+	0x00cd: 0x062d,	# ARABIC LETTER HAH
+	0x00ce: 0x062e,	# ARABIC LETTER KHAH
+	0x00cf: 0x062f,	# ARABIC LETTER DAL
+	0x00d0: 0x0630,	# ARABIC LETTER THAL
+	0x00d1: 0x0631,	# ARABIC LETTER REH
+	0x00d2: 0x0632,	# ARABIC LETTER ZAIN
+	0x00d3: 0x0633,	# ARABIC LETTER SEEN
+	0x00d4: 0x0634,	# ARABIC LETTER SHEEN
+	0x00d5: 0x0635,	# ARABIC LETTER SAD
+	0x00d6: 0x0636,	# ARABIC LETTER DAD
+	0x00d8: 0x0637,	# ARABIC LETTER TAH
+	0x00d9: 0x0638,	# ARABIC LETTER ZAH
+	0x00da: 0x0639,	# ARABIC LETTER AIN
+	0x00db: 0x063a,	# ARABIC LETTER GHAIN
+	0x00dc: 0x0640,	# ARABIC TATWEEL
+	0x00dd: 0x0641,	# ARABIC LETTER FEH
+	0x00de: 0x0642,	# ARABIC LETTER QAF
+	0x00df: 0x0643,	# ARABIC LETTER KAF
+	0x00e1: 0x0644,	# ARABIC LETTER LAM
+	0x00e3: 0x0645,	# ARABIC LETTER MEEM
+	0x00e4: 0x0646,	# ARABIC LETTER NOON
+	0x00e5: 0x0647,	# ARABIC LETTER HEH
+	0x00e6: 0x0648,	# ARABIC LETTER WAW
+	0x00ec: 0x0649,	# ARABIC LETTER ALEF MAKSURA
+	0x00ed: 0x064a,	# ARABIC LETTER YEH
+	0x00f0: 0x064b,	# ARABIC FATHATAN
+	0x00f1: 0x064c,	# ARABIC DAMMATAN
+	0x00f2: 0x064d,	# ARABIC KASRATAN
+	0x00f3: 0x064e,	# ARABIC FATHA
+	0x00f5: 0x064f,	# ARABIC DAMMA
+	0x00f6: 0x0650,	# ARABIC KASRA
+	0x00f8: 0x0651,	# ARABIC SHADDA
+	0x00fa: 0x0652,	# ARABIC SUKUN
+	0x00fd: 0x200e,	# LEFT-TO-RIGHT MARK
+	0x00fe: 0x200f,	# RIGHT-TO-LEFT MARK
+	0x00ff: 0x06d2,	# ARABIC LETTER YEH BARREE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1257.py b/lib-python/2.2/encodings/cp1257.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1257.py
@@ -0,0 +1,131 @@
+""" Python Character Mapping Codec generated from 'CP1257.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: None,	# UNDEFINED
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: None,	# UNDEFINED
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: None,	# UNDEFINED
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: None,	# UNDEFINED
+	0x008d: 0x00a8,	# DIAERESIS
+	0x008e: 0x02c7,	# CARON
+	0x008f: 0x00b8,	# CEDILLA
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: None,	# UNDEFINED
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: None,	# UNDEFINED
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: None,	# UNDEFINED
+	0x009d: 0x00af,	# MACRON
+	0x009e: 0x02db,	# OGONEK
+	0x009f: None,	# UNDEFINED
+	0x00a1: None,	# UNDEFINED
+	0x00a5: None,	# UNDEFINED
+	0x00a8: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x00aa: 0x0156,	# LATIN CAPITAL LETTER R WITH CEDILLA
+	0x00af: 0x00c6,	# LATIN CAPITAL LETTER AE
+	0x00b8: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x00ba: 0x0157,	# LATIN SMALL LETTER R WITH CEDILLA
+	0x00bf: 0x00e6,	# LATIN SMALL LETTER AE
+	0x00c0: 0x0104,	# LATIN CAPITAL LETTER A WITH OGONEK
+	0x00c1: 0x012e,	# LATIN CAPITAL LETTER I WITH OGONEK
+	0x00c2: 0x0100,	# LATIN CAPITAL LETTER A WITH MACRON
+	0x00c3: 0x0106,	# LATIN CAPITAL LETTER C WITH ACUTE
+	0x00c6: 0x0118,	# LATIN CAPITAL LETTER E WITH OGONEK
+	0x00c7: 0x0112,	# LATIN CAPITAL LETTER E WITH MACRON
+	0x00c8: 0x010c,	# LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0179,	# LATIN CAPITAL LETTER Z WITH ACUTE
+	0x00cb: 0x0116,	# LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x00cc: 0x0122,	# LATIN CAPITAL LETTER G WITH CEDILLA
+	0x00cd: 0x0136,	# LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00ce: 0x012a,	# LATIN CAPITAL LETTER I WITH MACRON
+	0x00cf: 0x013b,	# LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00d0: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x00d1: 0x0143,	# LATIN CAPITAL LETTER N WITH ACUTE
+	0x00d2: 0x0145,	# LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00d4: 0x014c,	# LATIN CAPITAL LETTER O WITH MACRON
+	0x00d8: 0x0172,	# LATIN CAPITAL LETTER U WITH OGONEK
+	0x00d9: 0x0141,	# LATIN CAPITAL LETTER L WITH STROKE
+	0x00da: 0x015a,	# LATIN CAPITAL LETTER S WITH ACUTE
+	0x00db: 0x016a,	# LATIN CAPITAL LETTER U WITH MACRON
+	0x00dd: 0x017b,	# LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00de: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x00e0: 0x0105,	# LATIN SMALL LETTER A WITH OGONEK
+	0x00e1: 0x012f,	# LATIN SMALL LETTER I WITH OGONEK
+	0x00e2: 0x0101,	# LATIN SMALL LETTER A WITH MACRON
+	0x00e3: 0x0107,	# LATIN SMALL LETTER C WITH ACUTE
+	0x00e6: 0x0119,	# LATIN SMALL LETTER E WITH OGONEK
+	0x00e7: 0x0113,	# LATIN SMALL LETTER E WITH MACRON
+	0x00e8: 0x010d,	# LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x017a,	# LATIN SMALL LETTER Z WITH ACUTE
+	0x00eb: 0x0117,	# LATIN SMALL LETTER E WITH DOT ABOVE
+	0x00ec: 0x0123,	# LATIN SMALL LETTER G WITH CEDILLA
+	0x00ed: 0x0137,	# LATIN SMALL LETTER K WITH CEDILLA
+	0x00ee: 0x012b,	# LATIN SMALL LETTER I WITH MACRON
+	0x00ef: 0x013c,	# LATIN SMALL LETTER L WITH CEDILLA
+	0x00f0: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x00f1: 0x0144,	# LATIN SMALL LETTER N WITH ACUTE
+	0x00f2: 0x0146,	# LATIN SMALL LETTER N WITH CEDILLA
+	0x00f4: 0x014d,	# LATIN SMALL LETTER O WITH MACRON
+	0x00f8: 0x0173,	# LATIN SMALL LETTER U WITH OGONEK
+	0x00f9: 0x0142,	# LATIN SMALL LETTER L WITH STROKE
+	0x00fa: 0x015b,	# LATIN SMALL LETTER S WITH ACUTE
+	0x00fb: 0x016b,	# LATIN SMALL LETTER U WITH MACRON
+	0x00fd: 0x017c,	# LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00fe: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x00ff: 0x02d9,	# DOT ABOVE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp1258.py b/lib-python/2.2/encodings/cp1258.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp1258.py
@@ -0,0 +1,90 @@
+""" Python Character Mapping Codec generated from 'CP1258.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x0083: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x0084: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: 0x2020,	# DAGGER
+	0x0087: 0x2021,	# DOUBLE DAGGER
+	0x0088: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x0089: 0x2030,	# PER MILLE SIGN
+	0x008a: None,	# UNDEFINED
+	0x008b: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x008c: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: 0x02dc,	# SMALL TILDE
+	0x0099: 0x2122,	# TRADE MARK SIGN
+	0x009a: None,	# UNDEFINED
+	0x009b: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x009c: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x009d: None,	# UNDEFINED
+	0x009e: None,	# UNDEFINED
+	0x009f: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00c3: 0x0102,	# LATIN CAPITAL LETTER A WITH BREVE
+	0x00cc: 0x0300,	# COMBINING GRAVE ACCENT
+	0x00d0: 0x0110,	# LATIN CAPITAL LETTER D WITH STROKE
+	0x00d2: 0x0309,	# COMBINING HOOK ABOVE
+	0x00d5: 0x01a0,	# LATIN CAPITAL LETTER O WITH HORN
+	0x00dd: 0x01af,	# LATIN CAPITAL LETTER U WITH HORN
+	0x00de: 0x0303,	# COMBINING TILDE
+	0x00e3: 0x0103,	# LATIN SMALL LETTER A WITH BREVE
+	0x00ec: 0x0301,	# COMBINING ACUTE ACCENT
+	0x00f0: 0x0111,	# LATIN SMALL LETTER D WITH STROKE
+	0x00f2: 0x0323,	# COMBINING DOT BELOW
+	0x00f5: 0x01a1,	# LATIN SMALL LETTER O WITH HORN
+	0x00fd: 0x01b0,	# LATIN SMALL LETTER U WITH HORN
+	0x00fe: 0x20ab,	# DONG SIGN
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp424.py b/lib-python/2.2/encodings/cp424.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp424.py
@@ -0,0 +1,280 @@
+""" Python Character Mapping Codec generated from 'CP424.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0004: 0x009c,	# SELECT
+	0x0005: 0x0009,	# HORIZONTAL TABULATION
+	0x0006: 0x0086,	# REQUIRED NEW LINE
+	0x0007: 0x007f,	# DELETE
+	0x0008: 0x0097,	# GRAPHIC ESCAPE
+	0x0009: 0x008d,	# SUPERSCRIPT
+	0x000a: 0x008e,	# REPEAT
+	0x0014: 0x009d,	# RESTORE/ENABLE PRESENTATION
+	0x0015: 0x0085,	# NEW LINE
+	0x0016: 0x0008,	# BACKSPACE
+	0x0017: 0x0087,	# PROGRAM OPERATOR COMMUNICATION
+	0x001a: 0x0092,	# UNIT BACK SPACE
+	0x001b: 0x008f,	# CUSTOMER USE ONE
+	0x0020: 0x0080,	# DIGIT SELECT
+	0x0021: 0x0081,	# START OF SIGNIFICANCE
+	0x0022: 0x0082,	# FIELD SEPARATOR
+	0x0023: 0x0083,	# WORD UNDERSCORE
+	0x0024: 0x0084,	# BYPASS OR INHIBIT PRESENTATION
+	0x0025: 0x000a,	# LINE FEED
+	0x0026: 0x0017,	# END OF TRANSMISSION BLOCK
+	0x0027: 0x001b,	# ESCAPE
+	0x0028: 0x0088,	# SET ATTRIBUTE
+	0x0029: 0x0089,	# START FIELD EXTENDED
+	0x002a: 0x008a,	# SET MODE OR SWITCH
+	0x002b: 0x008b,	# CONTROL SEQUENCE PREFIX
+	0x002c: 0x008c,	# MODIFY FIELD ATTRIBUTE
+	0x002d: 0x0005,	# ENQUIRY
+	0x002e: 0x0006,	# ACKNOWLEDGE
+	0x002f: 0x0007,	# BELL
+	0x0030: 0x0090,	# <reserved>
+	0x0031: 0x0091,	# <reserved>
+	0x0032: 0x0016,	# SYNCHRONOUS IDLE
+	0x0033: 0x0093,	# INDEX RETURN
+	0x0034: 0x0094,	# PRESENTATION POSITION
+	0x0035: 0x0095,	# TRANSPARENT
+	0x0036: 0x0096,	# NUMERIC BACKSPACE
+	0x0037: 0x0004,	# END OF TRANSMISSION
+	0x0038: 0x0098,	# SUBSCRIPT
+	0x0039: 0x0099,	# INDENT TABULATION
+	0x003a: 0x009a,	# REVERSE FORM FEED
+	0x003b: 0x009b,	# CUSTOMER USE THREE
+	0x003c: 0x0014,	# DEVICE CONTROL FOUR
+	0x003d: 0x0015,	# NEGATIVE ACKNOWLEDGE
+	0x003e: 0x009e,	# <reserved>
+	0x003f: 0x001a,	# SUBSTITUTE
+	0x0040: 0x0020,	# SPACE
+	0x0041: 0x05d0,	# HEBREW LETTER ALEF
+	0x0042: 0x05d1,	# HEBREW LETTER BET
+	0x0043: 0x05d2,	# HEBREW LETTER GIMEL
+	0x0044: 0x05d3,	# HEBREW LETTER DALET
+	0x0045: 0x05d4,	# HEBREW LETTER HE
+	0x0046: 0x05d5,	# HEBREW LETTER VAV
+	0x0047: 0x05d6,	# HEBREW LETTER ZAYIN
+	0x0048: 0x05d7,	# HEBREW LETTER HET
+	0x0049: 0x05d8,	# HEBREW LETTER TET
+	0x004a: 0x00a2,	# CENT SIGN
+	0x004b: 0x002e,	# FULL STOP
+	0x004c: 0x003c,	# LESS-THAN SIGN
+	0x004d: 0x0028,	# LEFT PARENTHESIS
+	0x004e: 0x002b,	# PLUS SIGN
+	0x004f: 0x007c,	# VERTICAL LINE
+	0x0050: 0x0026,	# AMPERSAND
+	0x0051: 0x05d9,	# HEBREW LETTER YOD
+	0x0052: 0x05da,	# HEBREW LETTER FINAL KAF
+	0x0053: 0x05db,	# HEBREW LETTER KAF
+	0x0054: 0x05dc,	# HEBREW LETTER LAMED
+	0x0055: 0x05dd,	# HEBREW LETTER FINAL MEM
+	0x0056: 0x05de,	# HEBREW LETTER MEM
+	0x0057: 0x05df,	# HEBREW LETTER FINAL NUN
+	0x0058: 0x05e0,	# HEBREW LETTER NUN
+	0x0059: 0x05e1,	# HEBREW LETTER SAMEKH
+	0x005a: 0x0021,	# EXCLAMATION MARK
+	0x005b: 0x0024,	# DOLLAR SIGN
+	0x005c: 0x002a,	# ASTERISK
+	0x005d: 0x0029,	# RIGHT PARENTHESIS
+	0x005e: 0x003b,	# SEMICOLON
+	0x005f: 0x00ac,	# NOT SIGN
+	0x0060: 0x002d,	# HYPHEN-MINUS
+	0x0061: 0x002f,	# SOLIDUS
+	0x0062: 0x05e2,	# HEBREW LETTER AYIN
+	0x0063: 0x05e3,	# HEBREW LETTER FINAL PE
+	0x0064: 0x05e4,	# HEBREW LETTER PE
+	0x0065: 0x05e5,	# HEBREW LETTER FINAL TSADI
+	0x0066: 0x05e6,	# HEBREW LETTER TSADI
+	0x0067: 0x05e7,	# HEBREW LETTER QOF
+	0x0068: 0x05e8,	# HEBREW LETTER RESH
+	0x0069: 0x05e9,	# HEBREW LETTER SHIN
+	0x006a: 0x00a6,	# BROKEN BAR
+	0x006b: 0x002c,	# COMMA
+	0x006c: 0x0025,	# PERCENT SIGN
+	0x006d: 0x005f,	# LOW LINE
+	0x006e: 0x003e,	# GREATER-THAN SIGN
+	0x006f: 0x003f,	# QUESTION MARK
+	0x0070: None,	# UNDEFINED
+	0x0071: 0x05ea,	# HEBREW LETTER TAV
+	0x0072: None,	# UNDEFINED
+	0x0073: None,	# UNDEFINED
+	0x0074: 0x00a0,	# NO-BREAK SPACE
+	0x0075: None,	# UNDEFINED
+	0x0076: None,	# UNDEFINED
+	0x0077: None,	# UNDEFINED
+	0x0078: 0x2017,	# DOUBLE LOW LINE
+	0x0079: 0x0060,	# GRAVE ACCENT
+	0x007a: 0x003a,	# COLON
+	0x007b: 0x0023,	# NUMBER SIGN
+	0x007c: 0x0040,	# COMMERCIAL AT
+	0x007d: 0x0027,	# APOSTROPHE
+	0x007e: 0x003d,	# EQUALS SIGN
+	0x007f: 0x0022,	# QUOTATION MARK
+	0x0080: None,	# UNDEFINED
+	0x0081: 0x0061,	# LATIN SMALL LETTER A
+	0x0082: 0x0062,	# LATIN SMALL LETTER B
+	0x0083: 0x0063,	# LATIN SMALL LETTER C
+	0x0084: 0x0064,	# LATIN SMALL LETTER D
+	0x0085: 0x0065,	# LATIN SMALL LETTER E
+	0x0086: 0x0066,	# LATIN SMALL LETTER F
+	0x0087: 0x0067,	# LATIN SMALL LETTER G
+	0x0088: 0x0068,	# LATIN SMALL LETTER H
+	0x0089: 0x0069,	# LATIN SMALL LETTER I
+	0x008a: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008b: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008c: None,	# UNDEFINED
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: 0x00b1,	# PLUS-MINUS SIGN
+	0x0090: 0x00b0,	# DEGREE SIGN
+	0x0091: 0x006a,	# LATIN SMALL LETTER J
+	0x0092: 0x006b,	# LATIN SMALL LETTER K
+	0x0093: 0x006c,	# LATIN SMALL LETTER L
+	0x0094: 0x006d,	# LATIN SMALL LETTER M
+	0x0095: 0x006e,	# LATIN SMALL LETTER N
+	0x0096: 0x006f,	# LATIN SMALL LETTER O
+	0x0097: 0x0070,	# LATIN SMALL LETTER P
+	0x0098: 0x0071,	# LATIN SMALL LETTER Q
+	0x0099: 0x0072,	# LATIN SMALL LETTER R
+	0x009a: None,	# UNDEFINED
+	0x009b: None,	# UNDEFINED
+	0x009c: None,	# UNDEFINED
+	0x009d: 0x00b8,	# CEDILLA
+	0x009e: None,	# UNDEFINED
+	0x009f: 0x00a4,	# CURRENCY SIGN
+	0x00a0: 0x00b5,	# MICRO SIGN
+	0x00a1: 0x007e,	# TILDE
+	0x00a2: 0x0073,	# LATIN SMALL LETTER S
+	0x00a3: 0x0074,	# LATIN SMALL LETTER T
+	0x00a4: 0x0075,	# LATIN SMALL LETTER U
+	0x00a5: 0x0076,	# LATIN SMALL LETTER V
+	0x00a6: 0x0077,	# LATIN SMALL LETTER W
+	0x00a7: 0x0078,	# LATIN SMALL LETTER X
+	0x00a8: 0x0079,	# LATIN SMALL LETTER Y
+	0x00a9: 0x007a,	# LATIN SMALL LETTER Z
+	0x00aa: None,	# UNDEFINED
+	0x00ab: None,	# UNDEFINED
+	0x00ac: None,	# UNDEFINED
+	0x00ad: None,	# UNDEFINED
+	0x00ae: None,	# UNDEFINED
+	0x00af: 0x00ae,	# REGISTERED SIGN
+	0x00b0: 0x005e,	# CIRCUMFLEX ACCENT
+	0x00b1: 0x00a3,	# POUND SIGN
+	0x00b2: 0x00a5,	# YEN SIGN
+	0x00b3: 0x00b7,	# MIDDLE DOT
+	0x00b4: 0x00a9,	# COPYRIGHT SIGN
+	0x00b5: 0x00a7,	# SECTION SIGN
+	0x00b7: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00b8: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00b9: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00ba: 0x005b,	# LEFT SQUARE BRACKET
+	0x00bb: 0x005d,	# RIGHT SQUARE BRACKET
+	0x00bc: 0x00af,	# MACRON
+	0x00bd: 0x00a8,	# DIAERESIS
+	0x00be: 0x00b4,	# ACUTE ACCENT
+	0x00bf: 0x00d7,	# MULTIPLICATION SIGN
+	0x00c0: 0x007b,	# LEFT CURLY BRACKET
+	0x00c1: 0x0041,	# LATIN CAPITAL LETTER A
+	0x00c2: 0x0042,	# LATIN CAPITAL LETTER B
+	0x00c3: 0x0043,	# LATIN CAPITAL LETTER C
+	0x00c4: 0x0044,	# LATIN CAPITAL LETTER D
+	0x00c5: 0x0045,	# LATIN CAPITAL LETTER E
+	0x00c6: 0x0046,	# LATIN CAPITAL LETTER F
+	0x00c7: 0x0047,	# LATIN CAPITAL LETTER G
+	0x00c8: 0x0048,	# LATIN CAPITAL LETTER H
+	0x00c9: 0x0049,	# LATIN CAPITAL LETTER I
+	0x00ca: 0x00ad,	# SOFT HYPHEN
+	0x00cb: None,	# UNDEFINED
+	0x00cc: None,	# UNDEFINED
+	0x00cd: None,	# UNDEFINED
+	0x00ce: None,	# UNDEFINED
+	0x00cf: None,	# UNDEFINED
+	0x00d0: 0x007d,	# RIGHT CURLY BRACKET
+	0x00d1: 0x004a,	# LATIN CAPITAL LETTER J
+	0x00d2: 0x004b,	# LATIN CAPITAL LETTER K
+	0x00d3: 0x004c,	# LATIN CAPITAL LETTER L
+	0x00d4: 0x004d,	# LATIN CAPITAL LETTER M
+	0x00d5: 0x004e,	# LATIN CAPITAL LETTER N
+	0x00d6: 0x004f,	# LATIN CAPITAL LETTER O
+	0x00d7: 0x0050,	# LATIN CAPITAL LETTER P
+	0x00d8: 0x0051,	# LATIN CAPITAL LETTER Q
+	0x00d9: 0x0052,	# LATIN CAPITAL LETTER R
+	0x00da: 0x00b9,	# SUPERSCRIPT ONE
+	0x00db: None,	# UNDEFINED
+	0x00dc: None,	# UNDEFINED
+	0x00dd: None,	# UNDEFINED
+	0x00de: None,	# UNDEFINED
+	0x00df: None,	# UNDEFINED
+	0x00e0: 0x005c,	# REVERSE SOLIDUS
+	0x00e1: 0x00f7,	# DIVISION SIGN
+	0x00e2: 0x0053,	# LATIN CAPITAL LETTER S
+	0x00e3: 0x0054,	# LATIN CAPITAL LETTER T
+	0x00e4: 0x0055,	# LATIN CAPITAL LETTER U
+	0x00e5: 0x0056,	# LATIN CAPITAL LETTER V
+	0x00e6: 0x0057,	# LATIN CAPITAL LETTER W
+	0x00e7: 0x0058,	# LATIN CAPITAL LETTER X
+	0x00e8: 0x0059,	# LATIN CAPITAL LETTER Y
+	0x00e9: 0x005a,	# LATIN CAPITAL LETTER Z
+	0x00ea: 0x00b2,	# SUPERSCRIPT TWO
+	0x00eb: None,	# UNDEFINED
+	0x00ec: None,	# UNDEFINED
+	0x00ed: None,	# UNDEFINED
+	0x00ee: None,	# UNDEFINED
+	0x00ef: None,	# UNDEFINED
+	0x00f0: 0x0030,	# DIGIT ZERO
+	0x00f1: 0x0031,	# DIGIT ONE
+	0x00f2: 0x0032,	# DIGIT TWO
+	0x00f3: 0x0033,	# DIGIT THREE
+	0x00f4: 0x0034,	# DIGIT FOUR
+	0x00f5: 0x0035,	# DIGIT FIVE
+	0x00f6: 0x0036,	# DIGIT SIX
+	0x00f7: 0x0037,	# DIGIT SEVEN
+	0x00f8: 0x0038,	# DIGIT EIGHT
+	0x00f9: 0x0039,	# DIGIT NINE
+	0x00fa: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fb: None,	# UNDEFINED
+	0x00fc: None,	# UNDEFINED
+	0x00fd: None,	# UNDEFINED
+	0x00fe: None,	# UNDEFINED
+	0x00ff: 0x009f,	# EIGHT ONES
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp437.py b/lib-python/2.2/encodings/cp437.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp437.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP437.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00a2,	# CENT SIGN
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00a5,	# YEN SIGN
+	0x009e: 0x20a7,	# PESETA SIGN
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00a7: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x2310,	# REVERSED NOT SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp500.py b/lib-python/2.2/encodings/cp500.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp500.py
@@ -0,0 +1,280 @@
+""" Python Character Mapping Codec generated from 'CP500.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0004: 0x009c,	# CONTROL
+	0x0005: 0x0009,	# HORIZONTAL TABULATION
+	0x0006: 0x0086,	# CONTROL
+	0x0007: 0x007f,	# DELETE
+	0x0008: 0x0097,	# CONTROL
+	0x0009: 0x008d,	# CONTROL
+	0x000a: 0x008e,	# CONTROL
+	0x0014: 0x009d,	# CONTROL
+	0x0015: 0x0085,	# CONTROL
+	0x0016: 0x0008,	# BACKSPACE
+	0x0017: 0x0087,	# CONTROL
+	0x001a: 0x0092,	# CONTROL
+	0x001b: 0x008f,	# CONTROL
+	0x0020: 0x0080,	# CONTROL
+	0x0021: 0x0081,	# CONTROL
+	0x0022: 0x0082,	# CONTROL
+	0x0023: 0x0083,	# CONTROL
+	0x0024: 0x0084,	# CONTROL
+	0x0025: 0x000a,	# LINE FEED
+	0x0026: 0x0017,	# END OF TRANSMISSION BLOCK
+	0x0027: 0x001b,	# ESCAPE
+	0x0028: 0x0088,	# CONTROL
+	0x0029: 0x0089,	# CONTROL
+	0x002a: 0x008a,	# CONTROL
+	0x002b: 0x008b,	# CONTROL
+	0x002c: 0x008c,	# CONTROL
+	0x002d: 0x0005,	# ENQUIRY
+	0x002e: 0x0006,	# ACKNOWLEDGE
+	0x002f: 0x0007,	# BELL
+	0x0030: 0x0090,	# CONTROL
+	0x0031: 0x0091,	# CONTROL
+	0x0032: 0x0016,	# SYNCHRONOUS IDLE
+	0x0033: 0x0093,	# CONTROL
+	0x0034: 0x0094,	# CONTROL
+	0x0035: 0x0095,	# CONTROL
+	0x0036: 0x0096,	# CONTROL
+	0x0037: 0x0004,	# END OF TRANSMISSION
+	0x0038: 0x0098,	# CONTROL
+	0x0039: 0x0099,	# CONTROL
+	0x003a: 0x009a,	# CONTROL
+	0x003b: 0x009b,	# CONTROL
+	0x003c: 0x0014,	# DEVICE CONTROL FOUR
+	0x003d: 0x0015,	# NEGATIVE ACKNOWLEDGE
+	0x003e: 0x009e,	# CONTROL
+	0x003f: 0x001a,	# SUBSTITUTE
+	0x0040: 0x0020,	# SPACE
+	0x0041: 0x00a0,	# NO-BREAK SPACE
+	0x0042: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0043: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0044: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0045: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0046: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x0047: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0048: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0049: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x004a: 0x005b,	# LEFT SQUARE BRACKET
+	0x004b: 0x002e,	# FULL STOP
+	0x004c: 0x003c,	# LESS-THAN SIGN
+	0x004d: 0x0028,	# LEFT PARENTHESIS
+	0x004e: 0x002b,	# PLUS SIGN
+	0x004f: 0x0021,	# EXCLAMATION MARK
+	0x0050: 0x0026,	# AMPERSAND
+	0x0051: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0052: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0053: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0054: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0055: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0056: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0057: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0058: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0059: 0x00df,	# LATIN SMALL LETTER SHARP S (GERMAN)
+	0x005a: 0x005d,	# RIGHT SQUARE BRACKET
+	0x005b: 0x0024,	# DOLLAR SIGN
+	0x005c: 0x002a,	# ASTERISK
+	0x005d: 0x0029,	# RIGHT PARENTHESIS
+	0x005e: 0x003b,	# SEMICOLON
+	0x005f: 0x005e,	# CIRCUMFLEX ACCENT
+	0x0060: 0x002d,	# HYPHEN-MINUS
+	0x0061: 0x002f,	# SOLIDUS
+	0x0062: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x0063: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0064: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x0065: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x0066: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x0067: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0068: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0069: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x006a: 0x00a6,	# BROKEN BAR
+	0x006b: 0x002c,	# COMMA
+	0x006c: 0x0025,	# PERCENT SIGN
+	0x006d: 0x005f,	# LOW LINE
+	0x006e: 0x003e,	# GREATER-THAN SIGN
+	0x006f: 0x003f,	# QUESTION MARK
+	0x0070: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x0071: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0072: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x0073: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x0074: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x0075: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x0076: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x0077: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x0078: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x0079: 0x0060,	# GRAVE ACCENT
+	0x007a: 0x003a,	# COLON
+	0x007b: 0x0023,	# NUMBER SIGN
+	0x007c: 0x0040,	# COMMERCIAL AT
+	0x007d: 0x0027,	# APOSTROPHE
+	0x007e: 0x003d,	# EQUALS SIGN
+	0x007f: 0x0022,	# QUOTATION MARK
+	0x0080: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x0081: 0x0061,	# LATIN SMALL LETTER A
+	0x0082: 0x0062,	# LATIN SMALL LETTER B
+	0x0083: 0x0063,	# LATIN SMALL LETTER C
+	0x0084: 0x0064,	# LATIN SMALL LETTER D
+	0x0085: 0x0065,	# LATIN SMALL LETTER E
+	0x0086: 0x0066,	# LATIN SMALL LETTER F
+	0x0087: 0x0067,	# LATIN SMALL LETTER G
+	0x0088: 0x0068,	# LATIN SMALL LETTER H
+	0x0089: 0x0069,	# LATIN SMALL LETTER I
+	0x008a: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008b: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x008c: 0x00f0,	# LATIN SMALL LETTER ETH (ICELANDIC)
+	0x008d: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x008e: 0x00fe,	# LATIN SMALL LETTER THORN (ICELANDIC)
+	0x008f: 0x00b1,	# PLUS-MINUS SIGN
+	0x0090: 0x00b0,	# DEGREE SIGN
+	0x0091: 0x006a,	# LATIN SMALL LETTER J
+	0x0092: 0x006b,	# LATIN SMALL LETTER K
+	0x0093: 0x006c,	# LATIN SMALL LETTER L
+	0x0094: 0x006d,	# LATIN SMALL LETTER M
+	0x0095: 0x006e,	# LATIN SMALL LETTER N
+	0x0096: 0x006f,	# LATIN SMALL LETTER O
+	0x0097: 0x0070,	# LATIN SMALL LETTER P
+	0x0098: 0x0071,	# LATIN SMALL LETTER Q
+	0x0099: 0x0072,	# LATIN SMALL LETTER R
+	0x009a: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x009b: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x009c: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x009d: 0x00b8,	# CEDILLA
+	0x009e: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x009f: 0x00a4,	# CURRENCY SIGN
+	0x00a0: 0x00b5,	# MICRO SIGN
+	0x00a1: 0x007e,	# TILDE
+	0x00a2: 0x0073,	# LATIN SMALL LETTER S
+	0x00a3: 0x0074,	# LATIN SMALL LETTER T
+	0x00a4: 0x0075,	# LATIN SMALL LETTER U
+	0x00a5: 0x0076,	# LATIN SMALL LETTER V
+	0x00a6: 0x0077,	# LATIN SMALL LETTER W
+	0x00a7: 0x0078,	# LATIN SMALL LETTER X
+	0x00a8: 0x0079,	# LATIN SMALL LETTER Y
+	0x00a9: 0x007a,	# LATIN SMALL LETTER Z
+	0x00aa: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ab: 0x00bf,	# INVERTED QUESTION MARK
+	0x00ac: 0x00d0,	# LATIN CAPITAL LETTER ETH (ICELANDIC)
+	0x00ad: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00ae: 0x00de,	# LATIN CAPITAL LETTER THORN (ICELANDIC)
+	0x00af: 0x00ae,	# REGISTERED SIGN
+	0x00b0: 0x00a2,	# CENT SIGN
+	0x00b1: 0x00a3,	# POUND SIGN
+	0x00b2: 0x00a5,	# YEN SIGN
+	0x00b3: 0x00b7,	# MIDDLE DOT
+	0x00b4: 0x00a9,	# COPYRIGHT SIGN
+	0x00b5: 0x00a7,	# SECTION SIGN
+	0x00b7: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00b8: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00b9: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00ba: 0x00ac,	# NOT SIGN
+	0x00bb: 0x007c,	# VERTICAL LINE
+	0x00bc: 0x00af,	# MACRON
+	0x00bd: 0x00a8,	# DIAERESIS
+	0x00be: 0x00b4,	# ACUTE ACCENT
+	0x00bf: 0x00d7,	# MULTIPLICATION SIGN
+	0x00c0: 0x007b,	# LEFT CURLY BRACKET
+	0x00c1: 0x0041,	# LATIN CAPITAL LETTER A
+	0x00c2: 0x0042,	# LATIN CAPITAL LETTER B
+	0x00c3: 0x0043,	# LATIN CAPITAL LETTER C
+	0x00c4: 0x0044,	# LATIN CAPITAL LETTER D
+	0x00c5: 0x0045,	# LATIN CAPITAL LETTER E
+	0x00c6: 0x0046,	# LATIN CAPITAL LETTER F
+	0x00c7: 0x0047,	# LATIN CAPITAL LETTER G
+	0x00c8: 0x0048,	# LATIN CAPITAL LETTER H
+	0x00c9: 0x0049,	# LATIN CAPITAL LETTER I
+	0x00ca: 0x00ad,	# SOFT HYPHEN
+	0x00cb: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x00cc: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x00cd: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x00ce: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00cf: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00d0: 0x007d,	# RIGHT CURLY BRACKET
+	0x00d1: 0x004a,	# LATIN CAPITAL LETTER J
+	0x00d2: 0x004b,	# LATIN CAPITAL LETTER K
+	0x00d3: 0x004c,	# LATIN CAPITAL LETTER L
+	0x00d4: 0x004d,	# LATIN CAPITAL LETTER M
+	0x00d5: 0x004e,	# LATIN CAPITAL LETTER N
+	0x00d6: 0x004f,	# LATIN CAPITAL LETTER O
+	0x00d7: 0x0050,	# LATIN CAPITAL LETTER P
+	0x00d8: 0x0051,	# LATIN CAPITAL LETTER Q
+	0x00d9: 0x0052,	# LATIN CAPITAL LETTER R
+	0x00da: 0x00b9,	# SUPERSCRIPT ONE
+	0x00db: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x00dc: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00dd: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x00de: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00df: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00e0: 0x005c,	# REVERSE SOLIDUS
+	0x00e1: 0x00f7,	# DIVISION SIGN
+	0x00e2: 0x0053,	# LATIN CAPITAL LETTER S
+	0x00e3: 0x0054,	# LATIN CAPITAL LETTER T
+	0x00e4: 0x0055,	# LATIN CAPITAL LETTER U
+	0x00e5: 0x0056,	# LATIN CAPITAL LETTER V
+	0x00e6: 0x0057,	# LATIN CAPITAL LETTER W
+	0x00e7: 0x0058,	# LATIN CAPITAL LETTER X
+	0x00e8: 0x0059,	# LATIN CAPITAL LETTER Y
+	0x00e9: 0x005a,	# LATIN CAPITAL LETTER Z
+	0x00ea: 0x00b2,	# SUPERSCRIPT TWO
+	0x00eb: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00ec: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x00ed: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00f0: 0x0030,	# DIGIT ZERO
+	0x00f1: 0x0031,	# DIGIT ONE
+	0x00f2: 0x0032,	# DIGIT TWO
+	0x00f3: 0x0033,	# DIGIT THREE
+	0x00f4: 0x0034,	# DIGIT FOUR
+	0x00f5: 0x0035,	# DIGIT FIVE
+	0x00f6: 0x0036,	# DIGIT SIX
+	0x00f7: 0x0037,	# DIGIT SEVEN
+	0x00f8: 0x0038,	# DIGIT EIGHT
+	0x00f9: 0x0039,	# DIGIT NINE
+	0x00fa: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fb: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00fc: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x00fd: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00fe: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ff: 0x009f,	# CONTROL
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp737.py b/lib-python/2.2/encodings/cp737.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp737.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP737.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0391,	# GREEK CAPITAL LETTER ALPHA
+	0x0081: 0x0392,	# GREEK CAPITAL LETTER BETA
+	0x0082: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x0083: 0x0394,	# GREEK CAPITAL LETTER DELTA
+	0x0084: 0x0395,	# GREEK CAPITAL LETTER EPSILON
+	0x0085: 0x0396,	# GREEK CAPITAL LETTER ZETA
+	0x0086: 0x0397,	# GREEK CAPITAL LETTER ETA
+	0x0087: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x0088: 0x0399,	# GREEK CAPITAL LETTER IOTA
+	0x0089: 0x039a,	# GREEK CAPITAL LETTER KAPPA
+	0x008a: 0x039b,	# GREEK CAPITAL LETTER LAMDA
+	0x008b: 0x039c,	# GREEK CAPITAL LETTER MU
+	0x008c: 0x039d,	# GREEK CAPITAL LETTER NU
+	0x008d: 0x039e,	# GREEK CAPITAL LETTER XI
+	0x008e: 0x039f,	# GREEK CAPITAL LETTER OMICRON
+	0x008f: 0x03a0,	# GREEK CAPITAL LETTER PI
+	0x0090: 0x03a1,	# GREEK CAPITAL LETTER RHO
+	0x0091: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x0092: 0x03a4,	# GREEK CAPITAL LETTER TAU
+	0x0093: 0x03a5,	# GREEK CAPITAL LETTER UPSILON
+	0x0094: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x0095: 0x03a7,	# GREEK CAPITAL LETTER CHI
+	0x0096: 0x03a8,	# GREEK CAPITAL LETTER PSI
+	0x0097: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x0098: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x0099: 0x03b2,	# GREEK SMALL LETTER BETA
+	0x009a: 0x03b3,	# GREEK SMALL LETTER GAMMA
+	0x009b: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x009c: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x009d: 0x03b6,	# GREEK SMALL LETTER ZETA
+	0x009e: 0x03b7,	# GREEK SMALL LETTER ETA
+	0x009f: 0x03b8,	# GREEK SMALL LETTER THETA
+	0x00a0: 0x03b9,	# GREEK SMALL LETTER IOTA
+	0x00a1: 0x03ba,	# GREEK SMALL LETTER KAPPA
+	0x00a2: 0x03bb,	# GREEK SMALL LETTER LAMDA
+	0x00a3: 0x03bc,	# GREEK SMALL LETTER MU
+	0x00a4: 0x03bd,	# GREEK SMALL LETTER NU
+	0x00a5: 0x03be,	# GREEK SMALL LETTER XI
+	0x00a6: 0x03bf,	# GREEK SMALL LETTER OMICRON
+	0x00a7: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00a8: 0x03c1,	# GREEK SMALL LETTER RHO
+	0x00a9: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00aa: 0x03c2,	# GREEK SMALL LETTER FINAL SIGMA
+	0x00ab: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00ac: 0x03c5,	# GREEK SMALL LETTER UPSILON
+	0x00ad: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ae: 0x03c7,	# GREEK SMALL LETTER CHI
+	0x00af: 0x03c8,	# GREEK SMALL LETTER PSI
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03c9,	# GREEK SMALL LETTER OMEGA
+	0x00e1: 0x03ac,	# GREEK SMALL LETTER ALPHA WITH TONOS
+	0x00e2: 0x03ad,	# GREEK SMALL LETTER EPSILON WITH TONOS
+	0x00e3: 0x03ae,	# GREEK SMALL LETTER ETA WITH TONOS
+	0x00e4: 0x03ca,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00e5: 0x03af,	# GREEK SMALL LETTER IOTA WITH TONOS
+	0x00e6: 0x03cc,	# GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00e7: 0x03cd,	# GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00e8: 0x03cb,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00e9: 0x03ce,	# GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00ea: 0x0386,	# GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x00eb: 0x0388,	# GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x00ec: 0x0389,	# GREEK CAPITAL LETTER ETA WITH TONOS
+	0x00ed: 0x038a,	# GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x00ee: 0x038c,	# GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x00ef: 0x038e,	# GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x00f0: 0x038f,	# GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x03aa,	# GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x00f5: 0x03ab,	# GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp775.py b/lib-python/2.2/encodings/cp775.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp775.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP775.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0106,	# LATIN CAPITAL LETTER C WITH ACUTE
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x0101,	# LATIN SMALL LETTER A WITH MACRON
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x0123,	# LATIN SMALL LETTER G WITH CEDILLA
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x0107,	# LATIN SMALL LETTER C WITH ACUTE
+	0x0088: 0x0142,	# LATIN SMALL LETTER L WITH STROKE
+	0x0089: 0x0113,	# LATIN SMALL LETTER E WITH MACRON
+	0x008a: 0x0156,	# LATIN CAPITAL LETTER R WITH CEDILLA
+	0x008b: 0x0157,	# LATIN SMALL LETTER R WITH CEDILLA
+	0x008c: 0x012b,	# LATIN SMALL LETTER I WITH MACRON
+	0x008d: 0x0179,	# LATIN CAPITAL LETTER Z WITH ACUTE
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x014d,	# LATIN SMALL LETTER O WITH MACRON
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x0122,	# LATIN CAPITAL LETTER G WITH CEDILLA
+	0x0096: 0x00a2,	# CENT SIGN
+	0x0097: 0x015a,	# LATIN CAPITAL LETTER S WITH ACUTE
+	0x0098: 0x015b,	# LATIN SMALL LETTER S WITH ACUTE
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x009e: 0x00d7,	# MULTIPLICATION SIGN
+	0x009f: 0x00a4,	# CURRENCY SIGN
+	0x00a0: 0x0100,	# LATIN CAPITAL LETTER A WITH MACRON
+	0x00a1: 0x012a,	# LATIN CAPITAL LETTER I WITH MACRON
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x017b,	# LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00a4: 0x017c,	# LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00a5: 0x017a,	# LATIN SMALL LETTER Z WITH ACUTE
+	0x00a6: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00a7: 0x00a6,	# BROKEN BAR
+	0x00a8: 0x00a9,	# COPYRIGHT SIGN
+	0x00a9: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x0141,	# LATIN CAPITAL LETTER L WITH STROKE
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x0104,	# LATIN CAPITAL LETTER A WITH OGONEK
+	0x00b6: 0x010c,	# LATIN CAPITAL LETTER C WITH CARON
+	0x00b7: 0x0118,	# LATIN CAPITAL LETTER E WITH OGONEK
+	0x00b8: 0x0116,	# LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x012e,	# LATIN CAPITAL LETTER I WITH OGONEK
+	0x00be: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x0172,	# LATIN CAPITAL LETTER U WITH OGONEK
+	0x00c7: 0x016a,	# LATIN CAPITAL LETTER U WITH MACRON
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x00d0: 0x0105,	# LATIN SMALL LETTER A WITH OGONEK
+	0x00d1: 0x010d,	# LATIN SMALL LETTER C WITH CARON
+	0x00d2: 0x0119,	# LATIN SMALL LETTER E WITH OGONEK
+	0x00d3: 0x0117,	# LATIN SMALL LETTER E WITH DOT ABOVE
+	0x00d4: 0x012f,	# LATIN SMALL LETTER I WITH OGONEK
+	0x00d5: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x00d6: 0x0173,	# LATIN SMALL LETTER U WITH OGONEK
+	0x00d7: 0x016b,	# LATIN SMALL LETTER U WITH MACRON
+	0x00d8: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S (GERMAN)
+	0x00e2: 0x014c,	# LATIN CAPITAL LETTER O WITH MACRON
+	0x00e3: 0x0143,	# LATIN CAPITAL LETTER N WITH ACUTE
+	0x00e4: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00e5: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x0144,	# LATIN SMALL LETTER N WITH ACUTE
+	0x00e8: 0x0136,	# LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00e9: 0x0137,	# LATIN SMALL LETTER K WITH CEDILLA
+	0x00ea: 0x013b,	# LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00eb: 0x013c,	# LATIN SMALL LETTER L WITH CEDILLA
+	0x00ec: 0x0146,	# LATIN SMALL LETTER N WITH CEDILLA
+	0x00ed: 0x0112,	# LATIN CAPITAL LETTER E WITH MACRON
+	0x00ee: 0x0145,	# LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00ef: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00f3: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00f4: 0x00b6,	# PILCROW SIGN
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x00b9,	# SUPERSCRIPT ONE
+	0x00fc: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp850.py b/lib-python/2.2/encodings/cp850.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp850.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP850.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x009e: 0x00d7,	# MULTIPLICATION SIGN
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00a7: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00b6: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00b7: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x00b8: 0x00a9,	# COPYRIGHT SIGN
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x00a2,	# CENT SIGN
+	0x00be: 0x00a5,	# YEN SIGN
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x00c7: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x00a4,	# CURRENCY SIGN
+	0x00d0: 0x00f0,	# LATIN SMALL LETTER ETH
+	0x00d1: 0x00d0,	# LATIN CAPITAL LETTER ETH
+	0x00d2: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x00d3: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00d4: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x00d5: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x00d6: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00d7: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00d8: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x00a6,	# BROKEN BAR
+	0x00de: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00e3: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00e4: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00e5: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x00fe,	# LATIN SMALL LETTER THORN
+	0x00e8: 0x00de,	# LATIN CAPITAL LETTER THORN
+	0x00e9: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ea: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00eb: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00ec: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x00ed: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00ee: 0x00af,	# MACRON
+	0x00ef: 0x00b4,	# ACUTE ACCENT
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2017,	# DOUBLE LOW LINE
+	0x00f3: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00f4: 0x00b6,	# PILCROW SIGN
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x00b8,	# CEDILLA
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x00a8,	# DIAERESIS
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x00b9,	# SUPERSCRIPT ONE
+	0x00fc: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp852.py b/lib-python/2.2/encodings/cp852.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp852.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP852.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x016f,	# LATIN SMALL LETTER U WITH RING ABOVE
+	0x0086: 0x0107,	# LATIN SMALL LETTER C WITH ACUTE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x0142,	# LATIN SMALL LETTER L WITH STROKE
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x0150,	# LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+	0x008b: 0x0151,	# LATIN SMALL LETTER O WITH DOUBLE ACUTE
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x0179,	# LATIN CAPITAL LETTER Z WITH ACUTE
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x0106,	# LATIN CAPITAL LETTER C WITH ACUTE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x0139,	# LATIN CAPITAL LETTER L WITH ACUTE
+	0x0092: 0x013a,	# LATIN SMALL LETTER L WITH ACUTE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x013d,	# LATIN CAPITAL LETTER L WITH CARON
+	0x0096: 0x013e,	# LATIN SMALL LETTER L WITH CARON
+	0x0097: 0x015a,	# LATIN CAPITAL LETTER S WITH ACUTE
+	0x0098: 0x015b,	# LATIN SMALL LETTER S WITH ACUTE
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x0164,	# LATIN CAPITAL LETTER T WITH CARON
+	0x009c: 0x0165,	# LATIN SMALL LETTER T WITH CARON
+	0x009d: 0x0141,	# LATIN CAPITAL LETTER L WITH STROKE
+	0x009e: 0x00d7,	# MULTIPLICATION SIGN
+	0x009f: 0x010d,	# LATIN SMALL LETTER C WITH CARON
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x0104,	# LATIN CAPITAL LETTER A WITH OGONEK
+	0x00a5: 0x0105,	# LATIN SMALL LETTER A WITH OGONEK
+	0x00a6: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x00a7: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x00a8: 0x0118,	# LATIN CAPITAL LETTER E WITH OGONEK
+	0x00a9: 0x0119,	# LATIN SMALL LETTER E WITH OGONEK
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x017a,	# LATIN SMALL LETTER Z WITH ACUTE
+	0x00ac: 0x010c,	# LATIN CAPITAL LETTER C WITH CARON
+	0x00ad: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00b6: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00b7: 0x011a,	# LATIN CAPITAL LETTER E WITH CARON
+	0x00b8: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x017b,	# LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00be: 0x017c,	# LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x0102,	# LATIN CAPITAL LETTER A WITH BREVE
+	0x00c7: 0x0103,	# LATIN SMALL LETTER A WITH BREVE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x00a4,	# CURRENCY SIGN
+	0x00d0: 0x0111,	# LATIN SMALL LETTER D WITH STROKE
+	0x00d1: 0x0110,	# LATIN CAPITAL LETTER D WITH STROKE
+	0x00d2: 0x010e,	# LATIN CAPITAL LETTER D WITH CARON
+	0x00d3: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00d4: 0x010f,	# LATIN SMALL LETTER D WITH CARON
+	0x00d5: 0x0147,	# LATIN CAPITAL LETTER N WITH CARON
+	0x00d6: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00d7: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00d8: 0x011b,	# LATIN SMALL LETTER E WITH CARON
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x0162,	# LATIN CAPITAL LETTER T WITH CEDILLA
+	0x00de: 0x016e,	# LATIN CAPITAL LETTER U WITH RING ABOVE
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00e3: 0x0143,	# LATIN CAPITAL LETTER N WITH ACUTE
+	0x00e4: 0x0144,	# LATIN SMALL LETTER N WITH ACUTE
+	0x00e5: 0x0148,	# LATIN SMALL LETTER N WITH CARON
+	0x00e6: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x00e7: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x00e8: 0x0154,	# LATIN CAPITAL LETTER R WITH ACUTE
+	0x00e9: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ea: 0x0155,	# LATIN SMALL LETTER R WITH ACUTE
+	0x00eb: 0x0170,	# LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+	0x00ec: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x00ed: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00ee: 0x0163,	# LATIN SMALL LETTER T WITH CEDILLA
+	0x00ef: 0x00b4,	# ACUTE ACCENT
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x02dd,	# DOUBLE ACUTE ACCENT
+	0x00f2: 0x02db,	# OGONEK
+	0x00f3: 0x02c7,	# CARON
+	0x00f4: 0x02d8,	# BREVE
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x00b8,	# CEDILLA
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x00a8,	# DIAERESIS
+	0x00fa: 0x02d9,	# DOT ABOVE
+	0x00fb: 0x0171,	# LATIN SMALL LETTER U WITH DOUBLE ACUTE
+	0x00fc: 0x0158,	# LATIN CAPITAL LETTER R WITH CARON
+	0x00fd: 0x0159,	# LATIN SMALL LETTER R WITH CARON
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp855.py b/lib-python/2.2/encodings/cp855.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp855.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP855.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0452,	# CYRILLIC SMALL LETTER DJE
+	0x0081: 0x0402,	# CYRILLIC CAPITAL LETTER DJE
+	0x0082: 0x0453,	# CYRILLIC SMALL LETTER GJE
+	0x0083: 0x0403,	# CYRILLIC CAPITAL LETTER GJE
+	0x0084: 0x0451,	# CYRILLIC SMALL LETTER IO
+	0x0085: 0x0401,	# CYRILLIC CAPITAL LETTER IO
+	0x0086: 0x0454,	# CYRILLIC SMALL LETTER UKRAINIAN IE
+	0x0087: 0x0404,	# CYRILLIC CAPITAL LETTER UKRAINIAN IE
+	0x0088: 0x0455,	# CYRILLIC SMALL LETTER DZE
+	0x0089: 0x0405,	# CYRILLIC CAPITAL LETTER DZE
+	0x008a: 0x0456,	# CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x008b: 0x0406,	# CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x008c: 0x0457,	# CYRILLIC SMALL LETTER YI
+	0x008d: 0x0407,	# CYRILLIC CAPITAL LETTER YI
+	0x008e: 0x0458,	# CYRILLIC SMALL LETTER JE
+	0x008f: 0x0408,	# CYRILLIC CAPITAL LETTER JE
+	0x0090: 0x0459,	# CYRILLIC SMALL LETTER LJE
+	0x0091: 0x0409,	# CYRILLIC CAPITAL LETTER LJE
+	0x0092: 0x045a,	# CYRILLIC SMALL LETTER NJE
+	0x0093: 0x040a,	# CYRILLIC CAPITAL LETTER NJE
+	0x0094: 0x045b,	# CYRILLIC SMALL LETTER TSHE
+	0x0095: 0x040b,	# CYRILLIC CAPITAL LETTER TSHE
+	0x0096: 0x045c,	# CYRILLIC SMALL LETTER KJE
+	0x0097: 0x040c,	# CYRILLIC CAPITAL LETTER KJE
+	0x0098: 0x045e,	# CYRILLIC SMALL LETTER SHORT U
+	0x0099: 0x040e,	# CYRILLIC CAPITAL LETTER SHORT U
+	0x009a: 0x045f,	# CYRILLIC SMALL LETTER DZHE
+	0x009b: 0x040f,	# CYRILLIC CAPITAL LETTER DZHE
+	0x009c: 0x044e,	# CYRILLIC SMALL LETTER YU
+	0x009d: 0x042e,	# CYRILLIC CAPITAL LETTER YU
+	0x009e: 0x044a,	# CYRILLIC SMALL LETTER HARD SIGN
+	0x009f: 0x042a,	# CYRILLIC CAPITAL LETTER HARD SIGN
+	0x00a0: 0x0430,	# CYRILLIC SMALL LETTER A
+	0x00a1: 0x0410,	# CYRILLIC CAPITAL LETTER A
+	0x00a2: 0x0431,	# CYRILLIC SMALL LETTER BE
+	0x00a3: 0x0411,	# CYRILLIC CAPITAL LETTER BE
+	0x00a4: 0x0446,	# CYRILLIC SMALL LETTER TSE
+	0x00a5: 0x0426,	# CYRILLIC CAPITAL LETTER TSE
+	0x00a6: 0x0434,	# CYRILLIC SMALL LETTER DE
+	0x00a7: 0x0414,	# CYRILLIC CAPITAL LETTER DE
+	0x00a8: 0x0435,	# CYRILLIC SMALL LETTER IE
+	0x00a9: 0x0415,	# CYRILLIC CAPITAL LETTER IE
+	0x00aa: 0x0444,	# CYRILLIC SMALL LETTER EF
+	0x00ab: 0x0424,	# CYRILLIC CAPITAL LETTER EF
+	0x00ac: 0x0433,	# CYRILLIC SMALL LETTER GHE
+	0x00ad: 0x0413,	# CYRILLIC CAPITAL LETTER GHE
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x0445,	# CYRILLIC SMALL LETTER HA
+	0x00b6: 0x0425,	# CYRILLIC CAPITAL LETTER HA
+	0x00b7: 0x0438,	# CYRILLIC SMALL LETTER I
+	0x00b8: 0x0418,	# CYRILLIC CAPITAL LETTER I
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x0439,	# CYRILLIC SMALL LETTER SHORT I
+	0x00be: 0x0419,	# CYRILLIC CAPITAL LETTER SHORT I
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x043a,	# CYRILLIC SMALL LETTER KA
+	0x00c7: 0x041a,	# CYRILLIC CAPITAL LETTER KA
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x00a4,	# CURRENCY SIGN
+	0x00d0: 0x043b,	# CYRILLIC SMALL LETTER EL
+	0x00d1: 0x041b,	# CYRILLIC CAPITAL LETTER EL
+	0x00d2: 0x043c,	# CYRILLIC SMALL LETTER EM
+	0x00d3: 0x041c,	# CYRILLIC CAPITAL LETTER EM
+	0x00d4: 0x043d,	# CYRILLIC SMALL LETTER EN
+	0x00d5: 0x041d,	# CYRILLIC CAPITAL LETTER EN
+	0x00d6: 0x043e,	# CYRILLIC SMALL LETTER O
+	0x00d7: 0x041e,	# CYRILLIC CAPITAL LETTER O
+	0x00d8: 0x043f,	# CYRILLIC SMALL LETTER PE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x041f,	# CYRILLIC CAPITAL LETTER PE
+	0x00de: 0x044f,	# CYRILLIC SMALL LETTER YA
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x042f,	# CYRILLIC CAPITAL LETTER YA
+	0x00e1: 0x0440,	# CYRILLIC SMALL LETTER ER
+	0x00e2: 0x0420,	# CYRILLIC CAPITAL LETTER ER
+	0x00e3: 0x0441,	# CYRILLIC SMALL LETTER ES
+	0x00e4: 0x0421,	# CYRILLIC CAPITAL LETTER ES
+	0x00e5: 0x0442,	# CYRILLIC SMALL LETTER TE
+	0x00e6: 0x0422,	# CYRILLIC CAPITAL LETTER TE
+	0x00e7: 0x0443,	# CYRILLIC SMALL LETTER U
+	0x00e8: 0x0423,	# CYRILLIC CAPITAL LETTER U
+	0x00e9: 0x0436,	# CYRILLIC SMALL LETTER ZHE
+	0x00ea: 0x0416,	# CYRILLIC CAPITAL LETTER ZHE
+	0x00eb: 0x0432,	# CYRILLIC SMALL LETTER VE
+	0x00ec: 0x0412,	# CYRILLIC CAPITAL LETTER VE
+	0x00ed: 0x044c,	# CYRILLIC SMALL LETTER SOFT SIGN
+	0x00ee: 0x042c,	# CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x00ef: 0x2116,	# NUMERO SIGN
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x044b,	# CYRILLIC SMALL LETTER YERU
+	0x00f2: 0x042b,	# CYRILLIC CAPITAL LETTER YERU
+	0x00f3: 0x0437,	# CYRILLIC SMALL LETTER ZE
+	0x00f4: 0x0417,	# CYRILLIC CAPITAL LETTER ZE
+	0x00f5: 0x0448,	# CYRILLIC SMALL LETTER SHA
+	0x00f6: 0x0428,	# CYRILLIC CAPITAL LETTER SHA
+	0x00f7: 0x044d,	# CYRILLIC SMALL LETTER E
+	0x00f8: 0x042d,	# CYRILLIC CAPITAL LETTER E
+	0x00f9: 0x0449,	# CYRILLIC SMALL LETTER SHCHA
+	0x00fa: 0x0429,	# CYRILLIC CAPITAL LETTER SHCHA
+	0x00fb: 0x0447,	# CYRILLIC SMALL LETTER CHE
+	0x00fc: 0x0427,	# CYRILLIC CAPITAL LETTER CHE
+	0x00fd: 0x00a7,	# SECTION SIGN
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp856.py b/lib-python/2.2/encodings/cp856.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp856.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP856.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x05d0,	# HEBREW LETTER ALEF
+	0x0081: 0x05d1,	# HEBREW LETTER BET
+	0x0082: 0x05d2,	# HEBREW LETTER GIMEL
+	0x0083: 0x05d3,	# HEBREW LETTER DALET
+	0x0084: 0x05d4,	# HEBREW LETTER HE
+	0x0085: 0x05d5,	# HEBREW LETTER VAV
+	0x0086: 0x05d6,	# HEBREW LETTER ZAYIN
+	0x0087: 0x05d7,	# HEBREW LETTER HET
+	0x0088: 0x05d8,	# HEBREW LETTER TET
+	0x0089: 0x05d9,	# HEBREW LETTER YOD
+	0x008a: 0x05da,	# HEBREW LETTER FINAL KAF
+	0x008b: 0x05db,	# HEBREW LETTER KAF
+	0x008c: 0x05dc,	# HEBREW LETTER LAMED
+	0x008d: 0x05dd,	# HEBREW LETTER FINAL MEM
+	0x008e: 0x05de,	# HEBREW LETTER MEM
+	0x008f: 0x05df,	# HEBREW LETTER FINAL NUN
+	0x0090: 0x05e0,	# HEBREW LETTER NUN
+	0x0091: 0x05e1,	# HEBREW LETTER SAMEKH
+	0x0092: 0x05e2,	# HEBREW LETTER AYIN
+	0x0093: 0x05e3,	# HEBREW LETTER FINAL PE
+	0x0094: 0x05e4,	# HEBREW LETTER PE
+	0x0095: 0x05e5,	# HEBREW LETTER FINAL TSADI
+	0x0096: 0x05e6,	# HEBREW LETTER TSADI
+	0x0097: 0x05e7,	# HEBREW LETTER QOF
+	0x0098: 0x05e8,	# HEBREW LETTER RESH
+	0x0099: 0x05e9,	# HEBREW LETTER SHIN
+	0x009a: 0x05ea,	# HEBREW LETTER TAV
+	0x009b: None,	# UNDEFINED
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: None,	# UNDEFINED
+	0x009e: 0x00d7,	# MULTIPLICATION SIGN
+	0x009f: None,	# UNDEFINED
+	0x00a0: None,	# UNDEFINED
+	0x00a1: None,	# UNDEFINED
+	0x00a2: None,	# UNDEFINED
+	0x00a3: None,	# UNDEFINED
+	0x00a4: None,	# UNDEFINED
+	0x00a5: None,	# UNDEFINED
+	0x00a6: None,	# UNDEFINED
+	0x00a7: None,	# UNDEFINED
+	0x00a8: None,	# UNDEFINED
+	0x00a9: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: None,	# UNDEFINED
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: None,	# UNDEFINED
+	0x00b6: None,	# UNDEFINED
+	0x00b7: None,	# UNDEFINED
+	0x00b8: 0x00a9,	# COPYRIGHT SIGN
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x00a2,	# CENT SIGN
+	0x00be: 0x00a5,	# YEN SIGN
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: None,	# UNDEFINED
+	0x00c7: None,	# UNDEFINED
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x00a4,	# CURRENCY SIGN
+	0x00d0: None,	# UNDEFINED
+	0x00d1: None,	# UNDEFINED
+	0x00d2: None,	# UNDEFINED
+	0x00d3: None,	# UNDEFINED
+	0x00d4: None,	# UNDEFINED
+	0x00d5: None,	# UNDEFINED
+	0x00d6: None,	# UNDEFINED
+	0x00d7: None,	# UNDEFINED
+	0x00d8: None,	# UNDEFINED
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x00a6,	# BROKEN BAR
+	0x00de: None,	# UNDEFINED
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: None,	# UNDEFINED
+	0x00e1: None,	# UNDEFINED
+	0x00e2: None,	# UNDEFINED
+	0x00e3: None,	# UNDEFINED
+	0x00e4: None,	# UNDEFINED
+	0x00e5: None,	# UNDEFINED
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: None,	# UNDEFINED
+	0x00e8: None,	# UNDEFINED
+	0x00e9: None,	# UNDEFINED
+	0x00ea: None,	# UNDEFINED
+	0x00eb: None,	# UNDEFINED
+	0x00ec: None,	# UNDEFINED
+	0x00ed: None,	# UNDEFINED
+	0x00ee: 0x00af,	# MACRON
+	0x00ef: 0x00b4,	# ACUTE ACCENT
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2017,	# DOUBLE LOW LINE
+	0x00f3: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00f4: 0x00b6,	# PILCROW SIGN
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x00b8,	# CEDILLA
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x00a8,	# DIAERESIS
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x00b9,	# SUPERSCRIPT ONE
+	0x00fc: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp857.py b/lib-python/2.2/encodings/cp857.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp857.py
@@ -0,0 +1,171 @@
+""" Python Character Mapping Codec generated from 'CP857.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x0130,	# LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x009e: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x009f: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x011e,	# LATIN CAPITAL LETTER G WITH BREVE
+	0x00a7: 0x011f,	# LATIN SMALL LETTER G WITH BREVE
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00b6: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00b7: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x00b8: 0x00a9,	# COPYRIGHT SIGN
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x00a2,	# CENT SIGN
+	0x00be: 0x00a5,	# YEN SIGN
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x00c7: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x00a4,	# CURRENCY SIGN
+	0x00d0: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00d1: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00d2: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x00d3: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00d4: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x00d5: None,	# UNDEFINED
+	0x00d6: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00d7: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00d8: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x00a6,	# BROKEN BAR
+	0x00de: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00e3: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00e4: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x00e5: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: None,	# UNDEFINED
+	0x00e8: 0x00d7,	# MULTIPLICATION SIGN
+	0x00e9: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00ea: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00eb: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00ed: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00ee: 0x00af,	# MACRON
+	0x00ef: 0x00b4,	# ACUTE ACCENT
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: None,	# UNDEFINED
+	0x00f3: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00f4: 0x00b6,	# PILCROW SIGN
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x00b8,	# CEDILLA
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x00a8,	# DIAERESIS
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x00b9,	# SUPERSCRIPT ONE
+	0x00fc: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp860.py b/lib-python/2.2/encodings/cp860.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp860.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP860.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x008c: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x008d: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x008e: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x008f: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x0092: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x0095: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0096: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x0099: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00a2,	# CENT SIGN
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x009e: 0x20a7,	# PESETA SIGN
+	0x009f: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00a7: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp861.py b/lib-python/2.2/encodings/cp861.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp861.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP861.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00d0,	# LATIN CAPITAL LETTER ETH
+	0x008c: 0x00f0,	# LATIN SMALL LETTER ETH
+	0x008d: 0x00de,	# LATIN CAPITAL LETTER THORN
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x00fe,	# LATIN SMALL LETTER THORN
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x0098: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x009e: 0x20a7,	# PESETA SIGN
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00a5: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00a6: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00a7: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x2310,	# REVERSED NOT SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp862.py b/lib-python/2.2/encodings/cp862.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp862.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP862.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x05d0,	# HEBREW LETTER ALEF
+	0x0081: 0x05d1,	# HEBREW LETTER BET
+	0x0082: 0x05d2,	# HEBREW LETTER GIMEL
+	0x0083: 0x05d3,	# HEBREW LETTER DALET
+	0x0084: 0x05d4,	# HEBREW LETTER HE
+	0x0085: 0x05d5,	# HEBREW LETTER VAV
+	0x0086: 0x05d6,	# HEBREW LETTER ZAYIN
+	0x0087: 0x05d7,	# HEBREW LETTER HET
+	0x0088: 0x05d8,	# HEBREW LETTER TET
+	0x0089: 0x05d9,	# HEBREW LETTER YOD
+	0x008a: 0x05da,	# HEBREW LETTER FINAL KAF
+	0x008b: 0x05db,	# HEBREW LETTER KAF
+	0x008c: 0x05dc,	# HEBREW LETTER LAMED
+	0x008d: 0x05dd,	# HEBREW LETTER FINAL MEM
+	0x008e: 0x05de,	# HEBREW LETTER MEM
+	0x008f: 0x05df,	# HEBREW LETTER FINAL NUN
+	0x0090: 0x05e0,	# HEBREW LETTER NUN
+	0x0091: 0x05e1,	# HEBREW LETTER SAMEKH
+	0x0092: 0x05e2,	# HEBREW LETTER AYIN
+	0x0093: 0x05e3,	# HEBREW LETTER FINAL PE
+	0x0094: 0x05e4,	# HEBREW LETTER PE
+	0x0095: 0x05e5,	# HEBREW LETTER FINAL TSADI
+	0x0096: 0x05e6,	# HEBREW LETTER TSADI
+	0x0097: 0x05e7,	# HEBREW LETTER QOF
+	0x0098: 0x05e8,	# HEBREW LETTER RESH
+	0x0099: 0x05e9,	# HEBREW LETTER SHIN
+	0x009a: 0x05ea,	# HEBREW LETTER TAV
+	0x009b: 0x00a2,	# CENT SIGN
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00a5,	# YEN SIGN
+	0x009e: 0x20a7,	# PESETA SIGN
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00a7: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x2310,	# REVERSED NOT SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S (GERMAN)
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp863.py b/lib-python/2.2/encodings/cp863.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp863.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP863.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00b6,	# PILCROW SIGN
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x2017,	# DOUBLE LOW LINE
+	0x008e: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x008f: 0x00a7,	# SECTION SIGN
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x0092: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x0095: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x00a4,	# CURRENCY SIGN
+	0x0099: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00a2,	# CENT SIGN
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x009e: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00a6,	# BROKEN BAR
+	0x00a1: 0x00b4,	# ACUTE ACCENT
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00a8,	# DIAERESIS
+	0x00a5: 0x00b8,	# CEDILLA
+	0x00a6: 0x00b3,	# SUPERSCRIPT THREE
+	0x00a7: 0x00af,	# MACRON
+	0x00a8: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00a9: 0x2310,	# REVERSED NOT SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00be,	# VULGAR FRACTION THREE QUARTERS
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp864.py b/lib-python/2.2/encodings/cp864.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp864.py
@@ -0,0 +1,170 @@
+""" Python Character Mapping Codec generated from 'CP864.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0025: 0x066a,	# ARABIC PERCENT SIGN
+	0x0080: 0x00b0,	# DEGREE SIGN
+	0x0081: 0x00b7,	# MIDDLE DOT
+	0x0082: 0x2219,	# BULLET OPERATOR
+	0x0083: 0x221a,	# SQUARE ROOT
+	0x0084: 0x2592,	# MEDIUM SHADE
+	0x0085: 0x2500,	# FORMS LIGHT HORIZONTAL
+	0x0086: 0x2502,	# FORMS LIGHT VERTICAL
+	0x0087: 0x253c,	# FORMS LIGHT VERTICAL AND HORIZONTAL
+	0x0088: 0x2524,	# FORMS LIGHT VERTICAL AND LEFT
+	0x0089: 0x252c,	# FORMS LIGHT DOWN AND HORIZONTAL
+	0x008a: 0x251c,	# FORMS LIGHT VERTICAL AND RIGHT
+	0x008b: 0x2534,	# FORMS LIGHT UP AND HORIZONTAL
+	0x008c: 0x2510,	# FORMS LIGHT DOWN AND LEFT
+	0x008d: 0x250c,	# FORMS LIGHT DOWN AND RIGHT
+	0x008e: 0x2514,	# FORMS LIGHT UP AND RIGHT
+	0x008f: 0x2518,	# FORMS LIGHT UP AND LEFT
+	0x0090: 0x03b2,	# GREEK SMALL BETA
+	0x0091: 0x221e,	# INFINITY
+	0x0092: 0x03c6,	# GREEK SMALL PHI
+	0x0093: 0x00b1,	# PLUS-OR-MINUS SIGN
+	0x0094: 0x00bd,	# FRACTION 1/2
+	0x0095: 0x00bc,	# FRACTION 1/4
+	0x0096: 0x2248,	# ALMOST EQUAL TO
+	0x0097: 0x00ab,	# LEFT POINTING GUILLEMET
+	0x0098: 0x00bb,	# RIGHT POINTING GUILLEMET
+	0x0099: 0xfef7,	# ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
+	0x009a: 0xfef8,	# ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
+	0x009b: None,	# UNDEFINED
+	0x009c: None,	# UNDEFINED
+	0x009d: 0xfefb,	# ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
+	0x009e: 0xfefc,	# ARABIC LIGATURE LAM WITH ALEF FINAL FORM
+	0x009f: None,	# UNDEFINED
+	0x00a1: 0x00ad,	# SOFT HYPHEN
+	0x00a2: 0xfe82,	# ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
+	0x00a5: 0xfe84,	# ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
+	0x00a6: None,	# UNDEFINED
+	0x00a7: None,	# UNDEFINED
+	0x00a8: 0xfe8e,	# ARABIC LETTER ALEF FINAL FORM
+	0x00a9: 0xfe8f,	# ARABIC LETTER BEH ISOLATED FORM
+	0x00aa: 0xfe95,	# ARABIC LETTER TEH ISOLATED FORM
+	0x00ab: 0xfe99,	# ARABIC LETTER THEH ISOLATED FORM
+	0x00ac: 0x060c,	# ARABIC COMMA
+	0x00ad: 0xfe9d,	# ARABIC LETTER JEEM ISOLATED FORM
+	0x00ae: 0xfea1,	# ARABIC LETTER HAH ISOLATED FORM
+	0x00af: 0xfea5,	# ARABIC LETTER KHAH ISOLATED FORM
+	0x00b0: 0x0660,	# ARABIC-INDIC DIGIT ZERO
+	0x00b1: 0x0661,	# ARABIC-INDIC DIGIT ONE
+	0x00b2: 0x0662,	# ARABIC-INDIC DIGIT TWO
+	0x00b3: 0x0663,	# ARABIC-INDIC DIGIT THREE
+	0x00b4: 0x0664,	# ARABIC-INDIC DIGIT FOUR
+	0x00b5: 0x0665,	# ARABIC-INDIC DIGIT FIVE
+	0x00b6: 0x0666,	# ARABIC-INDIC DIGIT SIX
+	0x00b7: 0x0667,	# ARABIC-INDIC DIGIT SEVEN
+	0x00b8: 0x0668,	# ARABIC-INDIC DIGIT EIGHT
+	0x00b9: 0x0669,	# ARABIC-INDIC DIGIT NINE
+	0x00ba: 0xfed1,	# ARABIC LETTER FEH ISOLATED FORM
+	0x00bb: 0x061b,	# ARABIC SEMICOLON
+	0x00bc: 0xfeb1,	# ARABIC LETTER SEEN ISOLATED FORM
+	0x00bd: 0xfeb5,	# ARABIC LETTER SHEEN ISOLATED FORM
+	0x00be: 0xfeb9,	# ARABIC LETTER SAD ISOLATED FORM
+	0x00bf: 0x061f,	# ARABIC QUESTION MARK
+	0x00c0: 0x00a2,	# CENT SIGN
+	0x00c1: 0xfe80,	# ARABIC LETTER HAMZA ISOLATED FORM
+	0x00c2: 0xfe81,	# ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+	0x00c3: 0xfe83,	# ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
+	0x00c4: 0xfe85,	# ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+	0x00c5: 0xfeca,	# ARABIC LETTER AIN FINAL FORM
+	0x00c6: 0xfe8b,	# ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+	0x00c7: 0xfe8d,	# ARABIC LETTER ALEF ISOLATED FORM
+	0x00c8: 0xfe91,	# ARABIC LETTER BEH INITIAL FORM
+	0x00c9: 0xfe93,	# ARABIC LETTER TEH MARBUTA ISOLATED FORM
+	0x00ca: 0xfe97,	# ARABIC LETTER TEH INITIAL FORM
+	0x00cb: 0xfe9b,	# ARABIC LETTER THEH INITIAL FORM
+	0x00cc: 0xfe9f,	# ARABIC LETTER JEEM INITIAL FORM
+	0x00cd: 0xfea3,	# ARABIC LETTER HAH INITIAL FORM
+	0x00ce: 0xfea7,	# ARABIC LETTER KHAH INITIAL FORM
+	0x00cf: 0xfea9,	# ARABIC LETTER DAL ISOLATED FORM
+	0x00d0: 0xfeab,	# ARABIC LETTER THAL ISOLATED FORM
+	0x00d1: 0xfead,	# ARABIC LETTER REH ISOLATED FORM
+	0x00d2: 0xfeaf,	# ARABIC LETTER ZAIN ISOLATED FORM
+	0x00d3: 0xfeb3,	# ARABIC LETTER SEEN INITIAL FORM
+	0x00d4: 0xfeb7,	# ARABIC LETTER SHEEN INITIAL FORM
+	0x00d5: 0xfebb,	# ARABIC LETTER SAD INITIAL FORM
+	0x00d6: 0xfebf,	# ARABIC LETTER DAD INITIAL FORM
+	0x00d7: 0xfec1,	# ARABIC LETTER TAH ISOLATED FORM
+	0x00d8: 0xfec5,	# ARABIC LETTER ZAH ISOLATED FORM
+	0x00d9: 0xfecb,	# ARABIC LETTER AIN INITIAL FORM
+	0x00da: 0xfecf,	# ARABIC LETTER GHAIN INITIAL FORM
+	0x00db: 0x00a6,	# BROKEN VERTICAL BAR
+	0x00dc: 0x00ac,	# NOT SIGN
+	0x00dd: 0x00f7,	# DIVISION SIGN
+	0x00de: 0x00d7,	# MULTIPLICATION SIGN
+	0x00df: 0xfec9,	# ARABIC LETTER AIN ISOLATED FORM
+	0x00e0: 0x0640,	# ARABIC TATWEEL
+	0x00e1: 0xfed3,	# ARABIC LETTER FEH INITIAL FORM
+	0x00e2: 0xfed7,	# ARABIC LETTER QAF INITIAL FORM
+	0x00e3: 0xfedb,	# ARABIC LETTER KAF INITIAL FORM
+	0x00e4: 0xfedf,	# ARABIC LETTER LAM INITIAL FORM
+	0x00e5: 0xfee3,	# ARABIC LETTER MEEM INITIAL FORM
+	0x00e6: 0xfee7,	# ARABIC LETTER NOON INITIAL FORM
+	0x00e7: 0xfeeb,	# ARABIC LETTER HEH INITIAL FORM
+	0x00e8: 0xfeed,	# ARABIC LETTER WAW ISOLATED FORM
+	0x00e9: 0xfeef,	# ARABIC LETTER ALEF MAKSURA ISOLATED FORM
+	0x00ea: 0xfef3,	# ARABIC LETTER YEH INITIAL FORM
+	0x00eb: 0xfebd,	# ARABIC LETTER DAD ISOLATED FORM
+	0x00ec: 0xfecc,	# ARABIC LETTER AIN MEDIAL FORM
+	0x00ed: 0xfece,	# ARABIC LETTER GHAIN FINAL FORM
+	0x00ee: 0xfecd,	# ARABIC LETTER GHAIN ISOLATED FORM
+	0x00ef: 0xfee1,	# ARABIC LETTER MEEM ISOLATED FORM
+	0x00f0: 0xfe7d,	# ARABIC SHADDA MEDIAL FORM
+	0x00f1: 0x0651,	# ARABIC SHADDAH
+	0x00f2: 0xfee5,	# ARABIC LETTER NOON ISOLATED FORM
+	0x00f3: 0xfee9,	# ARABIC LETTER HEH ISOLATED FORM
+	0x00f4: 0xfeec,	# ARABIC LETTER HEH MEDIAL FORM
+	0x00f5: 0xfef0,	# ARABIC LETTER ALEF MAKSURA FINAL FORM
+	0x00f6: 0xfef2,	# ARABIC LETTER YEH FINAL FORM
+	0x00f7: 0xfed0,	# ARABIC LETTER GHAIN MEDIAL FORM
+	0x00f8: 0xfed5,	# ARABIC LETTER QAF ISOLATED FORM
+	0x00f9: 0xfef5,	# ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
+	0x00fa: 0xfef6,	# ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
+	0x00fb: 0xfedd,	# ARABIC LETTER LAM ISOLATED FORM
+	0x00fc: 0xfed9,	# ARABIC LETTER KAF ISOLATED FORM
+	0x00fd: 0xfef1,	# ARABIC LETTER YEH ISOLATED FORM
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: None,	# UNDEFINED
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp865.py b/lib-python/2.2/encodings/cp865.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp865.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP865.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0081: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x0082: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x0083: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x0084: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x0085: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0086: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x0087: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x0088: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0089: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x008a: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x008b: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x008c: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x008d: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x008e: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x008f: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0090: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0091: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x0092: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x0093: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x0094: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x0095: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0096: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x0097: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x0098: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x0099: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x009a: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x009b: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x009e: 0x20a7,	# PESETA SIGN
+	0x009f: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00a0: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x00a1: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x00a2: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x00a3: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x00a4: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x00a5: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x00a6: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00a7: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00a8: 0x00bf,	# INVERTED QUESTION MARK
+	0x00a9: 0x2310,	# REVERSED NOT SIGN
+	0x00aa: 0x00ac,	# NOT SIGN
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x00bc,	# VULGAR FRACTION ONE QUARTER
+	0x00ad: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00a4,	# CURRENCY SIGN
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e1: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00e2: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00e3: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00e4: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00e5: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00e6: 0x00b5,	# MICRO SIGN
+	0x00e7: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00e8: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00e9: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ea: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00eb: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00ec: 0x221e,	# INFINITY
+	0x00ed: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00ee: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00ef: 0x2229,	# INTERSECTION
+	0x00f0: 0x2261,	# IDENTICAL TO
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00f3: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00f4: 0x2320,	# TOP HALF INTEGRAL
+	0x00f5: 0x2321,	# BOTTOM HALF INTEGRAL
+	0x00f6: 0x00f7,	# DIVISION SIGN
+	0x00f7: 0x2248,	# ALMOST EQUAL TO
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x207f,	# SUPERSCRIPT LATIN SMALL LETTER N
+	0x00fd: 0x00b2,	# SUPERSCRIPT TWO
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp866.py b/lib-python/2.2/encodings/cp866.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp866.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP866.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0410,	# CYRILLIC CAPITAL LETTER A
+	0x0081: 0x0411,	# CYRILLIC CAPITAL LETTER BE
+	0x0082: 0x0412,	# CYRILLIC CAPITAL LETTER VE
+	0x0083: 0x0413,	# CYRILLIC CAPITAL LETTER GHE
+	0x0084: 0x0414,	# CYRILLIC CAPITAL LETTER DE
+	0x0085: 0x0415,	# CYRILLIC CAPITAL LETTER IE
+	0x0086: 0x0416,	# CYRILLIC CAPITAL LETTER ZHE
+	0x0087: 0x0417,	# CYRILLIC CAPITAL LETTER ZE
+	0x0088: 0x0418,	# CYRILLIC CAPITAL LETTER I
+	0x0089: 0x0419,	# CYRILLIC CAPITAL LETTER SHORT I
+	0x008a: 0x041a,	# CYRILLIC CAPITAL LETTER KA
+	0x008b: 0x041b,	# CYRILLIC CAPITAL LETTER EL
+	0x008c: 0x041c,	# CYRILLIC CAPITAL LETTER EM
+	0x008d: 0x041d,	# CYRILLIC CAPITAL LETTER EN
+	0x008e: 0x041e,	# CYRILLIC CAPITAL LETTER O
+	0x008f: 0x041f,	# CYRILLIC CAPITAL LETTER PE
+	0x0090: 0x0420,	# CYRILLIC CAPITAL LETTER ER
+	0x0091: 0x0421,	# CYRILLIC CAPITAL LETTER ES
+	0x0092: 0x0422,	# CYRILLIC CAPITAL LETTER TE
+	0x0093: 0x0423,	# CYRILLIC CAPITAL LETTER U
+	0x0094: 0x0424,	# CYRILLIC CAPITAL LETTER EF
+	0x0095: 0x0425,	# CYRILLIC CAPITAL LETTER HA
+	0x0096: 0x0426,	# CYRILLIC CAPITAL LETTER TSE
+	0x0097: 0x0427,	# CYRILLIC CAPITAL LETTER CHE
+	0x0098: 0x0428,	# CYRILLIC CAPITAL LETTER SHA
+	0x0099: 0x0429,	# CYRILLIC CAPITAL LETTER SHCHA
+	0x009a: 0x042a,	# CYRILLIC CAPITAL LETTER HARD SIGN
+	0x009b: 0x042b,	# CYRILLIC CAPITAL LETTER YERU
+	0x009c: 0x042c,	# CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x009d: 0x042d,	# CYRILLIC CAPITAL LETTER E
+	0x009e: 0x042e,	# CYRILLIC CAPITAL LETTER YU
+	0x009f: 0x042f,	# CYRILLIC CAPITAL LETTER YA
+	0x00a0: 0x0430,	# CYRILLIC SMALL LETTER A
+	0x00a1: 0x0431,	# CYRILLIC SMALL LETTER BE
+	0x00a2: 0x0432,	# CYRILLIC SMALL LETTER VE
+	0x00a3: 0x0433,	# CYRILLIC SMALL LETTER GHE
+	0x00a4: 0x0434,	# CYRILLIC SMALL LETTER DE
+	0x00a5: 0x0435,	# CYRILLIC SMALL LETTER IE
+	0x00a6: 0x0436,	# CYRILLIC SMALL LETTER ZHE
+	0x00a7: 0x0437,	# CYRILLIC SMALL LETTER ZE
+	0x00a8: 0x0438,	# CYRILLIC SMALL LETTER I
+	0x00a9: 0x0439,	# CYRILLIC SMALL LETTER SHORT I
+	0x00aa: 0x043a,	# CYRILLIC SMALL LETTER KA
+	0x00ab: 0x043b,	# CYRILLIC SMALL LETTER EL
+	0x00ac: 0x043c,	# CYRILLIC SMALL LETTER EM
+	0x00ad: 0x043d,	# CYRILLIC SMALL LETTER EN
+	0x00ae: 0x043e,	# CYRILLIC SMALL LETTER O
+	0x00af: 0x043f,	# CYRILLIC SMALL LETTER PE
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x2561,	# BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b6: 0x2562,	# BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b7: 0x2556,	# BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00b8: 0x2555,	# BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x255c,	# BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00be: 0x255b,	# BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x255e,	# BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00c7: 0x255f,	# BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x2567,	# BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00d0: 0x2568,	# BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00d1: 0x2564,	# BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00d2: 0x2565,	# BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00d3: 0x2559,	# BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00d4: 0x2558,	# BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00d5: 0x2552,	# BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00d6: 0x2553,	# BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00d7: 0x256b,	# BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00d8: 0x256a,	# BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x258c,	# LEFT HALF BLOCK
+	0x00de: 0x2590,	# RIGHT HALF BLOCK
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x0440,	# CYRILLIC SMALL LETTER ER
+	0x00e1: 0x0441,	# CYRILLIC SMALL LETTER ES
+	0x00e2: 0x0442,	# CYRILLIC SMALL LETTER TE
+	0x00e3: 0x0443,	# CYRILLIC SMALL LETTER U
+	0x00e4: 0x0444,	# CYRILLIC SMALL LETTER EF
+	0x00e5: 0x0445,	# CYRILLIC SMALL LETTER HA
+	0x00e6: 0x0446,	# CYRILLIC SMALL LETTER TSE
+	0x00e7: 0x0447,	# CYRILLIC SMALL LETTER CHE
+	0x00e8: 0x0448,	# CYRILLIC SMALL LETTER SHA
+	0x00e9: 0x0449,	# CYRILLIC SMALL LETTER SHCHA
+	0x00ea: 0x044a,	# CYRILLIC SMALL LETTER HARD SIGN
+	0x00eb: 0x044b,	# CYRILLIC SMALL LETTER YERU
+	0x00ec: 0x044c,	# CYRILLIC SMALL LETTER SOFT SIGN
+	0x00ed: 0x044d,	# CYRILLIC SMALL LETTER E
+	0x00ee: 0x044e,	# CYRILLIC SMALL LETTER YU
+	0x00ef: 0x044f,	# CYRILLIC SMALL LETTER YA
+	0x00f0: 0x0401,	# CYRILLIC CAPITAL LETTER IO
+	0x00f1: 0x0451,	# CYRILLIC SMALL LETTER IO
+	0x00f2: 0x0404,	# CYRILLIC CAPITAL LETTER UKRAINIAN IE
+	0x00f3: 0x0454,	# CYRILLIC SMALL LETTER UKRAINIAN IE
+	0x00f4: 0x0407,	# CYRILLIC CAPITAL LETTER YI
+	0x00f5: 0x0457,	# CYRILLIC SMALL LETTER YI
+	0x00f6: 0x040e,	# CYRILLIC CAPITAL LETTER SHORT U
+	0x00f7: 0x045e,	# CYRILLIC SMALL LETTER SHORT U
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x2219,	# BULLET OPERATOR
+	0x00fa: 0x00b7,	# MIDDLE DOT
+	0x00fb: 0x221a,	# SQUARE ROOT
+	0x00fc: 0x2116,	# NUMERO SIGN
+	0x00fd: 0x00a4,	# CURRENCY SIGN
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp869.py b/lib-python/2.2/encodings/cp869.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp869.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'CP869.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: None,	# UNDEFINED
+	0x0081: None,	# UNDEFINED
+	0x0082: None,	# UNDEFINED
+	0x0083: None,	# UNDEFINED
+	0x0084: None,	# UNDEFINED
+	0x0085: None,	# UNDEFINED
+	0x0086: 0x0386,	# GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x0087: None,	# UNDEFINED
+	0x0088: 0x00b7,	# MIDDLE DOT
+	0x0089: 0x00ac,	# NOT SIGN
+	0x008a: 0x00a6,	# BROKEN BAR
+	0x008b: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x008c: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x008d: 0x0388,	# GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x008e: 0x2015,	# HORIZONTAL BAR
+	0x008f: 0x0389,	# GREEK CAPITAL LETTER ETA WITH TONOS
+	0x0090: 0x038a,	# GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x0091: 0x03aa,	# GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x0092: 0x038c,	# GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x0093: None,	# UNDEFINED
+	0x0094: None,	# UNDEFINED
+	0x0095: 0x038e,	# GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x0096: 0x03ab,	# GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x0097: 0x00a9,	# COPYRIGHT SIGN
+	0x0098: 0x038f,	# GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x0099: 0x00b2,	# SUPERSCRIPT TWO
+	0x009a: 0x00b3,	# SUPERSCRIPT THREE
+	0x009b: 0x03ac,	# GREEK SMALL LETTER ALPHA WITH TONOS
+	0x009c: 0x00a3,	# POUND SIGN
+	0x009d: 0x03ad,	# GREEK SMALL LETTER EPSILON WITH TONOS
+	0x009e: 0x03ae,	# GREEK SMALL LETTER ETA WITH TONOS
+	0x009f: 0x03af,	# GREEK SMALL LETTER IOTA WITH TONOS
+	0x00a0: 0x03ca,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00a1: 0x0390,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+	0x00a2: 0x03cc,	# GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00a3: 0x03cd,	# GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00a4: 0x0391,	# GREEK CAPITAL LETTER ALPHA
+	0x00a5: 0x0392,	# GREEK CAPITAL LETTER BETA
+	0x00a6: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00a7: 0x0394,	# GREEK CAPITAL LETTER DELTA
+	0x00a8: 0x0395,	# GREEK CAPITAL LETTER EPSILON
+	0x00a9: 0x0396,	# GREEK CAPITAL LETTER ZETA
+	0x00aa: 0x0397,	# GREEK CAPITAL LETTER ETA
+	0x00ab: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00ac: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00ad: 0x0399,	# GREEK CAPITAL LETTER IOTA
+	0x00ae: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00af: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00b0: 0x2591,	# LIGHT SHADE
+	0x00b1: 0x2592,	# MEDIUM SHADE
+	0x00b2: 0x2593,	# DARK SHADE
+	0x00b3: 0x2502,	# BOX DRAWINGS LIGHT VERTICAL
+	0x00b4: 0x2524,	# BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x00b5: 0x039a,	# GREEK CAPITAL LETTER KAPPA
+	0x00b6: 0x039b,	# GREEK CAPITAL LETTER LAMDA
+	0x00b7: 0x039c,	# GREEK CAPITAL LETTER MU
+	0x00b8: 0x039d,	# GREEK CAPITAL LETTER NU
+	0x00b9: 0x2563,	# BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00ba: 0x2551,	# BOX DRAWINGS DOUBLE VERTICAL
+	0x00bb: 0x2557,	# BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00bc: 0x255d,	# BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00bd: 0x039e,	# GREEK CAPITAL LETTER XI
+	0x00be: 0x039f,	# GREEK CAPITAL LETTER OMICRON
+	0x00bf: 0x2510,	# BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x00c0: 0x2514,	# BOX DRAWINGS LIGHT UP AND RIGHT
+	0x00c1: 0x2534,	# BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x00c2: 0x252c,	# BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x00c3: 0x251c,	# BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x00c4: 0x2500,	# BOX DRAWINGS LIGHT HORIZONTAL
+	0x00c5: 0x253c,	# BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x00c6: 0x03a0,	# GREEK CAPITAL LETTER PI
+	0x00c7: 0x03a1,	# GREEK CAPITAL LETTER RHO
+	0x00c8: 0x255a,	# BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00c9: 0x2554,	# BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00ca: 0x2569,	# BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00cb: 0x2566,	# BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00cc: 0x2560,	# BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00cd: 0x2550,	# BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00ce: 0x256c,	# BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00cf: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00d0: 0x03a4,	# GREEK CAPITAL LETTER TAU
+	0x00d1: 0x03a5,	# GREEK CAPITAL LETTER UPSILON
+	0x00d2: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00d3: 0x03a7,	# GREEK CAPITAL LETTER CHI
+	0x00d4: 0x03a8,	# GREEK CAPITAL LETTER PSI
+	0x00d5: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00d6: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00d7: 0x03b2,	# GREEK SMALL LETTER BETA
+	0x00d8: 0x03b3,	# GREEK SMALL LETTER GAMMA
+	0x00d9: 0x2518,	# BOX DRAWINGS LIGHT UP AND LEFT
+	0x00da: 0x250c,	# BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x00db: 0x2588,	# FULL BLOCK
+	0x00dc: 0x2584,	# LOWER HALF BLOCK
+	0x00dd: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00de: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00df: 0x2580,	# UPPER HALF BLOCK
+	0x00e0: 0x03b6,	# GREEK SMALL LETTER ZETA
+	0x00e1: 0x03b7,	# GREEK SMALL LETTER ETA
+	0x00e2: 0x03b8,	# GREEK SMALL LETTER THETA
+	0x00e3: 0x03b9,	# GREEK SMALL LETTER IOTA
+	0x00e4: 0x03ba,	# GREEK SMALL LETTER KAPPA
+	0x00e5: 0x03bb,	# GREEK SMALL LETTER LAMDA
+	0x00e6: 0x03bc,	# GREEK SMALL LETTER MU
+	0x00e7: 0x03bd,	# GREEK SMALL LETTER NU
+	0x00e8: 0x03be,	# GREEK SMALL LETTER XI
+	0x00e9: 0x03bf,	# GREEK SMALL LETTER OMICRON
+	0x00ea: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00eb: 0x03c1,	# GREEK SMALL LETTER RHO
+	0x00ec: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00ed: 0x03c2,	# GREEK SMALL LETTER FINAL SIGMA
+	0x00ee: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00ef: 0x0384,	# GREEK TONOS
+	0x00f0: 0x00ad,	# SOFT HYPHEN
+	0x00f1: 0x00b1,	# PLUS-MINUS SIGN
+	0x00f2: 0x03c5,	# GREEK SMALL LETTER UPSILON
+	0x00f3: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00f4: 0x03c7,	# GREEK SMALL LETTER CHI
+	0x00f5: 0x00a7,	# SECTION SIGN
+	0x00f6: 0x03c8,	# GREEK SMALL LETTER PSI
+	0x00f7: 0x0385,	# GREEK DIALYTIKA TONOS
+	0x00f8: 0x00b0,	# DEGREE SIGN
+	0x00f9: 0x00a8,	# DIAERESIS
+	0x00fa: 0x03c9,	# GREEK SMALL LETTER OMEGA
+	0x00fb: 0x03cb,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00fc: 0x03b0,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+	0x00fd: 0x03ce,	# GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00fe: 0x25a0,	# BLACK SQUARE
+	0x00ff: 0x00a0,	# NO-BREAK SPACE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp874.py b/lib-python/2.2/encodings/cp874.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp874.py
@@ -0,0 +1,171 @@
+""" Python Character Mapping Codec generated from 'CP874.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x20ac,	# EURO SIGN
+	0x0081: None,	# UNDEFINED
+	0x0082: None,	# UNDEFINED
+	0x0083: None,	# UNDEFINED
+	0x0084: None,	# UNDEFINED
+	0x0085: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x0086: None,	# UNDEFINED
+	0x0087: None,	# UNDEFINED
+	0x0088: None,	# UNDEFINED
+	0x0089: None,	# UNDEFINED
+	0x008a: None,	# UNDEFINED
+	0x008b: None,	# UNDEFINED
+	0x008c: None,	# UNDEFINED
+	0x008d: None,	# UNDEFINED
+	0x008e: None,	# UNDEFINED
+	0x008f: None,	# UNDEFINED
+	0x0090: None,	# UNDEFINED
+	0x0091: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x0092: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x0093: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x0094: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x0095: 0x2022,	# BULLET
+	0x0096: 0x2013,	# EN DASH
+	0x0097: 0x2014,	# EM DASH
+	0x0098: None,	# UNDEFINED
+	0x0099: None,	# UNDEFINED
+	0x009a: None,	# UNDEFINED
+	0x009b: None,	# UNDEFINED
+	0x009c: None,	# UNDEFINED
+	0x009d: None,	# UNDEFINED
+	0x009e: None,	# UNDEFINED
+	0x009f: None,	# UNDEFINED
+	0x00a1: 0x0e01,	# THAI CHARACTER KO KAI
+	0x00a2: 0x0e02,	# THAI CHARACTER KHO KHAI
+	0x00a3: 0x0e03,	# THAI CHARACTER KHO KHUAT
+	0x00a4: 0x0e04,	# THAI CHARACTER KHO KHWAI
+	0x00a5: 0x0e05,	# THAI CHARACTER KHO KHON
+	0x00a6: 0x0e06,	# THAI CHARACTER KHO RAKHANG
+	0x00a7: 0x0e07,	# THAI CHARACTER NGO NGU
+	0x00a8: 0x0e08,	# THAI CHARACTER CHO CHAN
+	0x00a9: 0x0e09,	# THAI CHARACTER CHO CHING
+	0x00aa: 0x0e0a,	# THAI CHARACTER CHO CHANG
+	0x00ab: 0x0e0b,	# THAI CHARACTER SO SO
+	0x00ac: 0x0e0c,	# THAI CHARACTER CHO CHOE
+	0x00ad: 0x0e0d,	# THAI CHARACTER YO YING
+	0x00ae: 0x0e0e,	# THAI CHARACTER DO CHADA
+	0x00af: 0x0e0f,	# THAI CHARACTER TO PATAK
+	0x00b0: 0x0e10,	# THAI CHARACTER THO THAN
+	0x00b1: 0x0e11,	# THAI CHARACTER THO NANGMONTHO
+	0x00b2: 0x0e12,	# THAI CHARACTER THO PHUTHAO
+	0x00b3: 0x0e13,	# THAI CHARACTER NO NEN
+	0x00b4: 0x0e14,	# THAI CHARACTER DO DEK
+	0x00b5: 0x0e15,	# THAI CHARACTER TO TAO
+	0x00b6: 0x0e16,	# THAI CHARACTER THO THUNG
+	0x00b7: 0x0e17,	# THAI CHARACTER THO THAHAN
+	0x00b8: 0x0e18,	# THAI CHARACTER THO THONG
+	0x00b9: 0x0e19,	# THAI CHARACTER NO NU
+	0x00ba: 0x0e1a,	# THAI CHARACTER BO BAIMAI
+	0x00bb: 0x0e1b,	# THAI CHARACTER PO PLA
+	0x00bc: 0x0e1c,	# THAI CHARACTER PHO PHUNG
+	0x00bd: 0x0e1d,	# THAI CHARACTER FO FA
+	0x00be: 0x0e1e,	# THAI CHARACTER PHO PHAN
+	0x00bf: 0x0e1f,	# THAI CHARACTER FO FAN
+	0x00c0: 0x0e20,	# THAI CHARACTER PHO SAMPHAO
+	0x00c1: 0x0e21,	# THAI CHARACTER MO MA
+	0x00c2: 0x0e22,	# THAI CHARACTER YO YAK
+	0x00c3: 0x0e23,	# THAI CHARACTER RO RUA
+	0x00c4: 0x0e24,	# THAI CHARACTER RU
+	0x00c5: 0x0e25,	# THAI CHARACTER LO LING
+	0x00c6: 0x0e26,	# THAI CHARACTER LU
+	0x00c7: 0x0e27,	# THAI CHARACTER WO WAEN
+	0x00c8: 0x0e28,	# THAI CHARACTER SO SALA
+	0x00c9: 0x0e29,	# THAI CHARACTER SO RUSI
+	0x00ca: 0x0e2a,	# THAI CHARACTER SO SUA
+	0x00cb: 0x0e2b,	# THAI CHARACTER HO HIP
+	0x00cc: 0x0e2c,	# THAI CHARACTER LO CHULA
+	0x00cd: 0x0e2d,	# THAI CHARACTER O ANG
+	0x00ce: 0x0e2e,	# THAI CHARACTER HO NOKHUK
+	0x00cf: 0x0e2f,	# THAI CHARACTER PAIYANNOI
+	0x00d0: 0x0e30,	# THAI CHARACTER SARA A
+	0x00d1: 0x0e31,	# THAI CHARACTER MAI HAN-AKAT
+	0x00d2: 0x0e32,	# THAI CHARACTER SARA AA
+	0x00d3: 0x0e33,	# THAI CHARACTER SARA AM
+	0x00d4: 0x0e34,	# THAI CHARACTER SARA I
+	0x00d5: 0x0e35,	# THAI CHARACTER SARA II
+	0x00d6: 0x0e36,	# THAI CHARACTER SARA UE
+	0x00d7: 0x0e37,	# THAI CHARACTER SARA UEE
+	0x00d8: 0x0e38,	# THAI CHARACTER SARA U
+	0x00d9: 0x0e39,	# THAI CHARACTER SARA UU
+	0x00da: 0x0e3a,	# THAI CHARACTER PHINTHU
+	0x00db: None,	# UNDEFINED
+	0x00dc: None,	# UNDEFINED
+	0x00dd: None,	# UNDEFINED
+	0x00de: None,	# UNDEFINED
+	0x00df: 0x0e3f,	# THAI CURRENCY SYMBOL BAHT
+	0x00e0: 0x0e40,	# THAI CHARACTER SARA E
+	0x00e1: 0x0e41,	# THAI CHARACTER SARA AE
+	0x00e2: 0x0e42,	# THAI CHARACTER SARA O
+	0x00e3: 0x0e43,	# THAI CHARACTER SARA AI MAIMUAN
+	0x00e4: 0x0e44,	# THAI CHARACTER SARA AI MAIMALAI
+	0x00e5: 0x0e45,	# THAI CHARACTER LAKKHANGYAO
+	0x00e6: 0x0e46,	# THAI CHARACTER MAIYAMOK
+	0x00e7: 0x0e47,	# THAI CHARACTER MAITAIKHU
+	0x00e8: 0x0e48,	# THAI CHARACTER MAI EK
+	0x00e9: 0x0e49,	# THAI CHARACTER MAI THO
+	0x00ea: 0x0e4a,	# THAI CHARACTER MAI TRI
+	0x00eb: 0x0e4b,	# THAI CHARACTER MAI CHATTAWA
+	0x00ec: 0x0e4c,	# THAI CHARACTER THANTHAKHAT
+	0x00ed: 0x0e4d,	# THAI CHARACTER NIKHAHIT
+	0x00ee: 0x0e4e,	# THAI CHARACTER YAMAKKAN
+	0x00ef: 0x0e4f,	# THAI CHARACTER FONGMAN
+	0x00f0: 0x0e50,	# THAI DIGIT ZERO
+	0x00f1: 0x0e51,	# THAI DIGIT ONE
+	0x00f2: 0x0e52,	# THAI DIGIT TWO
+	0x00f3: 0x0e53,	# THAI DIGIT THREE
+	0x00f4: 0x0e54,	# THAI DIGIT FOUR
+	0x00f5: 0x0e55,	# THAI DIGIT FIVE
+	0x00f6: 0x0e56,	# THAI DIGIT SIX
+	0x00f7: 0x0e57,	# THAI DIGIT SEVEN
+	0x00f8: 0x0e58,	# THAI DIGIT EIGHT
+	0x00f9: 0x0e59,	# THAI DIGIT NINE
+	0x00fa: 0x0e5a,	# THAI CHARACTER ANGKHANKHU
+	0x00fb: 0x0e5b,	# THAI CHARACTER KHOMUT
+	0x00fc: None,	# UNDEFINED
+	0x00fd: None,	# UNDEFINED
+	0x00fe: None,	# UNDEFINED
+	0x00ff: None,	# UNDEFINED
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/cp875.py b/lib-python/2.2/encodings/cp875.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/cp875.py
@@ -0,0 +1,281 @@
+""" Python Character Mapping Codec generated from 'CP875.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0004: 0x009c,	# CONTROL
+	0x0005: 0x0009,	# HORIZONTAL TABULATION
+	0x0006: 0x0086,	# CONTROL
+	0x0007: 0x007f,	# DELETE
+	0x0008: 0x0097,	# CONTROL
+	0x0009: 0x008d,	# CONTROL
+	0x000a: 0x008e,	# CONTROL
+	0x0014: 0x009d,	# CONTROL
+	0x0015: 0x0085,	# CONTROL
+	0x0016: 0x0008,	# BACKSPACE
+	0x0017: 0x0087,	# CONTROL
+	0x001a: 0x0092,	# CONTROL
+	0x001b: 0x008f,	# CONTROL
+	0x0020: 0x0080,	# CONTROL
+	0x0021: 0x0081,	# CONTROL
+	0x0022: 0x0082,	# CONTROL
+	0x0023: 0x0083,	# CONTROL
+	0x0024: 0x0084,	# CONTROL
+	0x0025: 0x000a,	# LINE FEED
+	0x0026: 0x0017,	# END OF TRANSMISSION BLOCK
+	0x0027: 0x001b,	# ESCAPE
+	0x0028: 0x0088,	# CONTROL
+	0x0029: 0x0089,	# CONTROL
+	0x002a: 0x008a,	# CONTROL
+	0x002b: 0x008b,	# CONTROL
+	0x002c: 0x008c,	# CONTROL
+	0x002d: 0x0005,	# ENQUIRY
+	0x002e: 0x0006,	# ACKNOWLEDGE
+	0x002f: 0x0007,	# BELL
+	0x0030: 0x0090,	# CONTROL
+	0x0031: 0x0091,	# CONTROL
+	0x0032: 0x0016,	# SYNCHRONOUS IDLE
+	0x0033: 0x0093,	# CONTROL
+	0x0034: 0x0094,	# CONTROL
+	0x0035: 0x0095,	# CONTROL
+	0x0036: 0x0096,	# CONTROL
+	0x0037: 0x0004,	# END OF TRANSMISSION
+	0x0038: 0x0098,	# CONTROL
+	0x0039: 0x0099,	# CONTROL
+	0x003a: 0x009a,	# CONTROL
+	0x003b: 0x009b,	# CONTROL
+	0x003c: 0x0014,	# DEVICE CONTROL FOUR
+	0x003d: 0x0015,	# NEGATIVE ACKNOWLEDGE
+	0x003e: 0x009e,	# CONTROL
+	0x003f: 0x001a,	# SUBSTITUTE
+	0x0040: 0x0020,	# SPACE
+	0x0041: 0x0391,	# GREEK CAPITAL LETTER ALPHA
+	0x0042: 0x0392,	# GREEK CAPITAL LETTER BETA
+	0x0043: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x0044: 0x0394,	# GREEK CAPITAL LETTER DELTA
+	0x0045: 0x0395,	# GREEK CAPITAL LETTER EPSILON
+	0x0046: 0x0396,	# GREEK CAPITAL LETTER ZETA
+	0x0047: 0x0397,	# GREEK CAPITAL LETTER ETA
+	0x0048: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x0049: 0x0399,	# GREEK CAPITAL LETTER IOTA
+	0x004a: 0x005b,	# LEFT SQUARE BRACKET
+	0x004b: 0x002e,	# FULL STOP
+	0x004c: 0x003c,	# LESS-THAN SIGN
+	0x004d: 0x0028,	# LEFT PARENTHESIS
+	0x004e: 0x002b,	# PLUS SIGN
+	0x004f: 0x0021,	# EXCLAMATION MARK
+	0x0050: 0x0026,	# AMPERSAND
+	0x0051: 0x039a,	# GREEK CAPITAL LETTER KAPPA
+	0x0052: 0x039b,	# GREEK CAPITAL LETTER LAMDA
+	0x0053: 0x039c,	# GREEK CAPITAL LETTER MU
+	0x0054: 0x039d,	# GREEK CAPITAL LETTER NU
+	0x0055: 0x039e,	# GREEK CAPITAL LETTER XI
+	0x0056: 0x039f,	# GREEK CAPITAL LETTER OMICRON
+	0x0057: 0x03a0,	# GREEK CAPITAL LETTER PI
+	0x0058: 0x03a1,	# GREEK CAPITAL LETTER RHO
+	0x0059: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x005a: 0x005d,	# RIGHT SQUARE BRACKET
+	0x005b: 0x0024,	# DOLLAR SIGN
+	0x005c: 0x002a,	# ASTERISK
+	0x005d: 0x0029,	# RIGHT PARENTHESIS
+	0x005e: 0x003b,	# SEMICOLON
+	0x005f: 0x005e,	# CIRCUMFLEX ACCENT
+	0x0060: 0x002d,	# HYPHEN-MINUS
+	0x0061: 0x002f,	# SOLIDUS
+	0x0062: 0x03a4,	# GREEK CAPITAL LETTER TAU
+	0x0063: 0x03a5,	# GREEK CAPITAL LETTER UPSILON
+	0x0064: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x0065: 0x03a7,	# GREEK CAPITAL LETTER CHI
+	0x0066: 0x03a8,	# GREEK CAPITAL LETTER PSI
+	0x0067: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x0068: 0x03aa,	# GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x0069: 0x03ab,	# GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x006a: 0x007c,	# VERTICAL LINE
+	0x006b: 0x002c,	# COMMA
+	0x006c: 0x0025,	# PERCENT SIGN
+	0x006d: 0x005f,	# LOW LINE
+	0x006e: 0x003e,	# GREATER-THAN SIGN
+	0x006f: 0x003f,	# QUESTION MARK
+	0x0070: 0x00a8,	# DIAERESIS
+	0x0071: 0x0386,	# GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x0072: 0x0388,	# GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x0073: 0x0389,	# GREEK CAPITAL LETTER ETA WITH TONOS
+	0x0074: 0x00a0,	# NO-BREAK SPACE
+	0x0075: 0x038a,	# GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x0076: 0x038c,	# GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x0077: 0x038e,	# GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x0078: 0x038f,	# GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x0079: 0x0060,	# GRAVE ACCENT
+	0x007a: 0x003a,	# COLON
+	0x007b: 0x0023,	# NUMBER SIGN
+	0x007c: 0x0040,	# COMMERCIAL AT
+	0x007d: 0x0027,	# APOSTROPHE
+	0x007e: 0x003d,	# EQUALS SIGN
+	0x007f: 0x0022,	# QUOTATION MARK
+	0x0080: 0x0385,	# GREEK DIALYTIKA TONOS
+	0x0081: 0x0061,	# LATIN SMALL LETTER A
+	0x0082: 0x0062,	# LATIN SMALL LETTER B
+	0x0083: 0x0063,	# LATIN SMALL LETTER C
+	0x0084: 0x0064,	# LATIN SMALL LETTER D
+	0x0085: 0x0065,	# LATIN SMALL LETTER E
+	0x0086: 0x0066,	# LATIN SMALL LETTER F
+	0x0087: 0x0067,	# LATIN SMALL LETTER G
+	0x0088: 0x0068,	# LATIN SMALL LETTER H
+	0x0089: 0x0069,	# LATIN SMALL LETTER I
+	0x008a: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x008b: 0x03b2,	# GREEK SMALL LETTER BETA
+	0x008c: 0x03b3,	# GREEK SMALL LETTER GAMMA
+	0x008d: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x008e: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x008f: 0x03b6,	# GREEK SMALL LETTER ZETA
+	0x0090: 0x00b0,	# DEGREE SIGN
+	0x0091: 0x006a,	# LATIN SMALL LETTER J
+	0x0092: 0x006b,	# LATIN SMALL LETTER K
+	0x0093: 0x006c,	# LATIN SMALL LETTER L
+	0x0094: 0x006d,	# LATIN SMALL LETTER M
+	0x0095: 0x006e,	# LATIN SMALL LETTER N
+	0x0096: 0x006f,	# LATIN SMALL LETTER O
+	0x0097: 0x0070,	# LATIN SMALL LETTER P
+	0x0098: 0x0071,	# LATIN SMALL LETTER Q
+	0x0099: 0x0072,	# LATIN SMALL LETTER R
+	0x009a: 0x03b7,	# GREEK SMALL LETTER ETA
+	0x009b: 0x03b8,	# GREEK SMALL LETTER THETA
+	0x009c: 0x03b9,	# GREEK SMALL LETTER IOTA
+	0x009d: 0x03ba,	# GREEK SMALL LETTER KAPPA
+	0x009e: 0x03bb,	# GREEK SMALL LETTER LAMDA
+	0x009f: 0x03bc,	# GREEK SMALL LETTER MU
+	0x00a0: 0x00b4,	# ACUTE ACCENT
+	0x00a1: 0x007e,	# TILDE
+	0x00a2: 0x0073,	# LATIN SMALL LETTER S
+	0x00a3: 0x0074,	# LATIN SMALL LETTER T
+	0x00a4: 0x0075,	# LATIN SMALL LETTER U
+	0x00a5: 0x0076,	# LATIN SMALL LETTER V
+	0x00a6: 0x0077,	# LATIN SMALL LETTER W
+	0x00a7: 0x0078,	# LATIN SMALL LETTER X
+	0x00a8: 0x0079,	# LATIN SMALL LETTER Y
+	0x00a9: 0x007a,	# LATIN SMALL LETTER Z
+	0x00aa: 0x03bd,	# GREEK SMALL LETTER NU
+	0x00ab: 0x03be,	# GREEK SMALL LETTER XI
+	0x00ac: 0x03bf,	# GREEK SMALL LETTER OMICRON
+	0x00ad: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00ae: 0x03c1,	# GREEK SMALL LETTER RHO
+	0x00af: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00b0: 0x00a3,	# POUND SIGN
+	0x00b1: 0x03ac,	# GREEK SMALL LETTER ALPHA WITH TONOS
+	0x00b2: 0x03ad,	# GREEK SMALL LETTER EPSILON WITH TONOS
+	0x00b3: 0x03ae,	# GREEK SMALL LETTER ETA WITH TONOS
+	0x00b4: 0x03ca,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00b5: 0x03af,	# GREEK SMALL LETTER IOTA WITH TONOS
+	0x00b6: 0x03cc,	# GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00b7: 0x03cd,	# GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00b8: 0x03cb,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00b9: 0x03ce,	# GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00ba: 0x03c2,	# GREEK SMALL LETTER FINAL SIGMA
+	0x00bb: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00bc: 0x03c5,	# GREEK SMALL LETTER UPSILON
+	0x00bd: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00be: 0x03c7,	# GREEK SMALL LETTER CHI
+	0x00bf: 0x03c8,	# GREEK SMALL LETTER PSI
+	0x00c0: 0x007b,	# LEFT CURLY BRACKET
+	0x00c1: 0x0041,	# LATIN CAPITAL LETTER A
+	0x00c2: 0x0042,	# LATIN CAPITAL LETTER B
+	0x00c3: 0x0043,	# LATIN CAPITAL LETTER C
+	0x00c4: 0x0044,	# LATIN CAPITAL LETTER D
+	0x00c5: 0x0045,	# LATIN CAPITAL LETTER E
+	0x00c6: 0x0046,	# LATIN CAPITAL LETTER F
+	0x00c7: 0x0047,	# LATIN CAPITAL LETTER G
+	0x00c8: 0x0048,	# LATIN CAPITAL LETTER H
+	0x00c9: 0x0049,	# LATIN CAPITAL LETTER I
+	0x00ca: 0x00ad,	# SOFT HYPHEN
+	0x00cb: 0x03c9,	# GREEK SMALL LETTER OMEGA
+	0x00cc: 0x0390,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+	0x00cd: 0x03b0,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+	0x00ce: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00cf: 0x2015,	# HORIZONTAL BAR
+	0x00d0: 0x007d,	# RIGHT CURLY BRACKET
+	0x00d1: 0x004a,	# LATIN CAPITAL LETTER J
+	0x00d2: 0x004b,	# LATIN CAPITAL LETTER K
+	0x00d3: 0x004c,	# LATIN CAPITAL LETTER L
+	0x00d4: 0x004d,	# LATIN CAPITAL LETTER M
+	0x00d5: 0x004e,	# LATIN CAPITAL LETTER N
+	0x00d6: 0x004f,	# LATIN CAPITAL LETTER O
+	0x00d7: 0x0050,	# LATIN CAPITAL LETTER P
+	0x00d8: 0x0051,	# LATIN CAPITAL LETTER Q
+	0x00d9: 0x0052,	# LATIN CAPITAL LETTER R
+	0x00da: 0x00b1,	# PLUS-MINUS SIGN
+	0x00db: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x00dc: 0x001a,	# SUBSTITUTE
+	0x00dd: 0x0387,	# GREEK ANO TELEIA
+	0x00de: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00df: 0x00a6,	# BROKEN BAR
+	0x00e0: 0x005c,	# REVERSE SOLIDUS
+	0x00e1: 0x001a,	# SUBSTITUTE
+	0x00e2: 0x0053,	# LATIN CAPITAL LETTER S
+	0x00e3: 0x0054,	# LATIN CAPITAL LETTER T
+	0x00e4: 0x0055,	# LATIN CAPITAL LETTER U
+	0x00e5: 0x0056,	# LATIN CAPITAL LETTER V
+	0x00e6: 0x0057,	# LATIN CAPITAL LETTER W
+	0x00e7: 0x0058,	# LATIN CAPITAL LETTER X
+	0x00e8: 0x0059,	# LATIN CAPITAL LETTER Y
+	0x00e9: 0x005a,	# LATIN CAPITAL LETTER Z
+	0x00ea: 0x00b2,	# SUPERSCRIPT TWO
+	0x00eb: 0x00a7,	# SECTION SIGN
+	0x00ec: 0x001a,	# SUBSTITUTE
+	0x00ed: 0x001a,	# SUBSTITUTE
+	0x00ee: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00ef: 0x00ac,	# NOT SIGN
+	0x00f0: 0x0030,	# DIGIT ZERO
+	0x00f1: 0x0031,	# DIGIT ONE
+	0x00f2: 0x0032,	# DIGIT TWO
+	0x00f3: 0x0033,	# DIGIT THREE
+	0x00f4: 0x0034,	# DIGIT FOUR
+	0x00f5: 0x0035,	# DIGIT FIVE
+	0x00f6: 0x0036,	# DIGIT SIX
+	0x00f7: 0x0037,	# DIGIT SEVEN
+	0x00f8: 0x0038,	# DIGIT EIGHT
+	0x00f9: 0x0039,	# DIGIT NINE
+	0x00fa: 0x00b3,	# SUPERSCRIPT THREE
+	0x00fb: 0x00a9,	# COPYRIGHT SIGN
+	0x00fc: 0x001a,	# SUBSTITUTE
+	0x00fd: 0x001a,	# SUBSTITUTE
+	0x00fe: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00ff: 0x009f,	# CONTROL
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/hex_codec.py b/lib-python/2.2/encodings/hex_codec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/hex_codec.py
@@ -0,0 +1,62 @@
+""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding
+
+    Unlike most of the other codecs which target Unicode, this codec
+    will return Python string objects for both encode and decode.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+"""
+import codecs, binascii
+
+### Codec APIs
+
+def hex_encode(input,errors='strict'):
+
+    """ Encodes the object input and returns a tuple (output
+        object, length consumed).
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    output = binascii.b2a_hex(input)
+    return (output, len(input))
+
+def hex_decode(input,errors='strict'):
+
+    """ Decodes the object input and returns a tuple (output
+        object, length consumed).
+
+        input must be an object which provides the bf_getreadbuf
+        buffer slot. Python strings, buffer objects and memory
+        mapped files are examples of objects providing this slot.
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    output = binascii.a2b_hex(input)
+    return (output, len(input))
+
+class Codec(codecs.Codec):
+
+    def encode(self, input,errors='strict'):
+        return hex_encode(input,errors)
+    def decode(self, input,errors='strict'):
+        return hex_decode(input,errors)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (hex_encode,hex_decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/iso8859_1.py b/lib-python/2.2/encodings/iso8859_1.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_1.py
@@ -0,0 +1,44 @@
+""" Python Character Mapping Codec generated from '8859-1.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_10.py b/lib-python/2.2/encodings/iso8859_10.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_10.py
@@ -0,0 +1,90 @@
+""" Python Character Mapping Codec generated from '8859-10.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x0104,	# 	LATIN CAPITAL LETTER A WITH OGONEK
+	0x00a2: 0x0112,	# 	LATIN CAPITAL LETTER E WITH MACRON
+	0x00a3: 0x0122,	# 	LATIN CAPITAL LETTER G WITH CEDILLA
+	0x00a4: 0x012a,	# 	LATIN CAPITAL LETTER I WITH MACRON
+	0x00a5: 0x0128,	# 	LATIN CAPITAL LETTER I WITH TILDE
+	0x00a6: 0x0136,	# 	LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00a8: 0x013b,	# 	LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00a9: 0x0110,	# 	LATIN CAPITAL LETTER D WITH STROKE
+	0x00aa: 0x0160,	# 	LATIN CAPITAL LETTER S WITH CARON
+	0x00ab: 0x0166,	# 	LATIN CAPITAL LETTER T WITH STROKE
+	0x00ac: 0x017d,	# 	LATIN CAPITAL LETTER Z WITH CARON
+	0x00ae: 0x016a,	# 	LATIN CAPITAL LETTER U WITH MACRON
+	0x00af: 0x014a,	# 	LATIN CAPITAL LETTER ENG
+	0x00b1: 0x0105,	# 	LATIN SMALL LETTER A WITH OGONEK
+	0x00b2: 0x0113,	# 	LATIN SMALL LETTER E WITH MACRON
+	0x00b3: 0x0123,	# 	LATIN SMALL LETTER G WITH CEDILLA
+	0x00b4: 0x012b,	# 	LATIN SMALL LETTER I WITH MACRON
+	0x00b5: 0x0129,	# 	LATIN SMALL LETTER I WITH TILDE
+	0x00b6: 0x0137,	# 	LATIN SMALL LETTER K WITH CEDILLA
+	0x00b8: 0x013c,	# 	LATIN SMALL LETTER L WITH CEDILLA
+	0x00b9: 0x0111,	# 	LATIN SMALL LETTER D WITH STROKE
+	0x00ba: 0x0161,	# 	LATIN SMALL LETTER S WITH CARON
+	0x00bb: 0x0167,	# 	LATIN SMALL LETTER T WITH STROKE
+	0x00bc: 0x017e,	# 	LATIN SMALL LETTER Z WITH CARON
+	0x00bd: 0x2015,	# 	HORIZONTAL BAR
+	0x00be: 0x016b,	# 	LATIN SMALL LETTER U WITH MACRON
+	0x00bf: 0x014b,	# 	LATIN SMALL LETTER ENG
+	0x00c0: 0x0100,	# 	LATIN CAPITAL LETTER A WITH MACRON
+	0x00c7: 0x012e,	# 	LATIN CAPITAL LETTER I WITH OGONEK
+	0x00c8: 0x010c,	# 	LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0118,	# 	LATIN CAPITAL LETTER E WITH OGONEK
+	0x00cc: 0x0116,	# 	LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x00d1: 0x0145,	# 	LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00d2: 0x014c,	# 	LATIN CAPITAL LETTER O WITH MACRON
+	0x00d7: 0x0168,	# 	LATIN CAPITAL LETTER U WITH TILDE
+	0x00d9: 0x0172,	# 	LATIN CAPITAL LETTER U WITH OGONEK
+	0x00e0: 0x0101,	# 	LATIN SMALL LETTER A WITH MACRON
+	0x00e7: 0x012f,	# 	LATIN SMALL LETTER I WITH OGONEK
+	0x00e8: 0x010d,	# 	LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x0119,	# 	LATIN SMALL LETTER E WITH OGONEK
+	0x00ec: 0x0117,	# 	LATIN SMALL LETTER E WITH DOT ABOVE
+	0x00f1: 0x0146,	# 	LATIN SMALL LETTER N WITH CEDILLA
+	0x00f2: 0x014d,	# 	LATIN SMALL LETTER O WITH MACRON
+	0x00f7: 0x0169,	# 	LATIN SMALL LETTER U WITH TILDE
+	0x00f9: 0x0173,	# 	LATIN SMALL LETTER U WITH OGONEK
+	0x00ff: 0x0138,	# 	LATIN SMALL LETTER KRA
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_13.py b/lib-python/2.2/encodings/iso8859_13.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_13.py
@@ -0,0 +1,100 @@
+""" Python Character Mapping Codec generated from '8859-13.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x201d,	# 	RIGHT DOUBLE QUOTATION MARK
+	0x00a5: 0x201e,	# 	DOUBLE LOW-9 QUOTATION MARK
+	0x00a8: 0x00d8,	# 	LATIN CAPITAL LETTER O WITH STROKE
+	0x00aa: 0x0156,	# 	LATIN CAPITAL LETTER R WITH CEDILLA
+	0x00af: 0x00c6,	# 	LATIN CAPITAL LETTER AE
+	0x00b4: 0x201c,	# 	LEFT DOUBLE QUOTATION MARK
+	0x00b8: 0x00f8,	# 	LATIN SMALL LETTER O WITH STROKE
+	0x00ba: 0x0157,	# 	LATIN SMALL LETTER R WITH CEDILLA
+	0x00bf: 0x00e6,	# 	LATIN SMALL LETTER AE
+	0x00c0: 0x0104,	# 	LATIN CAPITAL LETTER A WITH OGONEK
+	0x00c1: 0x012e,	# 	LATIN CAPITAL LETTER I WITH OGONEK
+	0x00c2: 0x0100,	# 	LATIN CAPITAL LETTER A WITH MACRON
+	0x00c3: 0x0106,	# 	LATIN CAPITAL LETTER C WITH ACUTE
+	0x00c6: 0x0118,	# 	LATIN CAPITAL LETTER E WITH OGONEK
+	0x00c7: 0x0112,	# 	LATIN CAPITAL LETTER E WITH MACRON
+	0x00c8: 0x010c,	# 	LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0179,	# 	LATIN CAPITAL LETTER Z WITH ACUTE
+	0x00cb: 0x0116,	# 	LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x00cc: 0x0122,	# 	LATIN CAPITAL LETTER G WITH CEDILLA
+	0x00cd: 0x0136,	# 	LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00ce: 0x012a,	# 	LATIN CAPITAL LETTER I WITH MACRON
+	0x00cf: 0x013b,	# 	LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00d0: 0x0160,	# 	LATIN CAPITAL LETTER S WITH CARON
+	0x00d1: 0x0143,	# 	LATIN CAPITAL LETTER N WITH ACUTE
+	0x00d2: 0x0145,	# 	LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00d4: 0x014c,	# 	LATIN CAPITAL LETTER O WITH MACRON
+	0x00d8: 0x0172,	# 	LATIN CAPITAL LETTER U WITH OGONEK
+	0x00d9: 0x0141,	# 	LATIN CAPITAL LETTER L WITH STROKE
+	0x00da: 0x015a,	# 	LATIN CAPITAL LETTER S WITH ACUTE
+	0x00db: 0x016a,	# 	LATIN CAPITAL LETTER U WITH MACRON
+	0x00dd: 0x017b,	# 	LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00de: 0x017d,	# 	LATIN CAPITAL LETTER Z WITH CARON
+	0x00e0: 0x0105,	# 	LATIN SMALL LETTER A WITH OGONEK
+	0x00e1: 0x012f,	# 	LATIN SMALL LETTER I WITH OGONEK
+	0x00e2: 0x0101,	# 	LATIN SMALL LETTER A WITH MACRON
+	0x00e3: 0x0107,	# 	LATIN SMALL LETTER C WITH ACUTE
+	0x00e6: 0x0119,	# 	LATIN SMALL LETTER E WITH OGONEK
+	0x00e7: 0x0113,	# 	LATIN SMALL LETTER E WITH MACRON
+	0x00e8: 0x010d,	# 	LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x017a,	# 	LATIN SMALL LETTER Z WITH ACUTE
+	0x00eb: 0x0117,	# 	LATIN SMALL LETTER E WITH DOT ABOVE
+	0x00ec: 0x0123,	# 	LATIN SMALL LETTER G WITH CEDILLA
+	0x00ed: 0x0137,	# 	LATIN SMALL LETTER K WITH CEDILLA
+	0x00ee: 0x012b,	# 	LATIN SMALL LETTER I WITH MACRON
+	0x00ef: 0x013c,	# 	LATIN SMALL LETTER L WITH CEDILLA
+	0x00f0: 0x0161,	# 	LATIN SMALL LETTER S WITH CARON
+	0x00f1: 0x0144,	# 	LATIN SMALL LETTER N WITH ACUTE
+	0x00f2: 0x0146,	# 	LATIN SMALL LETTER N WITH CEDILLA
+	0x00f4: 0x014d,	# 	LATIN SMALL LETTER O WITH MACRON
+	0x00f8: 0x0173,	# 	LATIN SMALL LETTER U WITH OGONEK
+	0x00f9: 0x0142,	# 	LATIN SMALL LETTER L WITH STROKE
+	0x00fa: 0x015b,	# 	LATIN SMALL LETTER S WITH ACUTE
+	0x00fb: 0x016b,	# 	LATIN SMALL LETTER U WITH MACRON
+	0x00fd: 0x017c,	# 	LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00fe: 0x017e,	# 	LATIN SMALL LETTER Z WITH CARON
+	0x00ff: 0x2019,	# 	RIGHT SINGLE QUOTATION MARK
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_14.py b/lib-python/2.2/encodings/iso8859_14.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_14.py
@@ -0,0 +1,75 @@
+""" Python Character Mapping Codec generated from '8859-14.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x1e02,	# 	LATIN CAPITAL LETTER B WITH DOT ABOVE
+	0x00a2: 0x1e03,	# 	LATIN SMALL LETTER B WITH DOT ABOVE
+	0x00a4: 0x010a,	# 	LATIN CAPITAL LETTER C WITH DOT ABOVE
+	0x00a5: 0x010b,	# 	LATIN SMALL LETTER C WITH DOT ABOVE
+	0x00a6: 0x1e0a,	# 	LATIN CAPITAL LETTER D WITH DOT ABOVE
+	0x00a8: 0x1e80,	# 	LATIN CAPITAL LETTER W WITH GRAVE
+	0x00aa: 0x1e82,	# 	LATIN CAPITAL LETTER W WITH ACUTE
+	0x00ab: 0x1e0b,	# 	LATIN SMALL LETTER D WITH DOT ABOVE
+	0x00ac: 0x1ef2,	# 	LATIN CAPITAL LETTER Y WITH GRAVE
+	0x00af: 0x0178,	# 	LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00b0: 0x1e1e,	# 	LATIN CAPITAL LETTER F WITH DOT ABOVE
+	0x00b1: 0x1e1f,	# 	LATIN SMALL LETTER F WITH DOT ABOVE
+	0x00b2: 0x0120,	# 	LATIN CAPITAL LETTER G WITH DOT ABOVE
+	0x00b3: 0x0121,	# 	LATIN SMALL LETTER G WITH DOT ABOVE
+	0x00b4: 0x1e40,	# 	LATIN CAPITAL LETTER M WITH DOT ABOVE
+	0x00b5: 0x1e41,	# 	LATIN SMALL LETTER M WITH DOT ABOVE
+	0x00b7: 0x1e56,	# 	LATIN CAPITAL LETTER P WITH DOT ABOVE
+	0x00b8: 0x1e81,	# 	LATIN SMALL LETTER W WITH GRAVE
+	0x00b9: 0x1e57,	# 	LATIN SMALL LETTER P WITH DOT ABOVE
+	0x00ba: 0x1e83,	# 	LATIN SMALL LETTER W WITH ACUTE
+	0x00bb: 0x1e60,	# 	LATIN CAPITAL LETTER S WITH DOT ABOVE
+	0x00bc: 0x1ef3,	# 	LATIN SMALL LETTER Y WITH GRAVE
+	0x00bd: 0x1e84,	# 	LATIN CAPITAL LETTER W WITH DIAERESIS
+	0x00be: 0x1e85,	# 	LATIN SMALL LETTER W WITH DIAERESIS
+	0x00bf: 0x1e61,	# 	LATIN SMALL LETTER S WITH DOT ABOVE
+	0x00d0: 0x0174,	# 	LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+	0x00d7: 0x1e6a,	# 	LATIN CAPITAL LETTER T WITH DOT ABOVE
+	0x00de: 0x0176,	# 	LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+	0x00f0: 0x0175,	# 	LATIN SMALL LETTER W WITH CIRCUMFLEX
+	0x00f7: 0x1e6b,	# 	LATIN SMALL LETTER T WITH DOT ABOVE
+	0x00fe: 0x0177,	# 	LATIN SMALL LETTER Y WITH CIRCUMFLEX
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_15.py b/lib-python/2.2/encodings/iso8859_15.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_15.py
@@ -0,0 +1,52 @@
+""" Python Character Mapping Codec generated from '8859-15.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a4: 0x20ac,	# 	EURO SIGN
+	0x00a6: 0x0160,	# 	LATIN CAPITAL LETTER S WITH CARON
+	0x00a8: 0x0161,	# 	LATIN SMALL LETTER S WITH CARON
+	0x00b4: 0x017d,	# 	LATIN CAPITAL LETTER Z WITH CARON
+	0x00b8: 0x017e,	# 	LATIN SMALL LETTER Z WITH CARON
+	0x00bc: 0x0152,	# 	LATIN CAPITAL LIGATURE OE
+	0x00bd: 0x0153,	# 	LATIN SMALL LIGATURE OE
+	0x00be: 0x0178,	# 	LATIN CAPITAL LETTER Y WITH DIAERESIS
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_2.py b/lib-python/2.2/encodings/iso8859_2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_2.py
@@ -0,0 +1,101 @@
+""" Python Character Mapping Codec generated from '8859-2.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x0104,	# 	LATIN CAPITAL LETTER A WITH OGONEK
+	0x00a2: 0x02d8,	# 	BREVE
+	0x00a3: 0x0141,	# 	LATIN CAPITAL LETTER L WITH STROKE
+	0x00a5: 0x013d,	# 	LATIN CAPITAL LETTER L WITH CARON
+	0x00a6: 0x015a,	# 	LATIN CAPITAL LETTER S WITH ACUTE
+	0x00a9: 0x0160,	# 	LATIN CAPITAL LETTER S WITH CARON
+	0x00aa: 0x015e,	# 	LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00ab: 0x0164,	# 	LATIN CAPITAL LETTER T WITH CARON
+	0x00ac: 0x0179,	# 	LATIN CAPITAL LETTER Z WITH ACUTE
+	0x00ae: 0x017d,	# 	LATIN CAPITAL LETTER Z WITH CARON
+	0x00af: 0x017b,	# 	LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00b1: 0x0105,	# 	LATIN SMALL LETTER A WITH OGONEK
+	0x00b2: 0x02db,	# 	OGONEK
+	0x00b3: 0x0142,	# 	LATIN SMALL LETTER L WITH STROKE
+	0x00b5: 0x013e,	# 	LATIN SMALL LETTER L WITH CARON
+	0x00b6: 0x015b,	# 	LATIN SMALL LETTER S WITH ACUTE
+	0x00b7: 0x02c7,	# 	CARON
+	0x00b9: 0x0161,	# 	LATIN SMALL LETTER S WITH CARON
+	0x00ba: 0x015f,	# 	LATIN SMALL LETTER S WITH CEDILLA
+	0x00bb: 0x0165,	# 	LATIN SMALL LETTER T WITH CARON
+	0x00bc: 0x017a,	# 	LATIN SMALL LETTER Z WITH ACUTE
+	0x00bd: 0x02dd,	# 	DOUBLE ACUTE ACCENT
+	0x00be: 0x017e,	# 	LATIN SMALL LETTER Z WITH CARON
+	0x00bf: 0x017c,	# 	LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00c0: 0x0154,	# 	LATIN CAPITAL LETTER R WITH ACUTE
+	0x00c3: 0x0102,	# 	LATIN CAPITAL LETTER A WITH BREVE
+	0x00c5: 0x0139,	# 	LATIN CAPITAL LETTER L WITH ACUTE
+	0x00c6: 0x0106,	# 	LATIN CAPITAL LETTER C WITH ACUTE
+	0x00c8: 0x010c,	# 	LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0118,	# 	LATIN CAPITAL LETTER E WITH OGONEK
+	0x00cc: 0x011a,	# 	LATIN CAPITAL LETTER E WITH CARON
+	0x00cf: 0x010e,	# 	LATIN CAPITAL LETTER D WITH CARON
+	0x00d0: 0x0110,	# 	LATIN CAPITAL LETTER D WITH STROKE
+	0x00d1: 0x0143,	# 	LATIN CAPITAL LETTER N WITH ACUTE
+	0x00d2: 0x0147,	# 	LATIN CAPITAL LETTER N WITH CARON
+	0x00d5: 0x0150,	# 	LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+	0x00d8: 0x0158,	# 	LATIN CAPITAL LETTER R WITH CARON
+	0x00d9: 0x016e,	# 	LATIN CAPITAL LETTER U WITH RING ABOVE
+	0x00db: 0x0170,	# 	LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+	0x00de: 0x0162,	# 	LATIN CAPITAL LETTER T WITH CEDILLA
+	0x00e0: 0x0155,	# 	LATIN SMALL LETTER R WITH ACUTE
+	0x00e3: 0x0103,	# 	LATIN SMALL LETTER A WITH BREVE
+	0x00e5: 0x013a,	# 	LATIN SMALL LETTER L WITH ACUTE
+	0x00e6: 0x0107,	# 	LATIN SMALL LETTER C WITH ACUTE
+	0x00e8: 0x010d,	# 	LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x0119,	# 	LATIN SMALL LETTER E WITH OGONEK
+	0x00ec: 0x011b,	# 	LATIN SMALL LETTER E WITH CARON
+	0x00ef: 0x010f,	# 	LATIN SMALL LETTER D WITH CARON
+	0x00f0: 0x0111,	# 	LATIN SMALL LETTER D WITH STROKE
+	0x00f1: 0x0144,	# 	LATIN SMALL LETTER N WITH ACUTE
+	0x00f2: 0x0148,	# 	LATIN SMALL LETTER N WITH CARON
+	0x00f5: 0x0151,	# 	LATIN SMALL LETTER O WITH DOUBLE ACUTE
+	0x00f8: 0x0159,	# 	LATIN SMALL LETTER R WITH CARON
+	0x00f9: 0x016f,	# 	LATIN SMALL LETTER U WITH RING ABOVE
+	0x00fb: 0x0171,	# 	LATIN SMALL LETTER U WITH DOUBLE ACUTE
+	0x00fe: 0x0163,	# 	LATIN SMALL LETTER T WITH CEDILLA
+	0x00ff: 0x02d9,	# 	DOT ABOVE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_3.py b/lib-python/2.2/encodings/iso8859_3.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_3.py
@@ -0,0 +1,79 @@
+""" Python Character Mapping Codec generated from '8859-3.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x0126,	# 	LATIN CAPITAL LETTER H WITH STROKE
+	0x00a2: 0x02d8,	# 	BREVE
+	0x00a5: None,
+	0x00a6: 0x0124,	# 	LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+	0x00a9: 0x0130,	# 	LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x00aa: 0x015e,	# 	LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00ab: 0x011e,	# 	LATIN CAPITAL LETTER G WITH BREVE
+	0x00ac: 0x0134,	# 	LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+	0x00ae: None,
+	0x00af: 0x017b,	# 	LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00b1: 0x0127,	# 	LATIN SMALL LETTER H WITH STROKE
+	0x00b6: 0x0125,	# 	LATIN SMALL LETTER H WITH CIRCUMFLEX
+	0x00b9: 0x0131,	# 	LATIN SMALL LETTER DOTLESS I
+	0x00ba: 0x015f,	# 	LATIN SMALL LETTER S WITH CEDILLA
+	0x00bb: 0x011f,	# 	LATIN SMALL LETTER G WITH BREVE
+	0x00bc: 0x0135,	# 	LATIN SMALL LETTER J WITH CIRCUMFLEX
+	0x00be: None,
+	0x00bf: 0x017c,	# 	LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00c3: None,
+	0x00c5: 0x010a,	# 	LATIN CAPITAL LETTER C WITH DOT ABOVE
+	0x00c6: 0x0108,	# 	LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+	0x00d0: None,
+	0x00d5: 0x0120,	# 	LATIN CAPITAL LETTER G WITH DOT ABOVE
+	0x00d8: 0x011c,	# 	LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+	0x00dd: 0x016c,	# 	LATIN CAPITAL LETTER U WITH BREVE
+	0x00de: 0x015c,	# 	LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+	0x00e3: None,
+	0x00e5: 0x010b,	# 	LATIN SMALL LETTER C WITH DOT ABOVE
+	0x00e6: 0x0109,	# 	LATIN SMALL LETTER C WITH CIRCUMFLEX
+	0x00f0: None,
+	0x00f5: 0x0121,	# 	LATIN SMALL LETTER G WITH DOT ABOVE
+	0x00f8: 0x011d,	# 	LATIN SMALL LETTER G WITH CIRCUMFLEX
+	0x00fd: 0x016d,	# 	LATIN SMALL LETTER U WITH BREVE
+	0x00fe: 0x015d,	# 	LATIN SMALL LETTER S WITH CIRCUMFLEX
+	0x00ff: 0x02d9,	# 	DOT ABOVE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_4.py b/lib-python/2.2/encodings/iso8859_4.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_4.py
@@ -0,0 +1,94 @@
+""" Python Character Mapping Codec generated from '8859-4.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x0104,	# 	LATIN CAPITAL LETTER A WITH OGONEK
+	0x00a2: 0x0138,	# 	LATIN SMALL LETTER KRA
+	0x00a3: 0x0156,	# 	LATIN CAPITAL LETTER R WITH CEDILLA
+	0x00a5: 0x0128,	# 	LATIN CAPITAL LETTER I WITH TILDE
+	0x00a6: 0x013b,	# 	LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00a9: 0x0160,	# 	LATIN CAPITAL LETTER S WITH CARON
+	0x00aa: 0x0112,	# 	LATIN CAPITAL LETTER E WITH MACRON
+	0x00ab: 0x0122,	# 	LATIN CAPITAL LETTER G WITH CEDILLA
+	0x00ac: 0x0166,	# 	LATIN CAPITAL LETTER T WITH STROKE
+	0x00ae: 0x017d,	# 	LATIN CAPITAL LETTER Z WITH CARON
+	0x00b1: 0x0105,	# 	LATIN SMALL LETTER A WITH OGONEK
+	0x00b2: 0x02db,	# 	OGONEK
+	0x00b3: 0x0157,	# 	LATIN SMALL LETTER R WITH CEDILLA
+	0x00b5: 0x0129,	# 	LATIN SMALL LETTER I WITH TILDE
+	0x00b6: 0x013c,	# 	LATIN SMALL LETTER L WITH CEDILLA
+	0x00b7: 0x02c7,	# 	CARON
+	0x00b9: 0x0161,	# 	LATIN SMALL LETTER S WITH CARON
+	0x00ba: 0x0113,	# 	LATIN SMALL LETTER E WITH MACRON
+	0x00bb: 0x0123,	# 	LATIN SMALL LETTER G WITH CEDILLA
+	0x00bc: 0x0167,	# 	LATIN SMALL LETTER T WITH STROKE
+	0x00bd: 0x014a,	# 	LATIN CAPITAL LETTER ENG
+	0x00be: 0x017e,	# 	LATIN SMALL LETTER Z WITH CARON
+	0x00bf: 0x014b,	# 	LATIN SMALL LETTER ENG
+	0x00c0: 0x0100,	# 	LATIN CAPITAL LETTER A WITH MACRON
+	0x00c7: 0x012e,	# 	LATIN CAPITAL LETTER I WITH OGONEK
+	0x00c8: 0x010c,	# 	LATIN CAPITAL LETTER C WITH CARON
+	0x00ca: 0x0118,	# 	LATIN CAPITAL LETTER E WITH OGONEK
+	0x00cc: 0x0116,	# 	LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x00cf: 0x012a,	# 	LATIN CAPITAL LETTER I WITH MACRON
+	0x00d0: 0x0110,	# 	LATIN CAPITAL LETTER D WITH STROKE
+	0x00d1: 0x0145,	# 	LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00d2: 0x014c,	# 	LATIN CAPITAL LETTER O WITH MACRON
+	0x00d3: 0x0136,	# 	LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00d9: 0x0172,	# 	LATIN CAPITAL LETTER U WITH OGONEK
+	0x00dd: 0x0168,	# 	LATIN CAPITAL LETTER U WITH TILDE
+	0x00de: 0x016a,	# 	LATIN CAPITAL LETTER U WITH MACRON
+	0x00e0: 0x0101,	# 	LATIN SMALL LETTER A WITH MACRON
+	0x00e7: 0x012f,	# 	LATIN SMALL LETTER I WITH OGONEK
+	0x00e8: 0x010d,	# 	LATIN SMALL LETTER C WITH CARON
+	0x00ea: 0x0119,	# 	LATIN SMALL LETTER E WITH OGONEK
+	0x00ec: 0x0117,	# 	LATIN SMALL LETTER E WITH DOT ABOVE
+	0x00ef: 0x012b,	# 	LATIN SMALL LETTER I WITH MACRON
+	0x00f0: 0x0111,	# 	LATIN SMALL LETTER D WITH STROKE
+	0x00f1: 0x0146,	# 	LATIN SMALL LETTER N WITH CEDILLA
+	0x00f2: 0x014d,	# 	LATIN SMALL LETTER O WITH MACRON
+	0x00f3: 0x0137,	# 	LATIN SMALL LETTER K WITH CEDILLA
+	0x00f9: 0x0173,	# 	LATIN SMALL LETTER U WITH OGONEK
+	0x00fd: 0x0169,	# 	LATIN SMALL LETTER U WITH TILDE
+	0x00fe: 0x016b,	# 	LATIN SMALL LETTER U WITH MACRON
+	0x00ff: 0x02d9,	# 	DOT ABOVE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_5.py b/lib-python/2.2/encodings/iso8859_5.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_5.py
@@ -0,0 +1,138 @@
+""" Python Character Mapping Codec generated from '8859-5.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x0401,	# 	CYRILLIC CAPITAL LETTER IO
+	0x00a2: 0x0402,	# 	CYRILLIC CAPITAL LETTER DJE
+	0x00a3: 0x0403,	# 	CYRILLIC CAPITAL LETTER GJE
+	0x00a4: 0x0404,	# 	CYRILLIC CAPITAL LETTER UKRAINIAN IE
+	0x00a5: 0x0405,	# 	CYRILLIC CAPITAL LETTER DZE
+	0x00a6: 0x0406,	# 	CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00a7: 0x0407,	# 	CYRILLIC CAPITAL LETTER YI
+	0x00a8: 0x0408,	# 	CYRILLIC CAPITAL LETTER JE
+	0x00a9: 0x0409,	# 	CYRILLIC CAPITAL LETTER LJE
+	0x00aa: 0x040a,	# 	CYRILLIC CAPITAL LETTER NJE
+	0x00ab: 0x040b,	# 	CYRILLIC CAPITAL LETTER TSHE
+	0x00ac: 0x040c,	# 	CYRILLIC CAPITAL LETTER KJE
+	0x00ae: 0x040e,	# 	CYRILLIC CAPITAL LETTER SHORT U
+	0x00af: 0x040f,	# 	CYRILLIC CAPITAL LETTER DZHE
+	0x00b0: 0x0410,	# 	CYRILLIC CAPITAL LETTER A
+	0x00b1: 0x0411,	# 	CYRILLIC CAPITAL LETTER BE
+	0x00b2: 0x0412,	# 	CYRILLIC CAPITAL LETTER VE
+	0x00b3: 0x0413,	# 	CYRILLIC CAPITAL LETTER GHE
+	0x00b4: 0x0414,	# 	CYRILLIC CAPITAL LETTER DE
+	0x00b5: 0x0415,	# 	CYRILLIC CAPITAL LETTER IE
+	0x00b6: 0x0416,	# 	CYRILLIC CAPITAL LETTER ZHE
+	0x00b7: 0x0417,	# 	CYRILLIC CAPITAL LETTER ZE
+	0x00b8: 0x0418,	# 	CYRILLIC CAPITAL LETTER I
+	0x00b9: 0x0419,	# 	CYRILLIC CAPITAL LETTER SHORT I
+	0x00ba: 0x041a,	# 	CYRILLIC CAPITAL LETTER KA
+	0x00bb: 0x041b,	# 	CYRILLIC CAPITAL LETTER EL
+	0x00bc: 0x041c,	# 	CYRILLIC CAPITAL LETTER EM
+	0x00bd: 0x041d,	# 	CYRILLIC CAPITAL LETTER EN
+	0x00be: 0x041e,	# 	CYRILLIC CAPITAL LETTER O
+	0x00bf: 0x041f,	# 	CYRILLIC CAPITAL LETTER PE
+	0x00c0: 0x0420,	# 	CYRILLIC CAPITAL LETTER ER
+	0x00c1: 0x0421,	# 	CYRILLIC CAPITAL LETTER ES
+	0x00c2: 0x0422,	# 	CYRILLIC CAPITAL LETTER TE
+	0x00c3: 0x0423,	# 	CYRILLIC CAPITAL LETTER U
+	0x00c4: 0x0424,	# 	CYRILLIC CAPITAL LETTER EF
+	0x00c5: 0x0425,	# 	CYRILLIC CAPITAL LETTER HA
+	0x00c6: 0x0426,	# 	CYRILLIC CAPITAL LETTER TSE
+	0x00c7: 0x0427,	# 	CYRILLIC CAPITAL LETTER CHE
+	0x00c8: 0x0428,	# 	CYRILLIC CAPITAL LETTER SHA
+	0x00c9: 0x0429,	# 	CYRILLIC CAPITAL LETTER SHCHA
+	0x00ca: 0x042a,	# 	CYRILLIC CAPITAL LETTER HARD SIGN
+	0x00cb: 0x042b,	# 	CYRILLIC CAPITAL LETTER YERU
+	0x00cc: 0x042c,	# 	CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x00cd: 0x042d,	# 	CYRILLIC CAPITAL LETTER E
+	0x00ce: 0x042e,	# 	CYRILLIC CAPITAL LETTER YU
+	0x00cf: 0x042f,	# 	CYRILLIC CAPITAL LETTER YA
+	0x00d0: 0x0430,	# 	CYRILLIC SMALL LETTER A
+	0x00d1: 0x0431,	# 	CYRILLIC SMALL LETTER BE
+	0x00d2: 0x0432,	# 	CYRILLIC SMALL LETTER VE
+	0x00d3: 0x0433,	# 	CYRILLIC SMALL LETTER GHE
+	0x00d4: 0x0434,	# 	CYRILLIC SMALL LETTER DE
+	0x00d5: 0x0435,	# 	CYRILLIC SMALL LETTER IE
+	0x00d6: 0x0436,	# 	CYRILLIC SMALL LETTER ZHE
+	0x00d7: 0x0437,	# 	CYRILLIC SMALL LETTER ZE
+	0x00d8: 0x0438,	# 	CYRILLIC SMALL LETTER I
+	0x00d9: 0x0439,	# 	CYRILLIC SMALL LETTER SHORT I
+	0x00da: 0x043a,	# 	CYRILLIC SMALL LETTER KA
+	0x00db: 0x043b,	# 	CYRILLIC SMALL LETTER EL
+	0x00dc: 0x043c,	# 	CYRILLIC SMALL LETTER EM
+	0x00dd: 0x043d,	# 	CYRILLIC SMALL LETTER EN
+	0x00de: 0x043e,	# 	CYRILLIC SMALL LETTER O
+	0x00df: 0x043f,	# 	CYRILLIC SMALL LETTER PE
+	0x00e0: 0x0440,	# 	CYRILLIC SMALL LETTER ER
+	0x00e1: 0x0441,	# 	CYRILLIC SMALL LETTER ES
+	0x00e2: 0x0442,	# 	CYRILLIC SMALL LETTER TE
+	0x00e3: 0x0443,	# 	CYRILLIC SMALL LETTER U
+	0x00e4: 0x0444,	# 	CYRILLIC SMALL LETTER EF
+	0x00e5: 0x0445,	# 	CYRILLIC SMALL LETTER HA
+	0x00e6: 0x0446,	# 	CYRILLIC SMALL LETTER TSE
+	0x00e7: 0x0447,	# 	CYRILLIC SMALL LETTER CHE
+	0x00e8: 0x0448,	# 	CYRILLIC SMALL LETTER SHA
+	0x00e9: 0x0449,	# 	CYRILLIC SMALL LETTER SHCHA
+	0x00ea: 0x044a,	# 	CYRILLIC SMALL LETTER HARD SIGN
+	0x00eb: 0x044b,	# 	CYRILLIC SMALL LETTER YERU
+	0x00ec: 0x044c,	# 	CYRILLIC SMALL LETTER SOFT SIGN
+	0x00ed: 0x044d,	# 	CYRILLIC SMALL LETTER E
+	0x00ee: 0x044e,	# 	CYRILLIC SMALL LETTER YU
+	0x00ef: 0x044f,	# 	CYRILLIC SMALL LETTER YA
+	0x00f0: 0x2116,	# 	NUMERO SIGN
+	0x00f1: 0x0451,	# 	CYRILLIC SMALL LETTER IO
+	0x00f2: 0x0452,	# 	CYRILLIC SMALL LETTER DJE
+	0x00f3: 0x0453,	# 	CYRILLIC SMALL LETTER GJE
+	0x00f4: 0x0454,	# 	CYRILLIC SMALL LETTER UKRAINIAN IE
+	0x00f5: 0x0455,	# 	CYRILLIC SMALL LETTER DZE
+	0x00f6: 0x0456,	# 	CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00f7: 0x0457,	# 	CYRILLIC SMALL LETTER YI
+	0x00f8: 0x0458,	# 	CYRILLIC SMALL LETTER JE
+	0x00f9: 0x0459,	# 	CYRILLIC SMALL LETTER LJE
+	0x00fa: 0x045a,	# 	CYRILLIC SMALL LETTER NJE
+	0x00fb: 0x045b,	# 	CYRILLIC SMALL LETTER TSHE
+	0x00fc: 0x045c,	# 	CYRILLIC SMALL LETTER KJE
+	0x00fd: 0x00a7,	# 	SECTION SIGN
+	0x00fe: 0x045e,	# 	CYRILLIC SMALL LETTER SHORT U
+	0x00ff: 0x045f,	# 	CYRILLIC SMALL LETTER DZHE
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_6.py b/lib-python/2.2/encodings/iso8859_6.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_6.py
@@ -0,0 +1,137 @@
+""" Python Character Mapping Codec generated from '8859-6.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: None,
+	0x00a2: None,
+	0x00a3: None,
+	0x00a5: None,
+	0x00a6: None,
+	0x00a7: None,
+	0x00a8: None,
+	0x00a9: None,
+	0x00aa: None,
+	0x00ab: None,
+	0x00ac: 0x060c,	# 	ARABIC COMMA
+	0x00ae: None,
+	0x00af: None,
+	0x00b0: None,
+	0x00b1: None,
+	0x00b2: None,
+	0x00b3: None,
+	0x00b4: None,
+	0x00b5: None,
+	0x00b6: None,
+	0x00b7: None,
+	0x00b8: None,
+	0x00b9: None,
+	0x00ba: None,
+	0x00bb: 0x061b,	# 	ARABIC SEMICOLON
+	0x00bc: None,
+	0x00bd: None,
+	0x00be: None,
+	0x00bf: 0x061f,	# 	ARABIC QUESTION MARK
+	0x00c0: None,
+	0x00c1: 0x0621,	# 	ARABIC LETTER HAMZA
+	0x00c2: 0x0622,	# 	ARABIC LETTER ALEF WITH MADDA ABOVE
+	0x00c3: 0x0623,	# 	ARABIC LETTER ALEF WITH HAMZA ABOVE
+	0x00c4: 0x0624,	# 	ARABIC LETTER WAW WITH HAMZA ABOVE
+	0x00c5: 0x0625,	# 	ARABIC LETTER ALEF WITH HAMZA BELOW
+	0x00c6: 0x0626,	# 	ARABIC LETTER YEH WITH HAMZA ABOVE
+	0x00c7: 0x0627,	# 	ARABIC LETTER ALEF
+	0x00c8: 0x0628,	# 	ARABIC LETTER BEH
+	0x00c9: 0x0629,	# 	ARABIC LETTER TEH MARBUTA
+	0x00ca: 0x062a,	# 	ARABIC LETTER TEH
+	0x00cb: 0x062b,	# 	ARABIC LETTER THEH
+	0x00cc: 0x062c,	# 	ARABIC LETTER JEEM
+	0x00cd: 0x062d,	# 	ARABIC LETTER HAH
+	0x00ce: 0x062e,	# 	ARABIC LETTER KHAH
+	0x00cf: 0x062f,	# 	ARABIC LETTER DAL
+	0x00d0: 0x0630,	# 	ARABIC LETTER THAL
+	0x00d1: 0x0631,	# 	ARABIC LETTER REH
+	0x00d2: 0x0632,	# 	ARABIC LETTER ZAIN
+	0x00d3: 0x0633,	# 	ARABIC LETTER SEEN
+	0x00d4: 0x0634,	# 	ARABIC LETTER SHEEN
+	0x00d5: 0x0635,	# 	ARABIC LETTER SAD
+	0x00d6: 0x0636,	# 	ARABIC LETTER DAD
+	0x00d7: 0x0637,	# 	ARABIC LETTER TAH
+	0x00d8: 0x0638,	# 	ARABIC LETTER ZAH
+	0x00d9: 0x0639,	# 	ARABIC LETTER AIN
+	0x00da: 0x063a,	# 	ARABIC LETTER GHAIN
+	0x00db: None,
+	0x00dc: None,
+	0x00dd: None,
+	0x00de: None,
+	0x00df: None,
+	0x00e0: 0x0640,	# 	ARABIC TATWEEL
+	0x00e1: 0x0641,	# 	ARABIC LETTER FEH
+	0x00e2: 0x0642,	# 	ARABIC LETTER QAF
+	0x00e3: 0x0643,	# 	ARABIC LETTER KAF
+	0x00e4: 0x0644,	# 	ARABIC LETTER LAM
+	0x00e5: 0x0645,	# 	ARABIC LETTER MEEM
+	0x00e6: 0x0646,	# 	ARABIC LETTER NOON
+	0x00e7: 0x0647,	# 	ARABIC LETTER HEH
+	0x00e8: 0x0648,	# 	ARABIC LETTER WAW
+	0x00e9: 0x0649,	# 	ARABIC LETTER ALEF MAKSURA
+	0x00ea: 0x064a,	# 	ARABIC LETTER YEH
+	0x00eb: 0x064b,	# 	ARABIC FATHATAN
+	0x00ec: 0x064c,	# 	ARABIC DAMMATAN
+	0x00ed: 0x064d,	# 	ARABIC KASRATAN
+	0x00ee: 0x064e,	# 	ARABIC FATHA
+	0x00ef: 0x064f,	# 	ARABIC DAMMA
+	0x00f0: 0x0650,	# 	ARABIC KASRA
+	0x00f1: 0x0651,	# 	ARABIC SHADDA
+	0x00f2: 0x0652,	# 	ARABIC SUKUN
+	0x00f3: None,
+	0x00f4: None,
+	0x00f5: None,
+	0x00f6: None,
+	0x00f7: None,
+	0x00f8: None,
+	0x00f9: None,
+	0x00fa: None,
+	0x00fb: None,
+	0x00fc: None,
+	0x00fd: None,
+	0x00fe: None,
+	0x00ff: None,
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_7.py b/lib-python/2.2/encodings/iso8859_7.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_7.py
@@ -0,0 +1,124 @@
+""" Python Character Mapping Codec generated from '8859-7.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: 0x2018,	# 	LEFT SINGLE QUOTATION MARK
+	0x00a2: 0x2019,	# 	RIGHT SINGLE QUOTATION MARK
+	0x00a4: None,
+	0x00a5: None,
+	0x00aa: None,
+	0x00ae: None,
+	0x00af: 0x2015,	# 	HORIZONTAL BAR
+	0x00b4: 0x0384,	# 	GREEK TONOS
+	0x00b5: 0x0385,	# 	GREEK DIALYTIKA TONOS
+	0x00b6: 0x0386,	# 	GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x00b8: 0x0388,	# 	GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x00b9: 0x0389,	# 	GREEK CAPITAL LETTER ETA WITH TONOS
+	0x00ba: 0x038a,	# 	GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x00bc: 0x038c,	# 	GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x00be: 0x038e,	# 	GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x00bf: 0x038f,	# 	GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x00c0: 0x0390,	# 	GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+	0x00c1: 0x0391,	# 	GREEK CAPITAL LETTER ALPHA
+	0x00c2: 0x0392,	# 	GREEK CAPITAL LETTER BETA
+	0x00c3: 0x0393,	# 	GREEK CAPITAL LETTER GAMMA
+	0x00c4: 0x0394,	# 	GREEK CAPITAL LETTER DELTA
+	0x00c5: 0x0395,	# 	GREEK CAPITAL LETTER EPSILON
+	0x00c6: 0x0396,	# 	GREEK CAPITAL LETTER ZETA
+	0x00c7: 0x0397,	# 	GREEK CAPITAL LETTER ETA
+	0x00c8: 0x0398,	# 	GREEK CAPITAL LETTER THETA
+	0x00c9: 0x0399,	# 	GREEK CAPITAL LETTER IOTA
+	0x00ca: 0x039a,	# 	GREEK CAPITAL LETTER KAPPA
+	0x00cb: 0x039b,	# 	GREEK CAPITAL LETTER LAMDA
+	0x00cc: 0x039c,	# 	GREEK CAPITAL LETTER MU
+	0x00cd: 0x039d,	# 	GREEK CAPITAL LETTER NU
+	0x00ce: 0x039e,	# 	GREEK CAPITAL LETTER XI
+	0x00cf: 0x039f,	# 	GREEK CAPITAL LETTER OMICRON
+	0x00d0: 0x03a0,	# 	GREEK CAPITAL LETTER PI
+	0x00d1: 0x03a1,	# 	GREEK CAPITAL LETTER RHO
+	0x00d2: None,
+	0x00d3: 0x03a3,	# 	GREEK CAPITAL LETTER SIGMA
+	0x00d4: 0x03a4,	# 	GREEK CAPITAL LETTER TAU
+	0x00d5: 0x03a5,	# 	GREEK CAPITAL LETTER UPSILON
+	0x00d6: 0x03a6,	# 	GREEK CAPITAL LETTER PHI
+	0x00d7: 0x03a7,	# 	GREEK CAPITAL LETTER CHI
+	0x00d8: 0x03a8,	# 	GREEK CAPITAL LETTER PSI
+	0x00d9: 0x03a9,	# 	GREEK CAPITAL LETTER OMEGA
+	0x00da: 0x03aa,	# 	GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x00db: 0x03ab,	# 	GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x00dc: 0x03ac,	# 	GREEK SMALL LETTER ALPHA WITH TONOS
+	0x00dd: 0x03ad,	# 	GREEK SMALL LETTER EPSILON WITH TONOS
+	0x00de: 0x03ae,	# 	GREEK SMALL LETTER ETA WITH TONOS
+	0x00df: 0x03af,	# 	GREEK SMALL LETTER IOTA WITH TONOS
+	0x00e0: 0x03b0,	# 	GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+	0x00e1: 0x03b1,	# 	GREEK SMALL LETTER ALPHA
+	0x00e2: 0x03b2,	# 	GREEK SMALL LETTER BETA
+	0x00e3: 0x03b3,	# 	GREEK SMALL LETTER GAMMA
+	0x00e4: 0x03b4,	# 	GREEK SMALL LETTER DELTA
+	0x00e5: 0x03b5,	# 	GREEK SMALL LETTER EPSILON
+	0x00e6: 0x03b6,	# 	GREEK SMALL LETTER ZETA
+	0x00e7: 0x03b7,	# 	GREEK SMALL LETTER ETA
+	0x00e8: 0x03b8,	# 	GREEK SMALL LETTER THETA
+	0x00e9: 0x03b9,	# 	GREEK SMALL LETTER IOTA
+	0x00ea: 0x03ba,	# 	GREEK SMALL LETTER KAPPA
+	0x00eb: 0x03bb,	# 	GREEK SMALL LETTER LAMDA
+	0x00ec: 0x03bc,	# 	GREEK SMALL LETTER MU
+	0x00ed: 0x03bd,	# 	GREEK SMALL LETTER NU
+	0x00ee: 0x03be,	# 	GREEK SMALL LETTER XI
+	0x00ef: 0x03bf,	# 	GREEK SMALL LETTER OMICRON
+	0x00f0: 0x03c0,	# 	GREEK SMALL LETTER PI
+	0x00f1: 0x03c1,	# 	GREEK SMALL LETTER RHO
+	0x00f2: 0x03c2,	# 	GREEK SMALL LETTER FINAL SIGMA
+	0x00f3: 0x03c3,	# 	GREEK SMALL LETTER SIGMA
+	0x00f4: 0x03c4,	# 	GREEK SMALL LETTER TAU
+	0x00f5: 0x03c5,	# 	GREEK SMALL LETTER UPSILON
+	0x00f6: 0x03c6,	# 	GREEK SMALL LETTER PHI
+	0x00f7: 0x03c7,	# 	GREEK SMALL LETTER CHI
+	0x00f8: 0x03c8,	# 	GREEK SMALL LETTER PSI
+	0x00f9: 0x03c9,	# 	GREEK SMALL LETTER OMEGA
+	0x00fa: 0x03ca,	# 	GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00fb: 0x03cb,	# 	GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00fc: 0x03cc,	# 	GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00fd: 0x03cd,	# 	GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00fe: 0x03ce,	# 	GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00ff: None,
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_8.py b/lib-python/2.2/encodings/iso8859_8.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_8.py
@@ -0,0 +1,112 @@
+""" Python Character Mapping Codec generated from '8859-8.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00a1: None,
+	0x00aa: 0x00d7,	# 	MULTIPLICATION SIGN
+	0x00ba: 0x00f7,	# 	DIVISION SIGN
+	0x00bf: None,
+	0x00c0: None,
+	0x00c1: None,
+	0x00c2: None,
+	0x00c3: None,
+	0x00c4: None,
+	0x00c5: None,
+	0x00c6: None,
+	0x00c7: None,
+	0x00c8: None,
+	0x00c9: None,
+	0x00ca: None,
+	0x00cb: None,
+	0x00cc: None,
+	0x00cd: None,
+	0x00ce: None,
+	0x00cf: None,
+	0x00d0: None,
+	0x00d1: None,
+	0x00d2: None,
+	0x00d3: None,
+	0x00d4: None,
+	0x00d5: None,
+	0x00d6: None,
+	0x00d7: None,
+	0x00d8: None,
+	0x00d9: None,
+	0x00da: None,
+	0x00db: None,
+	0x00dc: None,
+	0x00dd: None,
+	0x00de: None,
+	0x00df: 0x2017,	# 	DOUBLE LOW LINE
+	0x00e0: 0x05d0,	# 	HEBREW LETTER ALEF
+	0x00e1: 0x05d1,	# 	HEBREW LETTER BET
+	0x00e2: 0x05d2,	# 	HEBREW LETTER GIMEL
+	0x00e3: 0x05d3,	# 	HEBREW LETTER DALET
+	0x00e4: 0x05d4,	# 	HEBREW LETTER HE
+	0x00e5: 0x05d5,	# 	HEBREW LETTER VAV
+	0x00e6: 0x05d6,	# 	HEBREW LETTER ZAYIN
+	0x00e7: 0x05d7,	# 	HEBREW LETTER HET
+	0x00e8: 0x05d8,	# 	HEBREW LETTER TET
+	0x00e9: 0x05d9,	# 	HEBREW LETTER YOD
+	0x00ea: 0x05da,	# 	HEBREW LETTER FINAL KAF
+	0x00eb: 0x05db,	# 	HEBREW LETTER KAF
+	0x00ec: 0x05dc,	# 	HEBREW LETTER LAMED
+	0x00ed: 0x05dd,	# 	HEBREW LETTER FINAL MEM
+	0x00ee: 0x05de,	# 	HEBREW LETTER MEM
+	0x00ef: 0x05df,	# 	HEBREW LETTER FINAL NUN
+	0x00f0: 0x05e0,	# 	HEBREW LETTER NUN
+	0x00f1: 0x05e1,	# 	HEBREW LETTER SAMEKH
+	0x00f2: 0x05e2,	# 	HEBREW LETTER AYIN
+	0x00f3: 0x05e3,	# 	HEBREW LETTER FINAL PE
+	0x00f4: 0x05e4,	# 	HEBREW LETTER PE
+	0x00f5: 0x05e5,	# 	HEBREW LETTER FINAL TSADI
+	0x00f6: 0x05e6,	# 	HEBREW LETTER TSADI
+	0x00f7: 0x05e7,	# 	HEBREW LETTER QOF
+	0x00f8: 0x05e8,	# 	HEBREW LETTER RESH
+	0x00f9: 0x05e9,	# 	HEBREW LETTER SHIN
+	0x00fa: 0x05ea,	# 	HEBREW LETTER TAV
+	0x00fb: None,
+	0x00fc: None,
+	0x00fd: 0x200e,	# 	LEFT-TO-RIGHT MARK
+	0x00fe: 0x200f,	# 	RIGHT-TO-LEFT MARK
+	0x00ff: None,
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/iso8859_9.py b/lib-python/2.2/encodings/iso8859_9.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/iso8859_9.py
@@ -0,0 +1,50 @@
+""" Python Character Mapping Codec generated from '8859-9.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x00d0: 0x011e,	# 	LATIN CAPITAL LETTER G WITH BREVE
+	0x00dd: 0x0130,	# 	LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x00de: 0x015e,	# 	LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00f0: 0x011f,	# 	LATIN SMALL LETTER G WITH BREVE
+	0x00fd: 0x0131,	# 	LATIN SMALL LETTER DOTLESS I
+	0x00fe: 0x015f,	# 	LATIN SMALL LETTER S WITH CEDILLA
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/koi8_r.py b/lib-python/2.2/encodings/koi8_r.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/koi8_r.py
@@ -0,0 +1,172 @@
+""" Python Character Mapping Codec generated from 'KOI8-R.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x2500,	# 	BOX DRAWINGS LIGHT HORIZONTAL
+	0x0081: 0x2502,	# 	BOX DRAWINGS LIGHT VERTICAL
+	0x0082: 0x250c,	# 	BOX DRAWINGS LIGHT DOWN AND RIGHT
+	0x0083: 0x2510,	# 	BOX DRAWINGS LIGHT DOWN AND LEFT
+	0x0084: 0x2514,	# 	BOX DRAWINGS LIGHT UP AND RIGHT
+	0x0085: 0x2518,	# 	BOX DRAWINGS LIGHT UP AND LEFT
+	0x0086: 0x251c,	# 	BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+	0x0087: 0x2524,	# 	BOX DRAWINGS LIGHT VERTICAL AND LEFT
+	0x0088: 0x252c,	# 	BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+	0x0089: 0x2534,	# 	BOX DRAWINGS LIGHT UP AND HORIZONTAL
+	0x008a: 0x253c,	# 	BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+	0x008b: 0x2580,	# 	UPPER HALF BLOCK
+	0x008c: 0x2584,	# 	LOWER HALF BLOCK
+	0x008d: 0x2588,	# 	FULL BLOCK
+	0x008e: 0x258c,	# 	LEFT HALF BLOCK
+	0x008f: 0x2590,	# 	RIGHT HALF BLOCK
+	0x0090: 0x2591,	# 	LIGHT SHADE
+	0x0091: 0x2592,	# 	MEDIUM SHADE
+	0x0092: 0x2593,	# 	DARK SHADE
+	0x0093: 0x2320,	# 	TOP HALF INTEGRAL
+	0x0094: 0x25a0,	# 	BLACK SQUARE
+	0x0095: 0x2219,	# 	BULLET OPERATOR
+	0x0096: 0x221a,	# 	SQUARE ROOT
+	0x0097: 0x2248,	# 	ALMOST EQUAL TO
+	0x0098: 0x2264,	# 	LESS-THAN OR EQUAL TO
+	0x0099: 0x2265,	# 	GREATER-THAN OR EQUAL TO
+	0x009a: 0x00a0,	# 	NO-BREAK SPACE
+	0x009b: 0x2321,	# 	BOTTOM HALF INTEGRAL
+	0x009c: 0x00b0,	# 	DEGREE SIGN
+	0x009d: 0x00b2,	# 	SUPERSCRIPT TWO
+	0x009e: 0x00b7,	# 	MIDDLE DOT
+	0x009f: 0x00f7,	# 	DIVISION SIGN
+	0x00a0: 0x2550,	# 	BOX DRAWINGS DOUBLE HORIZONTAL
+	0x00a1: 0x2551,	# 	BOX DRAWINGS DOUBLE VERTICAL
+	0x00a2: 0x2552,	# 	BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+	0x00a3: 0x0451,	# 	CYRILLIC SMALL LETTER IO
+	0x00a4: 0x2553,	# 	BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+	0x00a5: 0x2554,	# 	BOX DRAWINGS DOUBLE DOWN AND RIGHT
+	0x00a6: 0x2555,	# 	BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+	0x00a7: 0x2556,	# 	BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+	0x00a8: 0x2557,	# 	BOX DRAWINGS DOUBLE DOWN AND LEFT
+	0x00a9: 0x2558,	# 	BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+	0x00aa: 0x2559,	# 	BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+	0x00ab: 0x255a,	# 	BOX DRAWINGS DOUBLE UP AND RIGHT
+	0x00ac: 0x255b,	# 	BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+	0x00ad: 0x255c,	# 	BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+	0x00ae: 0x255d,	# 	BOX DRAWINGS DOUBLE UP AND LEFT
+	0x00af: 0x255e,	# 	BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+	0x00b0: 0x255f,	# 	BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+	0x00b1: 0x2560,	# 	BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+	0x00b2: 0x2561,	# 	BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+	0x00b3: 0x0401,	# 	CYRILLIC CAPITAL LETTER IO
+	0x00b4: 0x2562,	# 	BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+	0x00b5: 0x2563,	# 	BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+	0x00b6: 0x2564,	# 	BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+	0x00b7: 0x2565,	# 	BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+	0x00b8: 0x2566,	# 	BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+	0x00b9: 0x2567,	# 	BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+	0x00ba: 0x2568,	# 	BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+	0x00bb: 0x2569,	# 	BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+	0x00bc: 0x256a,	# 	BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+	0x00bd: 0x256b,	# 	BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+	0x00be: 0x256c,	# 	BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+	0x00bf: 0x00a9,	# 	COPYRIGHT SIGN
+	0x00c0: 0x044e,	# 	CYRILLIC SMALL LETTER YU
+	0x00c1: 0x0430,	# 	CYRILLIC SMALL LETTER A
+	0x00c2: 0x0431,	# 	CYRILLIC SMALL LETTER BE
+	0x00c3: 0x0446,	# 	CYRILLIC SMALL LETTER TSE
+	0x00c4: 0x0434,	# 	CYRILLIC SMALL LETTER DE
+	0x00c5: 0x0435,	# 	CYRILLIC SMALL LETTER IE
+	0x00c6: 0x0444,	# 	CYRILLIC SMALL LETTER EF
+	0x00c7: 0x0433,	# 	CYRILLIC SMALL LETTER GHE
+	0x00c8: 0x0445,	# 	CYRILLIC SMALL LETTER HA
+	0x00c9: 0x0438,	# 	CYRILLIC SMALL LETTER I
+	0x00ca: 0x0439,	# 	CYRILLIC SMALL LETTER SHORT I
+	0x00cb: 0x043a,	# 	CYRILLIC SMALL LETTER KA
+	0x00cc: 0x043b,	# 	CYRILLIC SMALL LETTER EL
+	0x00cd: 0x043c,	# 	CYRILLIC SMALL LETTER EM
+	0x00ce: 0x043d,	# 	CYRILLIC SMALL LETTER EN
+	0x00cf: 0x043e,	# 	CYRILLIC SMALL LETTER O
+	0x00d0: 0x043f,	# 	CYRILLIC SMALL LETTER PE
+	0x00d1: 0x044f,	# 	CYRILLIC SMALL LETTER YA
+	0x00d2: 0x0440,	# 	CYRILLIC SMALL LETTER ER
+	0x00d3: 0x0441,	# 	CYRILLIC SMALL LETTER ES
+	0x00d4: 0x0442,	# 	CYRILLIC SMALL LETTER TE
+	0x00d5: 0x0443,	# 	CYRILLIC SMALL LETTER U
+	0x00d6: 0x0436,	# 	CYRILLIC SMALL LETTER ZHE
+	0x00d7: 0x0432,	# 	CYRILLIC SMALL LETTER VE
+	0x00d8: 0x044c,	# 	CYRILLIC SMALL LETTER SOFT SIGN
+	0x00d9: 0x044b,	# 	CYRILLIC SMALL LETTER YERU
+	0x00da: 0x0437,	# 	CYRILLIC SMALL LETTER ZE
+	0x00db: 0x0448,	# 	CYRILLIC SMALL LETTER SHA
+	0x00dc: 0x044d,	# 	CYRILLIC SMALL LETTER E
+	0x00dd: 0x0449,	# 	CYRILLIC SMALL LETTER SHCHA
+	0x00de: 0x0447,	# 	CYRILLIC SMALL LETTER CHE
+	0x00df: 0x044a,	# 	CYRILLIC SMALL LETTER HARD SIGN
+	0x00e0: 0x042e,	# 	CYRILLIC CAPITAL LETTER YU
+	0x00e1: 0x0410,	# 	CYRILLIC CAPITAL LETTER A
+	0x00e2: 0x0411,	# 	CYRILLIC CAPITAL LETTER BE
+	0x00e3: 0x0426,	# 	CYRILLIC CAPITAL LETTER TSE
+	0x00e4: 0x0414,	# 	CYRILLIC CAPITAL LETTER DE
+	0x00e5: 0x0415,	# 	CYRILLIC CAPITAL LETTER IE
+	0x00e6: 0x0424,	# 	CYRILLIC CAPITAL LETTER EF
+	0x00e7: 0x0413,	# 	CYRILLIC CAPITAL LETTER GHE
+	0x00e8: 0x0425,	# 	CYRILLIC CAPITAL LETTER HA
+	0x00e9: 0x0418,	# 	CYRILLIC CAPITAL LETTER I
+	0x00ea: 0x0419,	# 	CYRILLIC CAPITAL LETTER SHORT I
+	0x00eb: 0x041a,	# 	CYRILLIC CAPITAL LETTER KA
+	0x00ec: 0x041b,	# 	CYRILLIC CAPITAL LETTER EL
+	0x00ed: 0x041c,	# 	CYRILLIC CAPITAL LETTER EM
+	0x00ee: 0x041d,	# 	CYRILLIC CAPITAL LETTER EN
+	0x00ef: 0x041e,	# 	CYRILLIC CAPITAL LETTER O
+	0x00f0: 0x041f,	# 	CYRILLIC CAPITAL LETTER PE
+	0x00f1: 0x042f,	# 	CYRILLIC CAPITAL LETTER YA
+	0x00f2: 0x0420,	# 	CYRILLIC CAPITAL LETTER ER
+	0x00f3: 0x0421,	# 	CYRILLIC CAPITAL LETTER ES
+	0x00f4: 0x0422,	# 	CYRILLIC CAPITAL LETTER TE
+	0x00f5: 0x0423,	# 	CYRILLIC CAPITAL LETTER U
+	0x00f6: 0x0416,	# 	CYRILLIC CAPITAL LETTER ZHE
+	0x00f7: 0x0412,	# 	CYRILLIC CAPITAL LETTER VE
+	0x00f8: 0x042c,	# 	CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x00f9: 0x042b,	# 	CYRILLIC CAPITAL LETTER YERU
+	0x00fa: 0x0417,	# 	CYRILLIC CAPITAL LETTER ZE
+	0x00fb: 0x0428,	# 	CYRILLIC CAPITAL LETTER SHA
+	0x00fc: 0x042d,	# 	CYRILLIC CAPITAL LETTER E
+	0x00fd: 0x0429,	# 	CYRILLIC CAPITAL LETTER SHCHA
+	0x00fe: 0x0427,	# 	CYRILLIC CAPITAL LETTER CHE
+	0x00ff: 0x042a,	# 	CYRILLIC CAPITAL LETTER HARD SIGN
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/koi8_u.py b/lib-python/2.2/encodings/koi8_u.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/koi8_u.py
@@ -0,0 +1,54 @@
+""" Python Character Mapping Codec for KOI8U.
+
+    This character scheme is compliant to RFC2319
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+Modified by Maxim Dzumanenko <mvd at mylinux.com.ua>.
+
+(c) Copyright 2002, Python Software Foundation.
+
+"""#"
+
+import codecs, koi8_r
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = koi8_r.decoding_map.copy()
+decoding_map.update({
+        0x00a4: 0x0454, #       CYRILLIC SMALL LETTER UKRAINIAN IE
+        0x00a6: 0x0456, #       CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+        0x00a7: 0x0457, #       CYRILLIC SMALL LETTER YI (UKRAINIAN)
+        0x00ad: 0x0491, #       CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
+        0x00b4: 0x0403, #       CYRILLIC CAPITAL LETTER UKRAINIAN IE
+        0x00b6: 0x0406, #       CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+        0x00b7: 0x0407, #       CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
+        0x00bd: 0x0490, #       CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/latin_1.py b/lib-python/2.2/encodings/latin_1.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/latin_1.py
@@ -0,0 +1,35 @@
+""" Python 'latin-1' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.latin_1_encode
+    decode = codecs.latin_1_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+class StreamConverter(StreamWriter,StreamReader):
+
+    encode = codecs.latin_1_decode
+    decode = codecs.latin_1_encode
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/mac_cyrillic.py b/lib-python/2.2/encodings/mac_cyrillic.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_cyrillic.py
@@ -0,0 +1,167 @@
+""" Python Character Mapping Codec generated from 'CYRILLIC.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x0410,	# CYRILLIC CAPITAL LETTER A
+	0x0081: 0x0411,	# CYRILLIC CAPITAL LETTER BE
+	0x0082: 0x0412,	# CYRILLIC CAPITAL LETTER VE
+	0x0083: 0x0413,	# CYRILLIC CAPITAL LETTER GHE
+	0x0084: 0x0414,	# CYRILLIC CAPITAL LETTER DE
+	0x0085: 0x0415,	# CYRILLIC CAPITAL LETTER IE
+	0x0086: 0x0416,	# CYRILLIC CAPITAL LETTER ZHE
+	0x0087: 0x0417,	# CYRILLIC CAPITAL LETTER ZE
+	0x0088: 0x0418,	# CYRILLIC CAPITAL LETTER I
+	0x0089: 0x0419,	# CYRILLIC CAPITAL LETTER SHORT I
+	0x008a: 0x041a,	# CYRILLIC CAPITAL LETTER KA
+	0x008b: 0x041b,	# CYRILLIC CAPITAL LETTER EL
+	0x008c: 0x041c,	# CYRILLIC CAPITAL LETTER EM
+	0x008d: 0x041d,	# CYRILLIC CAPITAL LETTER EN
+	0x008e: 0x041e,	# CYRILLIC CAPITAL LETTER O
+	0x008f: 0x041f,	# CYRILLIC CAPITAL LETTER PE
+	0x0090: 0x0420,	# CYRILLIC CAPITAL LETTER ER
+	0x0091: 0x0421,	# CYRILLIC CAPITAL LETTER ES
+	0x0092: 0x0422,	# CYRILLIC CAPITAL LETTER TE
+	0x0093: 0x0423,	# CYRILLIC CAPITAL LETTER U
+	0x0094: 0x0424,	# CYRILLIC CAPITAL LETTER EF
+	0x0095: 0x0425,	# CYRILLIC CAPITAL LETTER HA
+	0x0096: 0x0426,	# CYRILLIC CAPITAL LETTER TSE
+	0x0097: 0x0427,	# CYRILLIC CAPITAL LETTER CHE
+	0x0098: 0x0428,	# CYRILLIC CAPITAL LETTER SHA
+	0x0099: 0x0429,	# CYRILLIC CAPITAL LETTER SHCHA
+	0x009a: 0x042a,	# CYRILLIC CAPITAL LETTER HARD SIGN
+	0x009b: 0x042b,	# CYRILLIC CAPITAL LETTER YERU
+	0x009c: 0x042c,	# CYRILLIC CAPITAL LETTER SOFT SIGN
+	0x009d: 0x042d,	# CYRILLIC CAPITAL LETTER E
+	0x009e: 0x042e,	# CYRILLIC CAPITAL LETTER YU
+	0x009f: 0x042f,	# CYRILLIC CAPITAL LETTER YA
+	0x00a0: 0x2020,	# DAGGER
+	0x00a1: 0x00b0,	# DEGREE SIGN
+	0x00a4: 0x00a7,	# SECTION SIGN
+	0x00a5: 0x2022,	# BULLET
+	0x00a6: 0x00b6,	# PILCROW SIGN
+	0x00a7: 0x0406,	# CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x2122,	# TRADE MARK SIGN
+	0x00ab: 0x0402,	# CYRILLIC CAPITAL LETTER DJE
+	0x00ac: 0x0452,	# CYRILLIC SMALL LETTER DJE
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x0403,	# CYRILLIC CAPITAL LETTER GJE
+	0x00af: 0x0453,	# CYRILLIC SMALL LETTER GJE
+	0x00b0: 0x221e,	# INFINITY
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x0456,	# CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+	0x00b6: 0x2202,	# PARTIAL DIFFERENTIAL
+	0x00b7: 0x0408,	# CYRILLIC CAPITAL LETTER JE
+	0x00b8: 0x0404,	# CYRILLIC CAPITAL LETTER UKRAINIAN IE
+	0x00b9: 0x0454,	# CYRILLIC SMALL LETTER UKRAINIAN IE
+	0x00ba: 0x0407,	# CYRILLIC CAPITAL LETTER YI
+	0x00bb: 0x0457,	# CYRILLIC SMALL LETTER YI
+	0x00bc: 0x0409,	# CYRILLIC CAPITAL LETTER LJE
+	0x00bd: 0x0459,	# CYRILLIC SMALL LETTER LJE
+	0x00be: 0x040a,	# CYRILLIC CAPITAL LETTER NJE
+	0x00bf: 0x045a,	# CYRILLIC SMALL LETTER NJE
+	0x00c0: 0x0458,	# CYRILLIC SMALL LETTER JE
+	0x00c1: 0x0405,	# CYRILLIC CAPITAL LETTER DZE
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x221a,	# SQUARE ROOT
+	0x00c4: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00c5: 0x2248,	# ALMOST EQUAL TO
+	0x00c6: 0x2206,	# INCREMENT
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x040b,	# CYRILLIC CAPITAL LETTER TSHE
+	0x00cc: 0x045b,	# CYRILLIC SMALL LETTER TSHE
+	0x00cd: 0x040c,	# CYRILLIC CAPITAL LETTER KJE
+	0x00ce: 0x045c,	# CYRILLIC SMALL LETTER KJE
+	0x00cf: 0x0455,	# CYRILLIC SMALL LETTER DZE
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2014,	# EM DASH
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00d8: 0x040e,	# CYRILLIC CAPITAL LETTER SHORT U
+	0x00d9: 0x045e,	# CYRILLIC SMALL LETTER SHORT U
+	0x00da: 0x040f,	# CYRILLIC CAPITAL LETTER DZHE
+	0x00db: 0x045f,	# CYRILLIC SMALL LETTER DZHE
+	0x00dc: 0x2116,	# NUMERO SIGN
+	0x00dd: 0x0401,	# CYRILLIC CAPITAL LETTER IO
+	0x00de: 0x0451,	# CYRILLIC SMALL LETTER IO
+	0x00df: 0x044f,	# CYRILLIC SMALL LETTER YA
+	0x00e0: 0x0430,	# CYRILLIC SMALL LETTER A
+	0x00e1: 0x0431,	# CYRILLIC SMALL LETTER BE
+	0x00e2: 0x0432,	# CYRILLIC SMALL LETTER VE
+	0x00e3: 0x0433,	# CYRILLIC SMALL LETTER GHE
+	0x00e4: 0x0434,	# CYRILLIC SMALL LETTER DE
+	0x00e5: 0x0435,	# CYRILLIC SMALL LETTER IE
+	0x00e6: 0x0436,	# CYRILLIC SMALL LETTER ZHE
+	0x00e7: 0x0437,	# CYRILLIC SMALL LETTER ZE
+	0x00e8: 0x0438,	# CYRILLIC SMALL LETTER I
+	0x00e9: 0x0439,	# CYRILLIC SMALL LETTER SHORT I
+	0x00ea: 0x043a,	# CYRILLIC SMALL LETTER KA
+	0x00eb: 0x043b,	# CYRILLIC SMALL LETTER EL
+	0x00ec: 0x043c,	# CYRILLIC SMALL LETTER EM
+	0x00ed: 0x043d,	# CYRILLIC SMALL LETTER EN
+	0x00ee: 0x043e,	# CYRILLIC SMALL LETTER O
+	0x00ef: 0x043f,	# CYRILLIC SMALL LETTER PE
+	0x00f0: 0x0440,	# CYRILLIC SMALL LETTER ER
+	0x00f1: 0x0441,	# CYRILLIC SMALL LETTER ES
+	0x00f2: 0x0442,	# CYRILLIC SMALL LETTER TE
+	0x00f3: 0x0443,	# CYRILLIC SMALL LETTER U
+	0x00f4: 0x0444,	# CYRILLIC SMALL LETTER EF
+	0x00f5: 0x0445,	# CYRILLIC SMALL LETTER HA
+	0x00f6: 0x0446,	# CYRILLIC SMALL LETTER TSE
+	0x00f7: 0x0447,	# CYRILLIC SMALL LETTER CHE
+	0x00f8: 0x0448,	# CYRILLIC SMALL LETTER SHA
+	0x00f9: 0x0449,	# CYRILLIC SMALL LETTER SHCHA
+	0x00fa: 0x044a,	# CYRILLIC SMALL LETTER HARD SIGN
+	0x00fb: 0x044b,	# CYRILLIC SMALL LETTER YERU
+	0x00fc: 0x044c,	# CYRILLIC SMALL LETTER SOFT SIGN
+	0x00fd: 0x044d,	# CYRILLIC SMALL LETTER E
+	0x00fe: 0x044e,	# CYRILLIC SMALL LETTER YU
+	0x00ff: 0x00a4,	# CURRENCY SIGN
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mac_greek.py b/lib-python/2.2/encodings/mac_greek.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_greek.py
@@ -0,0 +1,170 @@
+""" Python Character Mapping Codec generated from 'GREEK.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0081: 0x00b9,	# SUPERSCRIPT ONE
+	0x0082: 0x00b2,	# SUPERSCRIPT TWO
+	0x0083: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0084: 0x00b3,	# SUPERSCRIPT THREE
+	0x0085: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x0086: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0087: 0x0385,	# GREEK DIALYTIKA TONOS
+	0x0088: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0089: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x008a: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x008b: 0x0384,	# GREEK TONOS
+	0x008c: 0x00a8,	# DIAERESIS
+	0x008d: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x008e: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x008f: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0090: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0091: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0092: 0x00a3,	# POUND SIGN
+	0x0093: 0x2122,	# TRADE MARK SIGN
+	0x0094: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0095: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0096: 0x2022,	# BULLET
+	0x0097: 0x00bd,	# VULGAR FRACTION ONE HALF
+	0x0098: 0x2030,	# PER MILLE SIGN
+	0x0099: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x009b: 0x00a6,	# BROKEN BAR
+	0x009c: 0x00ad,	# SOFT HYPHEN
+	0x009d: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x009e: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x009f: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00a0: 0x2020,	# DAGGER
+	0x00a1: 0x0393,	# GREEK CAPITAL LETTER GAMMA
+	0x00a2: 0x0394,	# GREEK CAPITAL LETTER DELTA
+	0x00a3: 0x0398,	# GREEK CAPITAL LETTER THETA
+	0x00a4: 0x039b,	# GREEK CAPITAL LETTER LAMBDA
+	0x00a5: 0x039e,	# GREEK CAPITAL LETTER XI
+	0x00a6: 0x03a0,	# GREEK CAPITAL LETTER PI
+	0x00a7: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x03a3,	# GREEK CAPITAL LETTER SIGMA
+	0x00ab: 0x03aa,	# GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+	0x00ac: 0x00a7,	# SECTION SIGN
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x00b0,	# DEGREE SIGN
+	0x00af: 0x0387,	# GREEK ANO TELEIA
+	0x00b0: 0x0391,	# GREEK CAPITAL LETTER ALPHA
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x00a5,	# YEN SIGN
+	0x00b5: 0x0392,	# GREEK CAPITAL LETTER BETA
+	0x00b6: 0x0395,	# GREEK CAPITAL LETTER EPSILON
+	0x00b7: 0x0396,	# GREEK CAPITAL LETTER ZETA
+	0x00b8: 0x0397,	# GREEK CAPITAL LETTER ETA
+	0x00b9: 0x0399,	# GREEK CAPITAL LETTER IOTA
+	0x00ba: 0x039a,	# GREEK CAPITAL LETTER KAPPA
+	0x00bb: 0x039c,	# GREEK CAPITAL LETTER MU
+	0x00bc: 0x03a6,	# GREEK CAPITAL LETTER PHI
+	0x00bd: 0x03ab,	# GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+	0x00be: 0x03a8,	# GREEK CAPITAL LETTER PSI
+	0x00bf: 0x03a9,	# GREEK CAPITAL LETTER OMEGA
+	0x00c0: 0x03ac,	# GREEK SMALL LETTER ALPHA WITH TONOS
+	0x00c1: 0x039d,	# GREEK CAPITAL LETTER NU
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x039f,	# GREEK CAPITAL LETTER OMICRON
+	0x00c4: 0x03a1,	# GREEK CAPITAL LETTER RHO
+	0x00c5: 0x2248,	# ALMOST EQUAL TO
+	0x00c6: 0x03a4,	# GREEK CAPITAL LETTER TAU
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x03a5,	# GREEK CAPITAL LETTER UPSILON
+	0x00cc: 0x03a7,	# GREEK CAPITAL LETTER CHI
+	0x00cd: 0x0386,	# GREEK CAPITAL LETTER ALPHA WITH TONOS
+	0x00ce: 0x0388,	# GREEK CAPITAL LETTER EPSILON WITH TONOS
+	0x00cf: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2015,	# HORIZONTAL BAR
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x0389,	# GREEK CAPITAL LETTER ETA WITH TONOS
+	0x00d8: 0x038a,	# GREEK CAPITAL LETTER IOTA WITH TONOS
+	0x00d9: 0x038c,	# GREEK CAPITAL LETTER OMICRON WITH TONOS
+	0x00da: 0x038e,	# GREEK CAPITAL LETTER UPSILON WITH TONOS
+	0x00db: 0x03ad,	# GREEK SMALL LETTER EPSILON WITH TONOS
+	0x00dc: 0x03ae,	# GREEK SMALL LETTER ETA WITH TONOS
+	0x00dd: 0x03af,	# GREEK SMALL LETTER IOTA WITH TONOS
+	0x00de: 0x03cc,	# GREEK SMALL LETTER OMICRON WITH TONOS
+	0x00df: 0x038f,	# GREEK CAPITAL LETTER OMEGA WITH TONOS
+	0x00e0: 0x03cd,	# GREEK SMALL LETTER UPSILON WITH TONOS
+	0x00e1: 0x03b1,	# GREEK SMALL LETTER ALPHA
+	0x00e2: 0x03b2,	# GREEK SMALL LETTER BETA
+	0x00e3: 0x03c8,	# GREEK SMALL LETTER PSI
+	0x00e4: 0x03b4,	# GREEK SMALL LETTER DELTA
+	0x00e5: 0x03b5,	# GREEK SMALL LETTER EPSILON
+	0x00e6: 0x03c6,	# GREEK SMALL LETTER PHI
+	0x00e7: 0x03b3,	# GREEK SMALL LETTER GAMMA
+	0x00e8: 0x03b7,	# GREEK SMALL LETTER ETA
+	0x00e9: 0x03b9,	# GREEK SMALL LETTER IOTA
+	0x00ea: 0x03be,	# GREEK SMALL LETTER XI
+	0x00eb: 0x03ba,	# GREEK SMALL LETTER KAPPA
+	0x00ec: 0x03bb,	# GREEK SMALL LETTER LAMBDA
+	0x00ed: 0x03bc,	# GREEK SMALL LETTER MU
+	0x00ee: 0x03bd,	# GREEK SMALL LETTER NU
+	0x00ef: 0x03bf,	# GREEK SMALL LETTER OMICRON
+	0x00f0: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00f1: 0x03ce,	# GREEK SMALL LETTER OMEGA WITH TONOS
+	0x00f2: 0x03c1,	# GREEK SMALL LETTER RHO
+	0x00f3: 0x03c3,	# GREEK SMALL LETTER SIGMA
+	0x00f4: 0x03c4,	# GREEK SMALL LETTER TAU
+	0x00f5: 0x03b8,	# GREEK SMALL LETTER THETA
+	0x00f6: 0x03c9,	# GREEK SMALL LETTER OMEGA
+	0x00f7: 0x03c2,	# GREEK SMALL LETTER FINAL SIGMA
+	0x00f8: 0x03c7,	# GREEK SMALL LETTER CHI
+	0x00f9: 0x03c5,	# GREEK SMALL LETTER UPSILON
+	0x00fa: 0x03b6,	# GREEK SMALL LETTER ZETA
+	0x00fb: 0x03ca,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA
+	0x00fc: 0x03cb,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+	0x00fd: 0x0390,	# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+	0x00fe: 0x03b0,	# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+	0x00ff: None,	# UNDEFINED
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mac_iceland.py b/lib-python/2.2/encodings/mac_iceland.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_iceland.py
@@ -0,0 +1,166 @@
+""" Python Character Mapping Codec generated from 'ICELAND.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0081: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0082: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0083: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0084: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x0085: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x0086: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0087: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0088: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0089: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x008a: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x008b: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x008c: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x008d: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x008e: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x008f: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0090: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0091: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0092: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0093: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0094: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0095: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0096: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x0097: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x0098: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0099: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x009b: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x009c: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x009d: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x009e: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x009f: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00a0: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00a1: 0x00b0,	# DEGREE SIGN
+	0x00a4: 0x00a7,	# SECTION SIGN
+	0x00a5: 0x2022,	# BULLET
+	0x00a6: 0x00b6,	# PILCROW SIGN
+	0x00a7: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x2122,	# TRADE MARK SIGN
+	0x00ab: 0x00b4,	# ACUTE ACCENT
+	0x00ac: 0x00a8,	# DIAERESIS
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x00af: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x00b0: 0x221e,	# INFINITY
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x00a5,	# YEN SIGN
+	0x00b6: 0x2202,	# PARTIAL DIFFERENTIAL
+	0x00b7: 0x2211,	# N-ARY SUMMATION
+	0x00b8: 0x220f,	# N-ARY PRODUCT
+	0x00b9: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00ba: 0x222b,	# INTEGRAL
+	0x00bb: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00bc: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00bd: 0x2126,	# OHM SIGN
+	0x00be: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x00bf: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x00c0: 0x00bf,	# INVERTED QUESTION MARK
+	0x00c1: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x221a,	# SQUARE ROOT
+	0x00c4: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00c5: 0x2248,	# ALMOST EQUAL TO
+	0x00c6: 0x2206,	# INCREMENT
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x00cc: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x00cd: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00ce: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x00cf: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2014,	# EM DASH
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x25ca,	# LOZENGE
+	0x00d8: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00d9: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00da: 0x2044,	# FRACTION SLASH
+	0x00db: 0x00a4,	# CURRENCY SIGN
+	0x00dc: 0x00d0,	# LATIN CAPITAL LETTER ETH
+	0x00dd: 0x00f0,	# LATIN SMALL LETTER ETH
+	0x00df: 0x00fe,	# LATIN SMALL LETTER THORN
+	0x00e0: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x00e1: 0x00b7,	# MIDDLE DOT
+	0x00e2: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x00e3: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00e4: 0x2030,	# PER MILLE SIGN
+	0x00e5: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00e6: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x00e7: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00e8: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00e9: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x00ea: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00eb: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00ec: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x00ed: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00f0: None,	# UNDEFINED
+	0x00f1: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00f2: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00f3: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00f4: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00f5: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x00f6: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x00f7: 0x02dc,	# SMALL TILDE
+	0x00f8: 0x00af,	# MACRON
+	0x00f9: 0x02d8,	# BREVE
+	0x00fa: 0x02d9,	# DOT ABOVE
+	0x00fb: 0x02da,	# RING ABOVE
+	0x00fc: 0x00b8,	# CEDILLA
+	0x00fd: 0x02dd,	# DOUBLE ACUTE ACCENT
+	0x00fe: 0x02db,	# OGONEK
+	0x00ff: 0x02c7,	# CARON
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mac_latin2.py b/lib-python/2.2/encodings/mac_latin2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_latin2.py
@@ -0,0 +1,170 @@
+""" Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0081: 0x0100,	# LATIN CAPITAL LETTER A WITH MACRON
+	0x0082: 0x0101,	# LATIN SMALL LETTER A WITH MACRON
+	0x0083: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0084: 0x0104,	# LATIN CAPITAL LETTER A WITH OGONEK
+	0x0085: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x0086: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0087: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0088: 0x0105,	# LATIN SMALL LETTER A WITH OGONEK
+	0x0089: 0x010c,	# LATIN CAPITAL LETTER C WITH CARON
+	0x008a: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x008b: 0x010d,	# LATIN SMALL LETTER C WITH CARON
+	0x008c: 0x0106,	# LATIN CAPITAL LETTER C WITH ACUTE
+	0x008d: 0x0107,	# LATIN SMALL LETTER C WITH ACUTE
+	0x008e: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x008f: 0x0179,	# LATIN CAPITAL LETTER Z WITH ACUTE
+	0x0090: 0x017a,	# LATIN SMALL LETTER Z WITH ACUTE
+	0x0091: 0x010e,	# LATIN CAPITAL LETTER D WITH CARON
+	0x0092: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0093: 0x010f,	# LATIN SMALL LETTER D WITH CARON
+	0x0094: 0x0112,	# LATIN CAPITAL LETTER E WITH MACRON
+	0x0095: 0x0113,	# LATIN SMALL LETTER E WITH MACRON
+	0x0096: 0x0116,	# LATIN CAPITAL LETTER E WITH DOT ABOVE
+	0x0097: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x0098: 0x0117,	# LATIN SMALL LETTER E WITH DOT ABOVE
+	0x0099: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x009b: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x009c: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x009d: 0x011a,	# LATIN CAPITAL LETTER E WITH CARON
+	0x009e: 0x011b,	# LATIN SMALL LETTER E WITH CARON
+	0x009f: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00a0: 0x2020,	# DAGGER
+	0x00a1: 0x00b0,	# DEGREE SIGN
+	0x00a2: 0x0118,	# LATIN CAPITAL LETTER E WITH OGONEK
+	0x00a4: 0x00a7,	# SECTION SIGN
+	0x00a5: 0x2022,	# BULLET
+	0x00a6: 0x00b6,	# PILCROW SIGN
+	0x00a7: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x2122,	# TRADE MARK SIGN
+	0x00ab: 0x0119,	# LATIN SMALL LETTER E WITH OGONEK
+	0x00ac: 0x00a8,	# DIAERESIS
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x0123,	# LATIN SMALL LETTER G WITH CEDILLA
+	0x00af: 0x012e,	# LATIN CAPITAL LETTER I WITH OGONEK
+	0x00b0: 0x012f,	# LATIN SMALL LETTER I WITH OGONEK
+	0x00b1: 0x012a,	# LATIN CAPITAL LETTER I WITH MACRON
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x012b,	# LATIN SMALL LETTER I WITH MACRON
+	0x00b5: 0x0136,	# LATIN CAPITAL LETTER K WITH CEDILLA
+	0x00b6: 0x2202,	# PARTIAL DIFFERENTIAL
+	0x00b7: 0x2211,	# N-ARY SUMMATION
+	0x00b8: 0x0142,	# LATIN SMALL LETTER L WITH STROKE
+	0x00b9: 0x013b,	# LATIN CAPITAL LETTER L WITH CEDILLA
+	0x00ba: 0x013c,	# LATIN SMALL LETTER L WITH CEDILLA
+	0x00bb: 0x013d,	# LATIN CAPITAL LETTER L WITH CARON
+	0x00bc: 0x013e,	# LATIN SMALL LETTER L WITH CARON
+	0x00bd: 0x0139,	# LATIN CAPITAL LETTER L WITH ACUTE
+	0x00be: 0x013a,	# LATIN SMALL LETTER L WITH ACUTE
+	0x00bf: 0x0145,	# LATIN CAPITAL LETTER N WITH CEDILLA
+	0x00c0: 0x0146,	# LATIN SMALL LETTER N WITH CEDILLA
+	0x00c1: 0x0143,	# LATIN CAPITAL LETTER N WITH ACUTE
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x221a,	# SQUARE ROOT
+	0x00c4: 0x0144,	# LATIN SMALL LETTER N WITH ACUTE
+	0x00c5: 0x0147,	# LATIN CAPITAL LETTER N WITH CARON
+	0x00c6: 0x2206,	# INCREMENT
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x0148,	# LATIN SMALL LETTER N WITH CARON
+	0x00cc: 0x0150,	# LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+	0x00cd: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00ce: 0x0151,	# LATIN SMALL LETTER O WITH DOUBLE ACUTE
+	0x00cf: 0x014c,	# LATIN CAPITAL LETTER O WITH MACRON
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2014,	# EM DASH
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x25ca,	# LOZENGE
+	0x00d8: 0x014d,	# LATIN SMALL LETTER O WITH MACRON
+	0x00d9: 0x0154,	# LATIN CAPITAL LETTER R WITH ACUTE
+	0x00da: 0x0155,	# LATIN SMALL LETTER R WITH ACUTE
+	0x00db: 0x0158,	# LATIN CAPITAL LETTER R WITH CARON
+	0x00dc: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x00dd: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x00de: 0x0159,	# LATIN SMALL LETTER R WITH CARON
+	0x00df: 0x0156,	# LATIN CAPITAL LETTER R WITH CEDILLA
+	0x00e0: 0x0157,	# LATIN SMALL LETTER R WITH CEDILLA
+	0x00e1: 0x0160,	# LATIN CAPITAL LETTER S WITH CARON
+	0x00e2: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x00e3: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00e4: 0x0161,	# LATIN SMALL LETTER S WITH CARON
+	0x00e5: 0x015a,	# LATIN CAPITAL LETTER S WITH ACUTE
+	0x00e6: 0x015b,	# LATIN SMALL LETTER S WITH ACUTE
+	0x00e7: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00e8: 0x0164,	# LATIN CAPITAL LETTER T WITH CARON
+	0x00e9: 0x0165,	# LATIN SMALL LETTER T WITH CARON
+	0x00ea: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00eb: 0x017d,	# LATIN CAPITAL LETTER Z WITH CARON
+	0x00ec: 0x017e,	# LATIN SMALL LETTER Z WITH CARON
+	0x00ed: 0x016a,	# LATIN CAPITAL LETTER U WITH MACRON
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00f0: 0x016b,	# LATIN SMALL LETTER U WITH MACRON
+	0x00f1: 0x016e,	# LATIN CAPITAL LETTER U WITH RING ABOVE
+	0x00f2: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00f3: 0x016f,	# LATIN SMALL LETTER U WITH RING ABOVE
+	0x00f4: 0x0170,	# LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+	0x00f5: 0x0171,	# LATIN SMALL LETTER U WITH DOUBLE ACUTE
+	0x00f6: 0x0172,	# LATIN CAPITAL LETTER U WITH OGONEK
+	0x00f7: 0x0173,	# LATIN SMALL LETTER U WITH OGONEK
+	0x00f8: 0x00dd,	# LATIN CAPITAL LETTER Y WITH ACUTE
+	0x00f9: 0x00fd,	# LATIN SMALL LETTER Y WITH ACUTE
+	0x00fa: 0x0137,	# LATIN SMALL LETTER K WITH CEDILLA
+	0x00fb: 0x017b,	# LATIN CAPITAL LETTER Z WITH DOT ABOVE
+	0x00fc: 0x0141,	# LATIN CAPITAL LETTER L WITH STROKE
+	0x00fd: 0x017c,	# LATIN SMALL LETTER Z WITH DOT ABOVE
+	0x00fe: 0x0122,	# LATIN CAPITAL LETTER G WITH CEDILLA
+	0x00ff: 0x02c7,	# CARON
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mac_roman.py b/lib-python/2.2/encodings/mac_roman.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_roman.py
@@ -0,0 +1,167 @@
+""" Python Character Mapping Codec generated from 'ROMAN.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0081: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0082: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0083: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0084: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x0085: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x0086: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0087: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0088: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0089: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x008a: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x008b: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x008c: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x008d: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x008e: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x008f: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0090: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0091: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0092: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0093: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0094: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0095: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0096: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x0097: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x0098: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0099: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x009b: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x009c: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x009d: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x009e: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x009f: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00a0: 0x2020,	# DAGGER
+	0x00a1: 0x00b0,	# DEGREE SIGN
+	0x00a4: 0x00a7,	# SECTION SIGN
+	0x00a5: 0x2022,	# BULLET
+	0x00a6: 0x00b6,	# PILCROW SIGN
+	0x00a7: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x2122,	# TRADE MARK SIGN
+	0x00ab: 0x00b4,	# ACUTE ACCENT
+	0x00ac: 0x00a8,	# DIAERESIS
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x00af: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x00b0: 0x221e,	# INFINITY
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x00a5,	# YEN SIGN
+	0x00b6: 0x2202,	# PARTIAL DIFFERENTIAL
+	0x00b7: 0x2211,	# N-ARY SUMMATION
+	0x00b8: 0x220f,	# N-ARY PRODUCT
+	0x00b9: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00ba: 0x222b,	# INTEGRAL
+	0x00bb: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00bc: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00bd: 0x2126,	# OHM SIGN
+	0x00be: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x00bf: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x00c0: 0x00bf,	# INVERTED QUESTION MARK
+	0x00c1: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x221a,	# SQUARE ROOT
+	0x00c4: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00c5: 0x2248,	# ALMOST EQUAL TO
+	0x00c6: 0x2206,	# INCREMENT
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x00cc: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x00cd: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00ce: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x00cf: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2014,	# EM DASH
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x25ca,	# LOZENGE
+	0x00d8: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00d9: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00da: 0x2044,	# FRACTION SLASH
+	0x00db: 0x00a4,	# CURRENCY SIGN
+	0x00dc: 0x2039,	# SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+	0x00dd: 0x203a,	# SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+	0x00de: 0xfb01,	# LATIN SMALL LIGATURE FI
+	0x00df: 0xfb02,	# LATIN SMALL LIGATURE FL
+	0x00e0: 0x2021,	# DOUBLE DAGGER
+	0x00e1: 0x00b7,	# MIDDLE DOT
+	0x00e2: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x00e3: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00e4: 0x2030,	# PER MILLE SIGN
+	0x00e5: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00e6: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x00e7: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00e8: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00e9: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x00ea: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00eb: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00ec: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x00ed: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00f0: None,	# UNDEFINED
+	0x00f1: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00f2: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00f3: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00f4: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00f5: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x00f6: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x00f7: 0x02dc,	# SMALL TILDE
+	0x00f8: 0x00af,	# MACRON
+	0x00f9: 0x02d8,	# BREVE
+	0x00fa: 0x02d9,	# DOT ABOVE
+	0x00fb: 0x02da,	# RING ABOVE
+	0x00fc: 0x00b8,	# CEDILLA
+	0x00fd: 0x02dd,	# DOUBLE ACUTE ACCENT
+	0x00fe: 0x02db,	# OGONEK
+	0x00ff: 0x02c7,	# CARON
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mac_turkish.py b/lib-python/2.2/encodings/mac_turkish.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mac_turkish.py
@@ -0,0 +1,167 @@
+""" Python Character Mapping Codec generated from 'TURKISH.TXT' with gencodec.py.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+	0x0080: 0x00c4,	# LATIN CAPITAL LETTER A WITH DIAERESIS
+	0x0081: 0x00c5,	# LATIN CAPITAL LETTER A WITH RING ABOVE
+	0x0082: 0x00c7,	# LATIN CAPITAL LETTER C WITH CEDILLA
+	0x0083: 0x00c9,	# LATIN CAPITAL LETTER E WITH ACUTE
+	0x0084: 0x00d1,	# LATIN CAPITAL LETTER N WITH TILDE
+	0x0085: 0x00d6,	# LATIN CAPITAL LETTER O WITH DIAERESIS
+	0x0086: 0x00dc,	# LATIN CAPITAL LETTER U WITH DIAERESIS
+	0x0087: 0x00e1,	# LATIN SMALL LETTER A WITH ACUTE
+	0x0088: 0x00e0,	# LATIN SMALL LETTER A WITH GRAVE
+	0x0089: 0x00e2,	# LATIN SMALL LETTER A WITH CIRCUMFLEX
+	0x008a: 0x00e4,	# LATIN SMALL LETTER A WITH DIAERESIS
+	0x008b: 0x00e3,	# LATIN SMALL LETTER A WITH TILDE
+	0x008c: 0x00e5,	# LATIN SMALL LETTER A WITH RING ABOVE
+	0x008d: 0x00e7,	# LATIN SMALL LETTER C WITH CEDILLA
+	0x008e: 0x00e9,	# LATIN SMALL LETTER E WITH ACUTE
+	0x008f: 0x00e8,	# LATIN SMALL LETTER E WITH GRAVE
+	0x0090: 0x00ea,	# LATIN SMALL LETTER E WITH CIRCUMFLEX
+	0x0091: 0x00eb,	# LATIN SMALL LETTER E WITH DIAERESIS
+	0x0092: 0x00ed,	# LATIN SMALL LETTER I WITH ACUTE
+	0x0093: 0x00ec,	# LATIN SMALL LETTER I WITH GRAVE
+	0x0094: 0x00ee,	# LATIN SMALL LETTER I WITH CIRCUMFLEX
+	0x0095: 0x00ef,	# LATIN SMALL LETTER I WITH DIAERESIS
+	0x0096: 0x00f1,	# LATIN SMALL LETTER N WITH TILDE
+	0x0097: 0x00f3,	# LATIN SMALL LETTER O WITH ACUTE
+	0x0098: 0x00f2,	# LATIN SMALL LETTER O WITH GRAVE
+	0x0099: 0x00f4,	# LATIN SMALL LETTER O WITH CIRCUMFLEX
+	0x009a: 0x00f6,	# LATIN SMALL LETTER O WITH DIAERESIS
+	0x009b: 0x00f5,	# LATIN SMALL LETTER O WITH TILDE
+	0x009c: 0x00fa,	# LATIN SMALL LETTER U WITH ACUTE
+	0x009d: 0x00f9,	# LATIN SMALL LETTER U WITH GRAVE
+	0x009e: 0x00fb,	# LATIN SMALL LETTER U WITH CIRCUMFLEX
+	0x009f: 0x00fc,	# LATIN SMALL LETTER U WITH DIAERESIS
+	0x00a0: 0x2020,	# DAGGER
+	0x00a1: 0x00b0,	# DEGREE SIGN
+	0x00a4: 0x00a7,	# SECTION SIGN
+	0x00a5: 0x2022,	# BULLET
+	0x00a6: 0x00b6,	# PILCROW SIGN
+	0x00a7: 0x00df,	# LATIN SMALL LETTER SHARP S
+	0x00a8: 0x00ae,	# REGISTERED SIGN
+	0x00aa: 0x2122,	# TRADE MARK SIGN
+	0x00ab: 0x00b4,	# ACUTE ACCENT
+	0x00ac: 0x00a8,	# DIAERESIS
+	0x00ad: 0x2260,	# NOT EQUAL TO
+	0x00ae: 0x00c6,	# LATIN CAPITAL LIGATURE AE
+	0x00af: 0x00d8,	# LATIN CAPITAL LETTER O WITH STROKE
+	0x00b0: 0x221e,	# INFINITY
+	0x00b2: 0x2264,	# LESS-THAN OR EQUAL TO
+	0x00b3: 0x2265,	# GREATER-THAN OR EQUAL TO
+	0x00b4: 0x00a5,	# YEN SIGN
+	0x00b6: 0x2202,	# PARTIAL DIFFERENTIAL
+	0x00b7: 0x2211,	# N-ARY SUMMATION
+	0x00b8: 0x220f,	# N-ARY PRODUCT
+	0x00b9: 0x03c0,	# GREEK SMALL LETTER PI
+	0x00ba: 0x222b,	# INTEGRAL
+	0x00bb: 0x00aa,	# FEMININE ORDINAL INDICATOR
+	0x00bc: 0x00ba,	# MASCULINE ORDINAL INDICATOR
+	0x00bd: 0x2126,	# OHM SIGN
+	0x00be: 0x00e6,	# LATIN SMALL LIGATURE AE
+	0x00bf: 0x00f8,	# LATIN SMALL LETTER O WITH STROKE
+	0x00c0: 0x00bf,	# INVERTED QUESTION MARK
+	0x00c1: 0x00a1,	# INVERTED EXCLAMATION MARK
+	0x00c2: 0x00ac,	# NOT SIGN
+	0x00c3: 0x221a,	# SQUARE ROOT
+	0x00c4: 0x0192,	# LATIN SMALL LETTER F WITH HOOK
+	0x00c5: 0x2248,	# ALMOST EQUAL TO
+	0x00c6: 0x2206,	# INCREMENT
+	0x00c7: 0x00ab,	# LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c8: 0x00bb,	# RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+	0x00c9: 0x2026,	# HORIZONTAL ELLIPSIS
+	0x00ca: 0x00a0,	# NO-BREAK SPACE
+	0x00cb: 0x00c0,	# LATIN CAPITAL LETTER A WITH GRAVE
+	0x00cc: 0x00c3,	# LATIN CAPITAL LETTER A WITH TILDE
+	0x00cd: 0x00d5,	# LATIN CAPITAL LETTER O WITH TILDE
+	0x00ce: 0x0152,	# LATIN CAPITAL LIGATURE OE
+	0x00cf: 0x0153,	# LATIN SMALL LIGATURE OE
+	0x00d0: 0x2013,	# EN DASH
+	0x00d1: 0x2014,	# EM DASH
+	0x00d2: 0x201c,	# LEFT DOUBLE QUOTATION MARK
+	0x00d3: 0x201d,	# RIGHT DOUBLE QUOTATION MARK
+	0x00d4: 0x2018,	# LEFT SINGLE QUOTATION MARK
+	0x00d5: 0x2019,	# RIGHT SINGLE QUOTATION MARK
+	0x00d6: 0x00f7,	# DIVISION SIGN
+	0x00d7: 0x25ca,	# LOZENGE
+	0x00d8: 0x00ff,	# LATIN SMALL LETTER Y WITH DIAERESIS
+	0x00d9: 0x0178,	# LATIN CAPITAL LETTER Y WITH DIAERESIS
+	0x00da: 0x011e,	# LATIN CAPITAL LETTER G WITH BREVE
+	0x00db: 0x011f,	# LATIN SMALL LETTER G WITH BREVE
+	0x00dc: 0x0130,	# LATIN CAPITAL LETTER I WITH DOT ABOVE
+	0x00dd: 0x0131,	# LATIN SMALL LETTER DOTLESS I
+	0x00de: 0x015e,	# LATIN CAPITAL LETTER S WITH CEDILLA
+	0x00df: 0x015f,	# LATIN SMALL LETTER S WITH CEDILLA
+	0x00e0: 0x2021,	# DOUBLE DAGGER
+	0x00e1: 0x00b7,	# MIDDLE DOT
+	0x00e2: 0x201a,	# SINGLE LOW-9 QUOTATION MARK
+	0x00e3: 0x201e,	# DOUBLE LOW-9 QUOTATION MARK
+	0x00e4: 0x2030,	# PER MILLE SIGN
+	0x00e5: 0x00c2,	# LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+	0x00e6: 0x00ca,	# LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+	0x00e7: 0x00c1,	# LATIN CAPITAL LETTER A WITH ACUTE
+	0x00e8: 0x00cb,	# LATIN CAPITAL LETTER E WITH DIAERESIS
+	0x00e9: 0x00c8,	# LATIN CAPITAL LETTER E WITH GRAVE
+	0x00ea: 0x00cd,	# LATIN CAPITAL LETTER I WITH ACUTE
+	0x00eb: 0x00ce,	# LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+	0x00ec: 0x00cf,	# LATIN CAPITAL LETTER I WITH DIAERESIS
+	0x00ed: 0x00cc,	# LATIN CAPITAL LETTER I WITH GRAVE
+	0x00ee: 0x00d3,	# LATIN CAPITAL LETTER O WITH ACUTE
+	0x00ef: 0x00d4,	# LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+	0x00f0: None,	# UNDEFINED
+	0x00f1: 0x00d2,	# LATIN CAPITAL LETTER O WITH GRAVE
+	0x00f2: 0x00da,	# LATIN CAPITAL LETTER U WITH ACUTE
+	0x00f3: 0x00db,	# LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+	0x00f4: 0x00d9,	# LATIN CAPITAL LETTER U WITH GRAVE
+	0x00f5: None,	# UNDEFINED
+	0x00f6: 0x02c6,	# MODIFIER LETTER CIRCUMFLEX ACCENT
+	0x00f7: 0x02dc,	# SMALL TILDE
+	0x00f8: 0x00af,	# MACRON
+	0x00f9: 0x02d8,	# BREVE
+	0x00fa: 0x02d9,	# DOT ABOVE
+	0x00fb: 0x02da,	# RING ABOVE
+	0x00fc: 0x00b8,	# CEDILLA
+	0x00fd: 0x02dd,	# DOUBLE ACUTE ACCENT
+	0x00fe: 0x02db,	# OGONEK
+	0x00ff: 0x02c7,	# CARON
+})
+
+### Encoding Map
+
+encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/lib-python/2.2/encodings/mbcs.py b/lib-python/2.2/encodings/mbcs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/mbcs.py
@@ -0,0 +1,36 @@
+""" Python 'mbcs' Codec for Windows
+
+
+Cloned by Mark Hammond (mhammond at skippinet.com.au) from ascii.py,
+which was written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Thin stateless wrapper over the C-level Windows MBCS conversion
+# routines exposed by the codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.mbcs_encode
+    decode = codecs.mbcs_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+# NOTE(review): encode/decode are bound the opposite way round compared
+# to Codec above. This appears deliberate — a converter stream translates
+# between the two representations — but confirm against the codecs
+# stream machinery before relying on it.
+class StreamConverter(StreamWriter,StreamReader):
+
+    encode = codecs.mbcs_decode
+    decode = codecs.mbcs_encode
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook used by the encodings package search function:
+    # returns the (encoder, decoder, stream reader, stream writer) tuple.
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/quopri_codec.py b/lib-python/2.2/encodings/quopri_codec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/quopri_codec.py
@@ -0,0 +1,58 @@
+"""Codec for quoted-printable encoding.
+
+Like base64 and rot13, this returns Python strings, not Unicode.
+"""
+
+import codecs, quopri
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+def quopri_encode(input, errors='strict'):
+    """Encode the input, returning a tuple (output object, length consumed).
+
+    errors defines the error handling to apply. It defaults to
+    'strict' handling which is the only currently supported
+    error handling for this codec.
+
+    """
+    # NOTE(review): assert disappears under -O; non-strict modes are
+    # silently accepted in optimized runs.
+    assert errors == 'strict'
+    f = StringIO(input)
+    g = StringIO()
+    # Third argument is quotetabs=1: embedded tabs and spaces are also
+    # escaped (see the quopri module documentation).
+    quopri.encode(f, g, 1)
+    output = g.getvalue()
+    return (output, len(input))
+
+def quopri_decode(input, errors='strict'):
+    """Decode the input, returning a tuple (output object, length consumed).
+
+    errors defines the error handling to apply. It defaults to
+    'strict' handling which is the only currently supported
+    error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    f = StringIO(input)
+    g = StringIO()
+    quopri.decode(f, g)
+    output = g.getvalue()
+    return (output, len(input))
+
+# Class-based API delegating to the module-level helpers above.
+class Codec(codecs.Codec):
+
+    def encode(self, input,errors='strict'):
+        return quopri_encode(input,errors)
+    def decode(self, input,errors='strict'):
+        return quopri_decode(input,errors)
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+# encodings module API
+
+def getregentry():
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (quopri_encode, quopri_decode, StreamReader, StreamWriter)
diff --git a/lib-python/2.2/encodings/raw_unicode_escape.py b/lib-python/2.2/encodings/raw_unicode_escape.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/raw_unicode_escape.py
@@ -0,0 +1,30 @@
+""" Python 'raw-unicode-escape' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level raw-unicode-escape conversion
+# routines in the codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.raw_unicode_escape_encode
+    decode = codecs.raw_unicode_escape_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/rot_13.py b/lib-python/2.2/encodings/rot_13.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/rot_13.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+""" Python Character Mapping Codec for ROT13.
+
+    See http://ucsub.colorado.edu/~kominek/rot13/ for details.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+# Charmap-based codec: both directions are plain table lookups driven by
+# the decoding_map/encoding_map tables defined below.
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+        
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook; bound methods of fresh Codec instances are used
+    # here (unlike the C-function-bound codecs elsewhere in this package).
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+# Identity for all 256 byte values, then each ASCII letter is rotated by
+# 13 positions within its case (A<->N, B<->O, ..., a<->n, ...).
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+   0x0041: 0x004e,
+   0x0042: 0x004f,
+   0x0043: 0x0050,
+   0x0044: 0x0051,
+   0x0045: 0x0052,
+   0x0046: 0x0053,
+   0x0047: 0x0054,
+   0x0048: 0x0055,
+   0x0049: 0x0056,
+   0x004a: 0x0057,
+   0x004b: 0x0058,
+   0x004c: 0x0059,
+   0x004d: 0x005a,
+   0x004e: 0x0041,
+   0x004f: 0x0042,
+   0x0050: 0x0043,
+   0x0051: 0x0044,
+   0x0052: 0x0045,
+   0x0053: 0x0046,
+   0x0054: 0x0047,
+   0x0055: 0x0048,
+   0x0056: 0x0049,
+   0x0057: 0x004a,
+   0x0058: 0x004b,
+   0x0059: 0x004c,
+   0x005a: 0x004d,
+   0x0061: 0x006e,
+   0x0062: 0x006f,
+   0x0063: 0x0070,
+   0x0064: 0x0071,
+   0x0065: 0x0072,
+   0x0066: 0x0073,
+   0x0067: 0x0074,
+   0x0068: 0x0075,
+   0x0069: 0x0076,
+   0x006a: 0x0077,
+   0x006b: 0x0078,
+   0x006c: 0x0079,
+   0x006d: 0x007a,
+   0x006e: 0x0061,
+   0x006f: 0x0062,
+   0x0070: 0x0063,
+   0x0071: 0x0064,
+   0x0072: 0x0065,
+   0x0073: 0x0066,
+   0x0074: 0x0067,
+   0x0075: 0x0068,
+   0x0076: 0x0069,
+   0x0077: 0x006a,
+   0x0078: 0x006b,
+   0x0079: 0x006c,
+   0x007a: 0x006d,
+})
+
+### Encoding Map
+
+# ROT13 is an involution, so the inverted map equals the decoding map.
+encoding_map = codecs.make_encoding_map(decoding_map)
+
+### Filter API
+
+def rot13(infile, outfile):
+    # Whole-file filter: read everything, ROT13 it, write the result.
+    outfile.write(infile.read().encode('rot-13'))
+
+if __name__ == '__main__':
+    import sys
+    rot13(sys.stdin, sys.stdout)
diff --git a/lib-python/2.2/encodings/undefined.py b/lib-python/2.2/encodings/undefined.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/undefined.py
@@ -0,0 +1,34 @@
+""" Python 'undefined' Codec
+
+    This codec will always raise a ValueError exception when being
+    used. It is intended for use by the site.py file to switch off
+    automatic string to Unicode coercion.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Sentinel codec: every conversion attempt fails unconditionally.
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+        raise UnicodeError, "undefined encoding"
+
+    def decode(self,input,errors='strict'):
+        raise UnicodeError, "undefined encoding"
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: bound methods of fresh Codec instances.
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/unicode_escape.py b/lib-python/2.2/encodings/unicode_escape.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/unicode_escape.py
@@ -0,0 +1,30 @@
+""" Python 'unicode-escape' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level unicode-escape conversion routines
+# in the codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.unicode_escape_encode
+    decode = codecs.unicode_escape_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/unicode_internal.py b/lib-python/2.2/encodings/unicode_internal.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/unicode_internal.py
@@ -0,0 +1,30 @@
+""" Python 'unicode-internal' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the interpreter's internal Unicode
+# representation conversion routines in the codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.unicode_internal_encode
+    decode = codecs.unicode_internal_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/utf_16.py b/lib-python/2.2/encodings/utf_16.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/utf_16.py
@@ -0,0 +1,61 @@
+""" Python 'utf-16' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs, sys
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.utf_16_encode
+    decode = codecs.utf_16_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    def __init__(self, stream, errors='strict'):
+        # Tracks whether the first (BOM-producing) write has happened.
+        self.bom_written = 0
+        codecs.StreamWriter.__init__(self, stream, errors)
+
+    def write(self, data):
+        result = codecs.StreamWriter.write(self, data)
+        if not self.bom_written:
+            self.bom_written = 1
+            # The first write went through the generic utf_16 encoder
+            # (which emits a BOM — presumably; confirm against the codecs
+            # docs); switch this instance to the native-endian encoder so
+            # later writes don't repeat the BOM.
+            if sys.byteorder == 'little':
+                self.encode = codecs.utf_16_le_encode
+            else:
+                self.encode = codecs.utf_16_be_encode
+        return result
+        
+class StreamReader(Codec,codecs.StreamReader):
+    def __init__(self, stream, errors='strict'):
+        # Tracks whether the leading BOM has been consumed yet.
+        self.bom_read = 0
+        codecs.StreamReader.__init__(self, stream, errors)
+
+    def read(self, size=-1):
+        if not self.bom_read:
+            # Consume the 2-byte signature and pick the matching
+            # endian-specific decoder for the rest of the stream.
+            signature = self.stream.read(2)
+            if signature == codecs.BOM_BE:
+                self.decode = codecs.utf_16_be_decode
+            elif signature == codecs.BOM_LE:
+                self.decode = codecs.utf_16_le_decode
+            else:
+                raise UnicodeError,"UTF-16 stream does not start with BOM"
+            # The BOM already consumed 2 of the requested bytes; adjust
+            # size accordingly (size < 0 means "read everything").
+            if size > 2:
+                size -= 2
+            elif size >= 0:
+                size = 0
+            self.bom_read = 1
+        return codecs.StreamReader.read(self, size)
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/utf_16_be.py b/lib-python/2.2/encodings/utf_16_be.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/utf_16_be.py
@@ -0,0 +1,31 @@
+""" Python 'utf-16-be' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level big-endian UTF-16 conversion
+# routines in the codecs module (no BOM handling here).
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.utf_16_be_encode
+    decode = codecs.utf_16_be_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/utf_16_le.py b/lib-python/2.2/encodings/utf_16_le.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/utf_16_le.py
@@ -0,0 +1,31 @@
+""" Python 'utf-16-le' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level little-endian UTF-16 conversion
+# routines in the codecs module (no BOM handling here).
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.utf_16_le_encode
+    decode = codecs.utf_16_le_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/utf_7.py b/lib-python/2.2/encodings/utf_7.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/utf_7.py
@@ -0,0 +1,27 @@
+""" Python 'utf-7' Codec
+
+Written by Brian Quinlan (brian at sweetapp.com).
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level UTF-7 conversion routines in the
+# codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.utf_7_encode
+    decode = codecs.utf_7_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/utf_8.py b/lib-python/2.2/encodings/utf_8.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/utf_8.py
@@ -0,0 +1,31 @@
+""" Python 'utf-8' Codec
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+# Stateless wrapper over the C-level UTF-8 conversion routines in the
+# codecs module.
+class Codec(codecs.Codec):
+
+    # Note: Binding these as C functions will result in the class not
+    # converting them to methods. This is intended.
+    encode = codecs.utf_8_encode
+    decode = codecs.utf_8_decode
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
+
diff --git a/lib-python/2.2/encodings/uu_codec.py b/lib-python/2.2/encodings/uu_codec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/uu_codec.py
@@ -0,0 +1,112 @@
+""" Python 'uu_codec' Codec - UU content transfer encoding
+
+    Unlike most of the other codecs which target Unicode, this codec
+    will return Python string objects for both encode and decode.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com). Some details were
+    adapted from uu.py which was written by Lance Ellinghouse and
+    modified by Jack Jansen and Fredrik Lundh.
+
+"""
+import codecs, binascii
+
+### Codec APIs
+
+def uu_encode(input,errors='strict',filename='<data>',mode=0666):
+
+    """ Encodes the object input and returns a tuple (output
+        object, length consumed).
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    # NOTE(review): assert disappears under -O.
+    assert errors == 'strict'
+    from cStringIO import StringIO
+    from binascii import b2a_uu
+    infile = StringIO(input)
+    outfile = StringIO()
+    read = infile.read
+    write = outfile.write
+
+    # Encode: "begin <perms> <name>" header, then the payload in 45-byte
+    # chunks (the maximum uuencode line payload), then the end marker.
+    write('begin %o %s\n' % (mode & 0777, filename))
+    chunk = read(45)
+    while chunk:
+        write(b2a_uu(chunk))
+        chunk = read(45)
+    write(' \nend\n')
+    
+    return (outfile.getvalue(), len(input))
+
+def uu_decode(input,errors='strict'):
+
+    """ Decodes the object input and returns a tuple (output
+        object, length consumed).
+
+        input must be an object which provides the bf_getreadbuf
+        buffer slot. Python strings, buffer objects and memory
+        mapped files are examples of objects providing this slot.
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+        Note: filename and file mode information in the input data is
+        ignored.
+
+    """
+    assert errors == 'strict'
+    from cStringIO import StringIO
+    from binascii import a2b_uu
+    infile = StringIO(input)
+    outfile = StringIO()
+    readline = infile.readline
+    write = outfile.write
+
+    # Find start of encoded data: skip any preamble before "begin".
+    while 1:
+        s = readline()
+        if not s:
+            raise ValueError, 'Missing "begin" line in input data'
+        if s[:5] == 'begin':
+            break
+
+    # Decode until the "end" line or EOF (EOF means truncated input).
+    while 1:
+        s = readline()
+        if not s or \
+           s == 'end\n':
+            break
+        try:
+            data = a2b_uu(s)
+        except binascii.Error, v:
+            # Workaround for broken uuencoders by /Fredrik Lundh
+            # The first character encodes the payload byte count; derive
+            # how many encoded characters are actually valid and retry
+            # with only that prefix.
+            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
+            data = a2b_uu(s[:nbytes])
+            #sys.stderr.write("Warning: %s\n" % str(v))
+        write(data)
+    if not s:
+        raise ValueError, 'Truncated input data'
+
+    return (outfile.getvalue(), len(input))
+
+# Class-based API delegating to the module-level helpers above.
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+        return uu_encode(input,errors)
+    def decode(self,input,errors='strict'):
+        return uu_decode(input,errors)
+    
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (uu_encode,uu_decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/encodings/zlib_codec.py b/lib-python/2.2/encodings/zlib_codec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/encodings/zlib_codec.py
@@ -0,0 +1,63 @@
+""" Python 'zlib_codec' Codec - zlib compression encoding
+
+    Unlike most of the other codecs which target Unicode, this codec
+    will return Python string objects for both encode and decode.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+"""
+import codecs
+import zlib # this codec needs the optional zlib module !
+
+### Codec APIs
+
+def zlib_encode(input,errors='strict'):
+
+    """ Encodes the object input and returns a tuple (output
+        object, length consumed).
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    # NOTE(review): assert disappears under -O.
+    assert errors == 'strict'
+    output = zlib.compress(input)
+    return (output, len(input))
+
+def zlib_decode(input,errors='strict'):
+
+    """ Decodes the object input and returns a tuple (output
+        object, length consumed).
+
+        input must be an object which provides the bf_getreadbuf
+        buffer slot. Python strings, buffer objects and memory
+        mapped files are examples of objects providing this slot.
+
+        errors defines the error handling to apply. It defaults to
+        'strict' handling which is the only currently supported
+        error handling for this codec.
+
+    """
+    assert errors == 'strict'
+    output = zlib.decompress(input)
+    return (output, len(input))
+
+# Class-based API delegating to the module-level helpers above.
+class Codec(codecs.Codec):
+
+    def encode(self, input, errors='strict'):
+        return zlib_encode(input, errors)
+    def decode(self, input, errors='strict'):
+        return zlib_decode(input, errors)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+        
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    # Registration hook: (encoder, decoder, stream reader, stream writer).
+    return (zlib_encode,zlib_decode,StreamReader,StreamWriter)
diff --git a/lib-python/2.2/filecmp.py b/lib-python/2.2/filecmp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/filecmp.py
@@ -0,0 +1,331 @@
+"""Utilities for comparing files and directories.
+
+Classes:
+    dircmp
+
+Functions:
+    cmp(f1, f2, shallow=1, use_statcache=0) -> int
+    cmpfiles(a, b, common) -> ([], [], [])
+
+"""
+
+import os
+import stat
+import statcache
+
+__all__ = ["cmp","dircmp","cmpfiles"]
+
+# Cache of past comparisons: (f1, f2) -> (sig1, sig2, outcome).
+# NOTE(review): grows without bound over the process lifetime.
+_cache = {}
+BUFSIZE=8*1024
+
+def cmp(f1, f2, shallow=1, use_statcache=0):
+    """Compare two files.
+
+    Arguments:
+
+    f1 -- First file name
+
+    f2 -- Second file name
+
+    shallow -- Just check stat signature (do not read the files).
+               defaults to 1.
+
+    use_statcache -- Do not stat() each file directly: go through
+                     the statcache module for more efficiency.
+
+    Return value:
+
+    integer -- 1 if the files are the same, 0 otherwise.
+
+    This function uses a cache for past comparisons and the results,
+    with a cache invalidation mechanism relying on stale signatures.
+    Of course, if 'use_statcache' is true, this mechanism is defeated,
+    and the cache will never grow stale.
+
+    """
+    if use_statcache:
+        stat_function = statcache.stat
+    else:
+        stat_function = os.stat
+    s1 = _sig(stat_function(f1))
+    s2 = _sig(stat_function(f2))
+    # Only regular files can compare equal.
+    if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
+        return 0
+    # Shallow mode: identical (type, size, mtime) signature is enough.
+    if shallow and s1 == s2:
+        return 1
+    # Different sizes can never be byte-identical.
+    if s1[1] != s2[1]:
+        return 0
+
+    # Deep comparison, with signature-validated caching of the outcome.
+    result = _cache.get((f1, f2))
+    if result and (s1, s2) == result[:2]:
+        return result[2]
+    outcome = _do_cmp(f1, f2)
+    _cache[f1, f2] = s1, s2, outcome
+    return outcome
+
+def _sig(st):
+    # Comparison signature: (file type bits, size, modification time).
+    return (stat.S_IFMT(st[stat.ST_MODE]),
+            st[stat.ST_SIZE],
+            st[stat.ST_MTIME])
+
+def _do_cmp(f1, f2):
+    # Byte-for-byte comparison in BUFSIZE chunks; returns 1 if equal.
+    # Files are not explicitly closed — presumably left to GC; confirm.
+    bufsize = BUFSIZE
+    fp1 = open(f1, 'rb')
+    fp2 = open(f2, 'rb')
+    while 1:
+        b1 = fp1.read(bufsize)
+        b2 = fp2.read(bufsize)
+        if b1 != b2:
+            return 0
+        if not b1:
+            return 1
+
+# Directory comparison class.
+#
+class dircmp:
+    """A class that manages the comparison of 2 directories.
+
+    dircmp(a,b,ignore=None,hide=None)
+      A and B are directories.
+      IGNORE is a list of names to ignore,
+        defaults to ['RCS', 'CVS', 'tags'].
+      HIDE is a list of names to hide,
+        defaults to [os.curdir, os.pardir].
+
+    High level usage:
+      x = dircmp(dir1, dir2)
+      x.report() -> prints a report on the differences between dir1 and dir2
+       or
+      x.report_partial_closure() -> prints report on differences between dir1
+            and dir2, and reports on common immediate subdirectories.
+      x.report_full_closure() -> like report_partial_closure,
+            but fully recursive.
+
+    Attributes:
+     left_list, right_list: The files in dir1 and dir2,
+        filtered by hide and ignore.
+     common: a list of names in both dir1 and dir2.
+     left_only, right_only: names only in dir1, dir2.
+     common_dirs: subdirectories in both dir1 and dir2.
+     common_files: files in both dir1 and dir2.
+     common_funny: names in both dir1 and dir2 where the type differs between
+        dir1 and dir2, or the name is not stat-able.
+     same_files: list of identical files.
+     diff_files: list of filenames which differ.
+     funny_files: list of files which could not be compared.
+     subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
+     """
+
+    def __init__(self, a, b, ignore=None, hide=None): # Initialize
+        # No comparison work happens here: all phases run lazily via
+        # __getattr__ the first time a derived attribute is accessed.
+        self.left = a
+        self.right = b
+        if hide is None:
+            self.hide = [os.curdir, os.pardir] # Names never to be shown
+        else:
+            self.hide = hide
+        if ignore is None:
+            self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
+        else:
+            self.ignore = ignore
+
+    def phase0(self): # Compare everything except common subdirectories
+        self.left_list = _filter(os.listdir(self.left),
+                                 self.hide+self.ignore)
+        self.right_list = _filter(os.listdir(self.right),
+                                  self.hide+self.ignore)
+        self.left_list.sort()
+        self.right_list.sort()
+
+    # Attribute names produced by each phase, used by __getattr__ below
+    # to decide which phase computes a requested attribute.
+    __p4_attrs = ('subdirs',)
+    __p3_attrs = ('same_files', 'diff_files', 'funny_files')
+    __p2_attrs = ('common_dirs', 'common_files', 'common_funny')
+    __p1_attrs = ('common', 'left_only', 'right_only')
+    __p0_attrs = ('left_list', 'right_list')
+
+    def __getattr__(self, attr):
+        # Lazy evaluation: run the phase that defines the missing
+        # attribute, then return it. Phases chain implicitly because each
+        # phase reads attributes produced by earlier ones.
+        if attr in self.__p4_attrs:
+            self.phase4()
+        elif attr in self.__p3_attrs:
+            self.phase3()
+        elif attr in self.__p2_attrs:
+            self.phase2()
+        elif attr in self.__p1_attrs:
+            self.phase1()
+        elif attr in self.__p0_attrs:
+            self.phase0()
+        else:
+            raise AttributeError, attr
+        return getattr(self, attr)
+
+    def phase1(self): # Compute common names
+        a_only, b_only = [], []
+        common = {}
+        b = {}
+        for fnm in self.right_list:
+            b[fnm] = 1
+        for x in self.left_list:
+            if b.get(x, 0):
+                common[x] = 1
+            else:
+                a_only.append(x)
+        for x in self.right_list:
+            if common.get(x, 0):
+                pass
+            else:
+                b_only.append(x)
+        self.common = common.keys()
+        self.left_only = a_only
+        self.right_only = b_only
+
+    def phase2(self): # Distinguish files, directories, funnies
+        self.common_dirs = []
+        self.common_files = []
+        self.common_funny = []
+
+        for x in self.common:
+            a_path = os.path.join(self.left, x)
+            b_path = os.path.join(self.right, x)
+
+            # ok stays 1 only if both sides could be stat()ed.
+            ok = 1
+            try:
+                a_stat = statcache.stat(a_path)
+            except os.error, why:
+                # print 'Can\'t stat', a_path, ':', why[1]
+                ok = 0
+            try:
+                b_stat = statcache.stat(b_path)
+            except os.error, why:
+                # print 'Can\'t stat', b_path, ':', why[1]
+                ok = 0
+
+            if ok:
+                a_type = stat.S_IFMT(a_stat[stat.ST_MODE])
+                b_type = stat.S_IFMT(b_stat[stat.ST_MODE])
+                if a_type != b_type:
+                    self.common_funny.append(x)
+                elif stat.S_ISDIR(a_type):
+                    self.common_dirs.append(x)
+                elif stat.S_ISREG(a_type):
+                    self.common_files.append(x)
+                else:
+                    self.common_funny.append(x)
+            else:
+                self.common_funny.append(x)
+
+    def phase3(self): # Find out differences between common files
+        xx = cmpfiles(self.left, self.right, self.common_files)
+        self.same_files, self.diff_files, self.funny_files = xx
+
+    def phase4(self): # Find out differences between common subdirectories
+        # A new dircmp object is created for each common subdirectory,
+        # these are stored in a dictionary indexed by filename.
+        # The hide and ignore properties are inherited from the parent
+        self.subdirs = {}
+        for x in self.common_dirs:
+            a_x = os.path.join(self.left, x)
+            b_x = os.path.join(self.right, x)
+            self.subdirs[x]  = dircmp(a_x, b_x, self.ignore, self.hide)
+
+    def phase4_closure(self): # Recursively call phase4() on subdirectories
+        self.phase4()
+        for x in self.subdirs.keys():
+            self.subdirs[x].phase4_closure()
+
+    def report(self): # Print a report on the differences between a and b
+        # Output format is purposely lousy
+        print 'diff', self.left, self.right
+        if self.left_only:
+            self.left_only.sort()
+            print 'Only in', self.left, ':', self.left_only
+        if self.right_only:
+            self.right_only.sort()
+            print 'Only in', self.right, ':', self.right_only
+        if self.same_files:
+            self.same_files.sort()
+            print 'Identical files :', self.same_files
+        if self.diff_files:
+            self.diff_files.sort()
+            print 'Differing files :', self.diff_files
+        if self.funny_files:
+            self.funny_files.sort()
+            print 'Trouble with common files :', self.funny_files
+        if self.common_dirs:
+            self.common_dirs.sort()
+            print 'Common subdirectories :', self.common_dirs
+        if self.common_funny:
+            self.common_funny.sort()
+            print 'Common funny cases :', self.common_funny
+
+    def report_partial_closure(self): # Print reports on self and on subdirs
+        self.report()
+        for x in self.subdirs.keys():
+            print
+            self.subdirs[x].report()
+
+    def report_full_closure(self): # Report on self and subdirs recursively
+        self.report()
+        for x in self.subdirs.keys():
+            print
+            self.subdirs[x].report_full_closure()
+
+
+def cmpfiles(a, b, common, shallow=1, use_statcache=0):
+    """Compare common files in two directories.
+
+    a, b -- directory names
+    common -- list of file names found in both directories
+    shallow -- if true, do comparison based solely on stat() information
+    use_statcache -- if true, use statcache.stat() instead of os.stat()
+
+    Returns a tuple of three lists:
+      files that compare equal
+      files that are different
+      filenames that aren't regular files.
+
+    """
+    res = ([], [], [])
+    for x in common:
+        ax = os.path.join(a, x)
+        bx = os.path.join(b, x)
+        # _cmp's 0/1/2 result doubles as the index into res.
+        res[_cmp(ax, bx, shallow, use_statcache)].append(x)
+    return res
+
+
+# Compare two files.
+# Return:
+#       0 for equal
+#       1 for different
+#       2 for funny cases (can't stat, etc.)
+#
+def _cmp(a, b, sh, st):
+    try:
+        # cmp here is this module's cmp() (shadows the builtin): it
+        # returns 1 for equal files, so the negation maps equal -> 0
+        # and different -> 1.
+        return not abs(cmp(a, b, sh, st))
+    except os.error:
+        return 2
+
+
+# Return a copy with items that occur in skip removed.
+#
+def _filter(list, skip):
+    result = []
+    for item in list:
+        if item not in skip: result.append(item)
+    return result
+
+
+# Demonstration and testing.
+#
+def demo():
+    # Usage: filecmp.py [-r] dir1 dir2  (-r for the fully recursive report)
+    import sys
+    import getopt
+    options, args = getopt.getopt(sys.argv[1:], 'r')
+    if len(args) != 2:
+        raise getopt.error, 'need exactly two args'
+    dd = dircmp(args[0], args[1])
+    if ('-r', '') in options:
+        dd.report_full_closure()
+    else:
+        dd.report()
+
+if __name__ == '__main__':
+    demo()
diff --git a/lib-python/2.2/fileinput.py b/lib-python/2.2/fileinput.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/fileinput.py
@@ -0,0 +1,349 @@
+"""Helper class to quickly write a loop over all standard input files.
+
+Typical use is:
+
+    import fileinput
+    for line in fileinput.input():
+        process(line)
+
+This iterates over the lines of all files listed in sys.argv[1:],
+defaulting to sys.stdin if the list is empty.  If a filename is '-' it
+is also replaced by sys.stdin.  To specify an alternative list of
+filenames, pass it as the argument to input().  A single file name is
+also allowed.
+
+Functions filename(), lineno() return the filename and cumulative line
+number of the line that has just been read; filelineno() returns its
+line number in the current file; isfirstline() returns true iff the
+line just read is the first line of its file; isstdin() returns true
+iff the line was read from sys.stdin.  Function nextfile() closes the
+current file so that the next iteration will read the first line from
+the next file (if any); lines not read from the file will not count
+towards the cumulative line count; the filename is not changed until
+after the first line of the next file has been read.  Function close()
+closes the sequence.
+
+Before any lines have been read, filename() returns None and both line
+numbers are zero; nextfile() has no effect.  After all lines have been
+read, filename() and the line number functions return the values
+pertaining to the last line read; nextfile() has no effect.
+
+All files are opened in text mode.  If an I/O error occurs during
+opening or reading a file, the IOError exception is raised.
+
+If sys.stdin is used more than once, the second and further use will
+return no lines, except perhaps for interactive use, or if it has been
+explicitly reset (e.g. using sys.stdin.seek(0)).
+
+Empty files are opened and immediately closed; the only time their
+presence in the list of filenames is noticeable at all is when the
+last file opened is empty.
+
+It is possible that the last line of a file doesn't end in a newline
+character; otherwise lines are returned including the trailing
+newline.
+
+Class FileInput is the implementation; its methods filename(),
+lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
+correspond to the functions in the module.  In addition it has a
+readline() method which returns the next input line, and a
+__getitem__() method which implements the sequence behavior.  The
+sequence must be accessed in strictly sequential order; sequence
+access and readline() cannot be mixed.
+
+Optional in-place filtering: if the keyword argument inplace=1 is
+passed to input() or to the FileInput constructor, the file is moved
+to a backup file and standard output is directed to the input file.
+This makes it possible to write a filter that rewrites its input file
+in place.  If the keyword argument backup=".<some extension>" is also
+given, it specifies the extension for the backup file, and the backup
+file remains around; by default, the extension is ".bak" and it is
+deleted when the output file is closed.  In-place filtering is
+disabled when standard input is read.  XXX The current implementation
+does not work for MS-DOS 8+3 filesystems.
+
+Performance: this module is unfortunately one of the slower ways of
+processing large numbers of input lines.  Nevertheless, a significant
+speed-up has been obtained by using readlines(bufsize) instead of
+readline().  A new keyword argument, bufsize=N, is present on the
+input() function and the FileInput() class to override the default
+buffer size.
+
+XXX Possible additions:
+
+- optional getopt argument processing
+- specify open mode ('r' or 'rb')
+- fileno()
+- isatty()
+- read(), read(size), even readlines()
+
+"""
+
+import sys, os, stat
+
+__all__ = ["input","close","nextfile","filename","lineno","filelineno",
+           "isfirstline","isstdin","FileInput"]
+
+_state = None
+
+DEFAULT_BUFSIZE = 8*1024
+
def input(files=None, inplace=0, backup="", bufsize=0):
    """input([files[, inplace[, backup[, bufsize]]]])

    Create an instance of the FileInput class. The instance will be used
    as global state for the functions of this module, and is also returned
    to use during iteration. The parameters to this function will be passed
    along to the constructor of the FileInput class.
    """
    global _state
    # Refuse to start a new sequence while the previous one still has an
    # open file; close() (or exhausting the sequence) releases it.
    if _state and _state._file:
        raise RuntimeError, "input() already active"
    _state = FileInput(files, inplace, backup, bufsize)
    return _state
+
def close():
    """Close the sequence and forget the module-level state."""
    global _state
    # Clear the global before closing so a failing close() cannot leave
    # a half-dead instance behind as the active state.
    previous, _state = _state, None
    if previous:
        previous.close()
+
def nextfile():
    """
    Close the current file so that the next iteration will read the first
    line from the next file (if any); lines not read from the file will
    not count towards the cumulative line count. The filename is not
    changed until after the first line of the next file has been read.
    Before the first line has been read, this function has no effect;
    it cannot be used to skip the first file. After the last line of the
    last file has been read, this function has no effect.
    """
    if _state:
        return _state.nextfile()
    raise RuntimeError("no active input()")
+
def filename():
    """
    Return the name of the file currently being read.
    Before the first line has been read, returns None.
    """
    if _state:
        return _state.filename()
    raise RuntimeError("no active input()")
+
def lineno():
    """
    Return the cumulative line number of the line that has just been read.
    Before the first line has been read, returns 0. After the last line
    of the last file has been read, returns the line number of that line.
    """
    if _state:
        return _state.lineno()
    raise RuntimeError("no active input()")
+
def filelineno():
    """
    Return the line number in the current file. Before the first line
    has been read, returns 0. After the last line of the last file has
    been read, returns the line number of that line within the file.
    """
    if _state:
        return _state.filelineno()
    raise RuntimeError("no active input()")
+
def isfirstline():
    """
    Return true if the line just read is the first line of its file,
    otherwise return false.
    """
    if _state:
        return _state.isfirstline()
    raise RuntimeError("no active input()")
+
def isstdin():
    """
    Return true if the last line was read from sys.stdin,
    otherwise return false.
    """
    if _state:
        return _state.isstdin()
    raise RuntimeError("no active input()")
+
class FileInput:
    """class FileInput([files[, inplace[, backup[, bufsize]]]])

    Class FileInput is the implementation of the module; its methods
    filename(), lineno(), filelineno(), isfirstline(), isstdin(), nextfile()
    and close() correspond to the functions of the same name in the module.
    In addition it has a readline() method which returns the next
    input line, and a __getitem__() method which implements the
    sequence behavior. The sequence must be accessed in strictly
    sequential order; random access and readline() cannot be mixed.
    """

    def __init__(self, files=None, inplace=0, backup="", bufsize=0):
        # Normalize 'files' to a tuple of names: a bare string means a
        # single file, None means sys.argv[1:], and an empty list falls
        # back to stdin ('-').
        if type(files) == type(''):
            files = (files,)
        else:
            if files is None:
                files = sys.argv[1:]
            if not files:
                files = ('-',)
            else:
                files = tuple(files)
        self._files = files                          # filenames still to read
        self._inplace = inplace                      # in-place filtering on?
        self._backup = backup                        # backup ext ("" -> .bak, deleted)
        self._bufsize = bufsize or DEFAULT_BUFSIZE   # hint for readlines()
        self._savestdout = None                      # saved sys.stdout (inplace)
        self._output = None                          # replacement output (inplace)
        self._filename = None                        # name of current file
        self._lineno = 0                             # cumulative line number
        self._filelineno = 0                         # line number in current file
        self._file = None                            # currently open file object
        self._isstdin = 0                            # reading sys.stdin?
        self._backupfilename = None                  # backup name (inplace)
        self._buffer = []                            # lines from last readlines()
        self._bufindex = 0                           # next line to hand out

    def __del__(self):
        # Ensure files are closed (and backups cleaned up) on collection.
        self.close()

    def close(self):
        # Close the current file and prevent any further files from
        # being opened.
        self.nextfile()
        self._files = ()

    def __getitem__(self, i):
        # Sequence protocol; i must equal the current cumulative line
        # number, i.e. only strictly sequential access is supported.
        try:
            line = self._buffer[self._bufindex]
        except IndexError:
            pass
        else:
            # Fast path: serve the next line from the buffer.
            self._bufindex += 1
            self._lineno += 1
            self._filelineno += 1
            return line
        if i != self._lineno:
            raise RuntimeError, "accessing lines out of order"
        line = self.readline()
        if not line:
            raise IndexError, "end of input reached"
        return line

    def nextfile(self):
        # Close the current file, restore stdout if it was redirected
        # for in-place filtering, and discard any buffered lines.
        savestdout = self._savestdout
        self._savestdout = 0
        if savestdout:
            sys.stdout = savestdout

        output = self._output
        self._output = 0
        if output:
            output.close()

        file = self._file
        self._file = 0
        if file and not self._isstdin:
            file.close()

        # Remove the backup file unless the caller asked to keep it by
        # supplying an explicit backup extension.
        backupfilename = self._backupfilename
        self._backupfilename = 0
        if backupfilename and not self._backup:
            try: os.unlink(backupfilename)
            except: pass

        self._isstdin = 0
        self._buffer = []
        self._bufindex = 0

    def readline(self):
        # Return the next input line, or "" at the end of the last file.
        try:
            line = self._buffer[self._bufindex]
        except IndexError:
            pass
        else:
            # Fast path: serve the next line from the buffer.
            self._bufindex += 1
            self._lineno += 1
            self._filelineno += 1
            return line
        if not self._file:
            if not self._files:
                return ""
            # Advance to the next file in the list.
            self._filename = self._files[0]
            self._files = self._files[1:]
            self._filelineno = 0
            self._file = None
            self._isstdin = 0
            self._backupfilename = 0
            if self._filename == '-':
                self._filename = '<stdin>'
                self._file = sys.stdin
                self._isstdin = 1
            else:
                if self._inplace:
                    # In-place filtering: rename the input aside, read
                    # from the backup, and redirect stdout into a new
                    # file under the original name.
                    self._backupfilename = (
                        self._filename + (self._backup or os.extsep+"bak"))
                    try: os.unlink(self._backupfilename)
                    except os.error: pass
                    # The next few lines may raise IOError
                    os.rename(self._filename, self._backupfilename)
                    self._file = open(self._backupfilename, "r")
                    try:
                        perm = os.fstat(self._file.fileno())[stat.ST_MODE]
                    except:
                        self._output = open(self._filename, "w")
                    else:
                        # Try to carry the original permissions over to
                        # the replacement file.
                        fd = os.open(self._filename,
                                     os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
                                     perm)
                        self._output = os.fdopen(fd, "w")
                        try:
                            os.chmod(self._filename, perm)
                        except:
                            pass
                    self._savestdout = sys.stdout
                    sys.stdout = self._output
                else:
                    # This may raise IOError
                    self._file = open(self._filename, "r")
        # Refill the buffer; an empty result means this file is done.
        self._buffer = self._file.readlines(self._bufsize)
        self._bufindex = 0
        if not self._buffer:
            self.nextfile()
        # Recursive call
        return self.readline()

    def filename(self):
        # Name of the file currently being read (None before first line).
        return self._filename

    def lineno(self):
        # Cumulative line number of the last line read.
        return self._lineno

    def filelineno(self):
        # Line number of the last line read within the current file.
        return self._filelineno

    def isfirstline(self):
        # True iff the last line read was the first line of its file.
        return self._filelineno == 1

    def isstdin(self):
        # True iff the last line was read from sys.stdin.
        return self._isstdin
+
def _test():
    """Command-line test driver: echo input lines with position info.

    Options: -i  filter in place;  -b EXT  keep backups with extension EXT.
    """
    import getopt
    inplace = 0
    backup = 0
    opts, args = getopt.getopt(sys.argv[1:], "ib:")
    for o, a in opts:
        if o == '-i': inplace = 1
        if o == '-b': backup = a
    for line in input(args, inplace=inplace, backup=backup):
        # Strip a trailing newline (and a carriage return, for DOS files).
        if line[-1:] == '\n': line = line[:-1]
        if line[-1:] == '\r': line = line[:-1]
        print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
                                   isfirstline() and "*" or "", line)
    print "%d: %s[%d]" % (lineno(), filename(), filelineno())
+
# Run the test driver when executed as a script.
if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/fnmatch.py b/lib-python/2.2/fnmatch.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/fnmatch.py
@@ -0,0 +1,107 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case into account.
+
+The functions operate by translating the pattern into a regular
+expression.  They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN.  (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch","fnmatchcase","translate"]
+
+_cache = {}
+
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """

    import os
    # Case-normalize both sides, then defer to the exact-case matcher.
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
+
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT"""
    import os, posixpath
    pat = os.path.normcase(pat)
    # Compile (and cache) the translated pattern once per distinct PAT.
    if pat not in _cache:
        _cache[pat] = re.compile(translate(pat))
    match = _cache[pat].match
    if os.path is posixpath:
        # normcase on posix is a no-op, so skip it inside the loop.
        return [name for name in names if match(name)]
    return [name for name in names if match(os.path.normcase(name))]
+
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """

    # Cached compiled patterns are never None, so get() doubles as a
    # membership test here.
    compiled = _cache.get(pat)
    if compiled is None:
        compiled = re.compile(translate(pat))
        _cache[pat] = compiled
    return compiled.match(name) is not None
+
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """

    parts = []
    idx, n = 0, len(pat)
    while idx < n:
        ch = pat[idx]
        idx = idx + 1
        if ch == '*':
            parts.append('.*')
        elif ch == '?':
            parts.append('.')
        elif ch == '[':
            # Scan for the closing bracket, allowing a leading '!'
            # (negation) and a ']' as the first member of the set.
            j = idx
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # Unterminated set: treat the '[' as a literal character.
                parts.append('\\[')
            else:
                inner = pat[idx:j].replace('\\', '\\\\')
                idx = j + 1
                if inner[0] == '!':
                    inner = '^' + inner[1:]
                elif inner[0] == '^':
                    inner = '\\' + inner
                parts.append('[%s]' % inner)
        else:
            parts.append(re.escape(ch))
    return ''.join(parts) + "$"
diff --git a/lib-python/2.2/formatter.py b/lib-python/2.2/formatter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/formatter.py
@@ -0,0 +1,454 @@
+"""Generic output formatting.
+
+Formatter objects transform an abstract flow of formatting events into
+specific output events on writer objects. Formatters manage several stack
+structures to allow various properties of a writer object to be changed and
+restored; writers need not be able to handle relative changes nor any sort
+of ``change back'' operation. Specific writer properties which may be
+controlled via formatter objects are horizontal alignment, font, and left
+margin indentations. A mechanism is provided which supports providing
+arbitrary, non-exclusive style settings to a writer as well. Additional
+interfaces facilitate formatting events which are not reversible, such as
+paragraph separation.
+
+Writer objects encapsulate device interfaces. Abstract devices, such as
+file formats, are supported as well as physical devices. The provided
+implementations all work with abstract devices. The interface makes
+available mechanisms for setting the properties which formatter objects
+manage and inserting data into the output.
+"""
+
+import string
+import sys
+from types import StringType
+
+
+AS_IS = None
+
+
class NullFormatter:
    """A formatter which does nothing.

    If the writer parameter is omitted, a NullWriter instance is created.
    No methods of the writer are called by NullFormatter instances.

    Implementations should inherit from this class if implementing a
    formatter interface but don't need to inherit any implementation.

    """

    def __init__(self, writer=None):
        # Default to a NullWriter so self.writer is always a valid object,
        # even though no methods are ever invoked on it here.
        if not writer:
            writer = NullWriter()
        self.writer = writer
    # Every formatting event below is accepted and silently ignored.
    def end_paragraph(self, blankline): pass
    def add_line_break(self): pass
    def add_hor_rule(self, *args, **kw): pass
    def add_label_data(self, format, counter, blankline=None): pass
    def add_flowing_data(self, data): pass
    def add_literal_data(self, data): pass
    def flush_softspace(self): pass
    def push_alignment(self, align): pass
    def pop_alignment(self): pass
    def push_font(self, x): pass
    def pop_font(self): pass
    def push_margin(self, margin): pass
    def pop_margin(self): pass
    def set_spacing(self, spacing): pass
    def push_style(self, *styles): pass
    def pop_style(self, n=1): pass
    def assert_line_data(self, flag=1): pass
+
+
class AbstractFormatter:
    """The standard formatter.

    This implementation has demonstrated wide applicability to many writers,
    and may be used directly in most circumstances.  It has been used to
    implement a full-featured World Wide Web browser.

    """

    #  Space handling policy:  blank spaces at the boundary between elements
    #  are handled by the outermost context.  "Literal" data is not checked
    #  to determine context, so spaces in literal data are handled directly
    #  in all circumstances.

    def __init__(self, writer):
        self.writer = writer            # Output device
        self.align = None               # Current alignment
        self.align_stack = []           # Alignment stack
        self.font_stack = []            # Font state
        self.margin_stack = []          # Margin state
        self.spacing = None             # Vertical spacing state
        self.style_stack = []           # Other state, e.g. color
        self.nospace = 1                # Should leading space be suppressed
        self.softspace = 0              # Should a space be inserted
        self.para_end = 1               # Just ended a paragraph
        self.parskip = 0                # Skipped space between paragraphs?
        self.hard_break = 1             # Have a hard break
        self.have_label = 0             # Just emitted a list-item label?

    def end_paragraph(self, blankline):
        """End the paragraph, ensuring at least `blankline` blank lines."""
        if not self.hard_break:
            self.writer.send_line_break()
            self.have_label = 0
        # parskip tracks blank lines already emitted; only send the
        # difference so consecutive calls don't pile up blank lines.
        if self.parskip < blankline and not self.have_label:
            self.writer.send_paragraph(blankline - self.parskip)
            self.parskip = blankline
            self.have_label = 0
        self.hard_break = self.nospace = self.para_end = 1
        self.softspace = 0

    def add_line_break(self):
        """Emit a line break unless one is already in effect."""
        if not (self.hard_break or self.para_end):
            self.writer.send_line_break()
            self.have_label = self.parskip = 0
        self.hard_break = self.nospace = 1
        self.softspace = 0

    def add_hor_rule(self, *args, **kw):
        """Emit a horizontal rule, forcing a line break first if needed."""
        if not self.hard_break:
            self.writer.send_line_break()
        # apply() forwards arbitrary positional/keyword args to the writer.
        apply(self.writer.send_hor_rule, args, kw)
        self.hard_break = self.nospace = 1
        self.have_label = self.para_end = self.softspace = self.parskip = 0

    def add_label_data(self, format, counter, blankline = None):
        """Emit a list-item label; string formats go through format_counter()."""
        if self.have_label or not self.hard_break:
            self.writer.send_line_break()
        if not self.para_end:
            self.writer.send_paragraph((blankline and 1) or 0)
        if type(format) is StringType:
            self.writer.send_label_data(self.format_counter(format, counter))
        else:
            self.writer.send_label_data(format)
        self.nospace = self.have_label = self.hard_break = self.para_end = 1
        self.softspace = self.parskip = 0

    def format_counter(self, format, counter):
        """Expand a label format: '1' -> decimal, 'a'/'A' -> letters,
        'i'/'I' -> roman numerals; other characters are copied verbatim."""
        label = ''
        for c in format:
            if c == '1':
                label = label + ('%d' % counter)
            elif c in 'aA':
                if counter > 0:
                    label = label + self.format_letter(c, counter)
            elif c in 'iI':
                if counter > 0:
                    label = label + self.format_roman(c, counter)
            else:
                label = label + c
        return label

    def format_letter(self, case, counter):
        """Render counter as letters: 1->a, 26->z, 27->aa, ... (case given)."""
        label = ''
        while counter > 0:
            counter, x = divmod(counter-1, 26)
            # This makes a strong assumption that lowercase letters
            # and uppercase letters form two contiguous blocks, with
            # letters in order!
            s = chr(ord(case) + x)
            label = s + label
        return label

    def format_roman(self, case, counter):
        """Render counter as a roman numeral ('i' or 'I' selects case)."""
        ones = ['i', 'x', 'c', 'm']
        fives = ['v', 'l', 'd']
        label, index = '', 0
        # This will die of IndexError when counter is too big
        while counter > 0:
            counter, x = divmod(counter, 10)
            if x == 9:
                label = ones[index] + ones[index+1] + label
            elif x == 4:
                label = ones[index] + fives[index] + label
            else:
                if x >= 5:
                    s = fives[index]
                    x = x-5
                else:
                    s = ''
                s = s + ones[index]*x
                label = s + label
            index = index + 1
        if case == 'I':
            return label.upper()
        return label

    def add_flowing_data(self, data,
                         # These are only here to load them into locals:
                         whitespace = string.whitespace,
                         join = string.join, split = string.split):
        """Add word-wrappable text, collapsing whitespace runs to single
        spaces and tracking boundary spaces via nospace/softspace."""
        if not data: return
        # The following looks a bit convoluted but is a great improvement over
        # data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
        prespace = data[:1] in whitespace
        postspace = data[-1:] in whitespace
        data = join(split(data))
        if self.nospace and not data:
            return
        elif prespace or self.softspace:
            if not data:
                if not self.nospace:
                    self.softspace = 1
                    self.parskip = 0
                return
            if not self.nospace:
                data = ' ' + data
        self.hard_break = self.nospace = self.para_end = \
                          self.parskip = self.have_label = 0
        self.softspace = postspace
        self.writer.send_flowing_data(data)

    def add_literal_data(self, data):
        """Add preformatted text; whitespace is passed through untouched."""
        if not data: return
        if self.softspace:
            self.writer.send_flowing_data(" ")
        self.hard_break = data[-1:] == '\n'
        self.nospace = self.para_end = self.softspace = \
                       self.parskip = self.have_label = 0
        self.writer.send_literal_data(data)

    def flush_softspace(self):
        """Emit any pending soft space immediately."""
        if self.softspace:
            self.hard_break = self.para_end = self.parskip = \
                              self.have_label = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')

    def push_alignment(self, align):
        """Push a new alignment; None/duplicate keeps the current one."""
        if align and align != self.align:
            self.writer.new_alignment(align)
            self.align = align
            self.align_stack.append(align)
        else:
            self.align_stack.append(self.align)

    def pop_alignment(self):
        """Restore the alignment in effect before the matching push."""
        if self.align_stack:
            del self.align_stack[-1]
        if self.align_stack:
            self.align = align = self.align_stack[-1]
            self.writer.new_alignment(align)
        else:
            self.align = None
            self.writer.new_alignment(None)

    def push_font(self, (size, i, b, tt)):
        """Push a (size, italic, bold, teletype) font; AS_IS components
        inherit their value from the current font."""
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        if self.font_stack:
            csize, ci, cb, ctt = self.font_stack[-1]
            if size is AS_IS: size = csize
            if i is AS_IS: i = ci
            if b is AS_IS: b = cb
            if tt is AS_IS: tt = ctt
        font = (size, i, b, tt)
        self.font_stack.append(font)
        self.writer.new_font(font)

    def pop_font(self):
        """Restore the font in effect before the matching push."""
        if self.font_stack:
            del self.font_stack[-1]
        if self.font_stack:
            font = self.font_stack[-1]
        else:
            font = None
        self.writer.new_font(font)

    def push_margin(self, margin):
        """Push a margin; a false margin re-uses the nearest real one."""
        self.margin_stack.append(margin)
        # Only non-false margins count towards the nesting level.
        fstack = filter(None, self.margin_stack)
        if not margin and fstack:
            margin = fstack[-1]
        self.writer.new_margin(margin, len(fstack))

    def pop_margin(self):
        """Restore the margin in effect before the matching push."""
        if self.margin_stack:
            del self.margin_stack[-1]
        fstack = filter(None, self.margin_stack)
        if fstack:
            margin = fstack[-1]
        else:
            margin = None
        self.writer.new_margin(margin, len(fstack))

    def set_spacing(self, spacing):
        """Set the vertical spacing state and notify the writer."""
        self.spacing = spacing
        self.writer.new_spacing(spacing)

    def push_style(self, *styles):
        """Push one or more styles; the writer sees the whole stack."""
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        for style in styles:
            self.style_stack.append(style)
        self.writer.new_styles(tuple(self.style_stack))

    def pop_style(self, n=1):
        """Pop the top n styles and notify the writer."""
        del self.style_stack[-n:]
        self.writer.new_styles(tuple(self.style_stack))

    def assert_line_data(self, flag=1):
        """Record that data went straight to the writer; flag=0 means the
        data ended with a line break."""
        self.nospace = self.hard_break = not flag
        self.para_end = self.parskip = self.have_label = 0
+
+
class NullWriter:
    """Minimal writer interface to use in testing & inheritance.

    A writer which only provides the interface definition; no actions are
    taken on any methods.  This should be the base class for all writers
    which do not need to inherit any implementation methods.

    """
    def __init__(self): pass
    def flush(self): pass
    # State-change notifications from the formatter:
    def new_alignment(self, align): pass
    def new_font(self, font): pass
    def new_margin(self, margin, level): pass
    def new_spacing(self, spacing): pass
    def new_styles(self, styles): pass
    # Output events from the formatter:
    def send_paragraph(self, blankline): pass
    def send_line_break(self): pass
    def send_hor_rule(self, *args, **kw): pass
    def send_label_data(self, data): pass
    def send_flowing_data(self, data): pass
    def send_literal_data(self, data): pass
+
+
class AbstractWriter(NullWriter):
    """A writer which can be used in debugging formatters, but not much else.

    Each method simply announces itself by printing its name and
    arguments on standard output.

    """

    # Note: the backticks below are the Python 2 repr() operator.

    def new_alignment(self, align):
        print "new_alignment(%s)" % `align`

    def new_font(self, font):
        print "new_font(%s)" % `font`

    def new_margin(self, margin, level):
        print "new_margin(%s, %d)" % (`margin`, level)

    def new_spacing(self, spacing):
        print "new_spacing(%s)" % `spacing`

    def new_styles(self, styles):
        print "new_styles(%s)" % `styles`

    def send_paragraph(self, blankline):
        print "send_paragraph(%s)" % `blankline`

    def send_line_break(self):
        print "send_line_break()"

    def send_hor_rule(self, *args, **kw):
        print "send_hor_rule()"

    def send_label_data(self, data):
        print "send_label_data(%s)" % `data`

    def send_flowing_data(self, data):
        print "send_flowing_data(%s)" % `data`

    def send_literal_data(self, data):
        print "send_literal_data(%s)" % `data`
+
+
class DumbWriter(NullWriter):
    """Simple writer class which writes output on the file object passed in
    as the file parameter or, if file is omitted, on standard output.  The
    output is simply word-wrapped to the number of columns specified by
    the maxcol parameter.  This class is suitable for reflowing a sequence
    of paragraphs.

    """

    def __init__(self, file=None, maxcol=72):
        self.file = file or sys.stdout
        self.maxcol = maxcol
        NullWriter.__init__(self)
        self.reset()

    def reset(self):
        # col is the current output column; atbreak records whether the
        # next flowing word may be preceded by a space/line break.
        self.col = 0
        self.atbreak = 0

    def send_paragraph(self, blankline):
        # One newline ends the line; additional ones produce blank lines.
        self.file.write('\n'*blankline)
        self.col = 0
        self.atbreak = 0

    def send_line_break(self):
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0

    def send_hor_rule(self, *args, **kw):
        # Rule arguments are ignored; always draw a full-width dashed line.
        self.file.write('\n')
        self.file.write('-'*self.maxcol)
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0

    def send_literal_data(self, data):
        self.file.write(data)
        # Track the output column: restart at the last newline, then
        # count the remaining characters with tabs expanded.
        i = data.rfind('\n')
        if i >= 0:
            self.col = 0
            data = data[i+1:]
        data = data.expandtabs()
        self.col = self.col + len(data)
        self.atbreak = 0

    def send_flowing_data(self, data):
        # Word-wrap: emit each word preceded by a space, or by a newline
        # when the word would run past maxcol.
        if not data: return
        atbreak = self.atbreak or data[0] in string.whitespace
        col = self.col
        maxcol = self.maxcol
        write = self.file.write
        for word in data.split():
            if atbreak:
                if col + len(word) >= maxcol:
                    write('\n')
                    col = 0
                else:
                    write(' ')
                    col = col + 1
            write(word)
            col = col + len(word)
            atbreak = 1
        self.col = col
        self.atbreak = data[-1] in string.whitespace
+
+
def test(file = None):
    """Reflow a text file (argument, sys.argv[1] or stdin) through an
    AbstractFormatter driving a DumbWriter."""
    writer = DumbWriter()
    fmt = AbstractFormatter(writer)
    if file:
        fp = open(file)
    elif sys.argv[1:]:
        fp = open(sys.argv[1])
    else:
        fp = sys.stdin
    for line in fp:
        if line == '\n':
            # A blank line ends the current paragraph.
            fmt.end_paragraph(1)
        else:
            fmt.add_flowing_data(line)
    fmt.end_paragraph(0)
+
+
# Run the reflow demo when executed as a script.
if __name__ == '__main__':
    test()
diff --git a/lib-python/2.2/fpformat.py b/lib-python/2.2/fpformat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/fpformat.py
@@ -0,0 +1,142 @@
+"""General floating point formatting functions.
+
+Functions:
+fix(x, digits_behind)
+sci(x, digits_behind)
+
+Each takes a number or a string and a number of digits as arguments.
+
+Parameters:
+x:             number to be formatted; or a string resembling a number
+digits_behind: number of digits behind the decimal point
+"""
+
+import re
+
+__all__ = ["fix","sci","NotANumber"]
+
+# Compiled regular expression to "decode" a number
+decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
+# \0 the whole thing
+# \1 leading sign or empty
+# \2 digits left of decimal point
+# \3 fraction (empty or begins with point)
+# \4 exponent part (empty or begins with 'e' or 'E')
+
+# NotANumber is a class exception where supported; very old Pythons raised
+# TypeError on subclassing ValueError, so fall back to a string exception
+# for backward compatibility.
+try:
+    class NotANumber(ValueError):
+        pass
+except TypeError:
+    NotANumber = 'fpformat.NotANumber'
+
+def extract(s):
+    """Return (sign, intpart, fraction, expo) or raise an exception:
+    sign is '+' or '-'
+    intpart is 0 or more digits beginning with a nonzero
+    fraction is 0 or more digits
+    expo is an integer"""
+    res = decoder.match(s)
+    if res is None: raise NotANumber, s
+    sign, intpart, fraction, exppart = res.group(1,2,3,4)
+    # Normalize away a leading '+'; only '-' is preserved.
+    if sign == '+': sign = ''
+    # The regex leaves the '.' on the fraction group; drop it.
+    if fraction: fraction = fraction[1:]
+    # The exponent group includes the 'e'/'E' marker; strip before int().
+    if exppart: expo = int(exppart[1:])
+    else: expo = 0
+    return sign, intpart, fraction, expo
+
+def unexpo(intpart, fraction, expo):
+    """Remove the exponent by changing intpart and fraction."""
+    if expo > 0: # Move the point left
+        f = len(fraction)
+        intpart, fraction = intpart + fraction[:expo], fraction[expo:]
+        if expo > f:
+            intpart = intpart + '0'*(expo-f)
+    elif expo < 0: # Move the point right
+        i = len(intpart)
+        intpart, fraction = intpart[:expo], intpart[expo:] + fraction
+        if expo < -i:
+            fraction = '0'*(-expo-i) + fraction
+    return intpart, fraction
+
+def roundfrac(intpart, fraction, digs):
+    """Round or extend the fraction to size digs.
+
+    Returns a new (intpart, fraction) pair; when digs < 0 the integer
+    part itself is rounded and zero-padded.
+    """
+    f = len(fraction)
+    if f <= digs:
+        # Nothing to round -- just pad the fraction with zeros.
+        return intpart, fraction + '0'*(digs-f)
+    i = len(intpart)
+    if i+digs < 0:
+        # Rounding position is left of every digit: result is zero.
+        return '0'*-digs, ''
+    total = intpart + fraction
+    nextdigit = total[i+digs]
+    if nextdigit >= '5': # Hard case: increment last digit, may have carry!
+        n = i + digs - 1
+        # Scan left past the '9's that the carry will turn into '0's.
+        while n >= 0:
+            if total[n] != '9': break
+            n = n-1
+        else:
+            # Carry propagated past every digit: prepend room for it.
+            total = '0' + total
+            i = i+1
+            n = 0
+        total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
+        intpart, fraction = total[:i], total[i:]
+    if digs >= 0:
+        return intpart, fraction[:digs]
+    else:
+        return intpart[:digs] + '0'*-digs, ''
+
+def fix(x, digs):
+    """Format x as [-]ddd.ddd with 'digs' digits after the point
+    and at least one digit before.
+    If digs <= 0, the point is suppressed.
+
+    A string that does not parse as a number is returned unchanged.
+    """
+    if type(x) != type(''): x = `x`  # backquotes == repr() (Python 2)
+    try:
+        sign, intpart, fraction, expo = extract(x)
+    except NotANumber:
+        return x
+    intpart, fraction = unexpo(intpart, fraction, expo)
+    intpart, fraction = roundfrac(intpart, fraction, digs)
+    # Strip leading zeros, but keep a single '0' for a zero integer part.
+    while intpart and intpart[0] == '0': intpart = intpart[1:]
+    if intpart == '': intpart = '0'
+    if digs > 0: return sign + intpart + '.' + fraction
+    else: return sign + intpart
+
+def sci(x, digs):
+    """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
+    and exactly one digit before.
+    If digs is <= 0, one digit is kept and the point is suppressed."""
+    if type(x) != type(''): x = `x`  # backquotes == repr() (Python 2)
+    sign, intpart, fraction, expo = extract(x)
+    if not intpart:
+        # |x| < 1: shift zeros out of the fraction to find the first
+        # significant digit, adjusting the exponent as we go.
+        while fraction and fraction[0] == '0':
+            fraction = fraction[1:]
+            expo = expo - 1
+        if fraction:
+            intpart, fraction = fraction[0], fraction[1:]
+            expo = expo - 1
+        else:
+            intpart = '0'
+    else:
+        # Normalize to one leading digit; fold the rest into the fraction.
+        expo = expo + len(intpart) - 1
+        intpart, fraction = intpart[0], intpart[1:] + fraction
+    digs = max(0, digs)
+    intpart, fraction = roundfrac(intpart, fraction, digs)
+    if len(intpart) > 1:
+        # Rounding overflowed into a second integer digit; renormalize.
+        intpart, fraction, expo = \
+            intpart[0], intpart[1:] + fraction[:-1], \
+            expo + len(intpart) - 1
+    s = sign + intpart
+    if digs > 0: s = s + '.' + fraction
+    e = `abs(expo)`
+    # Exponent field is at least three digits with an explicit sign.
+    e = '0'*(3-len(e)) + e
+    if expo < 0: e = '-' + e
+    else: e = '+' + e
+    return s + 'e' + e
+
+def test():
+    """Interactive test run."""
+    # NOTE(review): Python 2 input() evaluates the typed expression --
+    # interactive/trusted use only.
+    try:
+        while 1:
+            x, digs = input('Enter (x, digs): ')
+            print x, fix(x, digs), sci(x, digs)
+    except (EOFError, KeyboardInterrupt):
+        pass
diff --git a/lib-python/2.2/ftplib.py b/lib-python/2.2/ftplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/ftplib.py
@@ -0,0 +1,804 @@
+"""An FTP client class and some helper functions.
+
+Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
+
+Example:
+
+>>> from ftplib import FTP
+>>> ftp = FTP('ftp.python.org') # connect to host, default port
+>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
+'230 Guest login ok, access restrictions apply.'
+>>> ftp.retrlines('LIST') # list directory contents
+total 9
+drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 .
+drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 ..
+drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 bin
+drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 etc
+d-wxrwxr-x   2 ftp      wheel        1024 Sep  5 13:43 incoming
+drwxr-xr-x   2 root     wheel        1024 Nov 17  1993 lib
+drwxr-xr-x   6 1094     wheel        1024 Sep 13 19:07 pub
+drwxr-xr-x   3 root     wheel        1024 Jan  3  1994 usr
+-rw-r--r--   1 root     root          312 Aug  1  1994 welcome.msg
+'226 Transfer complete.'
+>>> ftp.quit()
+'221 Goodbye.'
+>>>
+
+A nice test that reveals some of the network dialogue would be:
+python ftplib.py -d localhost -l -p -l
+"""
+
+#
+# Changes and improvements suggested by Steve Majewski.
+# Modified by Jack to work on the mac.
+# Modified by Siebren to support docstrings and PASV.
+#
+
+import os
+import sys
+import string
+
+# Import SOCKS module if it exists, else standard socket module socket
+try:
+    import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
+    from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
+except ImportError:
+    import socket
+
+__all__ = ["FTP","Netrc"]
+
+# Magic number from <socket.h>
+MSG_OOB = 0x1                           # Process data out of band
+# (used by FTP.abort() to send ABOR as urgent data)
+
+
+# The standard FTP server control port
+FTP_PORT = 21
+
+
+# Exception raised when an error or invalid response is received
+class Error(Exception): pass
+class error_reply(Error): pass          # unexpected [123]xx reply
+class error_temp(Error): pass           # 4xx errors
+class error_perm(Error): pass           # 5xx errors
+class error_proto(Error): pass          # response does not begin with [1-5]
+
+
+# All exceptions (hopefully) that may be raised here and that aren't
+# (always) programming errors on our side
+all_errors = (Error, socket.error, IOError, EOFError)
+
+
+# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
+CRLF = '\r\n'
+
+
+# The class itself
+class FTP:
+
+    '''An FTP client class.
+
+    To create a connection, call the class using these argument:
+            host, user, passwd, acct
+    These are all strings, and have default value ''.
+    Then use self.connect() with optional host and port argument.
+
+    To download a file, use ftp.retrlines('RETR ' + filename),
+    or ftp.retrbinary() with slightly different arguments.
+    To upload a file, use ftp.storlines() or ftp.storbinary(),
+    which have an open file as argument (see their definitions
+    below for details).
+    The download/upload functions first issue appropriate TYPE
+    and PORT or PASV commands.
+'''
+
+    # Class-level defaults; connect()/login() update them per instance.
+    debugging = 0
+    host = ''
+    port = FTP_PORT
+    sock = None
+    file = None
+    welcome = None
+    passiveserver = 1
+
+    # Initialization method (called by class instantiation).
+    # Initialize host to localhost, port to standard ftp port
+    # Optional arguments are host (for connect()),
+    # and user, passwd, acct (for login())
+    def __init__(self, host='', user='', passwd='', acct=''):
+        if host:
+            self.connect(host)
+            if user: self.login(user, passwd, acct)
+
+    def connect(self, host = '', port = 0):
+        '''Connect to host.  Arguments are:
+        - host: hostname to connect to (string, default previous host)
+        - port: port to connect to (integer, default previous port)'''
+        if host: self.host = host
+        if port: self.port = port
+        msg = "getaddrinfo returns an empty list"
+        # Walk all addresses returned by getaddrinfo until one connects.
+        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            try:
+                self.sock = socket.socket(af, socktype, proto)
+                self.sock.connect(sa)
+            except socket.error, msg:
+                if self.sock:
+                    self.sock.close()
+                self.sock = None
+                continue
+            break
+        if not self.sock:
+            raise socket.error, msg
+        self.af = af
+        self.file = self.sock.makefile('rb')
+        self.welcome = self.getresp()
+        return self.welcome
+
+    def getwelcome(self):
+        '''Get the welcome message from the server.
+        (this is read and squirreled away by connect())'''
+        if self.debugging:
+            print '*welcome*', self.sanitize(self.welcome)
+        return self.welcome
+
+    def set_debuglevel(self, level):
+        '''Set the debugging level.
+        The required argument level means:
+        0: no debugging output (default)
+        1: print commands and responses but not body text etc.
+        2: also print raw lines read and sent before stripping CR/LF'''
+        self.debugging = level
+    debug = set_debuglevel
+
+    def set_pasv(self, val):
+        '''Use passive or active mode for data transfers.
+        With a false argument, use the normal PORT mode,
+        With a true argument, use the PASV command.'''
+        self.passiveserver = val
+
+    # Internal: "sanitize" a string for printing (masks PASS arguments)
+    def sanitize(self, s):
+        if s[:5] == 'pass ' or s[:5] == 'PASS ':
+            i = len(s)
+            while i > 5 and s[i-1] in '\r\n':
+                i = i-1
+            s = s[:5] + '*'*(i-5) + s[i:]
+        return `s`
+
+    # Internal: send one line to the server, appending CRLF
+    def putline(self, line):
+        line = line + CRLF
+        if self.debugging > 1: print '*put*', self.sanitize(line)
+        self.sock.sendall(line)
+
+    # Internal: send one command to the server (through putline())
+    def putcmd(self, line):
+        if self.debugging: print '*cmd*', self.sanitize(line)
+        self.putline(line)
+
+    # Internal: return one line from the server, stripping CRLF.
+    # Raise EOFError if the connection is closed
+    def getline(self):
+        line = self.file.readline()
+        if self.debugging > 1:
+            print '*get*', self.sanitize(line)
+        if not line: raise EOFError
+        if line[-2:] == CRLF: line = line[:-2]
+        elif line[-1:] in CRLF: line = line[:-1]
+        return line
+
+    # Internal: get a response from the server, which may possibly
+    # consist of multiple lines.  Return a single string with no
+    # trailing CRLF.  If the response consists of multiple lines,
+    # these are separated by '\n' characters in the string
+    def getmultiline(self):
+        line = self.getline()
+        # A dash after the 3-digit code marks a multi-line reply; read
+        # until the matching "code " terminator line.
+        if line[3:4] == '-':
+            code = line[:3]
+            while 1:
+                nextline = self.getline()
+                line = line + ('\n' + nextline)
+                if nextline[:3] == code and \
+                        nextline[3:4] != '-':
+                    break
+        return line
+
+    # Internal: get a response from the server.
+    # Raise various errors if the response indicates an error
+    def getresp(self):
+        resp = self.getmultiline()
+        if self.debugging: print '*resp*', self.sanitize(resp)
+        self.lastresp = resp[:3]
+        c = resp[:1]
+        if c == '4':
+            raise error_temp, resp
+        if c == '5':
+            raise error_perm, resp
+        if c not in '123':
+            raise error_proto, resp
+        return resp
+
+    def voidresp(self):
+        """Expect a response beginning with '2'."""
+        resp = self.getresp()
+        if resp[0] != '2':
+            raise error_reply, resp
+        return resp
+
+    def abort(self):
+        '''Abort a file transfer.  Uses out-of-band data.
+        This does not follow the procedure from the RFC to send Telnet
+        IP and Synch; that doesn't seem to work with the servers I've
+        tried.  Instead, just send the ABOR command as OOB data.'''
+        line = 'ABOR' + CRLF
+        if self.debugging > 1: print '*put urgent*', self.sanitize(line)
+        self.sock.sendall(line, MSG_OOB)
+        resp = self.getmultiline()
+        if resp[:3] not in ('426', '226'):
+            raise error_proto, resp
+
+    def sendcmd(self, cmd):
+        '''Send a command and return the response.'''
+        self.putcmd(cmd)
+        return self.getresp()
+
+    def voidcmd(self, cmd):
+        """Send a command and expect a response beginning with '2'."""
+        self.putcmd(cmd)
+        return self.voidresp()
+
+    def sendport(self, host, port):
+        '''Send a PORT command with the current host and the given
+        port number.
+        '''
+        hbytes = host.split('.')
+        pbytes = [`port/256`, `port%256`]
+        bytes = hbytes + pbytes
+        cmd = 'PORT ' + ','.join(bytes)
+        return self.voidcmd(cmd)
+
+    def sendeprt(self, host, port):
+        '''Send a EPRT command with the current host and the given port number.'''
+        af = 0
+        if self.af == socket.AF_INET:
+            af = 1
+        if self.af == socket.AF_INET6:
+            af = 2
+        if af == 0:
+            raise error_proto, 'unsupported address family'
+        fields = ['', `af`, host, `port`, '']
+        cmd = 'EPRT ' + string.joinfields(fields, '|')
+        return self.voidcmd(cmd)
+
+    def makeport(self):
+        '''Create a new socket and send a PORT command for it.'''
+        msg = "getaddrinfo returns an empty list"
+        sock = None
+        # Bind a listening socket on an ephemeral port in our address family.
+        for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
+            af, socktype, proto, canonname, sa = res
+            try:
+                sock = socket.socket(af, socktype, proto)
+                sock.bind(sa)
+            except socket.error, msg:
+                if sock:
+                    sock.close()
+                sock = None
+                continue
+            break
+        if not sock:
+            raise socket.error, msg
+        sock.listen(1)
+        port = sock.getsockname()[1] # Get proper port
+        host = self.sock.getsockname()[0] # Get proper host
+        if self.af == socket.AF_INET:
+            resp = self.sendport(host, port)
+        else:
+            resp = self.sendeprt(host, port)
+        return sock
+
+    def makepasv(self):
+        # Ask the server for a data port: PASV on IPv4, EPSV otherwise.
+        if self.af == socket.AF_INET:
+            host, port = parse227(self.sendcmd('PASV'))
+        else:
+            host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
+        return host, port
+
+    def ntransfercmd(self, cmd, rest=None):
+        """Initiate a transfer over the data connection.
+
+        If the transfer is active, send a port command and the
+        transfer command, and accept the connection.  If the server is
+        passive, send a pasv command, connect to it, and start the
+        transfer command.  Either way, return the socket for the
+        connection and the expected size of the transfer.  The
+        expected size may be None if it could not be determined.
+
+        Optional `rest' argument can be a string that is sent as the
+        argument to a RESTART command.  This is essentially a server
+        marker used to tell the server to skip over any data up to the
+        given marker.
+        """
+        size = None
+        if self.passiveserver:
+            host, port = self.makepasv()
+            af, socktype, proto, canon, sa = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0]
+            conn = socket.socket(af, socktype, proto)
+            conn.connect(sa)
+            if rest is not None:
+                self.sendcmd("REST %s" % rest)
+            resp = self.sendcmd(cmd)
+            if resp[0] != '1':
+                raise error_reply, resp
+        else:
+            sock = self.makeport()
+            if rest is not None:
+                self.sendcmd("REST %s" % rest)
+            resp = self.sendcmd(cmd)
+            if resp[0] != '1':
+                raise error_reply, resp
+            conn, sockaddr = sock.accept()
+        if resp[:3] == '150':
+            # this is conditional in case we received a 125
+            size = parse150(resp)
+        return conn, size
+
+    def transfercmd(self, cmd, rest=None):
+        """Like ntransfercmd() but returns only the socket."""
+        return self.ntransfercmd(cmd, rest)[0]
+
+    def login(self, user = '', passwd = '', acct = ''):
+        '''Login, default anonymous.'''
+        if not user: user = 'anonymous'
+        if not passwd: passwd = ''
+        if not acct: acct = ''
+        if user == 'anonymous' and passwd in ('', '-'):
+	    # If there is no anonymous ftp password specified
+	    # then we'll just use anonymous@
+	    # We don't send any other thing because:
+	    # - We want to remain anonymous
+	    # - We want to stop SPAM
+	    # - We don't want to let ftp sites to discriminate by the user,
+	    #   host or country.
+            passwd = passwd + 'anonymous@'
+        resp = self.sendcmd('USER ' + user)
+        if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
+        if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
+        if resp[0] != '2':
+            raise error_reply, resp
+        return resp
+
+    def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
+        """Retrieve data in binary mode.
+
+        `cmd' is a RETR command.  `callback' is a callback function is
+        called for each block.  No more than `blocksize' number of
+        bytes will be read from the socket.  Optional `rest' is passed
+        to transfercmd().
+
+        A new port is created for you.  Return the response code.
+        """
+        self.voidcmd('TYPE I')
+        conn = self.transfercmd(cmd, rest)
+        while 1:
+            data = conn.recv(blocksize)
+            if not data:
+                break
+            callback(data)
+        conn.close()
+        return self.voidresp()
+
+    def retrlines(self, cmd, callback = None):
+        '''Retrieve data in line mode.
+        The argument is a RETR or LIST command.
+        The callback function (2nd argument) is called for each line,
+        with trailing CRLF stripped.  This creates a new port for you.
+        print_line() is the default callback.'''
+        if not callback: callback = print_line
+        resp = self.sendcmd('TYPE A')
+        conn = self.transfercmd(cmd)
+        fp = conn.makefile('rb')
+        while 1:
+            line = fp.readline()
+            if self.debugging > 2: print '*retr*', `line`
+            if not line:
+                break
+            if line[-2:] == CRLF:
+                line = line[:-2]
+            elif line[-1:] == '\n':
+                line = line[:-1]
+            callback(line)
+        fp.close()
+        conn.close()
+        return self.voidresp()
+
+    def storbinary(self, cmd, fp, blocksize=8192):
+        '''Store a file in binary mode.'''
+        self.voidcmd('TYPE I')
+        conn = self.transfercmd(cmd)
+        while 1:
+            buf = fp.read(blocksize)
+            if not buf: break
+            conn.sendall(buf)
+        conn.close()
+        return self.voidresp()
+
+    def storlines(self, cmd, fp):
+        '''Store a file in line mode.'''
+        self.voidcmd('TYPE A')
+        conn = self.transfercmd(cmd)
+        while 1:
+            buf = fp.readline()
+            if not buf: break
+            # Normalize every line ending to CRLF before sending.
+            if buf[-2:] != CRLF:
+                if buf[-1] in CRLF: buf = buf[:-1]
+                buf = buf + CRLF
+            conn.sendall(buf)
+        conn.close()
+        return self.voidresp()
+
+    def acct(self, password):
+        '''Send new account name.'''
+        cmd = 'ACCT ' + password
+        return self.voidcmd(cmd)
+
+    def nlst(self, *args):
+        '''Return a list of files in a given directory (default the current).'''
+        cmd = 'NLST'
+        for arg in args:
+            cmd = cmd + (' ' + arg)
+        files = []
+        self.retrlines(cmd, files.append)
+        return files
+
+    def dir(self, *args):
+        '''List a directory in long form.
+        By default list current directory to stdout.
+        Optional last argument is callback function; all
+        non-empty arguments before it are concatenated to the
+        LIST command.  (This *should* only be used for a pathname.)'''
+        cmd = 'LIST'
+        func = None
+        if args[-1:] and type(args[-1]) != type(''):
+            args, func = args[:-1], args[-1]
+        for arg in args:
+            if arg:
+                cmd = cmd + (' ' + arg)
+        self.retrlines(cmd, func)
+
+    def rename(self, fromname, toname):
+        '''Rename a file.'''
+        resp = self.sendcmd('RNFR ' + fromname)
+        if resp[0] != '3':
+            raise error_reply, resp
+        return self.voidcmd('RNTO ' + toname)
+
+    def delete(self, filename):
+        '''Delete a file.'''
+        resp = self.sendcmd('DELE ' + filename)
+        if resp[:3] in ('250', '200'):
+            return resp
+        elif resp[:1] == '5':
+            raise error_perm, resp
+        else:
+            raise error_reply, resp
+
+    def cwd(self, dirname):
+        '''Change to a directory.'''
+        if dirname == '..':
+            try:
+                return self.voidcmd('CDUP')
+            except error_perm, msg:
+                if msg.args[0][:3] != '500':
+                    raise
+        elif dirname == '':
+            dirname = '.'  # does nothing, but could return error
+        cmd = 'CWD ' + dirname
+        return self.voidcmd(cmd)
+
+    def size(self, filename):
+        '''Retrieve the size of a file.'''
+        # Note that the RFC doesn't say anything about 'SIZE'
+        # NOTE(review): implicitly returns None unless the reply is '213'.
+        resp = self.sendcmd('SIZE ' + filename)
+        if resp[:3] == '213':
+            s = resp[3:].strip()
+            try:
+                return int(s)
+            except (OverflowError, ValueError):
+                return long(s)
+
+    def mkd(self, dirname):
+        '''Make a directory, return its full pathname.'''
+        resp = self.sendcmd('MKD ' + dirname)
+        return parse257(resp)
+
+    def rmd(self, dirname):
+        '''Remove a directory.'''
+        return self.voidcmd('RMD ' + dirname)
+
+    def pwd(self):
+        '''Return current working directory.'''
+        resp = self.sendcmd('PWD')
+        return parse257(resp)
+
+    def quit(self):
+        '''Quit, and close the connection.'''
+        resp = self.voidcmd('QUIT')
+        self.close()
+        return resp
+
+    def close(self):
+        '''Close the connection without assuming anything about it.'''
+        if self.file:
+            self.file.close()
+            self.sock.close()
+            self.file = self.sock = None
+
+
+# Lazily-compiled pattern for the optional "(N bytes)" size in 150 replies.
+_150_re = None
+
+def parse150(resp):
+    '''Parse the '150' response for a RETR request.
+    Returns the expected transfer size or None; size is not guaranteed to
+    be present in the 150 message.
+    '''
+    if resp[:3] != '150':
+        raise error_reply, resp
+    global _150_re
+    if _150_re is None:
+        # Compile on first use so importing ftplib stays cheap.
+        import re
+        _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
+    m = _150_re.match(resp)
+    if not m:
+        return None
+    s = m.group(1)
+    try:
+        return int(s)
+    except (OverflowError, ValueError):
+        return long(s)
+
+
+# Lazily-compiled pattern for the h1,h2,h3,h4,p1,p2 tuple in 227 replies.
+_227_re = None
+
+def parse227(resp):
+    '''Parse the '227' response for a PASV request.
+    Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
+    Return ('host.addr.as.numbers', port#) tuple.'''
+
+    if resp[:3] != '227':
+        raise error_reply, resp
+    global _227_re
+    if _227_re is None:
+        # Compile on first use so importing ftplib stays cheap.
+        import re
+        _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
+    m = _227_re.search(resp)
+    if not m:
+        raise error_proto, resp
+    numbers = m.groups()
+    host = '.'.join(numbers[:4])
+    # Port is transmitted as two bytes: high * 256 + low.
+    port = (int(numbers[4]) << 8) + int(numbers[5])
+    return host, port
+
+
+def parse229(resp, peer):
+    '''Parse the '229' response for a EPSV request.
+    Raises error_proto if it does not contain '(|||port|)'
+    Return ('host.addr.as.numbers', port#) tuple.'''
+
+    if resp[:3] <> '229':  # '<>' is Python 2 spelling of !=
+        raise error_reply, resp
+    left = string.find(resp, '(')
+    if left < 0: raise error_proto, resp
+    right = string.find(resp, ')', left + 1)
+    if right < 0:
+        raise error_proto, resp # should contain '(|||port|)'
+    # First and last delimiter characters inside the parens must match.
+    if resp[left + 1] <> resp[right - 1]:
+        raise error_proto, resp
+    parts = string.split(resp[left + 1:right], resp[left+1])
+    if len(parts) <> 5:
+        raise error_proto, resp
+    # EPSV replies carry only the port; the host is the control peer's.
+    host = peer[0]
+    port = string.atoi(parts[3])
+    return host, port
+
+
+def parse257(resp):
+    '''Parse the '257' response for a MKD or PWD request.
+    This is a response to a MKD or PWD request: a directory name.
+    Returns the directoryname in the 257 reply.'''
+
+    if resp[:3] != '257':
+        raise error_reply, resp
+    if resp[3:5] != ' "':
+        return '' # Not compliant to RFC 959, but UNIX ftpd does this
+    dirname = ''
+    i = 5
+    n = len(resp)
+    # Scan the quoted name; a doubled '"' is an escaped literal quote.
+    while i < n:
+        c = resp[i]
+        i = i+1
+        if c == '"':
+            if i >= n or resp[i] != '"':
+                break
+            i = i+1
+        dirname = dirname + c
+    return dirname
+
+
+def print_line(line):
+    '''Default retrlines callback to print a line.
+
+    Writes the (already CRLF-stripped) line to stdout.
+    '''
+    print line
+
+
+def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
+    '''Copy file from one FTP-instance to another.
+
+    The target passively listens (PASV reply from source is replayed to
+    it via PORT) so the two servers transfer directly to each other.
+    '''
+    if not targetname: targetname = sourcename
+    type = 'TYPE ' + type
+    source.voidcmd(type)
+    target.voidcmd(type)
+    sourcehost, sourceport = parse227(source.sendcmd('PASV'))
+    target.sendport(sourcehost, sourceport)
+    # RFC 959: the user must "listen" [...] BEFORE sending the
+    # transfer request.
+    # So: STOR before RETR, because here the target is a "user".
+    treply = target.sendcmd('STOR ' + targetname)
+    if treply[:3] not in ('125', '150'): raise error_proto  # RFC 959
+    sreply = source.sendcmd('RETR ' + sourcename)
+    if sreply[:3] not in ('125', '150'): raise error_proto  # RFC 959
+    source.voidresp()
+    target.voidresp()
+
+
+class Netrc:
+    """Class to parse & provide access to 'netrc' format files.
+
+    See the netrc(4) man page for information on the file format.
+
+    WARNING: This class is obsolete -- use module netrc instead.
+
+    """
+    # Defaults used when a host has no explicit entry ('default' token).
+    __defuser = None
+    __defpasswd = None
+    __defacct = None
+
+    def __init__(self, filename=None):
+        # Parse `filename` (default ~/.netrc) into host and macro tables.
+        if not filename:
+            if os.environ.has_key("HOME"):
+                filename = os.path.join(os.environ["HOME"],
+                                        ".netrc")
+            else:
+                raise IOError, \
+                      "specify file to load or set $HOME"
+        self.__hosts = {}
+        self.__macros = {}
+        fp = open(filename, "r")
+        in_macro = 0
+        while 1:
+            line = fp.readline()
+            if not line: break
+            # Inside a macdef, lines accumulate until a blank line ends it.
+            if in_macro and line.strip():
+                macro_lines.append(line)
+                continue
+            elif in_macro:
+                self.__macros[macro_name] = tuple(macro_lines)
+                in_macro = 0
+            words = line.split()
+            host = user = passwd = acct = None
+            default = 0
+            i = 0
+            while i < len(words):
+                w1 = words[i]
+                if i+1 < len(words):
+                    w2 = words[i + 1]
+                else:
+                    w2 = None
+                if w1 == 'default':
+                    default = 1
+                elif w1 == 'machine' and w2:
+                    host = w2.lower()
+                    i = i + 1
+                elif w1 == 'login' and w2:
+                    user = w2
+                    i = i + 1
+                elif w1 == 'password' and w2:
+                    passwd = w2
+                    i = i + 1
+                elif w1 == 'account' and w2:
+                    acct = w2
+                    i = i + 1
+                elif w1 == 'macdef' and w2:
+                    # Start of a macro definition; rest of line is ignored.
+                    macro_name = w2
+                    macro_lines = []
+                    in_macro = 1
+                    break
+                i = i + 1
+            if default:
+                self.__defuser = user or self.__defuser
+                self.__defpasswd = passwd or self.__defpasswd
+                self.__defacct = acct or self.__defacct
+            if host:
+                # Merge with any earlier entry for the same host.
+                if self.__hosts.has_key(host):
+                    ouser, opasswd, oacct = \
+                           self.__hosts[host]
+                    user = user or ouser
+                    passwd = passwd or opasswd
+                    acct = acct or oacct
+                self.__hosts[host] = user, passwd, acct
+        fp.close()
+
+    def get_hosts(self):
+        """Return a list of hosts mentioned in the .netrc file."""
+        return self.__hosts.keys()
+
+    def get_account(self, host):
+        """Returns login information for the named host.
+
+        The return value is a triple containing userid,
+        password, and the accounting field.
+
+        """
+        host = host.lower()
+        user = passwd = acct = None
+        if self.__hosts.has_key(host):
+            user, passwd, acct = self.__hosts[host]
+        # Fall back to the 'default' entry for any missing field.
+        user = user or self.__defuser
+        passwd = passwd or self.__defpasswd
+        acct = acct or self.__defacct
+        return user, passwd, acct
+
+    def get_macros(self):
+        """Return a list of all defined macro names."""
+        return self.__macros.keys()
+
+    def get_macro(self, macro):
+        """Return a sequence of lines which define a named macro."""
+        return self.__macros[macro]
+
+
+
+def test():
+    '''Test program.
+    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...'''
+
+    debugging = 0
+    rcfile = None
+    # Each leading -d bumps the debug level by one.
+    while sys.argv[1] == '-d':
+        debugging = debugging+1
+        del sys.argv[1]
+    if sys.argv[1][:2] == '-r':
+        # get name of alternate ~/.netrc file:
+        rcfile = sys.argv[1][2:]
+        del sys.argv[1]
+    host = sys.argv[1]
+    ftp = FTP(host)
+    ftp.set_debuglevel(debugging)
+    userid = passwd = acct = ''
+    try:
+        netrc = Netrc(rcfile)
+    except IOError:
+        if rcfile is not None:
+            sys.stderr.write("Could not open account file"
+                             " -- using anonymous login.")
+    else:
+        try:
+            userid, passwd, acct = netrc.get_account(host)
+        except KeyError:
+            # no account for host
+            sys.stderr.write(
+                    "No account -- using anonymous login.")
+    ftp.login(userid, passwd, acct)
+    # Remaining arguments: -lDIR lists, -dDIR changes directory,
+    # -p toggles passive mode, anything else is retrieved to stdout.
+    for file in sys.argv[2:]:
+        if file[:2] == '-l':
+            ftp.dir(file[2:])
+        elif file[:2] == '-d':
+            cmd = 'CWD'
+            if file[2:]: cmd = cmd + ' ' + file[2:]
+            resp = ftp.sendcmd(cmd)
+        elif file == '-p':
+            ftp.set_pasv(not ftp.passiveserver)
+        else:
+            ftp.retrbinary('RETR ' + file, \
+                           sys.stdout.write, 1024)
+    ftp.quit()
+
+
+# Run the command-line FTP test client when executed as a script.
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/getopt.py b/lib-python/2.2/getopt.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/getopt.py
@@ -0,0 +1,144 @@
+"""Parser for command line options.
+
+This module helps scripts to parse the command line arguments in
+sys.argv.  It supports the same conventions as the Unix getopt()
+function (including the special meanings of arguments of the form `-'
+and `--').  Long options similar to those supported by GNU software
+may be used as well via an optional third argument.  This module
+provides a single function and an exception:
+
+getopt() -- Parse command line options
+GetoptError -- exception (class) raised with 'opt' attribute, which is the
+option involved with the exception.
+"""
+
+# Long option support added by Lars Wirzenius <liw at iki.fi>.
+
+# Gerrit Holl <gerrit at nl.linux.org> moved the string-based exceptions
+# to class-based exceptions.
+
+__all__ = ["GetoptError","error","getopt"]
+
+class GetoptError(Exception):
+    """Raised for unrecognized or incorrectly used options.
+
+    'opt' holds the offending option name (without leading dashes);
+    'msg' holds the human-readable explanation."""
+    opt = ''
+    msg = ''
+    def __init__(self, msg, opt):
+        self.msg = msg
+        self.opt = opt
+        Exception.__init__(self, msg, opt)
+
+    def __str__(self):
+        return self.msg
+
+error = GetoptError # backward compatibility
+
+def getopt(args, shortopts, longopts = []):
+    """getopt(args, options[, long_options]) -> opts, args
+
+    Parses command line options and parameter list.  args is the
+    argument list to be parsed, without the leading reference to the
+    running program.  Typically, this means "sys.argv[1:]".  shortopts
+    is the string of option letters that the script wants to
+    recognize, with options that require an argument followed by a
+    colon (i.e., the same format that Unix getopt() uses).  If
+    specified, longopts is a list of strings with the names of the
+    long options which should be supported.  The leading '--'
+    characters should not be included in the option name.  Options
+    which require an argument should be followed by an equal sign
+    ('=').
+
+    The return value consists of two elements: the first is a list of
+    (option, value) pairs; the second is the list of program arguments
+    left after the option list was stripped (this is a trailing slice
+    of the first argument).  Each option-and-value pair returned has
+    the option as its first element, prefixed with a hyphen (e.g.,
+    '-x'), and the option argument as its second element, or an empty
+    string if the option has no argument.  The options occur in the
+    list in the same order in which they were found, thus allowing
+    multiple occurrences.  Long and short options may be mixed.
+
+    """
+
+    opts = []
+    # A lone string for longopts is accepted as a convenience.
+    if type(longopts) == type(""):
+        longopts = [longopts]
+    else:
+        longopts = list(longopts)
+    # A bare '-' is a positional argument; a bare '--' terminates
+    # option processing.
+    while args and args[0].startswith('-') and args[0] != '-':
+        if args[0] == '--':
+            args = args[1:]
+            break
+        if args[0].startswith('--'):
+            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
+        else:
+            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
+
+    return opts, args
+
+def do_longs(opts, opt, longopts, args):
+    # Handle one '--name' or '--name=value' token (dashes already
+    # stripped); consume the next argument when a value is required.
+    # Returns the updated (opts, args) pair.
+    try:
+        i = opt.index('=')
+    except ValueError:
+        optarg = None
+    else:
+        opt, optarg = opt[:i], opt[i+1:]
+
+    has_arg, opt = long_has_args(opt, longopts)
+    if has_arg:
+        if optarg is None:
+            if not args:
+                raise GetoptError('option --%s requires argument' % opt, opt)
+            optarg, args = args[0], args[1:]
+    elif optarg:
+        raise GetoptError('option --%s must not have an argument' % opt, opt)
+    opts.append(('--' + opt, optarg or ''))
+    return opts, args
+
+# Return:
+#   has_arg?
+#   full option name
+def long_has_args(opt, longopts):
+    # Abbreviated long options are accepted as long as the prefix
+    # matches exactly one declared option.
+    possibilities = [o for o in longopts if o.startswith(opt)]
+    if not possibilities:
+        raise GetoptError('option --%s not recognized' % opt, opt)
+    # Is there an exact match?
+    if opt in possibilities:
+        return 0, opt
+    elif opt + '=' in possibilities:
+        return 1, opt
+    # No exact match, so better be unique.
+    if len(possibilities) > 1:
+        # XXX since possibilities contains all valid continuations, might be
+        # nice to work them into the error msg
+        raise GetoptError('option --%s not a unique prefix' % opt, opt)
+    assert len(possibilities) == 1
+    unique_match = possibilities[0]
+    # A trailing '=' in the declaration means the option takes a value.
+    has_arg = unique_match.endswith('=')
+    if has_arg:
+        unique_match = unique_match[:-1]
+    return has_arg, unique_match
+
+def do_shorts(opts, optstring, shortopts, args):
+    # Handle a cluster of short options such as '-abc' (dash already
+    # stripped); for an option taking a value, the remainder of the
+    # cluster or the next argument supplies it.
+    while optstring != '':
+        opt, optstring = optstring[0], optstring[1:]
+        if short_has_arg(opt, shortopts):
+            if optstring == '':
+                if not args:
+                    raise GetoptError('option -%s requires argument' % opt,
+                                      opt)
+                optstring, args = args[0], args[1:]
+            optarg, optstring = optstring, ''
+        else:
+            optarg = ''
+        opts.append(('-' + opt, optarg))
+    return opts, args
+
+def short_has_arg(opt, shortopts):
+    # True when 'opt' appears in the spec followed by ':'; the ':'
+    # character itself is never a valid option letter.
+    for i in range(len(shortopts)):
+        if opt == shortopts[i] != ':':
+            return shortopts.startswith(':', i+1)
+    raise GetoptError('option -%s not recognized' % opt, opt)
+
+if __name__ == '__main__':
+    # Quick self-test: one short-with-arg, one short flag, one
+    # long-with-arg, one long flag.
+    import sys
+    print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
diff --git a/lib-python/2.2/getpass.py b/lib-python/2.2/getpass.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/getpass.py
@@ -0,0 +1,123 @@
+"""Utilities to get a password and/or the current user name.
+
+getpass(prompt) - prompt for a password, with echo turned off
+getuser() - get the user name from the environment or password database
+
+On Windows, the msvcrt module will be used.
+On the Mac EasyDialogs.AskPassword is used, if available.
+
+"""
+
+# Authors: Piers Lauder (original)
+#          Guido van Rossum (Windows support and cleanup)
+
+import sys
+
+__all__ = ["getpass","getuser"]
+
+def unix_getpass(prompt='Password: '):
+    """Prompt for a password, with echo turned off.
+
+    Restore terminal settings at end.
+    """
+
+    try:
+        fd = sys.stdin.fileno()
+    except:
+        # stdin has no file descriptor (e.g. replaced object); fall
+        # back to the echoing implementation rather than fail.
+        return default_getpass(prompt)
+
+    old = termios.tcgetattr(fd)     # a copy to save
+    new = old[:]
+
+    new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
+    try:
+        termios.tcsetattr(fd, termios.TCSADRAIN, new)
+        passwd = _raw_input(prompt)
+    finally:
+        # Always restore the terminal, even on KeyboardInterrupt/EOFError.
+        termios.tcsetattr(fd, termios.TCSADRAIN, old)
+
+    sys.stdout.write('\n')
+    return passwd
+
+
+def win_getpass(prompt='Password: '):
+    """Prompt for password with echo off, using Windows getch()."""
+    if sys.stdin is not sys.__stdin__:
+        # stdin was redirected or replaced; console tricks won't work.
+        return default_getpass(prompt)
+    import msvcrt
+    for c in prompt:
+        msvcrt.putch(c)
+    pw = ""
+    while 1:
+        c = msvcrt.getch()
+        if c == '\r' or c == '\n':
+            break
+        if c == '\003':
+            # Ctrl-C
+            raise KeyboardInterrupt
+        if c == '\b':
+            # Backspace: drop the last character typed.
+            pw = pw[:-1]
+        else:
+            pw = pw + c
+    msvcrt.putch('\r')
+    msvcrt.putch('\n')
+    return pw
+
+
+def default_getpass(prompt='Password: '):
+    # Last-resort prompt used when echo cannot be disabled; warns the
+    # user first.
+    print "Warning: Problem with getpass. Passwords may be echoed."
+    return _raw_input(prompt)
+
+
+def _raw_input(prompt=""):
+    # A raw_input() replacement that doesn't save the string in the
+    # GNU readline history.
+    prompt = str(prompt)
+    if prompt:
+        sys.stdout.write(prompt)
+    line = sys.stdin.readline()
+    if not line:
+        # EOF before any input at all.
+        raise EOFError
+    if line[-1] == '\n':
+        line = line[:-1]
+    return line
+
+
+def getuser():
+    """Get the username from the environment or password database.
+
+    First try various environment variables, then the password
+    database.  This works on Windows as long as USERNAME is set.
+
+    """
+
+    import os
+
+    for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
+        user = os.environ.get(name)
+        if user:
+            return user
+
+    # If this fails, the exception will "explain" why
+    # (pwd is Unix-only; ImportError/KeyError propagate to the caller).
+    import pwd
+    return pwd.getpwuid(os.getuid())[0]
+
+# Bind the name getpass to the appropriate function at import time:
+# Unix termios first, then Windows msvcrt, then Mac EasyDialogs,
+# finally the echoing fallback.
+try:
+    import termios
+    # it's possible there is an incompatible termios from the
+    # McMillan Installer, make sure we have a UNIX-compatible termios
+    termios.tcgetattr, termios.tcsetattr
+except (ImportError, AttributeError):
+    try:
+        import msvcrt
+    except ImportError:
+        try:
+            from EasyDialogs import AskPassword
+        except ImportError:
+            # Last resort: prompt with echo left on.
+            getpass = default_getpass
+        else:
+            getpass = AskPassword
+    else:
+        getpass = win_getpass
+else:
+    getpass = unix_getpass
diff --git a/lib-python/2.2/gettext.py b/lib-python/2.2/gettext.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/gettext.py
@@ -0,0 +1,311 @@
+"""Internationalization and localization support.
+
+This module provides internationalization (I18N) and localization (L10N)
+support for your Python programs by providing an interface to the GNU gettext
+message catalog library.
+
+I18N refers to the operation by which a program is made aware of multiple
+languages.  L10N refers to the adaptation of your program, once
+internationalized, to the local language and cultural habits.
+
+"""
+
+# This module represents the integration of work, contributions, feedback, and
+# suggestions from the following people:
+#
+# Martin von Loewis, who wrote the initial implementation of the underlying
+# C-based libintlmodule (later renamed _gettext), along with a skeletal
+# gettext.py implementation.
+#
+# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
+# which also included a pure-Python implementation to read .mo files if
+# intlmodule wasn't available.
+#
+# James Henstridge, who also wrote a gettext.py module, which has some
+# interesting, but currently unsupported experimental features: the notion of
+# a Catalog class and instances, and the ability to add to a catalog file via
+# a Python API.
+#
+# Barry Warsaw integrated these modules, wrote the .install() API and code,
+# and conformed all C and Python code to Python's coding standards.
+#
+# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
+# module.
+#
+# TODO:
+# - Lazy loading of .mo files.  Currently the entire catalog is loaded into
+#   memory, but that's probably bad for large translated programs.  Instead,
+#   the lexical sort of original strings in GNU .mo files should be exploited
+#   to do binary searches and lazy initializations.  Or you might want to use
+#   the undocumented double-hash algorithm for .mo files with hash tables, but
+#   you'll need to study the GNU gettext code to do this.
+#
+# - Support Solaris .mo file formats.  Unfortunately, we've been unable to
+#   find this format documented anywhere.
+
+import os
+import sys
+import struct
+from errno import ENOENT
+
+__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
+           'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
+           'dgettext', 'gettext',
+           ]
+
+_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
+
+
+
+def _expand_lang(locale):
+    # Expand one locale name into its fallback search list, most
+    # specific first: every combination of the territory, codeset and
+    # modifier components that are actually present.
+    from locale import normalize
+    locale = normalize(locale)
+    COMPONENT_CODESET   = 1 << 0
+    COMPONENT_TERRITORY = 1 << 1
+    COMPONENT_MODIFIER  = 1 << 2
+    # split up the locale into its base components
+    mask = 0
+    pos = locale.find('@')
+    if pos >= 0:
+        modifier = locale[pos:]
+        locale = locale[:pos]
+        mask |= COMPONENT_MODIFIER
+    else:
+        modifier = ''
+    pos = locale.find('.')
+    if pos >= 0:
+        codeset = locale[pos:]
+        locale = locale[:pos]
+        mask |= COMPONENT_CODESET
+    else:
+        codeset = ''
+    pos = locale.find('_')
+    if pos >= 0:
+        territory = locale[pos:]
+        locale = locale[:pos]
+        mask |= COMPONENT_TERRITORY
+    else:
+        territory = ''
+    language = locale
+    ret = []
+    # Enumerate all component subsets that exist, then reverse so the
+    # most specific combination comes first.
+    for i in range(mask+1):
+        if not (i & ~mask):  # if all components for this combo exist ...
+            val = language
+            if i & COMPONENT_TERRITORY: val += territory
+            if i & COMPONENT_CODESET:   val += codeset
+            if i & COMPONENT_MODIFIER:  val += modifier
+            ret.append(val)
+    ret.reverse()
+    return ret
+
+
+
+class NullTranslations:
+    """Identity translation; also the base class for real catalogs.
+
+    Subclasses override _parse() to fill _info, _charset and their
+    message catalog from the open file 'fp'."""
+    def __init__(self, fp=None):
+        self._info = {}
+        self._charset = None
+        if fp:
+            self._parse(fp)
+
+    def _parse(self, fp):
+        # The base class reads nothing; all messages map to themselves.
+        pass
+
+    def gettext(self, message):
+        return message
+
+    def ugettext(self, message):
+        return unicode(message)
+
+    def info(self):
+        return self._info
+
+    def charset(self):
+        return self._charset
+
+    def install(self, unicode=0):
+        # Publish _() in the builtin namespace so every module sees it.
+        import __builtin__
+        __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext
+
+
+class GNUTranslations(NullTranslations):
+    # Magic number of .mo files
+    LE_MAGIC = 0x950412de
+    BE_MAGIC = 0xde120495
+
+    def _parse(self, fp):
+        """Override this method to support alternative .mo formats."""
+        # We need to & all 32 bit unsigned integers with 0xffffffff for
+        # portability to 64 bit machines.
+        MASK = 0xffffffff
+        unpack = struct.unpack
+        filename = getattr(fp, 'name', '')
+        # Parse the .mo file header, which consists of 5 little endian 32
+        # bit words.
+        self._catalog = catalog = {}
+        buf = fp.read()
+        buflen = len(buf)
+        # Are we big endian or little endian?
+        magic = unpack('<i', buf[:4])[0] & MASK
+        if magic == self.LE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20])
+            ii = '<ii'
+        elif magic == self.BE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20])
+            ii = '>ii'
+        else:
+            raise IOError(0, 'Bad magic number', filename)
+        # more unsigned ints
+        msgcount &= MASK
+        masteridx &= MASK
+        transidx &= MASK
+        # Now put all messages from the .mo file buffer into the catalog
+        # dictionary.
+        for i in xrange(0, msgcount):
+            # Each index-table entry is a (length, offset) pair.
+            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
+            moff &= MASK
+            mend = moff + (mlen & MASK)
+            tlen, toff = unpack(ii, buf[transidx:transidx+8])
+            toff &= MASK
+            tend = toff + (tlen & MASK)
+            if mend < buflen and tend < buflen:
+                tmsg = buf[toff:tend]
+                catalog[buf[moff:mend]] = tmsg
+            else:
+                raise IOError(0, 'File is corrupt', filename)
+            # See if we're looking at GNU .mo conventions for metadata
+            if mlen == 0 and tmsg.lower().startswith('project-id-version:'):
+                # Catalog description
+                lastk = None
+                for item in tmsg.split('\n'):
+                    item = item.strip()
+                    if not item:
+                        continue
+                    if ':' in item:
+                        k, v = item.split(':', 1)
+                        k = k.strip().lower()
+                        v = v.strip()
+                        self._info[k] = v
+                        lastk = k
+                    elif lastk:
+                        # Continuation line of a folded header value.
+                        self._info[lastk] += '\n' + item
+                    # NOTE(review): 'k'/'v' may be stale from an earlier
+                    # iteration (or unbound if the first item has no ':')
+                    # when this branch runs after a continuation line.
+                    if k == 'content-type':
+                        self._charset = v.split('charset=')[1]
+            # advance to next entry in the seek tables
+            masteridx += 8
+            transidx += 8
+
+    def gettext(self, message):
+        # Untranslated messages fall through unchanged.
+        return self._catalog.get(message, message)
+
+    def ugettext(self, message):
+        tmsg = self._catalog.get(message, message)
+        # NOTE(review): _charset stays None when the catalog carried no
+        # Content-Type header -- confirm callers guarantee it is set.
+        return unicode(tmsg, self._charset)
+
+
+
+# Locate a .mo file using the gettext strategy
+def find(domain, localedir=None, languages=None):
+    """Return the path of the domain's .mo file, or None if not found."""
+    # Get some reasonable defaults for arguments that were not supplied
+    if localedir is None:
+        localedir = _default_localedir
+    if languages is None:
+        # First set environment variable wins; values are ':'-separated.
+        languages = []
+        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
+            val = os.environ.get(envar)
+            if val:
+                languages = val.split(':')
+                break
+        if 'C' not in languages:
+            languages.append('C')
+    # now normalize and expand the languages
+    nelangs = []
+    for lang in languages:
+        for nelang in _expand_lang(lang):
+            if nelang not in nelangs:
+                nelangs.append(nelang)
+    # select a language
+    for lang in nelangs:
+        if lang == 'C':
+            # 'C' means "no translation"; stop searching.
+            break
+        mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
+        if os.path.exists(mofile):
+            return mofile
+    return None
+
+
+
+# a mapping between absolute .mo file path and Translation object
+_translations = {}
+
+def translation(domain, localedir=None, languages=None,
+                class_=None, fallback=0):
+    """Return a translation object for 'domain'.
+
+    Raises IOError when no catalog is found unless 'fallback' is true,
+    in which case a NullTranslations instance is returned.  Parsed
+    catalogs are cached by absolute .mo path."""
+    if class_ is None:
+        class_ = GNUTranslations
+    mofile = find(domain, localedir, languages)
+    if mofile is None:
+        if fallback:
+            return NullTranslations()
+        raise IOError(ENOENT, 'No translation file found for domain', domain)
+    key = os.path.abspath(mofile)
+    # TBD: do we need to worry about the file pointer getting collected?
+    # Avoid opening, reading, and parsing the .mo file after it's been done
+    # once.
+    t = _translations.get(key)
+    if t is None:
+        t = _translations.setdefault(key, class_(open(mofile, 'rb')))
+    return t
+
+
+
+def install(domain, localedir=None, unicode=0):
+    """Install _() for 'domain' in the builtin namespace."""
+    translation(domain, localedir, fallback=1).install(unicode)
+
+
+
+# a mapping b/w domains and locale directories
+_localedirs = {}
+# current global domain, `messages' used for compatibility w/ GNU gettext
+_current_domain = 'messages'
+
+
+def textdomain(domain=None):
+    """Set (when given) and return the current global domain."""
+    global _current_domain
+    if domain is not None:
+        _current_domain = domain
+    return _current_domain
+
+
+def bindtextdomain(domain, localedir=None):
+    """Bind 'domain' to 'localedir' (when given); return the binding."""
+    global _localedirs
+    if localedir is not None:
+        _localedirs[domain] = localedir
+    return _localedirs.get(domain, _default_localedir)
+
+
+def dgettext(domain, message):
+    """Translate 'message' in the given domain."""
+    try:
+        t = translation(domain, _localedirs.get(domain, None))
+    except IOError:
+        # No catalog available -- return the message untranslated.
+        return message
+    return t.gettext(message)
+
+
+def gettext(message):
+    """Translate 'message' in the current global domain."""
+    return dgettext(_current_domain, message)
+
+
+# dcgettext() has been deemed unnecessary and is not implemented.
+
+# James Henstridge's Catalog constructor from GNOME gettext.  Documented usage
+# was:
+#
+#    import gettext
+#    cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
+#    _ = cat.gettext
+#    print _('Hello World')
+
+# The resulting catalog object currently don't support access through a
+# dictionary API, which was supported (but apparently unused) in GNOME
+# gettext.
+
+Catalog = translation
diff --git a/lib-python/2.2/glob.py b/lib-python/2.2/glob.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/glob.py
@@ -0,0 +1,56 @@
+"""Filename globbing utility."""
+
+import os
+import fnmatch
+import re
+
+__all__ = ["glob"]
+
+def glob(pathname):
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la fnmatch.
+
+    """
+    # Fast path: no wildcards at all -- just test for existence.
+    if not has_magic(pathname):
+        if os.path.exists(pathname):
+            return [pathname]
+        else:
+            return []
+    dirname, basename = os.path.split(pathname)
+    if not dirname:
+        return glob1(os.curdir, basename)
+    elif has_magic(dirname):
+        # Recurse to expand wildcards in the directory part first.
+        list = glob(dirname)
+    else:
+        list = [dirname]
+    if not has_magic(basename):
+        # Literal basename: keep only directories where it exists.
+        result = []
+        for dirname in list:
+            if basename or os.path.isdir(dirname):
+                name = os.path.join(dirname, basename)
+                if os.path.exists(name):
+                    result.append(name)
+    else:
+        result = []
+        for dirname in list:
+            sublist = glob1(dirname, basename)
+            for name in sublist:
+                result.append(os.path.join(dirname, name))
+    return result
+
+def glob1(dirname, pattern):
+    # Match 'pattern' against the entries of a single directory;
+    # unreadable directories yield an empty list.
+    if not dirname: dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except os.error:
+        return []
+    # Dotfiles only match when the pattern itself starts with '.'.
+    if pattern[0]!='.':
+        names=filter(lambda x: x[0]!='.',names)
+    return fnmatch.filter(names,pattern)
+
+
+magic_check = re.compile('[*?[]')
+
+def has_magic(s):
+    # True when 's' contains any glob wildcard character (*, ? or [).
+    return magic_check.search(s) is not None
diff --git a/lib-python/2.2/gopherlib.py b/lib-python/2.2/gopherlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/gopherlib.py
@@ -0,0 +1,205 @@
+"""Gopher protocol client interface."""
+
+__all__ = ["send_selector","send_query"]
+
+# Default selector, host and port
+DEF_SELECTOR = '1/'
+DEF_HOST     = 'gopher.micro.umn.edu'
+DEF_PORT     = 70
+
+# Recognized file types (one-character gopher item type codes)
+A_TEXT       = '0'
+A_MENU       = '1'
+A_CSO        = '2'
+A_ERROR      = '3'
+A_MACBINHEX  = '4'
+A_PCBINHEX   = '5'
+A_UUENCODED  = '6'
+A_INDEX      = '7'
+A_TELNET     = '8'
+A_BINARY     = '9'
+A_DUPLICATE  = '+'
+A_SOUND      = 's'
+A_EVENT      = 'e'
+A_CALENDAR   = 'c'
+A_HTML       = 'h'
+A_TN3270     = 'T'
+A_MIME       = 'M'
+A_IMAGE      = 'I'
+A_WHOIS      = 'w'
+A_QUERY      = 'q'
+A_GIF        = 'g'
+# NOTE(review): A_HTML is assigned twice ('h' both times) and A_WWW
+# reuses the 'w' code already bound to A_WHOIS; type_to_name() keeps
+# whichever name its dir()-order iteration stores last for a code.
+A_HTML       = 'h'          # HTML file
+A_WWW        = 'w'          # WWW address
+A_PLUS_IMAGE = ':'
+A_PLUS_MOVIE = ';'
+A_PLUS_SOUND = '<'
+
+
+_names = dir()
+_type_to_name_map = {}
+def type_to_name(gtype):
+    """Map all file types to strings; unknown types become TYPE='x'."""
+    global _type_to_name_map
+    # Build the code -> name map lazily from the A_* constants above.
+    if _type_to_name_map=={}:
+        for name in _names:
+            if name[:2] == 'A_':
+                _type_to_name_map[eval(name)] = name[2:]
+    if _type_to_name_map.has_key(gtype):
+        return _type_to_name_map[gtype]
+    return 'TYPE=' + `gtype`
+
+# Names for characters and strings
+CRLF = '\r\n'
+TAB = '\t'
+
+def send_selector(selector, host, port = 0):
+    """Send a selector to a given host and port, return a file with the reply."""
+    import socket
+    if not port:
+        # A 'host:port' string is accepted in the host argument.
+        i = host.find(':')
+        if i >= 0:
+            host, port = host[:i], int(host[i+1:])
+    if not port:
+        port = DEF_PORT
+    elif type(port) == type(''):
+        port = int(port)
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((host, port))
+    s.sendall(selector + CRLF)
+    # Half-close the sending side; the reply is read via the file object.
+    s.shutdown(1)
+    return s.makefile('rb')
+
+def send_query(selector, query, host, port = 0):
+    """Send a selector and a query string."""
+    return send_selector(selector + '\t' + query, host, port)
+
+def path_to_selector(path):
+    """Takes a path as returned by urlparse and returns the appropriate selector."""
+    if path=="/":
+        return "/"
+    else:
+        return path[2:] # Cuts initial slash and data type identifier
+
+def path_to_datatype_name(path):
+    """Takes a path as returned by urlparse and maps it to a string.
+    See section 3.4 of RFC 1738 for details."""
+    if path=="/":
+        # No way to tell, although "INDEX" is likely
+        return "TYPE='unknown'"
+    else:
+        # path[1] is the one-character type code after the initial slash.
+        return type_to_name(path[1])
+
+# The following functions interpret the data returned by the gopher
+# server according to the expected type, e.g. textfile or directory
+
+def get_directory(f):
+    """Get a directory in the form of a list of entries."""
+    list = []
+    while 1:
+        line = f.readline()
+        if not line:
+            print '(Unexpected EOF from server)'
+            break
+        # Strip the CRLF (or a lone CR/LF) line terminator.
+        if line[-2:] == CRLF:
+            line = line[:-2]
+        elif line[-1:] in CRLF:
+            line = line[:-1]
+        if line == '.':
+            # A lone '.' terminates the listing.
+            break
+        if not line:
+            print '(Empty line from server)'
+            continue
+        gtype = line[0]
+        parts = line[1:].split(TAB)
+        if len(parts) < 4:
+            print '(Bad line from server:', `line`, ')'
+            continue
+        if len(parts) > 4:
+            # A trailing lone '+' is normal (gopher+); anything else is odd.
+            if parts[4:] != ['+']:
+                print '(Extra info from server:',
+                print parts[4:], ')'
+        else:
+            parts.append('')
+        # Prepend the one-character type code to the tab-separated fields.
+        parts.insert(0, gtype)
+        list.append(parts)
+    return list
+
+def get_textfile(f):
+    """Get a text file as a list of lines, with trailing CRLF stripped."""
+    list = []
+    get_alt_textfile(f, list.append)
+    return list
+
+def get_alt_textfile(f, func):
+    """Get a text file and pass each line to a function, with trailing CRLF stripped."""
+    while 1:
+        line = f.readline()
+        if not line:
+            print '(Unexpected EOF from server)'
+            break
+        if line[-2:] == CRLF:
+            line = line[:-2]
+        elif line[-1:] in CRLF:
+            line = line[:-1]
+        if line == '.':
+            # A lone '.' marks the end of the text body.
+            break
+        if line[:2] == '..':
+            # Undo the protocol's dot-quoting of lines starting with '.'.
+            line = line[1:]
+        func(line)
+
+def get_binary(f):
+    """Get a binary file as one solid data block."""
+    data = f.read()
+    return data
+
+def get_alt_binary(f, func, blocksize):
+    """Get a binary file and pass each block to a function."""
+    while 1:
+        data = f.read(blocksize)
+        if not data:
+            break
+        func(data)
+
+def test():
+    """Trivial test program."""
+    import sys
+    import getopt
+    opts, args = getopt.getopt(sys.argv[1:], '')
+    selector = DEF_SELECTOR
+    type = selector[0]
+    host = DEF_HOST
+    # Positional arguments: [host [type[selector] | type [selector]] [query]]
+    if args:
+        host = args[0]
+        args = args[1:]
+    if args:
+        type = args[0]
+        args = args[1:]
+        if len(type) > 1:
+            # A multi-character 'type' doubles as the selector itself.
+            type, selector = type[0], type
+        else:
+            selector = ''
+            if args:
+                selector = args[0]
+                args = args[1:]
+        query = ''
+        if args:
+            query = args[0]
+            args = args[1:]
+    if type == A_INDEX:
+        f = send_query(selector, query, host)
+    else:
+        f = send_selector(selector, host)
+    # Render the reply according to the requested item type.
+    if type == A_TEXT:
+        list = get_textfile(f)
+        for item in list: print item
+    elif type in (A_MENU, A_INDEX):
+        list = get_directory(f)
+        for item in list: print item
+    else:
+        data = get_binary(f)
+        print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
+
+# Run the test when run as script
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/gzip.py b/lib-python/2.2/gzip.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/gzip.py
@@ -0,0 +1,390 @@
+"""Functions that read and write gzipped files.
+
+The user of the file doesn't have to worry about the compression,
+but random access is not allowed."""
+
+# based on Andrew Kuchling's minigzip.py distributed with the zlib module
+
+import struct, sys, time
+import zlib
+import __builtin__
+
+__all__ = ["GzipFile","open"]
+
# Gzip header flag bits (RFC 1952): FTEXT marks probably-ASCII data,
# FHCRC a 16-bit header CRC, FEXTRA an extra field, FNAME an original
# file name, FCOMMENT a comment field.
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16

# Internal values for GzipFile.mode.
READ, WRITE = 1, 2
+
def write32(output, value):
    """Write value to output as a 32-bit little-endian signed integer."""
    packed = struct.pack("<l", value)
    output.write(packed)
+
def write32u(output, value):
    """Write value to output as a 32-bit little-endian UNSIGNED integer.

    Negative values (e.g. a signed CRC from zlib.crc32) are wrapped
    modulo 2**32 first, since struct's "L" format rejects them.
    """
    if value < 0:
        value = value + 0x100000000L
    output.write(struct.pack("<L", value))
+
def read32(input):
    """Read 4 bytes from input and return them as a little-endian signed int."""
    data = input.read(4)
    return struct.unpack("<l", data)[0]
+
def open(filename, mode="rb", compresslevel=9):
    """Open a gzip-compressed file and return a GzipFile object.

    Mirrors the builtin open(); mode defaults to "rb" and
    compresslevel to 9 (best compression).
    """
    gz = GzipFile(filename, mode, compresslevel)
    return gz
+
class GzipFile:
    """File-like object that transparently compresses or decompresses
    gzip data on an underlying file object.

    Supports sequential reading and writing; random access is only
    emulated (seek/rewind re-read from the beginning of the stream).
    """

    # Set at class level so __del__ is safe even if __init__ raised
    # before assigning it; holds a file object we opened ourselves
    # (and must therefore close ourselves).
    myfileobj = None

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None):
        """Open a gzip stream.

        filename:      file name; if None it is taken from fileobj.name
                       when available, else ''.
        mode:          'r'/'rb' for reading, 'w'/'wb'/'a'/'ab' for
                       writing; defaults to fileobj.mode or 'rb'.
        compresslevel: zlib compression level (write modes only).
        fileobj:       existing file object to wrap; if None, filename
                       is opened with the builtin open().
        """
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = 1
            # Decompressed-but-not-yet-returned data and its length.
            self.extrabuf = ""
            self.extrasize = 0
            self.filename = filename

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Raw deflate stream (negative wbits suppresses the zlib
            # header); the gzip header/trailer are written by this class.
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise ValueError, "Mode " + mode + " not supported"

        self.fileobj = fileobj
        # Position in the *uncompressed* stream, as reported by tell().
        self.offset = 0

        if self.mode == WRITE:
            self._write_gzip_header()

    def __repr__(self):
        """Return '<gzip <underlying file repr> 0x...>'."""
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'

    def _init_write(self, filename):
        # Initialize write-mode state: remembered filename (forced to
        # end in '.gz'), running CRC and uncompressed size for the
        # trailer, plus buffer fields.
        if filename[-3:] != '.gz':
            filename = filename + '.gz'
        self.filename = filename
        self.crc = zlib.crc32("")
        self.size = 0
        self.writebuf = []
        self.bufsize = 0

    def _write_gzip_header(self):
        # Emit an RFC 1952 member header on the underlying file.
        self.fileobj.write('\037\213')             # magic header
        self.fileobj.write('\010')                 # compression method
        # Original file name, without the '.gz', stored when non-empty.
        fname = self.filename[:-3]
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags))
        write32u(self.fileobj, long(time.time()))  # modification time
        self.fileobj.write('\002')                 # extra flags (XFL)
        self.fileobj.write('\377')                 # OS byte: unknown
        if fname:
            self.fileobj.write(fname + '\000')     # null-terminated name

    def _init_read(self):
        # Reset the running CRC and size for a new member.
        self.crc = zlib.crc32("")
        self.size = 0

    def _read_gzip_header(self):
        # Parse and discard one member header; raises IOError if the
        # magic number or compression method is wrong.
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord( self.fileobj.read(1) )
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord( self.fileobj.read(1) )
        # modtime = self.fileobj.read(4)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(6)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen=ord(self.fileobj.read(1))
            xlen=xlen+256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while (1):
                s=self.fileobj.read(1)
                if not s or s=='\000': break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while (1):
                s=self.fileobj.read(1)
                if not s or s=='\000': break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC


    def write(self,data):
        """Compress data and write it to the underlying file."""
        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        if len(data) > 0:
            # Track uncompressed size and CRC for the trailer.
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc)
            self.fileobj.write( self.compress.compress(data) )
            self.offset += len(data)

    def read(self, size=-1):
        """Read up to size uncompressed bytes (all remaining if size < 0)."""
        if self.extrasize <= 0 and self.fileobj is None:
            return ''

        # Decompress in exponentially growing chunks to limit the
        # number of passes needed for large requests.
        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                while 1:
                    self._read(readsize)
                    readsize = readsize * 2
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = readsize * 2
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize

        # Serve the request from the buffered decompressed data.
        chunk = self.extrabuf[:size]
        self.extrabuf = self.extrabuf[size:]
        self.extrasize = self.extrasize - size

        self.offset += size
        return chunk

    def _unread(self, buf):
        # Push already-returned data back into the internal buffer
        # (used by readline() when it reads past the newline).
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)

    def _read(self, size=1024):
        # Decompress about `size` compressed bytes into extrabuf;
        # raises EOFError when the stream is exhausted.
        if self.fileobj is None: raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            # NOTE: this check requires the underlying file to be
            # seekable.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError, "Reached EOF"
            else:
                self.fileobj.seek( pos ) # Return to original position

            self._init_read()
            self._read_gzip_header()
            # Raw deflate data follows the header (negative wbits).
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = 0

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.

        if buf == "":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data( uncompress )
            raise EOFError, 'Reached EOF'

        uncompress = self.decompress.decompress(buf)
        self._add_read_data( uncompress )

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = 1

    def _add_read_data(self, data):
        # Account for freshly decompressed data: running CRC, the
        # internal buffer, and both size counters.
        self.crc = zlib.crc32(data, self.crc)
        self.extrabuf = self.extrabuf + data
        self.extrasize = self.extrasize + len(data)
        self.size = self.size + len(data)

    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check the that the computed CRC and size of the
        # uncompressed data matches the stored values.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)
        # Compare modulo 2**32: zlib.crc32 may return a signed value.
        if crc32%0x100000000L != self.crc%0x100000000L:
            raise ValueError, "CRC check failed"
        elif isize != self.size:
            raise ValueError, "Incorrect length of data produced"

    def close(self):
        """Finish the stream and detach the underlying file.

        In write mode this flushes the compressor and appends the gzip
        trailer (CRC32 and uncompressed size); the wrapped file is only
        closed if this object opened it itself.
        """
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32(self.fileobj, self.crc)
            write32(self.fileobj, self.size)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            self.myfileobj.close()
            self.myfileobj = None

    def __del__(self):
        # Close on garbage collection; the AttributeError guard covers
        # instances whose __init__ failed before the attributes existed.
        try:
            if (self.myfileobj is None and
                self.fileobj is None):
                return
        except AttributeError:
            return
        self.close()

    def flush(self):
        """Flush the underlying file object."""
        self.fileobj.flush()

    def isatty(self):
        """Return 0: a gzip stream is never an interactive terminal."""
        return 0

    def tell(self):
        """Return the current position in the uncompressed stream."""
        return self.offset

    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = 1
        self.extrabuf = ""
        self.extrasize = 0
        self.offset = 0

    def seek(self, offset):
        """Seek to an absolute offset in the uncompressed stream.

        Write mode: forward seeks only, emulated by writing zero bytes.
        Read mode: backward seeks rewind and re-read from the start.
        """
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            count = offset - self.offset
            for i in range(count/1024):
                self.write(1024*'\0')
            self.write((count%1024)*'\0')
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            count = offset - self.offset
            for i in range(count/1024): self.read(1024)
            self.read(count % 1024)

    def readline(self, size=-1):
        """Read one line, up to size bytes, from the uncompressed stream."""
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(100, size)    # Read from the file in small chunks
        while 1:
            if size == 0:
                return "".join(bufs) # Return resulting line

            c = self.read(readsize)
            i = c.find('\n')
            # NOTE(review): size was defaulted to sys.maxint above, so
            # it is always an int here and this test is always true.
            if size is not None:
                # We set i=size to break out of the loop under two
                # conditions: 1) there's no newline, and the chunk is
                # larger than size, or 2) there is a newline, but the
                # resulting line would be longer than 'size'.
                if i==-1 and len(c) > size: i=size-1
                elif size <= i: i = size -1

            if i >= 0 or c == '':
                bufs.append(c[:i+1])    # Add portion of last chunk
                self._unread(c[i+1:])   # Push back rest of chunk
                return ''.join(bufs)    # Return resulting line

            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)

    def readlines(self, sizehint=0):
        """Read lines until about sizehint bytes (all lines if <= 0)."""
        # Negative numbers result in reading all the lines
        if sizehint <= 0: sizehint = sys.maxint
        L = []
        while sizehint > 0:
            line = self.readline()
            if line == "": break
            L.append( line )
            sizehint = sizehint - len(line)

        return L

    def writelines(self, L):
        """Write a sequence of strings to the file (no newlines are added)."""
        for line in L:
            self.write(line)
+
+
+def _test():
+    # Act like gzip; with -d, act like gunzip.
+    # The input file is not deleted, however, nor are any other gzip
+    # options or features supported.
+    args = sys.argv[1:]
+    decompress = args and args[0] == "-d"
+    if decompress:
+        args = args[1:]
+    if not args:
+        args = ["-"]
+    for arg in args:
+        if decompress:
+            if arg == "-":
+                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
+                g = sys.stdout
+            else:
+                if arg[-3:] != ".gz":
+                    print "filename doesn't end in .gz:", `arg`
+                    continue
+                f = open(arg, "rb")
+                g = __builtin__.open(arg[:-3], "wb")
+        else:
+            if arg == "-":
+                f = sys.stdin
+                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
+            else:
+                f = __builtin__.open(arg, "rb")
+                g = open(arg + ".gz", "wb")
+        while 1:
+            chunk = f.read(1024)
+            if not chunk:
+                break
+            g.write(chunk)
+        if g is not sys.stdout:
+            g.close()
+        if f is not sys.stdin:
+            f.close()
+
# Command-line interface: emulate gzip/gunzip when run as a script.
if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/hmac.py b/lib-python/2.2/hmac.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/hmac.py
@@ -0,0 +1,99 @@
+"""HMAC (Keyed-Hashing for Message Authentication) Python module.
+
+Implements the HMAC algorithm as described by RFC 2104.
+"""
+
+import string
+
+def _strxor(s1, s2):
+    """Utility method. XOR the two strings s1 and s2 (must have same length).
+    """
+    return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
+
+# The size of the digests returned by HMAC depends on the underlying
+# hashing module used.
+digest_size = None
+
class HMAC:
    """RFC2104 HMAC class.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key:       key for the keyed hash object.
        msg:       initial input for the hash, if provided.
        digestmod: a module supporting PEP 247; defaults to the md5 module.
        """
        if digestmod is None:
            import md5
            digestmod = md5

        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size

        # RFC 2104 block size; 64 bytes suits MD5 and SHA-1.
        blocksize = 64

        # Keys longer than one block are first hashed down; every key
        # is then zero-padded out to exactly one block.
        if len(key) > blocksize:
            key = digestmod.new(key).digest()
        key = key + chr(0) * (blocksize - len(key))

        # Prime the two hash streams with the key XORed against the
        # standard outer (0x5C) and inner (0x36) pad bytes.
        self.outer.update(_strxor(key, "\x5C" * blocksize))
        self.inner.update(_strxor(key, "\x36" * blocksize))
        if msg:
            self.update(msg)

    def update(self, msg):
        """Feed the string msg into this hashing object."""
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to the copy won't affect the original object.
        """
        clone = HMAC("")
        clone.digestmod = self.digestmod
        clone.inner = self.inner.copy()
        clone.outer = self.outer.copy()
        return clone

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        final = self.outer.copy()
        final.update(self.inner.digest())
        return final.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        pieces = []
        for ch in self.digest():
            pieces.append(string.zfill(hex(ord(ch))[2:], 2))
        return "".join(pieces)
+
def new(key, msg = None, digestmod = None):
    """Create a new HMAC hashing object and return it.

    key: The starting key for the hash.
    msg: if available, will immediately be hashed into the object's starting
    state.

    You can now feed arbitrary strings into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    method.
    """
    hmac_obj = HMAC(key, msg, digestmod)
    return hmac_obj
diff --git a/lib-python/2.2/hotshot/__init__.py b/lib-python/2.2/hotshot/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/hotshot/__init__.py
@@ -0,0 +1,41 @@
+"""High-perfomance logging profiler, mostly written in C."""
+
+import _hotshot
+
+from _hotshot import ProfilerError
+
+
class Profile:
    """Thin Python wrapper around the _hotshot C profiler."""

    def __init__(self, logfn, lineevents=0, linetimings=1):
        # Normalize the flags to exactly 0 or 1; line timings are only
        # meaningful when line events are being recorded at all.
        self.lineevents = lineevents and 1 or 0
        self.linetimings = (linetimings and lineevents) and 1 or 0
        self._prof = _hotshot.profiler(
            logfn, self.lineevents, self.linetimings)

    def close(self):
        """Close the profile log file."""
        self._prof.close()

    def start(self):
        """Start collecting profile data."""
        self._prof.start()

    def stop(self):
        """Stop collecting profile data."""
        self._prof.stop()

    def addinfo(self, key, value):
        """Record an arbitrary (key, value) pair in the profile log."""
        self._prof.addinfo(key, value)

    # These methods offer the same interface as the profile.Profile class,
    # but delegate most of the work to the C implementation underneath.

    def run(self, cmd):
        """Profile the statement cmd using __main__'s namespace."""
        import __main__
        main_dict = __main__.__dict__
        return self.runctx(cmd, main_dict, main_dict)

    def runctx(self, cmd, globals, locals):
        """Profile cmd in the given namespaces; return self."""
        code = compile(cmd, "<string>", "exec")
        self._prof.runcode(code, globals, locals)
        return self

    def runcall(self, func, *args, **kw):
        """Profile a single call of func with the given arguments."""
        return self._prof.runcall(func, args, kw)
diff --git a/lib-python/2.2/hotshot/log.py b/lib-python/2.2/hotshot/log.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/hotshot/log.py
@@ -0,0 +1,194 @@
+import _hotshot
+import os.path
+import parser
+import symbol
+import sys
+
+from _hotshot import \
+     WHAT_ENTER, \
+     WHAT_EXIT, \
+     WHAT_LINENO, \
+     WHAT_DEFINE_FILE, \
+     WHAT_DEFINE_FUNC, \
+     WHAT_ADD_INFO
+
+
+__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
+
+
+ENTER = WHAT_ENTER
+EXIT  = WHAT_EXIT
+LINE  = WHAT_LINENO
+
+
+try:
+    StopIteration
+except NameError:
+    StopIteration = IndexError
+
+
class LogReader:
    """Iterator over the event records in a HotShot profile log.

    Yields (what, (filename, lineno, funcname), tdelta) tuples for
    ENTER, EXIT and LINE events; bookkeeping records (file/function
    definitions, extra info) are consumed internally.
    """

    def __init__(self, logfn):
        # fileno -> filename
        self._filemap = {}
        # (fileno, lineno) -> filename, funcname
        self._funcmap = {}

        self._reader = _hotshot.logreader(logfn)
        # Bound methods cached as attributes to avoid per-event lookups.
        self._nextitem = self._reader.next
        self._info = self._reader.info
        if self._info.has_key('current-directory'):
            self.cwd = self._info['current-directory']
        else:
            self.cwd = None
        # Stack of frames entered but not yet exited.
        self._stack = []
        self._append = self._stack.append
        self._pop = self._stack.pop

    def addinfo(self, key, value):
        """This method is called for each additional ADD_INFO record.

        This can be overridden by applications that want to receive
        these events.  The default implementation does not need to be
        called by alternate implementations.

        The initial set of ADD_INFO records do not pass through this
        mechanism; this is only needed to receive notification when
        new values are added.  Subclasses can inspect self._info after
        calling LogReader.__init__().
        """
        pass

    def get_filename(self, fileno):
        """Return the filename registered for fileno; ValueError if unknown."""
        try:
            return self._filemap[fileno]
        except KeyError:
            raise ValueError, "unknown fileno"

    def get_filenames(self):
        """Return a list of all filenames seen so far."""
        return self._filemap.values()

    def get_fileno(self, filename):
        """Return the fileno for a (normalized) filename; ValueError if unknown."""
        filename = os.path.normcase(os.path.normpath(filename))
        for fileno, name in self._filemap.items():
            if name == filename:
                return fileno
        raise ValueError, "unknown filename"

    def get_funcname(self, fileno, lineno):
        """Return (filename, funcname) for the function defined at (fileno, lineno)."""
        try:
            return self._funcmap[(fileno, lineno)]
        except KeyError:
            raise ValueError, "unknown function location"

    # Iteration support:
    # This adds an optional (& ignored) parameter to next() so that the
    # same bound method can be used as the __getitem__() method -- this
    # avoids using an additional method call which kills the performance.

    def next(self, index=0):
        """Return the next profile event as (what, where, tdelta)."""
        while 1:
            try:
                what, tdelta, fileno, lineno = self._nextitem()
            except TypeError:
                # logreader().next() returns None at the end
                self._reader.close()
                raise StopIteration()

            # handle the most common cases first

            if what == WHAT_ENTER:
                filename, funcname = self._decode_location(fileno, lineno)
                self._append((filename, funcname, lineno))
                return what, (filename, lineno, funcname), tdelta

            if what == WHAT_EXIT:
                filename, funcname, lineno = self._pop()
                return what, (filename, lineno, funcname), tdelta

            if what == WHAT_LINENO:
                filename, funcname, firstlineno = self._stack[-1]
                return what, (filename, lineno, funcname), tdelta

            # Bookkeeping records below: update the maps and keep
            # looping; for these, tdelta carries a string payload.
            if what == WHAT_DEFINE_FILE:
                filename = os.path.normcase(os.path.normpath(tdelta))
                self._filemap[fileno] = filename
            elif what == WHAT_DEFINE_FUNC:
                filename = self._filemap[fileno]
                self._funcmap[(fileno, lineno)] = (filename, tdelta)
            elif what == WHAT_ADD_INFO:
                # value already loaded into self.info; call the
                # overridable addinfo() handler so higher-level code
                # can pick up the new value
                if tdelta == 'current-directory':
                    self.cwd = lineno
                self.addinfo(tdelta, lineno)
            else:
                raise ValueError, "unknown event type"

    if sys.version < "2.2":
        # Don't add this for newer Python versions; we only want iteration
        # support, not general sequence support.
        __getitem__ = next
    else:
        def __iter__(self):
            return self

    #
    #  helpers
    #

    def _decode_location(self, fileno, lineno):
        # Map (fileno, lineno) to (filename, funcname), falling back to
        # scanning the source file when the log lacks a DEFINE_FUNC
        # record for this location.
        try:
            return self._funcmap[(fileno, lineno)]
        except KeyError:
            #
            # This should only be needed when the log file does not
            # contain all the DEFINE_FUNC records needed to allow the
            # function name to be retrieved from the log file.
            #
            # NOTE(review): _loadfile() returns 1 on *failure*, so this
            # clears the names only when the source could not be
            # located; the following try may still overwrite them --
            # confirm this is the intended control flow.
            if self._loadfile(fileno):
                filename = funcname = None
            try:
                filename, funcname = self._funcmap[(fileno, lineno)]
            except KeyError:
                filename = self._filemap.get(fileno)
                funcname = None
                self._funcmap[(fileno, lineno)] = (filename, funcname)
        return filename, funcname

    def _loadfile(self, fileno):
        # Parse the source file for fileno and record every def/lambda
        # location in self._funcmap.  Returns 1 when the file cannot be
        # identified; returns None otherwise (including I/O failure).
        try:
            filename = self._filemap[fileno]
        except KeyError:
            print "Could not identify fileId", fileno
            return 1
        if filename is None:
            return 1
        absname = os.path.normcase(os.path.join(self.cwd, filename))

        try:
            fp = open(absname)
        except IOError:
            return
        st = parser.suite(fp.read())
        fp.close()

        # Scan the tree looking for def and lambda nodes, filling in
        # self._funcmap with all the available information.
        funcdef = symbol.funcdef
        lambdef = symbol.lambdef

        stack = [st.totuple(1)]

        while stack:
            tree = stack.pop()
            try:
                sym = tree[0]
            except (IndexError, TypeError):
                continue
            if sym == funcdef:
                self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
            elif sym == lambdef:
                self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
            stack.extend(list(tree[1:]))
diff --git a/lib-python/2.2/hotshot/stats.py b/lib-python/2.2/hotshot/stats.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/hotshot/stats.py
@@ -0,0 +1,93 @@
+"""Statistics analyzer for HotShot."""
+
+import profile
+import pstats
+
+import hotshot.log
+
+from hotshot.log import ENTER, EXIT
+
+
def load(filename):
    """Parse a HotShot log file and return a pstats-compatible Stats object."""
    loader = StatsLoader(filename)
    return loader.load()
+
+
class StatsLoader:
    """Replays a HotShot log through a profile.Profile to build pstats data."""

    def __init__(self, logfn):
        self._logfn = logfn
        # Cache of FakeCode objects keyed by (filename, lineno, funcname).
        self._code = {}
        # Stack of FakeFrame objects currently "executing".
        self._stack = []
        self.pop_frame = self._stack.pop

    def load(self):
        """Read the log file and return a pstats.Stats object."""
        # The timer selected by the profiler should never be used, so
        # make sure it doesn't work:
        prof = Profile()
        prof.get_time = _brokentimer
        reader = hotshot.log.LogReader(self._logfn)
        taccum = 0
        for what, (filename, lineno, funcname), tdelta in reader:
            if tdelta > 0:
                taccum += tdelta

            # taccum is in microseconds; multiply by 1e-6 so the
            # profile/pstats machinery sees seconds, giving the numbers
            # some basis in reality (ignoring calibration issues).

            if what == ENTER:
                frame = self.new_frame(filename, lineno, funcname)
                prof.trace_dispatch_call(frame, taccum * .000001)
                taccum = 0

            elif what == EXIT:
                frame = self.pop_frame()
                prof.trace_dispatch_return(frame, taccum * .000001)
                taccum = 0

            # no further work for line events

        assert not self._stack
        return pstats.Stats(prof)

    def new_frame(self, *args):
        # args is (filename, firstlineno, funcname); code objects are
        # cached since we don't need to create new ones every time.
        code = self._code.get(args)
        if code is None:
            code = FakeCode(*args)
            self._code[args] = code
        # Frame objects are created fresh, since the back pointer
        # varies considerably.
        if self._stack:
            back = self._stack[-1]
        else:
            back = None
        frame = FakeFrame(code, back)
        self._stack.append(frame)
        return frame
+
+
class Profile(profile.Profile):
    # Subclass only to disable simulate_cmd_complete(): the dispatch
    # calls are replayed from a log, so there is no live command whose
    # completion needs simulating.
    def simulate_cmd_complete(self):
        pass
+
+
class FakeCode:
    """Minimal stand-in for a code object, for use by profile/pstats."""

    def __init__(self, filename, firstlineno, funcname):
        self.co_filename = filename
        self.co_firstlineno = firstlineno
        # profile reads co_name; __name__ is set to the same value.
        self.__name__ = funcname
        self.co_name = funcname
+
+
class FakeFrame:
    """Minimal stand-in for a frame object: a code object plus a back link."""

    def __init__(self, code, back):
        self.f_code = code
        self.f_back = back
+
+
+def _brokentimer():
+    raise RuntimeError, "this timer should not be called"
diff --git a/lib-python/2.2/htmlentitydefs.py b/lib-python/2.2/htmlentitydefs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/htmlentitydefs.py
@@ -0,0 +1,257 @@
"""HTML character entity references."""

# Maps each standard HTML entity name to its replacement text.
# Two value conventions are mixed in this table:
#   * entities whose code point fits in ISO-8859-1 (Latin-1) map to the
#     actual character, written as an octal escape (e.g. '\306');
#   * entities outside Latin-1 map to a numeric character reference
#     string (e.g. '&#913;'), left for the application to interpret.
entitydefs = {
    'AElig':    '\306',         # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
    'Aacute':   '\301',         # latin capital letter A with acute, U+00C1 ISOlat1
    'Acirc':    '\302',         # latin capital letter A with circumflex, U+00C2 ISOlat1
    'Agrave':   '\300',         # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
    'Alpha':    '&#913;',       # greek capital letter alpha, U+0391
    'Aring':    '\305',         # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
    'Atilde':   '\303',         # latin capital letter A with tilde, U+00C3 ISOlat1
    'Auml':     '\304',         # latin capital letter A with diaeresis, U+00C4 ISOlat1
    'Beta':     '&#914;',       # greek capital letter beta, U+0392
    'Ccedil':   '\307',         # latin capital letter C with cedilla, U+00C7 ISOlat1
    'Chi':      '&#935;',       # greek capital letter chi, U+03A7
    'Dagger':   '&#8225;',      # double dagger, U+2021 ISOpub
    'Delta':    '&#916;',       # greek capital letter delta, U+0394 ISOgrk3
    'ETH':      '\320',         # latin capital letter ETH, U+00D0 ISOlat1
    'Eacute':   '\311',         # latin capital letter E with acute, U+00C9 ISOlat1
    'Ecirc':    '\312',         # latin capital letter E with circumflex, U+00CA ISOlat1
    'Egrave':   '\310',         # latin capital letter E with grave, U+00C8 ISOlat1
    'Epsilon':  '&#917;',       # greek capital letter epsilon, U+0395
    'Eta':      '&#919;',       # greek capital letter eta, U+0397
    'Euml':     '\313',         # latin capital letter E with diaeresis, U+00CB ISOlat1
    'Gamma':    '&#915;',       # greek capital letter gamma, U+0393 ISOgrk3
    'Iacute':   '\315',         # latin capital letter I with acute, U+00CD ISOlat1
    'Icirc':    '\316',         # latin capital letter I with circumflex, U+00CE ISOlat1
    'Igrave':   '\314',         # latin capital letter I with grave, U+00CC ISOlat1
    'Iota':     '&#921;',       # greek capital letter iota, U+0399
    'Iuml':     '\317',         # latin capital letter I with diaeresis, U+00CF ISOlat1
    'Kappa':    '&#922;',       # greek capital letter kappa, U+039A
    'Lambda':   '&#923;',       # greek capital letter lambda, U+039B ISOgrk3
    'Mu':       '&#924;',       # greek capital letter mu, U+039C
    'Ntilde':   '\321',         # latin capital letter N with tilde, U+00D1 ISOlat1
    'Nu':       '&#925;',       # greek capital letter nu, U+039D
    'OElig':    '&#338;',       # latin capital ligature OE, U+0152 ISOlat2
    'Oacute':   '\323',         # latin capital letter O with acute, U+00D3 ISOlat1
    'Ocirc':    '\324',         # latin capital letter O with circumflex, U+00D4 ISOlat1
    'Ograve':   '\322',         # latin capital letter O with grave, U+00D2 ISOlat1
    'Omega':    '&#937;',       # greek capital letter omega, U+03A9 ISOgrk3
    'Omicron':  '&#927;',       # greek capital letter omicron, U+039F
    'Oslash':   '\330',         # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
    'Otilde':   '\325',         # latin capital letter O with tilde, U+00D5 ISOlat1
    'Ouml':     '\326',         # latin capital letter O with diaeresis, U+00D6 ISOlat1
    'Phi':      '&#934;',       # greek capital letter phi, U+03A6 ISOgrk3
    'Pi':       '&#928;',       # greek capital letter pi, U+03A0 ISOgrk3
    'Prime':    '&#8243;',      # double prime = seconds = inches, U+2033 ISOtech
    'Psi':      '&#936;',       # greek capital letter psi, U+03A8 ISOgrk3
    'Rho':      '&#929;',       # greek capital letter rho, U+03A1
    'Scaron':   '&#352;',       # latin capital letter S with caron, U+0160 ISOlat2
    'Sigma':    '&#931;',       # greek capital letter sigma, U+03A3 ISOgrk3
    'THORN':    '\336',         # latin capital letter THORN, U+00DE ISOlat1
    'Tau':      '&#932;',       # greek capital letter tau, U+03A4
    'Theta':    '&#920;',       # greek capital letter theta, U+0398 ISOgrk3
    'Uacute':   '\332',         # latin capital letter U with acute, U+00DA ISOlat1
    'Ucirc':    '\333',         # latin capital letter U with circumflex, U+00DB ISOlat1
    'Ugrave':   '\331',         # latin capital letter U with grave, U+00D9 ISOlat1
    'Upsilon':  '&#933;',       # greek capital letter upsilon, U+03A5 ISOgrk3
    'Uuml':     '\334',         # latin capital letter U with diaeresis, U+00DC ISOlat1
    'Xi':       '&#926;',       # greek capital letter xi, U+039E ISOgrk3
    'Yacute':   '\335',         # latin capital letter Y with acute, U+00DD ISOlat1
    'Yuml':     '&#376;',       # latin capital letter Y with diaeresis, U+0178 ISOlat2
    'Zeta':     '&#918;',       # greek capital letter zeta, U+0396
    'aacute':   '\341',         # latin small letter a with acute, U+00E1 ISOlat1
    'acirc':    '\342',         # latin small letter a with circumflex, U+00E2 ISOlat1
    'acute':    '\264',         # acute accent = spacing acute, U+00B4 ISOdia
    'aelig':    '\346',         # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
    'agrave':   '\340',         # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
    'alefsym':  '&#8501;',      # alef symbol = first transfinite cardinal, U+2135 NEW
    'alpha':    '&#945;',       # greek small letter alpha, U+03B1 ISOgrk3
    'amp':      '\46',          # ampersand, U+0026 ISOnum
    'and':      '&#8743;',      # logical and = wedge, U+2227 ISOtech
    'ang':      '&#8736;',      # angle, U+2220 ISOamso
    'aring':    '\345',         # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
    'asymp':    '&#8776;',      # almost equal to = asymptotic to, U+2248 ISOamsr
    'atilde':   '\343',         # latin small letter a with tilde, U+00E3 ISOlat1
    'auml':     '\344',         # latin small letter a with diaeresis, U+00E4 ISOlat1
    'bdquo':    '&#8222;',      # double low-9 quotation mark, U+201E NEW
    'beta':     '&#946;',       # greek small letter beta, U+03B2 ISOgrk3
    'brvbar':   '\246',         # broken bar = broken vertical bar, U+00A6 ISOnum
    'bull':     '&#8226;',      # bullet = black small circle, U+2022 ISOpub
    'cap':      '&#8745;',      # intersection = cap, U+2229 ISOtech
    'ccedil':   '\347',         # latin small letter c with cedilla, U+00E7 ISOlat1
    'cedil':    '\270',         # cedilla = spacing cedilla, U+00B8 ISOdia
    'cent':     '\242',         # cent sign, U+00A2 ISOnum
    'chi':      '&#967;',       # greek small letter chi, U+03C7 ISOgrk3
    'circ':     '&#710;',       # modifier letter circumflex accent, U+02C6 ISOpub
    'clubs':    '&#9827;',      # black club suit = shamrock, U+2663 ISOpub
    'cong':     '&#8773;',      # approximately equal to, U+2245 ISOtech
    'copy':     '\251',         # copyright sign, U+00A9 ISOnum
    'crarr':    '&#8629;',      # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
    'cup':      '&#8746;',      # union = cup, U+222A ISOtech
    'curren':   '\244',         # currency sign, U+00A4 ISOnum
    'dArr':     '&#8659;',      # downwards double arrow, U+21D3 ISOamsa
    'dagger':   '&#8224;',      # dagger, U+2020 ISOpub
    'darr':     '&#8595;',      # downwards arrow, U+2193 ISOnum
    'deg':      '\260',         # degree sign, U+00B0 ISOnum
    'delta':    '&#948;',       # greek small letter delta, U+03B4 ISOgrk3
    'diams':    '&#9830;',      # black diamond suit, U+2666 ISOpub
    'divide':   '\367',         # division sign, U+00F7 ISOnum
    'eacute':   '\351',         # latin small letter e with acute, U+00E9 ISOlat1
    'ecirc':    '\352',         # latin small letter e with circumflex, U+00EA ISOlat1
    'egrave':   '\350',         # latin small letter e with grave, U+00E8 ISOlat1
    'empty':    '&#8709;',      # empty set = null set = diameter, U+2205 ISOamso
    'emsp':     '&#8195;',      # em space, U+2003 ISOpub
    'ensp':     '&#8194;',      # en space, U+2002 ISOpub
    'epsilon':  '&#949;',       # greek small letter epsilon, U+03B5 ISOgrk3
    'equiv':    '&#8801;',      # identical to, U+2261 ISOtech
    'eta':      '&#951;',       # greek small letter eta, U+03B7 ISOgrk3
    'eth':      '\360',         # latin small letter eth, U+00F0 ISOlat1
    'euml':     '\353',         # latin small letter e with diaeresis, U+00EB ISOlat1
    'euro':     '&#8364;',      # euro sign, U+20AC NEW
    'exist':    '&#8707;',      # there exists, U+2203 ISOtech
    'fnof':     '&#402;',       # latin small f with hook = function = florin, U+0192 ISOtech
    'forall':   '&#8704;',      # for all, U+2200 ISOtech
    'frac12':   '\275',         # vulgar fraction one half = fraction one half, U+00BD ISOnum
    'frac14':   '\274',         # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
    'frac34':   '\276',         # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
    'frasl':    '&#8260;',      # fraction slash, U+2044 NEW
    'gamma':    '&#947;',       # greek small letter gamma, U+03B3 ISOgrk3
    'ge':       '&#8805;',      # greater-than or equal to, U+2265 ISOtech
    'gt':       '\76',          # greater-than sign, U+003E ISOnum
    'hArr':     '&#8660;',      # left right double arrow, U+21D4 ISOamsa
    'harr':     '&#8596;',      # left right arrow, U+2194 ISOamsa
    'hearts':   '&#9829;',      # black heart suit = valentine, U+2665 ISOpub
    'hellip':   '&#8230;',      # horizontal ellipsis = three dot leader, U+2026 ISOpub
    'iacute':   '\355',         # latin small letter i with acute, U+00ED ISOlat1
    'icirc':    '\356',         # latin small letter i with circumflex, U+00EE ISOlat1
    'iexcl':    '\241',         # inverted exclamation mark, U+00A1 ISOnum
    'igrave':   '\354',         # latin small letter i with grave, U+00EC ISOlat1
    'image':    '&#8465;',      # blackletter capital I = imaginary part, U+2111 ISOamso
    'infin':    '&#8734;',      # infinity, U+221E ISOtech
    'int':      '&#8747;',      # integral, U+222B ISOtech
    'iota':     '&#953;',       # greek small letter iota, U+03B9 ISOgrk3
    'iquest':   '\277',         # inverted question mark = turned question mark, U+00BF ISOnum
    'isin':     '&#8712;',      # element of, U+2208 ISOtech
    'iuml':     '\357',         # latin small letter i with diaeresis, U+00EF ISOlat1
    'kappa':    '&#954;',       # greek small letter kappa, U+03BA ISOgrk3
    'lArr':     '&#8656;',      # leftwards double arrow, U+21D0 ISOtech
    'lambda':   '&#955;',       # greek small letter lambda, U+03BB ISOgrk3
    'lang':     '&#9001;',      # left-pointing angle bracket = bra, U+2329 ISOtech
    'laquo':    '\253',         # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
    'larr':     '&#8592;',      # leftwards arrow, U+2190 ISOnum
    'lceil':    '&#8968;',      # left ceiling = apl upstile, U+2308 ISOamsc
    'ldquo':    '&#8220;',      # left double quotation mark, U+201C ISOnum
    'le':       '&#8804;',      # less-than or equal to, U+2264 ISOtech
    'lfloor':   '&#8970;',      # left floor = apl downstile, U+230A ISOamsc
    'lowast':   '&#8727;',      # asterisk operator, U+2217 ISOtech
    'loz':      '&#9674;',      # lozenge, U+25CA ISOpub
    'lrm':      '&#8206;',      # left-to-right mark, U+200E NEW RFC 2070
    'lsaquo':   '&#8249;',      # single left-pointing angle quotation mark, U+2039 ISO proposed
    'lsquo':    '&#8216;',      # left single quotation mark, U+2018 ISOnum
    'lt':       '\74',          # less-than sign, U+003C ISOnum
    'macr':     '\257',         # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
    'mdash':    '&#8212;',      # em dash, U+2014 ISOpub
    'micro':    '\265',         # micro sign, U+00B5 ISOnum
    'middot':   '\267',         # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
    'minus':    '&#8722;',      # minus sign, U+2212 ISOtech
    'mu':       '&#956;',       # greek small letter mu, U+03BC ISOgrk3
    'nabla':    '&#8711;',      # nabla = backward difference, U+2207 ISOtech
    'nbsp':     '\240',         # no-break space = non-breaking space, U+00A0 ISOnum
    'ndash':    '&#8211;',      # en dash, U+2013 ISOpub
    'ne':       '&#8800;',      # not equal to, U+2260 ISOtech
    'ni':       '&#8715;',      # contains as member, U+220B ISOtech
    'not':      '\254',         # not sign, U+00AC ISOnum
    'notin':    '&#8713;',      # not an element of, U+2209 ISOtech
    'nsub':     '&#8836;',      # not a subset of, U+2284 ISOamsn
    'ntilde':   '\361',         # latin small letter n with tilde, U+00F1 ISOlat1
    'nu':       '&#957;',       # greek small letter nu, U+03BD ISOgrk3
    'oacute':   '\363',         # latin small letter o with acute, U+00F3 ISOlat1
    'ocirc':    '\364',         # latin small letter o with circumflex, U+00F4 ISOlat1
    'oelig':    '&#339;',       # latin small ligature oe, U+0153 ISOlat2
    'ograve':   '\362',         # latin small letter o with grave, U+00F2 ISOlat1
    'oline':    '&#8254;',      # overline = spacing overscore, U+203E NEW
    'omega':    '&#969;',       # greek small letter omega, U+03C9 ISOgrk3
    'omicron':  '&#959;',       # greek small letter omicron, U+03BF NEW
    'oplus':    '&#8853;',      # circled plus = direct sum, U+2295 ISOamsb
    'or':       '&#8744;',      # logical or = vee, U+2228 ISOtech
    'ordf':     '\252',         # feminine ordinal indicator, U+00AA ISOnum
    'ordm':     '\272',         # masculine ordinal indicator, U+00BA ISOnum
    'oslash':   '\370',         # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
    'otilde':   '\365',         # latin small letter o with tilde, U+00F5 ISOlat1
    'otimes':   '&#8855;',      # circled times = vector product, U+2297 ISOamsb
    'ouml':     '\366',         # latin small letter o with diaeresis, U+00F6 ISOlat1
    'para':     '\266',         # pilcrow sign = paragraph sign, U+00B6 ISOnum
    'part':     '&#8706;',      # partial differential, U+2202 ISOtech
    'permil':   '&#8240;',      # per mille sign, U+2030 ISOtech
    'perp':     '&#8869;',      # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
    'phi':      '&#966;',       # greek small letter phi, U+03C6 ISOgrk3
    'pi':       '&#960;',       # greek small letter pi, U+03C0 ISOgrk3
    'piv':      '&#982;',       # greek pi symbol, U+03D6 ISOgrk3
    'plusmn':   '\261',         # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
    'pound':    '\243',         # pound sign, U+00A3 ISOnum
    'prime':    '&#8242;',      # prime = minutes = feet, U+2032 ISOtech
    'prod':     '&#8719;',      # n-ary product = product sign, U+220F ISOamsb
    'prop':     '&#8733;',      # proportional to, U+221D ISOtech
    'psi':      '&#968;',       # greek small letter psi, U+03C8 ISOgrk3
    'quot':     '\42',          # quotation mark = APL quote, U+0022 ISOnum
    'rArr':     '&#8658;',      # rightwards double arrow, U+21D2 ISOtech
    'radic':    '&#8730;',      # square root = radical sign, U+221A ISOtech
    'rang':     '&#9002;',      # right-pointing angle bracket = ket, U+232A ISOtech
    'raquo':    '\273',         # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
    'rarr':     '&#8594;',      # rightwards arrow, U+2192 ISOnum
    'rceil':    '&#8969;',      # right ceiling, U+2309 ISOamsc
    'rdquo':    '&#8221;',      # right double quotation mark, U+201D ISOnum
    'real':     '&#8476;',      # blackletter capital R = real part symbol, U+211C ISOamso
    'reg':      '\256',         # registered sign = registered trade mark sign, U+00AE ISOnum
    'rfloor':   '&#8971;',      # right floor, U+230B ISOamsc
    'rho':      '&#961;',       # greek small letter rho, U+03C1 ISOgrk3
    'rlm':      '&#8207;',      # right-to-left mark, U+200F NEW RFC 2070
    'rsaquo':   '&#8250;',      # single right-pointing angle quotation mark, U+203A ISO proposed
    'rsquo':    '&#8217;',      # right single quotation mark, U+2019 ISOnum
    'sbquo':    '&#8218;',      # single low-9 quotation mark, U+201A NEW
    'scaron':   '&#353;',       # latin small letter s with caron, U+0161 ISOlat2
    'sdot':     '&#8901;',      # dot operator, U+22C5 ISOamsb
    'sect':     '\247',         # section sign, U+00A7 ISOnum
    'shy':      '\255',         # soft hyphen = discretionary hyphen, U+00AD ISOnum
    'sigma':    '&#963;',       # greek small letter sigma, U+03C3 ISOgrk3
    'sigmaf':   '&#962;',       # greek small letter final sigma, U+03C2 ISOgrk3
    'sim':      '&#8764;',      # tilde operator = varies with = similar to, U+223C ISOtech
    'spades':   '&#9824;',      # black spade suit, U+2660 ISOpub
    'sub':      '&#8834;',      # subset of, U+2282 ISOtech
    'sube':     '&#8838;',      # subset of or equal to, U+2286 ISOtech
    'sum':      '&#8721;',      # n-ary sumation, U+2211 ISOamsb
    'sup':      '&#8835;',      # superset of, U+2283 ISOtech
    'sup1':     '\271',         # superscript one = superscript digit one, U+00B9 ISOnum
    'sup2':     '\262',         # superscript two = superscript digit two = squared, U+00B2 ISOnum
    'sup3':     '\263',         # superscript three = superscript digit three = cubed, U+00B3 ISOnum
    'supe':     '&#8839;',      # superset of or equal to, U+2287 ISOtech
    'szlig':    '\337',         # latin small letter sharp s = ess-zed, U+00DF ISOlat1
    'tau':      '&#964;',       # greek small letter tau, U+03C4 ISOgrk3
    'there4':   '&#8756;',      # therefore, U+2234 ISOtech
    'theta':    '&#952;',       # greek small letter theta, U+03B8 ISOgrk3
    'thetasym': '&#977;',       # greek small letter theta symbol, U+03D1 NEW
    'thinsp':   '&#8201;',      # thin space, U+2009 ISOpub
    'thorn':    '\376',         # latin small letter thorn with, U+00FE ISOlat1
    'tilde':    '&#732;',       # small tilde, U+02DC ISOdia
    'times':    '\327',         # multiplication sign, U+00D7 ISOnum
    'trade':    '&#8482;',      # trade mark sign, U+2122 ISOnum
    'uArr':     '&#8657;',      # upwards double arrow, U+21D1 ISOamsa
    'uacute':   '\372',         # latin small letter u with acute, U+00FA ISOlat1
    'uarr':     '&#8593;',      # upwards arrow, U+2191 ISOnum
    'ucirc':    '\373',         # latin small letter u with circumflex, U+00FB ISOlat1
    'ugrave':   '\371',         # latin small letter u with grave, U+00F9 ISOlat1
    'uml':      '\250',         # diaeresis = spacing diaeresis, U+00A8 ISOdia
    'upsih':    '&#978;',       # greek upsilon with hook symbol, U+03D2 NEW
    'upsilon':  '&#965;',       # greek small letter upsilon, U+03C5 ISOgrk3
    'uuml':     '\374',         # latin small letter u with diaeresis, U+00FC ISOlat1
    'weierp':   '&#8472;',      # script capital P = power set = Weierstrass p, U+2118 ISOamso
    'xi':       '&#958;',       # greek small letter xi, U+03BE ISOgrk3
    'yacute':   '\375',         # latin small letter y with acute, U+00FD ISOlat1
    'yen':      '\245',         # yen sign = yuan sign, U+00A5 ISOnum
    'yuml':     '\377',         # latin small letter y with diaeresis, U+00FF ISOlat1
    'zeta':     '&#950;',       # greek small letter zeta, U+03B6 ISOgrk3
    'zwj':      '&#8205;',      # zero width joiner, U+200D NEW RFC 2070
    'zwnj':     '&#8204;',      # zero width non-joiner, U+200C NEW RFC 2070

}
diff --git a/lib-python/2.2/htmllib.py b/lib-python/2.2/htmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/htmllib.py
@@ -0,0 +1,475 @@
+"""HTML 2.0 parser.
+
+See the HTML 2.0 specification:
+http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
+"""
+
+
+from sgmllib import SGMLParser
+from formatter import AS_IS
+
+__all__ = ["HTMLParser"]
+
class HTMLParser(SGMLParser):
    """This is the basic HTML parser class.

    It supports all entity names required by the HTML 2.0 specification
    RFC 1866.  It also defines handlers for all HTML 2.0 and many HTML 3.0
    and 3.2 elements.

    """

    # Entity table SGMLParser consults to translate e.g. '&amp;'.
    from htmlentitydefs import entitydefs

    def __init__(self, formatter, verbose=0):
        """Creates an instance of the HTMLParser class.

        The formatter parameter is the formatter instance associated with
        the parser.

        """
        SGMLParser.__init__(self, verbose)
        self.formatter = formatter
        self.savedata = None        # save_bgn()/save_end() buffer; None => pass data through
        self.isindex = 0            # set to 1 when an <ISINDEX> tag is seen
        self.title = None           # contents of <TITLE>, once parsed
        self.base = None            # HREF of the <BASE> tag, if any
        self.anchor = None          # HREF of the currently open <A>, if any
        self.anchorlist = []        # HREFs of all anchors seen so far
        self.nofill = 0             # nesting depth of literal-whitespace (<PRE>-like) regions
        self.list_stack = []        # open lists as [tag, item-label, item-counter] entries

    # ------ Methods used internally; some may be overridden

    # --- Formatter interface, taking care of 'savedata' mode;
    # shouldn't need to be overridden

    def handle_data(self, data):
        # While saving, accumulate instead of forwarding to the formatter.
        if self.savedata is not None:
            self.savedata = self.savedata + data
        else:
            if self.nofill:
                self.formatter.add_literal_data(data)
            else:
                self.formatter.add_flowing_data(data)

    # --- Hooks to save data; shouldn't need to be overridden

    def save_bgn(self):
        """Begins saving character data in a buffer instead of sending it
        to the formatter object.

        Retrieve the stored data via the save_end() method.  Use of the
        save_bgn() / save_end() pair may not be nested.

        """
        self.savedata = ''

    def save_end(self):
        """Ends buffering character data and returns all data saved since
        the preceding call to the save_bgn() method.

        If the nofill flag is false, whitespace is collapsed to single
        spaces.  A call to this method without a preceding call to the
        save_bgn() method will raise a TypeError exception.

        """
        data = self.savedata
        self.savedata = None
        if not self.nofill:
            # Collapse runs of whitespace into single spaces.
            data = ' '.join(data.split())
        return data

    # --- Hooks for anchors; should probably be overridden

    def anchor_bgn(self, href, name, type):
        """This method is called at the start of an anchor region.

        The arguments correspond to the attributes of the <A> tag with
        the same names.  The default implementation maintains a list of
        hyperlinks (defined by the HREF attribute for <A> tags) within
        the document.  The list of hyperlinks is available as the data
        attribute anchorlist.

        """
        self.anchor = href
        if self.anchor:
            self.anchorlist.append(href)

    def anchor_end(self):
        """This method is called at the end of an anchor region.

        The default implementation adds a textual footnote marker using an
        index into the list of hyperlinks created by the anchor_bgn()method.

        """
        if self.anchor:
            self.handle_data("[%d]" % len(self.anchorlist))
            self.anchor = None

    # --- Hook for images; should probably be overridden

    def handle_image(self, src, alt, *args):
        """This method is called to handle images.

        The default implementation simply passes the alt value to the
        handle_data() method.

        """
        self.handle_data(alt)

    # --------- Top level elememts

    def start_html(self, attrs): pass
    def end_html(self): pass

    def start_head(self, attrs): pass
    def end_head(self): pass

    def start_body(self, attrs): pass
    def end_body(self): pass

    # ------ Head elements

    def start_title(self, attrs):
        self.save_bgn()

    def end_title(self):
        self.title = self.save_end()

    def do_base(self, attrs):
        # Remember the last HREF attribute seen as the document base.
        for a, v in attrs:
            if a == 'href':
                self.base = v

    def do_isindex(self, attrs):
        self.isindex = 1

    def do_link(self, attrs):
        pass

    def do_meta(self, attrs):
        pass

    def do_nextid(self, attrs): # Deprecated
        pass

    # ------ Body elements

    # --- Headings

    # push_font takes a (font-tag, italic, bold, teletype) tuple; AS_IS
    # leaves a component unchanged (see start_i/start_b/start_tt below).

    def start_h1(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h1', 0, 1, 0))

    def end_h1(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h2(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h2', 0, 1, 0))

    def end_h2(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h3(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h3', 0, 1, 0))

    def end_h3(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h4(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h4', 0, 1, 0))

    def end_h4(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h5(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h5', 0, 1, 0))

    def end_h5(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h6(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h6', 0, 1, 0))

    def end_h6(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    # --- Block Structuring Elements

    def do_p(self, attrs):
        self.formatter.end_paragraph(1)

    def start_pre(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
        # Count nesting so nested literal regions restore correctly.
        self.nofill = self.nofill + 1

    def end_pre(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()
        # Never go negative, even on unbalanced markup.
        self.nofill = max(0, self.nofill - 1)

    def start_xmp(self, attrs):
        self.start_pre(attrs)
        self.setliteral('xmp') # Tell SGML parser

    def end_xmp(self):
        self.end_pre()

    def start_listing(self, attrs):
        self.start_pre(attrs)
        self.setliteral('listing') # Tell SGML parser

    def end_listing(self):
        self.end_pre()

    def start_address(self, attrs):
        self.formatter.end_paragraph(0)
        self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))

    def end_address(self):
        self.formatter.end_paragraph(0)
        self.formatter.pop_font()

    def start_blockquote(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_margin('blockquote')

    def end_blockquote(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_margin()

    # --- List Elements

    def start_ul(self, attrs):
        # Only break the paragraph for an outermost list.
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.push_margin('ul')
        self.list_stack.append(['ul', '*', 0])

    def end_ul(self):
        if self.list_stack: del self.list_stack[-1]
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.pop_margin()

    def do_li(self, attrs):
        self.formatter.end_paragraph(0)
        if self.list_stack:
            # Bump the item counter of the innermost open list.
            [dummy, label, counter] = top = self.list_stack[-1]
            top[2] = counter = counter+1
        else:
            # <LI> outside any list: fall back to a bullet.
            label, counter = '*', 0
        self.formatter.add_label_data(label, counter)

    def start_ol(self, attrs):
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.push_margin('ol')
        label = '1.'
        # A TYPE attribute overrides the numbering label; single-character
        # values get a '.' appended.
        for a, v in attrs:
            if a == 'type':
                if len(v) == 1: v = v + '.'
                label = v
        self.list_stack.append(['ol', label, 0])

    def end_ol(self):
        if self.list_stack: del self.list_stack[-1]
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.pop_margin()

    def start_menu(self, attrs):
        self.start_ul(attrs)

    def end_menu(self):
        self.end_ul()

    def start_dir(self, attrs):
        self.start_ul(attrs)

    def end_dir(self):
        self.end_ul()

    def start_dl(self, attrs):
        self.formatter.end_paragraph(1)
        self.list_stack.append(['dl', '', 0])

    def end_dl(self):
        self.ddpop(1)
        if self.list_stack: del self.list_stack[-1]

    def do_dt(self, attrs):
        self.ddpop()

    def do_dd(self, attrs):
        self.ddpop()
        self.formatter.push_margin('dd')
        self.list_stack.append(['dd', '', 0])

    def ddpop(self, bl=0):
        # Close an open <DD> region, if one is on top of the list stack.
        self.formatter.end_paragraph(bl)
        if self.list_stack:
            if self.list_stack[-1][0] == 'dd':
                del self.list_stack[-1]
                self.formatter.pop_margin()

    # --- Phrase Markup

    # Idiomatic Elements

    def start_cite(self, attrs): self.start_i(attrs)
    def end_cite(self): self.end_i()

    def start_code(self, attrs): self.start_tt(attrs)
    def end_code(self): self.end_tt()

    def start_em(self, attrs): self.start_i(attrs)
    def end_em(self): self.end_i()

    def start_kbd(self, attrs): self.start_tt(attrs)
    def end_kbd(self): self.end_tt()

    def start_samp(self, attrs): self.start_tt(attrs)
    def end_samp(self): self.end_tt()

    def start_strong(self, attrs): self.start_b(attrs)
    def end_strong(self): self.end_b()

    def start_var(self, attrs): self.start_i(attrs)
    def end_var(self): self.end_i()

    # Typographic Elements

    def start_i(self, attrs):
        self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
    def end_i(self):
        self.formatter.pop_font()

    def start_b(self, attrs):
        self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
    def end_b(self):
        self.formatter.pop_font()

    def start_tt(self, attrs):
        self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
    def end_tt(self):
        self.formatter.pop_font()

    def start_a(self, attrs):
        href = ''
        name = ''
        type = ''
        for attrname, value in attrs:
            value = value.strip()
            if attrname == 'href':
                href = value
            if attrname == 'name':
                name = value
            if attrname == 'type':
                type = value.lower()
        self.anchor_bgn(href, name, type)

    def end_a(self):
        self.anchor_end()

    # --- Line Break

    def do_br(self, attrs):
        self.formatter.add_line_break()

    # --- Horizontal Rule

    def do_hr(self, attrs):
        self.formatter.add_hor_rule()

    # --- Image

    def do_img(self, attrs):
        align = ''
        alt = '(image)'
        ismap = ''
        src = ''
        width = 0
        height = 0
        for attrname, value in attrs:
            if attrname == 'align':
                align = value
            if attrname == 'alt':
                alt = value
            if attrname == 'ismap':
                ismap = value
            if attrname == 'src':
                src = value
            # Non-numeric WIDTH/HEIGHT values are silently ignored.
            if attrname == 'width':
                try: width = int(value)
                except ValueError: pass
            if attrname == 'height':
                try: height = int(value)
                except ValueError: pass
        self.handle_image(src, alt, ismap, align, width, height)

    # --- Really Old Unofficial Deprecated Stuff

    def do_plaintext(self, attrs):
        self.start_pre(attrs)
        self.setnomoretags() # Tell SGML parser

    # --- Unhandled tags

    def unknown_starttag(self, tag, attrs):
        pass

    def unknown_endtag(self, tag):
        pass
+
+
def test(args = None):
    """Exercise the parser on a file (default 'test.html').

    Usage: [-s] [file].  '-s' parses silently through a NullFormatter;
    a file argument of '-' reads from stdin.  Otherwise output goes to
    stdout via a DumbWriter.
    """
    import sys, formatter

    if not args:
        args = sys.argv[1:]

    # A leading '-s' selects silent (null formatter) mode.
    silent = args and args[0] == '-s'
    if silent:
        del args[0]

    if args:
        file = args[0]
    else:
        file = 'test.html'

    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)

    data = f.read()

    if f is not sys.stdin:
        f.close()

    # Note: 'f' is reused here for the formatter feeding the parser.
    if silent:
        f = formatter.NullFormatter()
    else:
        f = formatter.AbstractFormatter(formatter.DumbWriter())

    p = HTMLParser(f)
    p.feed(data)
    p.close()


if __name__ == '__main__':
    test()
diff --git a/lib-python/2.2/httplib.py b/lib-python/2.2/httplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/httplib.py
@@ -0,0 +1,1238 @@
+"""HTTP/1.1 client library
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+    (null)
+      |
+      | HTTPConnection()
+      v
+    Idle
+      |
+      | putrequest()
+      v
+    Request-started
+      |
+      | ( putheader() )*  endheaders()
+      v
+    Request-sent
+      |
+      | response = getresponse()
+      v
+    Unread-response   [Response-headers-read]
+      |\____________________
+      |                     |
+      | response.read()     | putrequest()
+      v                     v
+    Idle                  Req-started-unread-response
+                     ______/|
+                   /        |
+   response.read() |        | ( putheader() )*  endheaders()
+                   v        v
+       Request-started    Req-sent-unread-response
+                            |
+                            | response.read()
+                            v
+                          Request-sent
+
+This diagram presents the following rules:
+  -- a second request may not be started until {response-headers-read}
+  -- a response [object] cannot be retrieved until {request-sent}
+  -- there is no differentiation between an unread response body and a
+     partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+      HTTPResponse class does not enforce this state machine, which
+      implies sophisticated clients may accelerate the request/response
+      pipeline. Caution should be taken, though: accelerating the states
+      beyond the above pattern may imply knowledge of the server's
+      connection-close behavior for certain requests. For example, it
+      is impossible to tell whether the server will close the connection
+      UNTIL the response headers have been read; this means that further
+      requests cannot be placed into the pipeline until it is known that
+      the server will NOT be closing the connection.
+
+Logical State                  __state            __response
+-------------                  -------            ----------
+Idle                           _CS_IDLE           None
+Request-started                _CS_REQ_STARTED    None
+Request-sent                   _CS_REQ_SENT       None
+Unread-response                _CS_IDLE           <response_class>
+Req-started-unread-response    _CS_REQ_STARTED    <response_class>
+Req-sent-unread-response       _CS_REQ_SENT       <response_class>
+"""
+
+import errno
+import mimetools
+import socket
+from urlparse import urlsplit
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+__all__ = ["HTTP", "HTTPResponse", "HTTPConnection", "HTTPSConnection",
+           "HTTPException", "NotConnected", "UnknownProtocol",
+           "UnknownTransferEncoding", "UnimplementedFileMode",
+           "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+           "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+           "BadStatusLine", "error"]
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+class HTTPMessage(mimetools.Message):
+
+    def addheader(self, key, value):
+        """Add header for field key handling repeats."""
+        prev = self.dict.get(key)
+        if prev is None:
+            self.dict[key] = value
+        else:
+            combined = ", ".join((prev, value))
+            self.dict[key] = combined
+
+    def addcontinue(self, key, more):
+        """Add more field data from a continuation line."""
+        prev = self.dict[key]
+        self.dict[key] = prev + "\n " + more
+
+    def readheaders(self):
+        """Read header lines.
+
+        Read header lines up to the entirely blank line that terminates them.
+        The (normally blank) line that ends the headers is skipped, but not
+        included in the returned list.  If a non-header line ends the headers,
+        (which is an error), an attempt is made to backspace over it; it is
+        never included in the returned list.
+
+        The variable self.status is set to the empty string if all went well,
+        otherwise it is an error message.  The variable self.headers is a
+        completely uninterpreted list of lines contained in the header (so
+        printing them will reproduce the header exactly as it appears in the
+        file).
+
+        If multiple header fields with the same name occur, they are combined
+        according to the rules in RFC 2616 sec 4.2:
+
+        Appending each subsequent field-value to the first, each separated
+        by a comma. The order in which header fields with the same field-name
+        are received is significant to the interpretation of the combined
+        field value.
+        """
+        # XXX The implementation overrides the readheaders() method of
+        # rfc822.Message.  The base class design isn't amenable to
+        # customized behavior here so the method here is a copy of the
+        # base class code with a few small changes.
+
+        self.dict = {}
+        self.unixfrom = ''
+        self.headers = list = []
+        self.status = ''
+        headerseen = ""
+        firstline = 1
+        startofline = unread = tell = None
+        if hasattr(self.fp, 'unread'):
+            unread = self.fp.unread
+        elif self.seekable:
+            tell = self.fp.tell
+        while 1:
+            if tell:
+                try:
+                    startofline = tell()
+                except IOError:
+                    startofline = tell = None
+                    self.seekable = 0
+            line = self.fp.readline()
+            if not line:
+                self.status = 'EOF in headers'
+                break
+            # Skip unix From name time lines
+            if firstline and line.startswith('From '):
+                self.unixfrom = self.unixfrom + line
+                continue
+            firstline = 0
+            if headerseen and line[0] in ' \t':
+                # XXX Not sure if continuation lines are handled properly
+                # for http and/or for repeating headers
+                # It's a continuation line.
+                list.append(line)
+                x = self.dict[headerseen] + "\n " + line.strip()
+                self.addcontinue(headerseen, line.strip())
+                continue
+            elif self.iscomment(line):
+                # It's a comment.  Ignore it.
+                continue
+            elif self.islast(line):
+                # Note! No pushback here!  The delimiter line gets eaten.
+                break
+            headerseen = self.isheader(line)
+            if headerseen:
+                # It's a legal header line, save it.
+                list.append(line)
+                self.addheader(headerseen, line[len(headerseen)+1:].strip())
+                continue
+            else:
+                # It's not a header line; throw it back and stop here.
+                if not self.dict:
+                    self.status = 'No headers'
+                else:
+                    self.status = 'Non-header line where header expected'
+                # Try to undo the read.
+                if unread:
+                    unread(line)
+                elif tell:
+                    self.fp.seek(startofline)
+                else:
+                    self.status = self.status + '; bad seek'
+                break
+
+class HTTPResponse:
+    """Parses and represents a server reply on an HTTP connection.
+
+    begin() reads the status line and headers from the socket; read()
+    then serves the body, honoring Content-Length and chunked transfer
+    encoding.  HTTP/0.9 Simple-Responses are also supported.
+    """
+
+    # strict: If true, raise BadStatusLine if the status line can't be
+    # parsed as a valid HTTP/1.0 or 1.1 status line.  By default it is
+    # false because it prevents clients from talking to HTTP/0.9
+    # servers.  Note that a response with a sufficiently corrupted
+    # status line will look like an HTTP/0.9 response.
+
+    # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
+
+    def __init__(self, sock, debuglevel=0, strict=0):
+        # Unbuffered file interface over the socket.
+        self.fp = sock.makefile('rb', 0)
+        self.debuglevel = debuglevel
+        self.strict = strict
+
+        self.msg = None  # HTTPMessage with the response headers, set by begin()
+
+        # from the Status-Line of the response
+        self.version = _UNKNOWN # HTTP-Version
+        self.status = _UNKNOWN  # Status-Code
+        self.reason = _UNKNOWN  # Reason-Phrase
+
+        self.chunked = _UNKNOWN         # is "chunked" being used?
+        self.chunk_left = _UNKNOWN      # bytes left to read in current chunk
+        self.length = _UNKNOWN          # number of bytes left in response
+        self.will_close = _UNKNOWN      # conn will close at end of response
+
+    def _read_status(self):
+        """Read and parse the status line; return (version, status, reason).
+
+        Falls back to a synthesized HTTP/0.9 result when the line does
+        not start with 'HTTP/' and strict mode is off.  Raises
+        BadStatusLine on EOF or an unparseable status code.
+        """
+        # Initialize with Simple-Response defaults
+        line = self.fp.readline()
+        if self.debuglevel > 0:
+            print "reply:", repr(line)
+        if not line:
+            # Presumably, the server closed the connection before
+            # sending a valid response.
+            raise BadStatusLine(line)
+        try:
+            [version, status, reason] = line.split(None, 2)
+        except ValueError:
+            try:
+                [version, status] = line.split(None, 1)
+                reason = ""
+            except ValueError:
+                # empty version will cause next test to fail and status
+                # will be treated as 0.9 response.
+                version = ""
+        if not version.startswith('HTTP/'):
+            if self.strict:
+                self.close()
+                raise BadStatusLine(line)
+            else:
+                # assume it's a Simple-Response from an 0.9 server
+                self.fp = LineAndFileWrapper(line, self.fp)
+                return "HTTP/0.9", 200, ""
+
+        # The status code is a three-digit number
+        try:
+            status = int(status)
+            if status < 100 or status > 999:
+                raise BadStatusLine(line)
+        except ValueError:
+            raise BadStatusLine(line)
+        return version, status, reason
+
+    def begin(self):
+        """Read the status line and headers, and classify the response.
+
+        Sets version/status/reason, chunked, will_close and length.
+        Safe to call more than once; subsequent calls are no-ops.
+        """
+        if self.msg is not None:
+            # we've already started reading the response
+            return
+
+        # read until we get a non-100 response
+        while 1:
+            version, status, reason = self._read_status()
+            if status != 100:
+                break
+            # skip the header from the 100 response
+            while 1:
+                skip = self.fp.readline().strip()
+                if not skip:
+                    break
+                if self.debuglevel > 0:
+                    print "header:", skip
+
+        self.status = status
+        self.reason = reason.strip()
+        if version == 'HTTP/1.0':
+            self.version = 10
+        elif version.startswith('HTTP/1.'):
+            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
+        elif version == 'HTTP/0.9':
+            self.version = 9
+        else:
+            raise UnknownProtocol(version)
+
+        if self.version == 9:
+            # HTTP/0.9 has no headers; the whole stream is the body.
+            self.chunked = 0
+            self.will_close = 1
+            self.msg = HTTPMessage(StringIO())
+            return
+
+        # second argument 0 == not seekable
+        self.msg = HTTPMessage(self.fp, 0)
+        if self.debuglevel > 0:
+            for hdr in self.msg.headers:
+                print "header:", hdr,
+
+        # don't let the msg keep an fp
+        self.msg.fp = None
+
+        # are we using the chunked-style of transfer encoding?
+        tr_enc = self.msg.getheader('transfer-encoding')
+        if tr_enc and tr_enc.lower() == "chunked":
+            self.chunked = 1
+            self.chunk_left = None
+        else:
+            self.chunked = 0
+
+        # will the connection close at the end of the response?
+        conn = self.msg.getheader('connection')
+        if conn:
+            conn = conn.lower()
+            # a "Connection: close" will always close the connection. if we
+            # don't see that and this is not HTTP/1.1, then the connection will
+            # close unless we see a Keep-Alive header.
+            self.will_close = conn.find('close') != -1 or \
+                              ( self.version != 11 and \
+                                not self.msg.getheader('keep-alive') )
+        else:
+            # for HTTP/1.1, the connection will always remain open
+            # otherwise, it will remain open IFF we see a Keep-Alive header
+            self.will_close = self.version != 11 and \
+                              not self.msg.getheader('keep-alive')
+
+        # do we have a Content-Length?
+        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+        length = self.msg.getheader('content-length')
+        if length and not self.chunked:
+            try:
+                self.length = int(length)
+            except ValueError:
+                self.length = None
+        else:
+            self.length = None
+
+        # does the body have a fixed length? (of zero)
+        if (status == 204 or            # No Content
+            status == 304 or            # Not Modified
+            100 <= status < 200):       # 1xx codes
+            self.length = 0
+
+        # if the connection remains open, and we aren't using chunked, and
+        # a content-length was not provided, then assume that the connection
+        # WILL close.
+        if not self.will_close and \
+           not self.chunked and \
+           self.length is None:
+            self.will_close = 1
+
+    def close(self):
+        """Release the underlying file object (idempotent)."""
+        if self.fp:
+            self.fp.close()
+            self.fp = None
+
+    def isclosed(self):
+        # NOTE: it is possible that we will not ever call self.close(). This
+        #       case occurs when will_close is TRUE, length is None, and we
+        #       read up to the last byte, but NOT past it.
+        #
+        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
+        #          called, meaning self.isclosed() is meaningful.
+        return self.fp is None
+
+    def read(self, amt=None):
+        """Return up to *amt* bytes of the body, or the whole body if None.
+
+        Returns '' once the response has been closed.  Chunked responses
+        are delegated to _read_chunked().
+        """
+        if self.fp is None:
+            return ''
+
+        if self.chunked:
+            return self._read_chunked(amt)
+
+        if amt is None:
+            # unbounded read
+            if self.will_close:
+                s = self.fp.read()
+            else:
+                s = self._safe_read(self.length)
+            self.close()        # we read everything
+            return s
+
+        if self.length is not None:
+            if amt > self.length:
+                # clip the read to the "end of response"
+                amt = self.length
+            self.length -= amt
+
+        # we do not use _safe_read() here because this may be a .will_close
+        # connection, and the user is reading more bytes than will be provided
+        # (for example, reading in 1k chunks)
+        s = self.fp.read(amt)
+
+        return s
+
+    def _read_chunked(self, amt):
+        """Read a chunked-encoded body; *amt* as for read()."""
+        assert self.chunked != _UNKNOWN
+        chunk_left = self.chunk_left
+        value = ''
+
+        # XXX This accumulates chunks by repeated string concatenation,
+        # which is not efficient as the number or size of chunks gets big.
+        while 1:
+            if chunk_left is None:
+                # At a chunk boundary: read the chunk-size line (hex).
+                line = self.fp.readline()
+                i = line.find(';')
+                if i >= 0:
+                    line = line[:i] # strip chunk-extensions
+                chunk_left = int(line, 16)
+                if chunk_left == 0:
+                    break
+            if amt is None:
+                value += self._safe_read(chunk_left)
+            elif amt < chunk_left:
+                value += self._safe_read(amt)
+                self.chunk_left = chunk_left - amt
+                return value
+            elif amt == chunk_left:
+                value += self._safe_read(amt)
+                self._safe_read(2)  # toss the CRLF at the end of the chunk
+                self.chunk_left = None
+                return value
+            else:
+                value += self._safe_read(chunk_left)
+                amt -= chunk_left
+
+            # we read the whole chunk, get another
+            self._safe_read(2)      # toss the CRLF at the end of the chunk
+            chunk_left = None
+
+        # read and discard trailer up to the CRLF terminator
+        ### note: we shouldn't have any trailers!
+        while 1:
+            line = self.fp.readline()
+            if line == '\r\n':
+                break
+
+        # we read everything; close the "file"
+        # XXX Shouldn't the client close the file?
+        self.close()
+
+        return value
+
+    def _safe_read(self, amt):
+        """Read the number of bytes requested, compensating for partial reads.
+
+        Normally, we have a blocking socket, but a read() can be interrupted
+        by a signal (resulting in a partial read).
+
+        Note that we cannot distinguish between EOF and an interrupt when zero
+        bytes have been read. IncompleteRead() will be raised in this
+        situation.
+
+        This function should be used when <amt> bytes "should" be present for
+        reading. If the bytes are truly not available (due to EOF), then the
+        IncompleteRead exception can be used to detect the problem.
+        """
+        s = ''
+        while amt > 0:
+            chunk = self.fp.read(amt)
+            if not chunk:
+                raise IncompleteRead(s)
+            s = s + chunk
+            amt = amt - len(chunk)
+        return s
+
+    def getheader(self, name, default=None):
+        """Return the named response header, or *default* if absent.
+
+        Raises ResponseNotReady if called before begin().
+        """
+        if self.msg is None:
+            raise ResponseNotReady()
+        return self.msg.getheader(name, default)
+
+
+class HTTPConnection:
+
+    _http_vsn = 11
+    _http_vsn_str = 'HTTP/1.1'
+
+    response_class = HTTPResponse
+    default_port = HTTP_PORT
+    auto_open = 1
+    debuglevel = 0
+    strict = 0
+
+    def __init__(self, host, port=None, strict=None):
+        self.sock = None
+        self._buffer = []
+        self.__response = None
+        self.__state = _CS_IDLE
+
+        self._set_hostport(host, port)
+        if strict is not None:
+            self.strict = strict
+
+    def _set_hostport(self, host, port):
+        if port is None:
+            i = host.find(':')
+            if i >= 0:
+                try:
+                    port = int(host[i+1:])
+                except ValueError:
+                    raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
+                host = host[:i]
+            else:
+                port = self.default_port
+        self.host = host
+        self.port = port
+
+    def set_debuglevel(self, level):
+        self.debuglevel = level
+
+    def connect(self):
+        """Connect to the host and port specified in __init__."""
+        msg = "getaddrinfo returns an empty list"
+        for res in socket.getaddrinfo(self.host, self.port, 0,
+                                      socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            try:
+                self.sock = socket.socket(af, socktype, proto)
+                if self.debuglevel > 0:
+                    print "connect: (%s, %s)" % (self.host, self.port)
+                self.sock.connect(sa)
+            except socket.error, msg:
+                if self.debuglevel > 0:
+                    print 'connect fail:', (self.host, self.port)
+                if self.sock:
+                    self.sock.close()
+                self.sock = None
+                continue
+            break
+        if not self.sock:
+            raise socket.error, msg
+
+    def close(self):
+        """Close the connection to the HTTP server."""
+        if self.sock:
+            self.sock.close()   # close it manually... there may be other refs
+            self.sock = None
+        if self.__response:
+            self.__response.close()
+            self.__response = None
+        self.__state = _CS_IDLE
+
+    def send(self, str):
+        """Send `str' to the server."""
+        if self.sock is None:
+            if self.auto_open:
+                self.connect()
+            else:
+                raise NotConnected()
+
+        # send the data to the server. if we get a broken pipe, then close
+        # the socket. we want to reconnect when somebody tries to send again.
+        #
+        # NOTE: we DO propagate the error, though, because we cannot simply
+        #       ignore the error... the caller will know if they can retry.
+        if self.debuglevel > 0:
+            print "send:", repr(str)
+        try:
+            self.sock.sendall(str)
+        except socket.error, v:
+            if v[0] == 32:      # Broken pipe
+                self.close()
+            raise
+
+    def _output(self, s):
+        """Add a line of output to the current request buffer.
+
+        Assumes that the line does *not* end with \\r\\n.
+        """
+        self._buffer.append(s)
+
+    def _send_output(self):
+        """Send the currently buffered request and clear the buffer.
+
+        Appends an extra \\r\\n to the buffer.
+        """
+        self._buffer.extend(("", ""))
+        msg = "\r\n".join(self._buffer)
+        del self._buffer[:]
+        self.send(msg)
+
+    def putrequest(self, method, url, skip_host=0):
+        """Send a request to the server.
+
+        `method' specifies an HTTP request method, e.g. 'GET'.
+        `url' specifies the object being requested, e.g. '/index.html'.
+        """
+
+        # check if a prior response has been completed
+        # XXX What if it hasn't?
+        if self.__response and self.__response.isclosed():
+            self.__response = None
+
+        #
+        # in certain cases, we cannot issue another request on this connection.
+        # this occurs when:
+        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
+        #   2) a response to a previous request has signalled that it is going
+        #      to close the connection upon completion.
+        #   3) the headers for the previous response have not been read, thus
+        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
+        #
+        # if there is no prior response, then we can request at will.
+        #
+        # if point (2) is true, then we will have passed the socket to the
+        # response (effectively meaning, "there is no prior response"), and
+        # will open a new one when a new request is made.
+        #
+        # Note: if a prior response exists, then we *can* start a new request.
+        #       We are not allowed to begin fetching the response to this new
+        #       request, however, until that prior response is complete.
+        #
+        if self.__state == _CS_IDLE:
+            self.__state = _CS_REQ_STARTED
+        else:
+            raise CannotSendRequest()
+
+        if not url:
+            url = '/'
+        str = '%s %s %s' % (method, url, self._http_vsn_str)
+
+        self._output(str)
+
+        if self._http_vsn == 11:
+            # Issue some standard headers for better HTTP/1.1 compliance
+
+            if not skip_host:
+                # this header is issued *only* for HTTP/1.1
+                # connections. more specifically, this means it is
+                # only issued when the client uses the new
+                # HTTPConnection() class. backwards-compat clients
+                # will be using HTTP/1.0 and those clients may be
+                # issuing this header themselves. we should NOT issue
+                # it twice; some web servers (such as Apache) barf
+                # when they see two Host: headers
+
+                # If we need a non-standard port,include it in the
+                # header.  If the request is going through a proxy,
+                # but the host of the actual URL, not the host of the
+                # proxy.
+
+                netloc = ''
+                if url.startswith('http'):
+                    nil, netloc, nil, nil, nil = urlsplit(url)
+
+                if netloc:
+                    self.putheader('Host', netloc)
+                elif self.port == HTTP_PORT:
+                    self.putheader('Host', self.host)
+                else:
+                    self.putheader('Host', "%s:%s" % (self.host, self.port))
+
+            # note: we are assuming that clients will not attempt to set these
+            #       headers since *this* library must deal with the
+            #       consequences. this also means that when the supporting
+            #       libraries are updated to recognize other forms, then this
+            #       code should be changed (removed or updated).
+
+            # we only want a Content-Encoding of "identity" since we don't
+            # support encodings such as x-gzip or x-deflate.
+            self.putheader('Accept-Encoding', 'identity')
+
+            # we can accept "chunked" Transfer-Encodings, but no others
+            # NOTE: no TE header implies *only* "chunked"
+            #self.putheader('TE', 'chunked')
+
+            # if TE is supplied in the header, then it must appear in a
+            # Connection header.
+            #self.putheader('Connection', 'TE')
+
+        else:
+            # For HTTP/1.0, the server will assume "not chunked"
+            pass
+
+    def putheader(self, header, value):
+        """Send a request header line to the server.
+
+        For example: h.putheader('Accept', 'text/html')
+        """
+        if self.__state != _CS_REQ_STARTED:
+            raise CannotSendHeader()
+
+        str = '%s: %s' % (header, value)
+        self._output(str)
+
+    def endheaders(self):
+        """Indicate that the last header line has been sent to the server."""
+
+        if self.__state == _CS_REQ_STARTED:
+            self.__state = _CS_REQ_SENT
+        else:
+            raise CannotSendHeader()
+
+        self._send_output()
+
+    def request(self, method, url, body=None, headers={}):
+        """Send a complete request to the server."""
+
+        try:
+            self._send_request(method, url, body, headers)
+        except socket.error, v:
+            # trap 'Broken pipe' if we're allowed to automatically reconnect
+            if v[0] != 32 or not self.auto_open:
+                raise
+            # try one more time
+            self._send_request(method, url, body, headers)
+
+    def _send_request(self, method, url, body, headers):
+        # If headers already contains a host header, then define the
+        # optional skip_host argument to putrequest().  The check is
+        # harder because field names are case insensitive.
+        if 'Host' in (headers
+            or [k for k in headers.iterkeys() if k.lower() == "host"]):
+            self.putrequest(method, url, skip_host=1)
+        else:
+            self.putrequest(method, url)
+
+        if body:
+            self.putheader('Content-Length', str(len(body)))
+        for hdr, value in headers.items():
+            self.putheader(hdr, value)
+        self.endheaders()
+
+        if body:
+            self.send(body)
+
+    def getresponse(self):
+        "Get the response from the server."
+
+        # check if a prior response has been completed
+        if self.__response and self.__response.isclosed():
+            self.__response = None
+
+        #
+        # if a prior response exists, then it must be completed (otherwise, we
+        # cannot read this response's header to determine the connection-close
+        # behavior)
+        #
+        # note: if a prior response existed, but was connection-close, then the
+        # socket and response were made independent of this HTTPConnection
+        # object since a new request requires that we open a whole new
+        # connection
+        #
+        # this means the prior response had one of two states:
+        #   1) will_close: this connection was reset and the prior socket and
+        #                  response operate independently
+        #   2) persistent: the response was retained and we await its
+        #                  isclosed() status to become true.
+        #
+        if self.__state != _CS_REQ_SENT or self.__response:
+            raise ResponseNotReady()
+
+        if self.debuglevel > 0:
+            response = self.response_class(self.sock, self.debuglevel,
+                                           strict=self.strict)
+        else:
+            response = self.response_class(self.sock, strict=self.strict)
+
+        response.begin()
+        assert response.will_close != _UNKNOWN
+        self.__state = _CS_IDLE
+
+        if response.will_close:
+            # this effectively passes the connection to the response
+            self.close()
+        else:
+            # remember this, so we can tell when it is complete
+            self.__response = response
+
+        return response
+
+# The next several classes are used to define FakeSocket, a socket-like
+# interface to an SSL connection.
+
+# The primary complexity comes from faking a makefile() method.  The
+# standard socket makefile() implementation calls dup() on the socket
+# file descriptor.  As a consequence, clients can call close() on the
+# parent socket and its makefile children in any order.  The underlying
+# socket isn't closed until they are all closed.
+
+# The implementation uses reference counting to keep the socket open
+# until the last client calls close().  SharedSocket keeps track of
+# the reference counting and SharedSocketClient provides a constructor
+# and close() method that call incref() and decref() correctly.
+
+class SharedSocket:
+    """Reference-counted wrapper around a real socket.
+
+    The socket is closed when the last client calls decref(), or when
+    the wrapper itself is garbage collected.
+    """
+
+    def __init__(self, sock):
+        self.sock = sock
+        self._refcnt = 0
+
+    def incref(self):
+        """Record one more client holding the socket open."""
+        self._refcnt += 1
+
+    def decref(self):
+        """Drop one client; close the socket when none remain."""
+        self._refcnt -= 1
+        assert self._refcnt >= 0
+        if self._refcnt == 0:
+            self.sock.close()
+
+    def __del__(self):
+        self.sock.close()
+
+class SharedSocketClient:
+    """Base class for objects sharing a SharedSocket.
+
+    Increments the shared reference count on construction; close()
+    decrements it exactly once.
+    """
+
+    def __init__(self, shared):
+        self._closed = 0
+        self._shared = shared
+        self._shared.incref()
+        self._sock = shared.sock
+
+    def close(self):
+        # Idempotent: only the first call decrements the refcount.
+        if not self._closed:
+            self._shared.decref()
+            self._closed = 1
+            self._shared = None
+
+class SSLFile(SharedSocketClient):
+    """File-like object wrapping an SSL socket."""
+
+    BUFSIZE = 8192
+
+    def __init__(self, sock, ssl, bufsize=None):
+        SharedSocketClient.__init__(self, sock)
+        self._ssl = ssl
+        self._buf = ''               # data read from SSL but not yet consumed
+        self._bufsize = bufsize or self.__class__.BUFSIZE
+
+    def _read(self):
+        """Read one buffer's worth from the SSL object; '' on EOF."""
+        buf = ''
+        # put in a loop so that we retry on transient errors
+        while 1:
+            try:
+                buf = self._ssl.read(self._bufsize)
+            except socket.sslerror, err:
+                # WANT_READ/WANT_WRITE are transient: retry the read.
+                if (err[0] == socket.SSL_ERROR_WANT_READ
+                    or err[0] == socket.SSL_ERROR_WANT_WRITE):
+                    continue
+                # ZERO_RETURN/EOF mean the peer ended the SSL stream.
+                if (err[0] == socket.SSL_ERROR_ZERO_RETURN
+                    or err[0] == socket.SSL_ERROR_EOF):
+                    break
+                raise
+            except socket.error, err:
+                if err[0] == errno.EINTR:
+                    continue
+                if err[0] == errno.EBADF:
+                    # XXX socket was closed?
+                    break
+                raise
+            else:
+                break
+        return buf
+
+    def read(self, size=None):
+        """Return up to *size* bytes, or everything until EOF if None."""
+        L = [self._buf]
+        avail = len(self._buf)
+        while size is None or avail < size:
+            s = self._read()
+            if s == '':
+                break
+            L.append(s)
+            avail += len(s)
+        all = "".join(L)
+        if size is None:
+            self._buf = ''
+            return all
+        else:
+            # keep any excess bytes for the next call
+            self._buf = all[size:]
+            return all[:size]
+
+    def readline(self):
+        """Return one line (including the newline), or the rest at EOF."""
+        L = [self._buf]
+        self._buf = ''
+        while 1:
+            # only the newest segment can contain a not-yet-seen newline
+            i = L[-1].find("\n")
+            if i >= 0:
+                break
+            s = self._read()
+            if s == '':
+                break
+            L.append(s)
+        if i == -1:
+            # loop exited because there is no more data
+            return "".join(L)
+        else:
+            all = "".join(L)
+            # XXX could do enough bookkeeping not to do a 2nd search
+            i = all.find("\n") + 1
+            line = all[:i]
+            self._buf = all[i:]
+            return line
+
+class FakeSocket(SharedSocketClient):
+
+    class _closedsocket:
+        def __getattr__(self, name):
+            raise error(9, 'Bad file descriptor')
+
+    def __init__(self, sock, ssl):
+        sock = SharedSocket(sock)
+        SharedSocketClient.__init__(self, sock)
+        self._ssl = ssl
+
+    def close(self):
+        SharedSocketClient.close(self)
+        self._sock = self.__class__._closedsocket()
+
+    def makefile(self, mode, bufsize=None):
+        if mode != 'r' and mode != 'rb':
+            raise UnimplementedFileMode()
+        return SSLFile(self._shared, self._ssl, bufsize)
+
+    def send(self, stuff, flags = 0):
+        return self._ssl.write(stuff)
+
+    sendall = send
+
+    def recv(self, len = 1024, flags = 0):
+        return self._ssl.read(len)
+
+    def __getattr__(self, attr):
+        return getattr(self._sock, attr)
+
+
+class HTTPSConnection(HTTPConnection):
+    "This class allows communication via SSL."
+
+    default_port = HTTPS_PORT
+
+    def __init__(self, host, port=None, key_file=None, cert_file=None,
+                 strict=None):
+        HTTPConnection.__init__(self, host, port, strict)
+        self.key_file = key_file
+        self.cert_file = cert_file
+
+    def connect(self):
+        "Connect to a host on a given (SSL) port."
+
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.connect((self.host, self.port))
+        realsock = sock
+        if hasattr(sock, "_sock"):
+            realsock = sock._sock
+        ssl = socket.ssl(realsock, self.key_file, self.cert_file)
+        self.sock = FakeSocket(sock, ssl)
+
+
+class HTTP:
+    "Compatibility class with httplib.py from 1.5."
+
+    _http_vsn = 10
+    _http_vsn_str = 'HTTP/1.0'
+
+    debuglevel = 0
+
+    _connection_class = HTTPConnection
+
+    def __init__(self, host='', port=None, strict=None):
+        "Provide a default host, since the superclass requires one."
+
+        # some joker passed 0 explicitly, meaning default port
+        if port == 0:
+            port = None
+
+        # Note that we may pass an empty string as the host; this will throw
+        # an error when we attempt to connect. Presumably, the client code
+        # will call connect before then, with a proper host.
+        self._setup(self._connection_class(host, port, strict))
+
+    def _setup(self, conn):
+        self._conn = conn
+
+        # set up delegation to flesh out interface
+        self.send = conn.send
+        self.putrequest = conn.putrequest
+        self.endheaders = conn.endheaders
+        self.set_debuglevel = conn.set_debuglevel
+
+        conn._http_vsn = self._http_vsn
+        conn._http_vsn_str = self._http_vsn_str
+
+        self.file = None
+
+    def connect(self, host=None, port=None):
+        "Accept arguments to set the host/port, since the superclass doesn't."
+
+        if host is not None:
+            self._conn._set_hostport(host, port)
+        self._conn.connect()
+
+    def getfile(self):
+        "Provide a getfile, since the superclass' does not use this concept."
+        return self.file
+
+    def putheader(self, header, *values):
+        "The superclass allows only one value argument."
+        self._conn.putheader(header, '\r\n\t'.join(values))
+
+    def getreply(self):
+        """Compat definition since superclass does not define it.
+
+        Returns a tuple consisting of:
+        - server status code (e.g. '200' if all goes well)
+        - server "reason" corresponding to status code
+        - any RFC822 headers in the response from the server
+        """
+        try:
+            response = self._conn.getresponse()
+        except BadStatusLine, e:
+            ### hmm. if getresponse() ever closes the socket on a bad request,
+            ### then we are going to have problems with self.sock
+
+            ### should we keep this behavior? do people use it?
+            # keep the socket open (as a file), and return it
+            self.file = self._conn.sock.makefile('rb', 0)
+
+            # close our socket -- we want to restart after any protocol error
+            self.close()
+
+            self.headers = None
+            return -1, e.line, None
+
+        self.headers = response.msg
+        self.file = response.fp
+        return response.status, response.reason, response.msg
+
+    def close(self):
+        self._conn.close()
+
+        # note that self.file == response.fp, which gets closed by the
+        # superclass. just clear the object ref here.
+        ### hmm. messy. if status==-1, then self.file is owned by us.
+        ### well... we aren't explicitly closing, but losing this ref will
+        ### do it
+        self.file = None
+
+if hasattr(socket, 'ssl'):
+    class HTTPS(HTTP):
+        """Compatibility with 1.5 httplib interface
+
+        Python 1.5.2 did not have an HTTPS class, but it defined an
+        interface for sending http requests that is also useful for
+        https.
+        """
+
+        _connection_class = HTTPSConnection
+
+        def __init__(self, host='', port=None, key_file=None, cert_file=None,
+                     strict=None):
+            # provide a default host, pass the X509 cert info
+
+            # urf. compensate for bad input.
+            if port == 0:
+                port = None
+            self._setup(self._connection_class(host, port, key_file,
+                                               cert_file, strict))
+
+            # we never actually use these for anything, but we keep them
+            # here for compatibility with post-1.5.2 CVS.
+            self.key_file = key_file
+            self.cert_file = cert_file
+
+
+class HTTPException(Exception):
+    # Subclasses that define an __init__ must call Exception.__init__
+    # or define self.args.  Otherwise, str() will fail.
+    pass
+
+class NotConnected(HTTPException):
+    pass
+
+class InvalidURL(HTTPException):
+    pass
+
+class UnknownProtocol(HTTPException):
+    def __init__(self, version):
+        self.args = version,
+        self.version = version
+
+class UnknownTransferEncoding(HTTPException):
+    pass
+
+class UnimplementedFileMode(HTTPException):
+    pass
+
+class IncompleteRead(HTTPException):
+    def __init__(self, partial):
+        self.args = partial,
+        self.partial = partial
+
+class ImproperConnectionState(HTTPException):
+    pass
+
+class CannotSendRequest(ImproperConnectionState):
+    pass
+
+class CannotSendHeader(ImproperConnectionState):
+    pass
+
+class ResponseNotReady(ImproperConnectionState):
+    pass
+
+class BadStatusLine(HTTPException):
+    def __init__(self, line):
+        self.args = line,
+        self.line = line
+
+# for backwards compatibility
+error = HTTPException
+
+class LineAndFileWrapper:
+    """A limited file-like object for HTTP/0.9 responses."""
+
+    # The status-line parsing code calls readline(), which normally
+    # get the HTTP status line.  For a 0.9 response, however, this is
+    # actually the first line of the body!  Clients need to get a
+    # readable file object that contains that line.
+
+    def __init__(self, line, file):
+        self._line = line
+        self._file = file
+        self._line_consumed = 0
+        self._line_offset = 0
+        self._line_left = len(line)
+
+    def __getattr__(self, attr):
+        return getattr(self._file, attr)
+
+    def _done(self):
+        # called when the last byte is read from the line.  After the
+        # call, all read methods are delegated to the underlying file
+        # obhect.
+        self._line_consumed = 1
+        self.read = self._file.read
+        self.readline = self._file.readline
+        self.readlines = self._file.readlines
+
+    def read(self, amt=None):
+        assert not self._line_consumed and self._line_left
+        if amt is None or amt > self._line_left:
+            s = self._line[self._line_offset:]
+            self._done()
+            if amt is None:
+                return s + self._file.read()
+            else:
+                return s + self._file.read(amt - len(s))
+        else:
+            assert amt <= self._line_left
+            i = self._line_offset
+            j = i + amt
+            s = self._line[i:j]
+            self._line_offset = j
+            self._line_left -= amt
+            if self._line_left == 0:
+                self._done()
+            return s
+
+    def readline(self):
+        s = self._line[self._line_offset:]
+        self._done()
+        return s
+
+    def readlines(self, size=None):
+        L = [self._line[self._line_offset:]]
+        self._done()
+        if size is None:
+            return L + self._file.readlines()
+        else:
+            return L + self._file.readlines(size)
+
+def test():
+    """Test this module.
+
+    A hodge podge of tests collected here, because they have too many
+    external dependencies for the regular test suite.
+    """
+
+    import sys
+    import getopt
+    opts, args = getopt.getopt(sys.argv[1:], 'd')
+    dl = 0
+    for o, a in opts:
+        if o == '-d': dl = dl + 1
+    host = 'www.python.org'
+    selector = '/'
+    if args[0:]: host = args[0]
+    if args[1:]: selector = args[1]
+    h = HTTP()
+    h.set_debuglevel(dl)
+    h.connect(host)
+    h.putrequest('GET', selector)
+    h.endheaders()
+    status, reason, headers = h.getreply()
+    print 'status =', status
+    print 'reason =', reason
+    print "read", len(h.getfile().read())
+    print
+    if headers:
+        for header in headers.headers: print header.strip()
+    print
+
+    # minimal test that code to extract host from url works
+    class HTTP11(HTTP):
+        _http_vsn = 11
+        _http_vsn_str = 'HTTP/1.1'
+
+    h = HTTP11('www.python.org')
+    h.putrequest('GET', 'http://www.python.org/~jeremy/')
+    h.endheaders()
+    h.getreply()
+    h.close()
+
+    if hasattr(socket, 'ssl'):
+
+        for host, selector in (('sourceforge.net', '/projects/python'),
+                               ):
+            print "https://%s%s" % (host, selector)
+            hs = HTTPS()
+            hs.set_debuglevel(dl)
+            hs.connect(host)
+            hs.putrequest('GET', selector)
+            hs.endheaders()
+            status, reason, headers = hs.getreply()
+            print 'status =', status
+            print 'reason =', reason
+            print "read", len(hs.getfile().read())
+            print
+            if headers:
+                for header in headers.headers: print header.strip()
+            print
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/ihooks.py b/lib-python/2.2/ihooks.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/ihooks.py
@@ -0,0 +1,511 @@
+"""Import hook support.
+
+Consistent use of this module will make it possible to change the
+different mechanisms involved in loading modules independently.
+
+While the built-in module imp exports interfaces to the built-in
+module searching and loading algorithm, and it is possible to replace
+the built-in function __import__ in order to change the semantics of
+the import statement, until now it has been difficult to combine the
+effect of different __import__ hacks, like loading modules from URLs
+by rimport.py, or restricted execution by rexec.py.
+
+This module defines three new concepts:
+
+1) A "file system hooks" class provides an interface to a filesystem.
+
+One hooks class is defined (Hooks), which uses the interface provided
+by standard modules os and os.path.  It should be used as the base
+class for other hooks classes.
+
+2) A "module loader" class provides an interface to to search for a
+module in a search path and to load it.  It defines a method which
+searches for a module in a single directory; by overriding this method
+one can redefine the details of the search.  If the directory is None,
+built-in and frozen modules are searched instead.
+
+Two module loader class are defined, both implementing the search
+strategy used by the built-in __import__ function: ModuleLoader uses
+the imp module's find_module interface, while HookableModuleLoader
+uses a file system hooks class to interact with the file system.  Both
+use the imp module's load_* interfaces to actually load the module.
+
+3) A "module importer" class provides an interface to import a
+module, as well as interfaces to reload and unload a module.  It also
+provides interfaces to install and uninstall itself instead of the
+default __import__ and reload (and unload) functions.
+
+One module importer class is defined (ModuleImporter), which uses a
+module loader instance passed in (by default HookableModuleLoader is
+instantiated).
+
+The classes defined here should be used as base classes for extended
+functionality along those lines.
+
+If a module importer class supports dotted names, its import_module()
+must return a different value depending on whether it is called on
+behalf of a "from ... import ..." statement or not.  (This is caused
+by the way the __import__ hook is used by the Python interpreter.)  It
+would also do wise to install a different version of reload().
+
+"""
+
+
+import __builtin__
+import imp
+import os
+import sys
+
+__all__ = ["BasicModuleLoader","Hooks","ModuleLoader","FancyModuleLoader",
+           "BasicModuleImporter","ModuleImporter","install","uninstall"]
+
+VERBOSE = 0
+
+
+from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
+from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
+BUILTIN_MODULE = C_BUILTIN
+FROZEN_MODULE = PY_FROZEN
+
+
+class _Verbose:
+
+    def __init__(self, verbose = VERBOSE):
+        self.verbose = verbose
+
+    def get_verbose(self):
+        return self.verbose
+
+    def set_verbose(self, verbose):
+        self.verbose = verbose
+
+    # XXX The following is an experimental interface
+
+    def note(self, *args):
+        if self.verbose:
+            apply(self.message, args)
+
+    def message(self, format, *args):
+        if args:
+            print format%args
+        else:
+            print format
+
+
+class BasicModuleLoader(_Verbose):
+
+    """Basic module loader.
+
+    This provides the same functionality as built-in import.  It
+    doesn't deal with checking sys.modules -- all it provides is
+    find_module() and a load_module(), as well as find_module_in_dir()
+    which searches just one directory, and can be overridden by a
+    derived class to change the module search algorithm when the basic
+    dependency on sys.path is unchanged.
+
+    The interface is a little more convenient than imp's:
+    find_module(name, [path]) returns None or 'stuff', and
+    load_module(name, stuff) loads the module.
+
+    """
+
+    def find_module(self, name, path = None):
+        if path is None:
+            path = [None] + self.default_path()
+        for dir in path:
+            stuff = self.find_module_in_dir(name, dir)
+            if stuff: return stuff
+        return None
+
+    def default_path(self):
+        return sys.path
+
+    def find_module_in_dir(self, name, dir):
+        if dir is None:
+            return self.find_builtin_module(name)
+        else:
+            try:
+                return imp.find_module(name, [dir])
+            except ImportError:
+                return None
+
+    def find_builtin_module(self, name):
+        # XXX frozen packages?
+        if imp.is_builtin(name):
+            return None, '', ('', '', BUILTIN_MODULE)
+        if imp.is_frozen(name):
+            return None, '', ('', '', FROZEN_MODULE)
+        return None
+
+    def load_module(self, name, stuff):
+        file, filename, info = stuff
+        try:
+            return imp.load_module(name, file, filename, info)
+        finally:
+            if file: file.close()
+
+
+class Hooks(_Verbose):
+
+    """Hooks into the filesystem and interpreter.
+
+    By deriving a subclass you can redefine your filesystem interface,
+    e.g. to merge it with the URL space.
+
+    This base class behaves just like the native filesystem.
+
+    """
+
+    # imp interface
+    def get_suffixes(self): return imp.get_suffixes()
+    def new_module(self, name): return imp.new_module(name)
+    def is_builtin(self, name): return imp.is_builtin(name)
+    def init_builtin(self, name): return imp.init_builtin(name)
+    def is_frozen(self, name): return imp.is_frozen(name)
+    def init_frozen(self, name): return imp.init_frozen(name)
+    def get_frozen_object(self, name): return imp.get_frozen_object(name)
+    def load_source(self, name, filename, file=None):
+        return imp.load_source(name, filename, file)
+    def load_compiled(self, name, filename, file=None):
+        return imp.load_compiled(name, filename, file)
+    def load_dynamic(self, name, filename, file=None):
+        return imp.load_dynamic(name, filename, file)
+    def load_package(self, name, filename, file=None):
+        return imp.load_module(name, file, filename, ("", "", PKG_DIRECTORY))
+
+    def add_module(self, name):
+        d = self.modules_dict()
+        if d.has_key(name): return d[name]
+        d[name] = m = self.new_module(name)
+        return m
+
+    # sys interface
+    def modules_dict(self): return sys.modules
+    def default_path(self): return sys.path
+
+    def path_split(self, x): return os.path.split(x)
+    def path_join(self, x, y): return os.path.join(x, y)
+    def path_isabs(self, x): return os.path.isabs(x)
+    # etc.
+
+    def path_exists(self, x): return os.path.exists(x)
+    def path_isdir(self, x): return os.path.isdir(x)
+    def path_isfile(self, x): return os.path.isfile(x)
+    def path_islink(self, x): return os.path.islink(x)
+    # etc.
+
+    def openfile(self, *x): return apply(open, x)
+    openfile_error = IOError
+    def listdir(self, x): return os.listdir(x)
+    listdir_error = os.error
+    # etc.
+
+
+class ModuleLoader(BasicModuleLoader):
+
+    """Default module loader; uses file system hooks.
+
+    By defining suitable hooks, you might be able to load modules from
+    other sources than the file system, e.g. from compressed or
+    encrypted files, tar files or (if you're brave!) URLs.
+
+    """
+
+    def __init__(self, hooks = None, verbose = VERBOSE):
+        BasicModuleLoader.__init__(self, verbose)
+        self.hooks = hooks or Hooks(verbose)
+
+    def default_path(self):
+        return self.hooks.default_path()
+
+    def modules_dict(self):
+        return self.hooks.modules_dict()
+
+    def get_hooks(self):
+        return self.hooks
+
+    def set_hooks(self, hooks):
+        self.hooks = hooks
+
+    def find_builtin_module(self, name):
+        # XXX frozen packages?
+        if self.hooks.is_builtin(name):
+            return None, '', ('', '', BUILTIN_MODULE)
+        if self.hooks.is_frozen(name):
+            return None, '', ('', '', FROZEN_MODULE)
+        return None
+
+    def find_module_in_dir(self, name, dir, allow_packages=1):
+        if dir is None:
+            return self.find_builtin_module(name)
+        if allow_packages:
+            fullname = self.hooks.path_join(dir, name)
+            if self.hooks.path_isdir(fullname):
+                stuff = self.find_module_in_dir("__init__", fullname, 0)
+                if stuff:
+                    file = stuff[0]
+                    if file: file.close()
+                    return None, fullname, ('', '', PKG_DIRECTORY)
+        for info in self.hooks.get_suffixes():
+            suff, mode, type = info
+            fullname = self.hooks.path_join(dir, name+suff)
+            try:
+                fp = self.hooks.openfile(fullname, mode)
+                return fp, fullname, info
+            except self.hooks.openfile_error:
+                pass
+        return None
+
+    def load_module(self, name, stuff):
+        file, filename, info = stuff
+        (suff, mode, type) = info
+        try:
+            if type == BUILTIN_MODULE:
+                return self.hooks.init_builtin(name)
+            if type == FROZEN_MODULE:
+                return self.hooks.init_frozen(name)
+            if type == C_EXTENSION:
+                m = self.hooks.load_dynamic(name, filename, file)
+            elif type == PY_SOURCE:
+                m = self.hooks.load_source(name, filename, file)
+            elif type == PY_COMPILED:
+                m = self.hooks.load_compiled(name, filename, file)
+            elif type == PKG_DIRECTORY:
+                m = self.hooks.load_package(name, filename, file)
+            else:
+                raise ImportError, "Unrecognized module type (%s) for %s" % \
+                      (`type`, name)
+        finally:
+            if file: file.close()
+        m.__file__ = filename
+        return m
+
+
+class FancyModuleLoader(ModuleLoader):
+
+    """Fancy module loader -- parses and execs the code itself."""
+
+    def load_module(self, name, stuff):
+        file, filename, (suff, mode, type) = stuff
+        realfilename = filename
+        path = None
+
+        if type == PKG_DIRECTORY:
+            initstuff = self.find_module_in_dir("__init__", filename, 0)
+            if not initstuff:
+                raise ImportError, "No __init__ module in package %s" % name
+            initfile, initfilename, initinfo = initstuff
+            initsuff, initmode, inittype = initinfo
+            if inittype not in (PY_COMPILED, PY_SOURCE):
+                if initfile: initfile.close()
+                raise ImportError, \
+                    "Bad type (%s) for __init__ module in package %s" % (
+                    `inittype`, name)
+            path = [filename]
+            file = initfile
+            realfilename = initfilename
+            type = inittype
+
+        if type == FROZEN_MODULE:
+            code = self.hooks.get_frozen_object(name)
+        elif type == PY_COMPILED:
+            import marshal
+            file.seek(8)
+            code = marshal.load(file)
+        elif type == PY_SOURCE:
+            data = file.read()
+            code = compile(data, realfilename, 'exec')
+        else:
+            return ModuleLoader.load_module(self, name, stuff)
+
+        m = self.hooks.add_module(name)
+        if path:
+            m.__path__ = path
+        m.__file__ = filename
+        exec code in m.__dict__
+        return m
+
+
+class BasicModuleImporter(_Verbose):
+
+    """Basic module importer; uses module loader.
+
+    This provides basic import facilities but no package imports.
+
+    """
+
+    def __init__(self, loader = None, verbose = VERBOSE):
+        _Verbose.__init__(self, verbose)
+        self.loader = loader or ModuleLoader(None, verbose)
+        self.modules = self.loader.modules_dict()
+
+    def get_loader(self):
+        return self.loader
+
+    def set_loader(self, loader):
+        self.loader = loader
+
+    def get_hooks(self):
+        return self.loader.get_hooks()
+
+    def set_hooks(self, hooks):
+        return self.loader.set_hooks(hooks)
+
+    def import_module(self, name, globals={}, locals={}, fromlist=[]):
+        if self.modules.has_key(name):
+            return self.modules[name] # Fast path
+        stuff = self.loader.find_module(name)
+        if not stuff:
+            raise ImportError, "No module named %s" % name
+        return self.loader.load_module(name, stuff)
+
+    def reload(self, module, path = None):
+        name = module.__name__
+        stuff = self.loader.find_module(name, path)
+        if not stuff:
+            raise ImportError, "Module %s not found for reload" % name
+        return self.loader.load_module(name, stuff)
+
+    def unload(self, module):
+        del self.modules[module.__name__]
+        # XXX Should this try to clear the module's namespace?
+
+    def install(self):
+        self.save_import_module = __builtin__.__import__
+        self.save_reload = __builtin__.reload
+        if not hasattr(__builtin__, 'unload'):
+            __builtin__.unload = None
+        self.save_unload = __builtin__.unload
+        __builtin__.__import__ = self.import_module
+        __builtin__.reload = self.reload
+        __builtin__.unload = self.unload
+
+    def uninstall(self):
+        __builtin__.__import__ = self.save_import_module
+        __builtin__.reload = self.save_reload
+        __builtin__.unload = self.save_unload
+        if not __builtin__.unload:
+            del __builtin__.unload
+
+
+class ModuleImporter(BasicModuleImporter):
+
+    """A module importer that supports packages."""
+
+    def import_module(self, name, globals=None, locals=None, fromlist=None):
+        parent = self.determine_parent(globals)
+        q, tail = self.find_head_package(parent, name)
+        m = self.load_tail(q, tail)
+        if not fromlist:
+            return q
+        if hasattr(m, "__path__"):
+            self.ensure_fromlist(m, fromlist)
+        return m
+
+    def determine_parent(self, globals):
+        if not globals or not globals.has_key("__name__"):
+            return None
+        pname = globals['__name__']
+        if globals.has_key("__path__"):
+            parent = self.modules[pname]
+            assert globals is parent.__dict__
+            return parent
+        if '.' in pname:
+            i = pname.rfind('.')
+            pname = pname[:i]
+            parent = self.modules[pname]
+            assert parent.__name__ == pname
+            return parent
+        return None
+
+    def find_head_package(self, parent, name):
+        if '.' in name:
+            i = name.find('.')
+            head = name[:i]
+            tail = name[i+1:]
+        else:
+            head = name
+            tail = ""
+        if parent:
+            qname = "%s.%s" % (parent.__name__, head)
+        else:
+            qname = head
+        q = self.import_it(head, qname, parent)
+        if q: return q, tail
+        if parent:
+            qname = head
+            parent = None
+            q = self.import_it(head, qname, parent)
+            if q: return q, tail
+        raise ImportError, "No module named " + qname
+
+    def load_tail(self, q, tail):
+        m = q
+        while tail:
+            i = tail.find('.')
+            if i < 0: i = len(tail)
+            head, tail = tail[:i], tail[i+1:]
+            mname = "%s.%s" % (m.__name__, head)
+            m = self.import_it(head, mname, m)
+            if not m:
+                raise ImportError, "No module named " + mname
+        return m
+
+    def ensure_fromlist(self, m, fromlist, recursive=0):
+        for sub in fromlist:
+            if sub == "*":
+                if not recursive:
+                    try:
+                        all = m.__all__
+                    except AttributeError:
+                        pass
+                    else:
+                        self.ensure_fromlist(m, all, 1)
+                continue
+            if sub != "*" and not hasattr(m, sub):
+                subname = "%s.%s" % (m.__name__, sub)
+                submod = self.import_it(sub, subname, m)
+                if not submod:
+                    raise ImportError, "No module named " + subname
+
+    def import_it(self, partname, fqname, parent, force_load=0):
+        if not partname:
+            raise ValueError, "Empty module name"
+        if not force_load:
+            try:
+                return self.modules[fqname]
+            except KeyError:
+                pass
+        try:
+            path = parent and parent.__path__
+        except AttributeError:
+            return None
+        stuff = self.loader.find_module(partname, path)
+        if not stuff:
+            return None
+        m = self.loader.load_module(fqname, stuff)
+        if parent:
+            setattr(parent, partname, m)
+        return m
+
+    def reload(self, module):
+        name = module.__name__
+        if '.' not in name:
+            return self.import_it(name, name, None, force_load=1)
+        i = name.rfind('.')
+        pname = name[:i]
+        parent = self.modules[pname]
+        return self.import_it(name[i+1:], name, parent, force_load=1)
+
+
+default_importer = None
+current_importer = None
+
+def install(importer = None):
+    global current_importer
+    current_importer = importer or default_importer or ModuleImporter()
+    current_importer.install()
+
+def uninstall():
+    global current_importer
+    current_importer.uninstall()
diff --git a/lib-python/2.2/imaplib.py b/lib-python/2.2/imaplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/imaplib.py
@@ -0,0 +1,1208 @@
+"""IMAP4 client.
+
+Based on RFC 2060.
+
+Public class:           IMAP4
+Public variable:        Debug
+Public functions:       Internaldate2tuple
+                        Int2AP
+                        ParseFlags
+                        Time2Internaldate
+"""
+
+# Author: Piers Lauder <piers at cs.su.oz.au> December 1997.
+#
+# Authentication code contributed by Donn Cave <donn at u.washington.edu> June 1998.
+# String method conversion by ESR, February 2001.
+# GET/SETACL contributed by Anthony Baxter <anthony at interlink.com.au> April 2001.
+
+__version__ = "2.49"
+
+import binascii, re, socket, time, random, sys
+
+__all__ = ["IMAP4", "Internaldate2tuple",
+           "Int2AP", "ParseFlags", "Time2Internaldate"]
+
#       Globals

CRLF = '\r\n'
Debug = 0
IMAP4_PORT = 143
AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first

#       Commands

# Maps each IMAP4 command name to the connection states in which it may
# legally be issued (checked by IMAP4._command).
Commands = {
        # name            valid states
        'APPEND':       ('AUTH', 'SELECTED'),
        'AUTHENTICATE': ('NONAUTH',),
        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'CHECK':        ('SELECTED',),
        'CLOSE':        ('SELECTED',),
        'COPY':         ('SELECTED',),
        'CREATE':       ('AUTH', 'SELECTED'),
        'DELETE':       ('AUTH', 'SELECTED'),
        'EXAMINE':      ('AUTH', 'SELECTED'),
        'EXPUNGE':      ('SELECTED',),
        'FETCH':        ('SELECTED',),
        'GETACL':       ('AUTH', 'SELECTED'),
        'LIST':         ('AUTH', 'SELECTED'),
        'LOGIN':        ('NONAUTH',),
        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'LSUB':         ('AUTH', 'SELECTED'),
        'NAMESPACE':    ('AUTH', 'SELECTED'),
        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'PARTIAL':      ('SELECTED',),
        'RENAME':       ('AUTH', 'SELECTED'),
        'SEARCH':       ('SELECTED',),
        'SELECT':       ('AUTH', 'SELECTED'),
        'SETACL':       ('AUTH', 'SELECTED'),
        'SORT':         ('SELECTED',),
        'STATUS':       ('AUTH', 'SELECTED'),
        'STORE':        ('SELECTED',),
        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
        'UID':          ('SELECTED',),
        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
        }

#       Patterns to match server responses
# (used via IMAP4._match, which stashes the match object in self.mo)

Continuation = re.compile(r'\+( (?P<data>.*))?')
Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
InternalDate = re.compile(r'.*INTERNALDATE "'
        r'(?P<day>[ 123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
        r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
        r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
        r'"')
Literal = re.compile(r'.*{(?P<size>\d+)}$')
Response_code = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
Untagged_response = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
Untagged_status = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
+
+
+
+class IMAP4:
+
+    """IMAP4 client class.
+
+    Instantiate with: IMAP4([host[, port]])
+
+            host - host's name (default: localhost);
+            port - port number (default: standard IMAP4 port).
+
+    All IMAP4rev1 commands are supported by methods of the same
+    name (in lower-case).
+
+    All arguments to commands are converted to strings, except for
+    AUTHENTICATE, and the last argument to APPEND which is passed as
+    an IMAP4 literal.  If necessary (the string contains any
+    non-printing characters or white-space and isn't enclosed with
+    either parentheses or double quotes) each string is quoted.
+    However, the 'password' argument to the LOGIN command is always
+    quoted.  If you want to avoid having an argument string quoted
+    (eg: the 'flags' argument to STORE) then enclose the string in
+    parentheses (eg: "(\Deleted)").
+
+    Each command returns a tuple: (type, [data, ...]) where 'type'
+    is usually 'OK' or 'NO', and 'data' is either the text from the
+    tagged response, or untagged results from command.
+
+    Errors raise the exception class <instance>.error("<reason>").
+    IMAP4 server errors raise <instance>.abort("<reason>"),
+    which is a sub-class of 'error'. Mailbox status changes
+    from READ-WRITE to READ-ONLY raise the exception class
+    <instance>.readonly("<reason>"), which is a sub-class of 'abort'.
+
+    "error" exceptions imply a program error.
+    "abort" exceptions imply the connection should be reset, and
+            the command re-tried.
+    "readonly" exceptions imply the command should be re-tried.
+
+    Note: to use this module, you must read the RFCs pertaining
+    to the IMAP4 protocol, as the semantics of the arguments to
+    each IMAP4 command are left to the invoker, not to mention
+    the results.
+    """
+
+    class error(Exception): pass    # Logical errors - debug required
+    class abort(error): pass        # Service errors - close and retry
+    class readonly(abort): pass     # Mailbox status changed to READ-ONLY
+
+    mustquote = re.compile(r"[^\w!#$%&'*+,.:;<=>?^`|~-]")
+
    def __init__(self, host = '', port = IMAP4_PORT):
        """Connect to 'host:port', read the greeting and fetch capabilities.

        Raises self.error if the greeting is neither OK nor PREAUTH, if no
        CAPABILITY response arrives, or if no supported IMAP version is
        advertised.
        """
        self.host = host
        self.port = port
        self.debug = Debug
        self.state = 'LOGOUT'
        self.literal = None             # A literal argument to a command
        self.tagged_commands = {}       # Tagged commands awaiting response
        self.untagged_responses = {}    # {typ: [data, ...], ...}
        self.continuation_response = '' # Last continuation response
        self.is_readonly = None         # READ-ONLY desired state
        self.tagnum = 0

        # Open socket to server.

        self.open(host, port)

        # Create unique tag for this session,
        # and compile tagged response matcher.

        self.tagpre = Int2AP(random.randint(0, 31999))
        self.tagre = re.compile(r'(?P<tag>'
                        + self.tagpre
                        + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')

        # Get server welcome message,
        # request and store CAPABILITY response.

        if __debug__:
            if self.debug >= 1:
                _mesg('imaplib version %s' % __version__)
                _mesg('new IMAP4 connection, tag=%s' % self.tagpre)

        # PREAUTH greeting means no login is required.
        self.welcome = self._get_response()
        if self.untagged_responses.has_key('PREAUTH'):
            self.state = 'AUTH'
        elif self.untagged_responses.has_key('OK'):
            self.state = 'NONAUTH'
        else:
            raise self.error(self.welcome)

        cap = 'CAPABILITY'
        self._simple_command(cap)
        if not self.untagged_responses.has_key(cap):
            raise self.error('no CAPABILITY response from server')
        self.capabilities = tuple(self.untagged_responses[cap][-1].upper().split())

        if __debug__:
            if self.debug >= 3:
                _mesg('CAPABILITIES: %s' % `self.capabilities`)

        # Record the newest protocol version both sides understand.
        for version in AllowedVersions:
            if not version in self.capabilities:
                continue
            self.PROTOCOL_VERSION = version
            return

        raise self.error('server not IMAP4 compliant')
+
+
+    def __getattr__(self, attr):
+        #       Allow UPPERCASE variants of IMAP4 command methods.
+        if Commands.has_key(attr):
+            return getattr(self, attr.lower())
+        raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
+
+
+
+    #       Overridable methods
+
+
    def open(self, host, port):
        """Setup connection to remote server on "host:port".
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        # NOTE(review): connects using self.host/self.port (set in
        # __init__); the 'host'/'port' arguments are not used here.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # Buffered file object backing read()/readline().
        self.file = self.sock.makefile('rb')
+
+
    def read(self, size):
        """Read 'size' bytes from remote."""
        # Reads from the buffered file created by open().
        return self.file.read(size)
+
+
    def readline(self):
        """Read line from remote."""
        # Returns '' at EOF; callers treat that as connection loss.
        return self.file.readline()
+
+
    def send(self, data):
        """Send data to remote."""
        # sendall loops until every byte is transmitted.
        self.sock.sendall(data)
+
+    def shutdown(self):
+        """Close I/O established in "open"."""
+        self.file.close()
+        self.sock.close()
+
+
    def socket(self):
        """Return socket instance used to connect to IMAP4 server.

        socket = <instance>.socket()
        """
        # Accessor for callers needing the raw transport (e.g. TLS wrapping).
        return self.sock
+
+
+
+    #       Utility methods
+
+
+    def recent(self):
+        """Return most recent 'RECENT' responses if any exist,
+        else prompt server for an update using the 'NOOP' command.
+
+        (typ, [data]) = <instance>.recent()
+
+        'data' is None if no new messages,
+        else list of RECENT responses, most recent last.
+        """
+        name = 'RECENT'
+        typ, dat = self._untagged_response('OK', [None], name)
+        if dat[-1]:
+            return typ, dat
+        typ, dat = self.noop()  # Prod server for response
+        return self._untagged_response(typ, dat, name)
+
+
+    def response(self, code):
+        """Return data for response 'code' if received, or None.
+
+        Old value for response 'code' is cleared.
+
+        (code, [data]) = <instance>.response(code)
+        """
+        return self._untagged_response(code, [None], code.upper())
+
+
+
+    #       IMAP4 commands
+
+
+    def append(self, mailbox, flags, date_time, message):
+        """Append message to named mailbox.
+
+        (typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
+
+                All args except `message' can be None.
+        """
+        name = 'APPEND'
+        if not mailbox:
+            mailbox = 'INBOX'
+        if flags:
+            if (flags[0],flags[-1]) != ('(',')'):
+                flags = '(%s)' % flags
+        else:
+            flags = None
+        if date_time:
+            date_time = Time2Internaldate(date_time)
+        else:
+            date_time = None
+        self.literal = message
+        return self._simple_command(name, mailbox, flags, date_time)
+
+
+    def authenticate(self, mechanism, authobject):
+        """Authenticate command - requires response processing.
+
+        'mechanism' specifies which authentication mechanism is to
+        be used - it must appear in <instance>.capabilities in the
+        form AUTH=<mechanism>.
+
+        'authobject' must be a callable object:
+
+                data = authobject(response)
+
+        It will be called to process server continuation responses.
+        It should return data that will be encoded and sent to server.
+        It should return None if the client abort response '*' should
+        be sent instead.
+        """
+        mech = mechanism.upper()
+        cap = 'AUTH=%s' % mech
+        if not cap in self.capabilities:
+            raise self.error("Server doesn't allow %s authentication." % mech)
+        self.literal = _Authenticator(authobject).process
+        typ, dat = self._simple_command('AUTHENTICATE', mech)
+        if typ != 'OK':
+            raise self.error(dat[-1])
+        self.state = 'AUTH'
+        return typ, dat
+
+
    def check(self):
        """Checkpoint mailbox on server.

        (typ, [data]) = <instance>.check()
        """
        # Thin wrapper: CHECK produces no untagged data to collect.
        return self._simple_command('CHECK')
+
+
+    def close(self):
+        """Close currently selected mailbox.
+
+        Deleted messages are removed from writable mailbox.
+        This is the recommended command before 'LOGOUT'.
+
+        (typ, [data]) = <instance>.close()
+        """
+        try:
+            typ, dat = self._simple_command('CLOSE')
+        finally:
+            self.state = 'AUTH'
+        return typ, dat
+
+
    def copy(self, message_set, new_mailbox):
        """Copy 'message_set' messages onto end of 'new_mailbox'.

        (typ, [data]) = <instance>.copy(message_set, new_mailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('COPY', message_set, new_mailbox)
+
+
    def create(self, mailbox):
        """Create new mailbox.

        (typ, [data]) = <instance>.create(mailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('CREATE', mailbox)
+
+
    def delete(self, mailbox):
        """Delete old mailbox.

        (typ, [data]) = <instance>.delete(mailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('DELETE', mailbox)
+
+
+    def expunge(self):
+        """Permanently remove deleted items from selected mailbox.
+
+        Generates 'EXPUNGE' response for each deleted message.
+
+        (typ, [data]) = <instance>.expunge()
+
+        'data' is list of 'EXPUNGE'd message numbers in order received.
+        """
+        name = 'EXPUNGE'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
+    def fetch(self, message_set, message_parts):
+        """Fetch (parts of) messages.
+
+        (typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
+
+        'message_parts' should be a string of selected parts
+        enclosed in parentheses, eg: "(UID BODY[TEXT])".
+
+        'data' are tuples of message part envelope and data.
+        """
+        name = 'FETCH'
+        typ, dat = self._simple_command(name, message_set, message_parts)
+        return self._untagged_response(typ, dat, name)
+
+
+    def getacl(self, mailbox):
+        """Get the ACLs for a mailbox.
+
+        (typ, [data]) = <instance>.getacl(mailbox)
+        """
+        typ, dat = self._simple_command('GETACL', mailbox)
+        return self._untagged_response(typ, dat, 'ACL')
+
+
+    def list(self, directory='""', pattern='*'):
+        """List mailbox names in directory matching pattern.
+
+        (typ, [data]) = <instance>.list(directory='""', pattern='*')
+
+        'data' is list of LIST responses.
+        """
+        name = 'LIST'
+        typ, dat = self._simple_command(name, directory, pattern)
+        return self._untagged_response(typ, dat, name)
+
+
+    def login(self, user, password):
+        """Identify client using plaintext password.
+
+        (typ, [data]) = <instance>.login(user, password)
+
+        NB: 'password' will be quoted.
+        """
+        #if not 'AUTH=LOGIN' in self.capabilities:
+        #       raise self.error("Server doesn't allow LOGIN authentication." % mech)
+        typ, dat = self._simple_command('LOGIN', user, self._quote(password))
+        if typ != 'OK':
+            raise self.error(dat[-1])
+        self.state = 'AUTH'
+        return typ, dat
+
+
    def logout(self):
        """Shutdown connection to server.

        (typ, [data]) = <instance>.logout()

        Returns server 'BYE' response.
        """
        self.state = 'LOGOUT'
        # Best-effort: any failure (even a socket error) is converted to a
        # 'NO' result so the connection still gets shut down below.
        try: typ, dat = self._simple_command('LOGOUT')
        except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
        self.shutdown()
        if self.untagged_responses.has_key('BYE'):
            return 'BYE', self.untagged_responses['BYE']
        return typ, dat
+
+
+    def lsub(self, directory='""', pattern='*'):
+        """List 'subscribed' mailbox names in directory matching pattern.
+
+        (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
+
+        'data' are tuples of message part envelope and data.
+        """
+        name = 'LSUB'
+        typ, dat = self._simple_command(name, directory, pattern)
+        return self._untagged_response(typ, dat, name)
+
+
+    def namespace(self):
+        """ Returns IMAP namespaces ala rfc2342
+
+        (typ, [data, ...]) = <instance>.namespace()
+        """
+        name = 'NAMESPACE'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
+    def noop(self):
+        """Send NOOP command.
+
+        (typ, data) = <instance>.noop()
+        """
+        if __debug__:
+            if self.debug >= 3:
+                _dump_ur(self.untagged_responses)
+        return self._simple_command('NOOP')
+
+
+    def partial(self, message_num, message_part, start, length):
+        """Fetch truncated part of a message.
+
+        (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
+
+        'data' is tuple of message part envelope and data.
+        """
+        name = 'PARTIAL'
+        typ, dat = self._simple_command(name, message_num, message_part, start, length)
+        return self._untagged_response(typ, dat, 'FETCH')
+
+
    def rename(self, oldmailbox, newmailbox):
        """Rename old mailbox name to new.

        (typ, data) = <instance>.rename(oldmailbox, newmailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('RENAME', oldmailbox, newmailbox)
+
+
+    def search(self, charset, *criteria):
+        """Search mailbox for matching messages.
+
+        (typ, [data]) = <instance>.search(charset, criterium, ...)
+
+        'data' is space separated list of matching message numbers.
+        """
+        name = 'SEARCH'
+        if charset:
+            typ, dat = apply(self._simple_command, (name, 'CHARSET', charset) + criteria)
+        else:
+            typ, dat = apply(self._simple_command, (name,) + criteria)
+        return self._untagged_response(typ, dat, name)
+
+
    def select(self, mailbox='INBOX', readonly=None):
        """Select a mailbox.

        Flush all untagged responses.

        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=None)

        'data' is count of messages in mailbox ('EXISTS' response).

        Raises self.readonly if the server reports READ-ONLY when
        read-write access was requested.
        """
        # Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY')
        self.untagged_responses = {}    # Flush old responses.
        self.is_readonly = readonly
        # EXAMINE is the read-only variant of SELECT.
        if readonly:
            name = 'EXAMINE'
        else:
            name = 'SELECT'
        typ, dat = self._simple_command(name, mailbox)
        if typ != 'OK':
            self.state = 'AUTH'     # Might have been 'SELECTED'
            return typ, dat
        self.state = 'SELECTED'
        if self.untagged_responses.has_key('READ-ONLY') \
                and not readonly:
            if __debug__:
                if self.debug >= 1:
                    _dump_ur(self.untagged_responses)
            raise self.readonly('%s is not writable' % mailbox)
        return typ, self.untagged_responses.get('EXISTS', [None])
+
+
    def setacl(self, mailbox, who, what):
        """Set a mailbox acl.

        (typ, [data]) = <instance>.setacl(mailbox, who, what)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('SETACL', mailbox, who, what)
+
+
+    def sort(self, sort_criteria, charset, *search_criteria):
+        """IMAP4rev1 extension SORT command.
+
+        (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
+        """
+        name = 'SORT'
+        #if not name in self.capabilities:      # Let the server decide!
+        #       raise self.error('unimplemented extension command: %s' % name)
+        if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
+            sort_criteria = '(%s)' % sort_criteria
+        typ, dat = apply(self._simple_command, (name, sort_criteria, charset) + search_criteria)
+        return self._untagged_response(typ, dat, name)
+
+
+    def status(self, mailbox, names):
+        """Request named status conditions for mailbox.
+
+        (typ, [data]) = <instance>.status(mailbox, names)
+        """
+        name = 'STATUS'
+        #if self.PROTOCOL_VERSION == 'IMAP4':   # Let the server decide!
+        #    raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
+        typ, dat = self._simple_command(name, mailbox, names)
+        return self._untagged_response(typ, dat, name)
+
+
+    def store(self, message_set, command, flags):
+        """Alters flag dispositions for messages in mailbox.
+
+        (typ, [data]) = <instance>.store(message_set, command, flags)
+        """
+        if (flags[0],flags[-1]) != ('(',')'):
+            flags = '(%s)' % flags  # Avoid quoting the flags
+        typ, dat = self._simple_command('STORE', message_set, command, flags)
+        return self._untagged_response(typ, dat, 'FETCH')
+
+
    def subscribe(self, mailbox):
        """Subscribe to new mailbox.

        (typ, [data]) = <instance>.subscribe(mailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('SUBSCRIBE', mailbox)
+
+
+    def uid(self, command, *args):
+        """Execute "command arg ..." with messages identified by UID,
+                rather than message number.
+
+        (typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
+
+        Returns response appropriate to 'command'.
+        """
+        command = command.upper()
+        if not Commands.has_key(command):
+            raise self.error("Unknown IMAP4 UID command: %s" % command)
+        if self.state not in Commands[command]:
+            raise self.error('command %s illegal in state %s'
+                                    % (command, self.state))
+        name = 'UID'
+        typ, dat = apply(self._simple_command, (name, command) + args)
+        if command in ('SEARCH', 'SORT'):
+            name = command
+        else:
+            name = 'FETCH'
+        return self._untagged_response(typ, dat, name)
+
+
    def unsubscribe(self, mailbox):
        """Unsubscribe from old mailbox.

        (typ, [data]) = <instance>.unsubscribe(mailbox)
        """
        # Thin wrapper: result is the tagged completion response.
        return self._simple_command('UNSUBSCRIBE', mailbox)
+
+
+    def xatom(self, name, *args):
+        """Allow simple extension commands
+                notified by server in CAPABILITY response.
+
+        Assumes command is legal in current state.
+
+        (typ, [data]) = <instance>.xatom(name, arg, ...)
+
+        Returns response appropriate to extension command `name'.
+        """
+        name = name.upper()
+        #if not name in self.capabilities:      # Let the server decide!
+        #    raise self.error('unknown extension command: %s' % name)
+        if not Commands.has_key(name):
+            Commands[name] = (self.state,)
+        return apply(self._simple_command, (name,) + args)
+
+
+
+    #       Private methods
+
+
+    def _append_untagged(self, typ, dat):
+
+        if dat is None: dat = ''
+        ur = self.untagged_responses
+        if __debug__:
+            if self.debug >= 5:
+                _mesg('untagged_responses[%s] %s += ["%s"]' %
+                        (typ, len(ur.get(typ,'')), dat))
+        if ur.has_key(typ):
+            ur[typ].append(dat)
+        else:
+            ur[typ] = [dat]
+
+
+    def _check_bye(self):
+        bye = self.untagged_responses.get('BYE')
+        if bye:
+            raise self.abort(bye[-1])
+
+
    def _command(self, name, *args):
        # Send command 'name' with 'args' to the server and return its tag.
        # Handles state validation, argument quoting and literal transmission.

        if self.state not in Commands[name]:
            self.literal = None
            raise self.error(
            'command %s illegal in state %s' % (name, self.state))

        # Discard stale completion results from earlier commands.
        for typ in ('OK', 'NO', 'BAD'):
            if self.untagged_responses.has_key(typ):
                del self.untagged_responses[typ]

        if self.untagged_responses.has_key('READ-ONLY') \
        and not self.is_readonly:
            raise self.readonly('mailbox status changed to READ-ONLY')

        tag = self._new_tag()
        data = '%s %s' % (tag, name)
        for arg in args:
            if arg is None: continue
            data = '%s %s' % (data, self._checkquote(arg))

        literal = self.literal
        if literal is not None:
            self.literal = None
            # A callable literal (a "literator") generates the data on
            # demand for each server continuation; a plain literal's size
            # is announced on the command line instead.
            if type(literal) is type(self._command):
                literator = literal
            else:
                literator = None
                data = '%s {%s}' % (data, len(literal))

        if __debug__:
            if self.debug >= 4:
                _mesg('> %s' % data)
            else:
                _log('> %s' % data)

        try:
            self.send('%s%s' % (data, CRLF))
        except (socket.error, OSError), val:
            raise self.abort('socket error: %s' % val)

        if literal is None:
            return tag

        while 1:
            # Wait for continuation response

            while self._get_response():
                if self.tagged_commands[tag]:   # BAD/NO?
                    return tag

            # Send literal

            if literator:
                literal = literator(self.continuation_response)

            if __debug__:
                if self.debug >= 4:
                    _mesg('write literal size %s' % len(literal))

            try:
                self.send(literal)
                self.send(CRLF)
            except (socket.error, OSError), val:
                raise self.abort('socket error: %s' % val)

            if not literator:
                break

        return tag
+
+
    def _command_complete(self, name, tag):
        # Wait for the tagged completion of command 'name'; re-raise
        # abort/error with the failing command named in the message.
        self._check_bye()
        try:
            typ, data = self._get_tagged_response(tag)
        except self.abort, val:
            raise self.abort('command: %s => %s' % (name, val))
        except self.error, val:
            raise self.error('command: %s => %s' % (name, val))
        self._check_bye()
        if typ == 'BAD':
            raise self.error('%s command error: %s %s' % (name, typ, data))
        return typ, data
+
+
    def _get_response(self):

        # Read response and store.
        #
        # Returns None for continuation responses,
        # otherwise first response line received.
        #
        # Dispatches on response form: tagged completion, untagged ('*')
        # response/status, or continuation ('+').

        resp = self._get_line()

        # Command completion response?

        if self._match(self.tagre, resp):
            tag = self.mo.group('tag')
            if not self.tagged_commands.has_key(tag):
                raise self.abort('unexpected tagged response: %s' % resp)

            typ = self.mo.group('type')
            dat = self.mo.group('data')
            self.tagged_commands[tag] = (typ, [dat])
        else:
            dat2 = None

            # '*' (untagged) responses?

            if not self._match(Untagged_response, resp):
                if self._match(Untagged_status, resp):
                    dat2 = self.mo.group('data2')

            if self.mo is None:
                # Only other possibility is '+' (continuation) response...

                if self._match(Continuation, resp):
                    self.continuation_response = self.mo.group('data')
                    return None     # NB: indicates continuation

                raise self.abort("unexpected response: '%s'" % resp)

            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if dat is None: dat = ''        # Null untagged response
            if dat2: dat = dat + ' ' + dat2

            # Is there a literal to come?

            while self._match(Literal, dat):

                # Read literal direct from connection.

                size = int(self.mo.group('size'))
                if __debug__:
                    if self.debug >= 4:
                        _mesg('read literal size %s' % size)
                data = self.read(size)

                # Store response with literal as tuple

                self._append_untagged(typ, (dat, data))

                # Read trailer - possibly containing another literal

                dat = self._get_line()

            self._append_untagged(typ, dat)

        # Bracketed response information?
        # e.g. '[READ-ONLY]' codes get queued under their own type.

        if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
            self._append_untagged(self.mo.group('type'), self.mo.group('data'))

        if __debug__:
            if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
                _mesg('%s response: %s' % (typ, dat))

        return resp
+
+
    def _get_tagged_response(self, tag):
        # Keep reading server responses until the one tagged 'tag'
        # arrives, then remove and return it as (typ, [data]).

        while 1:
            result = self.tagged_commands[tag]
            if result is not None:
                del self.tagged_commands[tag]
                return result

            # Some have reported "unexpected response" exceptions.
            # Note that ignoring them here causes loops.
            # Instead, send me details of the unexpected response and
            # I'll update the code in `_get_response()'.

            try:
                self._get_response()
            except self.abort, val:
                if __debug__:
                    if self.debug >= 1:
                        print_log()
                raise
+
+
    def _get_line(self):
        # Return one line from the server with its CRLF stripped;
        # raise 'abort' on EOF (connection lost).

        line = self.readline()
        if not line:
            raise self.abort('socket error: EOF')

        # Protocol mandates all lines terminated by CRLF

        line = line[:-2]
        if __debug__:
            if self.debug >= 4:
                _mesg('< %s' % line)
            else:
                _log('< %s' % line)
        return line
+
+
    def _match(self, cre, s):

        # Run compiled regular expression match method on 's'.
        # Save result in self.mo (for group extraction by the caller),
        # return success.

        self.mo = cre.match(s)
        if __debug__:
            if self.mo is not None and self.debug >= 5:
                _mesg("\tmatched r'%s' => %s" % (cre.pattern, `self.mo.groups()`))
        return self.mo is not None
+
+
+    def _new_tag(self):
+
+        tag = '%s%s' % (self.tagpre, self.tagnum)
+        self.tagnum = self.tagnum + 1
+        self.tagged_commands[tag] = None
+        return tag
+
+
+    def _checkquote(self, arg):
+
+        # Must quote command args if non-alphanumeric chars present,
+        # and not already quoted.
+
+        if type(arg) is not type(''):
+            return arg
+        if (arg[0],arg[-1]) in (('(',')'),('"','"')):
+            return arg
+        if self.mustquote.search(arg) is None:
+            return arg
+        return self._quote(arg)
+
+
+    def _quote(self, arg):
+
+        arg = arg.replace('\\', '\\\\')
+        arg = arg.replace('"', '\\"')
+
+        return '"%s"' % arg
+
+
+    def _simple_command(self, name, *args):
+
+        return self._command_complete(name, apply(self._command, (name,) + args))
+
+
+    def _untagged_response(self, typ, dat, name):
+
+        if typ == 'NO':
+            return typ, dat
+        if not self.untagged_responses.has_key(name):
+            return typ, [None]
+        data = self.untagged_responses[name]
+        if __debug__:
+            if self.debug >= 5:
+                _mesg('untagged_responses[%s] => %s' % (name, data))
+        del self.untagged_responses[name]
+        return typ, data
+
+
+
+class _Authenticator:
+
+    """Private class to provide en/decoding
+            for base64-based authentication conversation.
+    """
+
+    def __init__(self, mechinst):
+        self.mech = mechinst    # Callable object to provide/process data
+
+    def process(self, data):
+        ret = self.mech(self.decode(data))
+        if ret is None:
+            return '*'      # Abort conversation
+        return self.encode(ret)
+
+    def encode(self, inp):
+        #
+        #  Invoke binascii.b2a_base64 iteratively with
+        #  short even length buffers, strip the trailing
+        #  line feed from the result and append.  "Even"
+        #  means a number that factors to both 6 and 8,
+        #  so when it gets to the end of the 8-bit input
+        #  there's no partial 6-bit output.
+        #
+        oup = ''
+        while inp:
+            if len(inp) > 48:
+                t = inp[:48]
+                inp = inp[48:]
+            else:
+                t = inp
+                inp = ''
+            e = binascii.b2a_base64(t)
+            if e:
+                oup = oup + e[:-1]
+        return oup
+
+    def decode(self, inp):
+        if not inp:
+            return ''
+        return binascii.a2b_base64(inp)
+
+
+
# Month abbreviation -> month number, for INTERNALDATE parsing.
Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}

def Internaldate2tuple(resp):
    """Convert IMAP4 INTERNALDATE to UT.

    Returns Python time module tuple, or None if 'resp' does not
    match the INTERNALDATE format.
    """

    mo = InternalDate.match(resp)
    if not mo:
        return None

    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT

    zone = (zoneh*60 + zonem)*60
    if zonen == '-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)

    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.
    # NOTE(review): result therefore depends on the local timezone
    # database being consistent for the date in question.

    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)
+
+
+
+def Int2AP(num):
+
+    """Convert integer to A-P string representation."""
+
+    # Base-16 representation with digits A..P (used for IMAP command tags).
+    # Note: returns '' for num == 0.
+    val = ''; AP = 'ABCDEFGHIJKLMNOP'
+    num = int(abs(num))
+    while num:
+        num, mod = divmod(num, 16)
+        val = AP[mod] + val
+    return val
+
+
+
+def ParseFlags(resp):
+
+    """Convert IMAP4 flags response to python tuple."""
+
+    # 'Flags' is a compiled regex defined earlier in this module;
+    # a non-matching response yields the empty tuple.
+    mo = Flags.match(resp)
+    if not mo:
+        return ()
+
+    return tuple(mo.group('flags').split())
+
+
+def Time2Internaldate(date_time):
+
+    """Convert 'date_time' to IMAP4 INTERNALDATE representation.
+
+    Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'
+
+    'date_time' may be an epoch time (int/float), a time tuple or
+    struct_time, or an already-formatted string (returned unchanged).
+    Raises ValueError for any other type.
+    """
+
+    if isinstance(date_time, (int, float)):
+        tt = time.localtime(date_time)
+    elif isinstance(date_time, (tuple, time.struct_time)):
+        tt = date_time
+    elif isinstance(date_time, str):
+        return date_time        # Assume in correct format
+    else:
+        raise ValueError("date_time not of a known type")
+
+    dt = time.strftime("%d-%b-%Y %H:%M:%S", tt)
+    # IMAP uses a space-padded (not zero-padded) day of month.
+    if dt[0] == '0':
+        dt = ' ' + dt[1:]
+    if time.daylight and tt[-1]:
+        zone = -time.altzone
+    else:
+        zone = -time.timezone
+    return '"' + dt + " %+03d%02d" % divmod(zone/60, 60) + '"'
+
+
+
+if __debug__:
+
+    # Debug-only helpers; this whole suite disappears under 'python -O'.
+
+    def _mesg(s, secs=None):
+        # Write a timestamped ('MM:SS.hh') debug line to stderr.
+        if secs is None:
+            secs = time.time()
+        tm = time.strftime('%M:%S', time.localtime(secs))
+        sys.stderr.write('  %s.%02d %s\n' % (tm, (secs*100)%100, s))
+        sys.stderr.flush()
+
+    def _dump_ur(dict):
+        # Dump untagged responses (in `dict').
+        l = dict.items()
+        if not l: return
+        t = '\n\t\t'
+        l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
+        _mesg('untagged responses dump:%s%s' % (t, t.join(l)))
+
+    _cmd_log = []           # Last `_cmd_log_len' interactions
+    _cmd_log_len = 10
+
+    def _log(line):
+        # Keep log of last `_cmd_log_len' interactions for debugging.
+        if len(_cmd_log) == _cmd_log_len:
+            del _cmd_log[0]
+        _cmd_log.append((time.time(), line))
+
+    def print_log():
+        # Replay the retained interaction log through _mesg.
+        _mesg('last %d IMAP4 interactions:' % len(_cmd_log))
+        for secs,line in _cmd_log:
+            _mesg(line, secs)
+
+
+
+if __name__ == '__main__':
+
+    import getopt, getpass
+
+    try:
+        optlist, args = getopt.getopt(sys.argv[1:], 'd:')
+    except getopt.error, val:
+        pass
+
+    for opt,val in optlist:
+        if opt == '-d':
+            Debug = int(val)
+
+    if not args: args = ('',)
+
+    host = args[0]
+
+    USER = getpass.getuser()
+    PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))
+
+    test_mesg = 'From: %(user)s at localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':CRLF}
+    test_seq1 = (
+    ('login', (USER, PASSWD)),
+    ('create', ('/tmp/xxx 1',)),
+    ('rename', ('/tmp/xxx 1', '/tmp/yyy')),
+    ('CREATE', ('/tmp/yyz 2',)),
+    ('append', ('/tmp/yyz 2', None, None, test_mesg)),
+    ('list', ('/tmp', 'yy*')),
+    ('select', ('/tmp/yyz 2',)),
+    ('search', (None, 'SUBJECT', 'test')),
+    ('partial', ('1', 'RFC822', 1, 1024)),
+    ('store', ('1', 'FLAGS', '(\Deleted)')),
+    ('namespace', ()),
+    ('expunge', ()),
+    ('recent', ()),
+    ('close', ()),
+    )
+
+    test_seq2 = (
+    ('select', ()),
+    ('response',('UIDVALIDITY',)),
+    ('uid', ('SEARCH', 'ALL')),
+    ('response', ('EXISTS',)),
+    ('append', (None, None, None, test_mesg)),
+    ('recent', ()),
+    ('logout', ()),
+    )
+
+    def run(cmd, args):
+        _mesg('%s %s' % (cmd, args))
+        typ, dat = apply(getattr(M, cmd), args)
+        _mesg('%s => %s %s' % (cmd, typ, dat))
+        return dat
+
+    try:
+        M = IMAP4(host)
+        _mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
+        _mesg('CAPABILITIES = %s' % `M.capabilities`)
+
+        for cmd,args in test_seq1:
+            run(cmd, args)
+
+        for ml in run('list', ('/tmp/', 'yy%')):
+            mo = re.match(r'.*"([^"]+)"$', ml)
+            if mo: path = mo.group(1)
+            else: path = ml.split()[-1]
+            run('delete', (path,))
+
+        for cmd,args in test_seq2:
+            dat = run(cmd, args)
+
+            if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
+                continue
+
+            uid = dat[-1].split()
+            if not uid: continue
+            run('uid', ('FETCH', '%s' % uid[-1],
+                    '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))
+
+        print '\nAll tests OK.'
+
+    except:
+        print '\nTests failed.'
+
+        if not Debug:
+            print '''
+If you would like to see debugging output,
+try: %s -d5
+''' % sys.argv[0]
+
+        raise
diff --git a/lib-python/2.2/imghdr.py b/lib-python/2.2/imghdr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/imghdr.py
@@ -0,0 +1,154 @@
+"""Recognize image file formats based on their first few bytes."""
+
+__all__ = ["what"]
+
+#-------------------------#
+# Recognize image headers #
+#-------------------------#
+
+def what(file, h=None):
+    """Return the image type of 'file' (a filename or file object), or None.
+
+    'h' may be the first 32 bytes, pre-read by the caller; when given,
+    no I/O is performed here.
+    """
+    if h is None:
+        if type(file) == type(''):
+            f = open(file, 'rb')
+            h = f.read(32)
+        else:
+            # A file object: peek at the header, then restore the position.
+            location = file.tell()
+            h = file.read(32)
+            file.seek(location)
+            f = None
+    else:
+        f = None
+    try:
+        # Each probe gets the header bytes and (possibly) the open file;
+        # the first non-false result names the format.
+        for tf in tests:
+            res = tf(h, f)
+            if res:
+                return res
+    finally:
+        # Only close files opened by this function.
+        if f: f.close()
+    return None
+
+
+#---------------------------------#
+# Subroutines per image file type #
+#---------------------------------#
+
+tests = []
+
+def test_rgb(h, f):
+    """SGI image library"""
+    # SGI images start with the two magic bytes 0x01 0xDA.
+    if h[:2] == '\001\332':
+        return 'rgb'
+
+tests.append(test_rgb)
+
+def test_gif(h, f):
+    """GIF ('87 and '89 variants)"""
+    # Both GIF versions announce themselves in the first six bytes.
+    if h[:6] in ('GIF87a', 'GIF89a'):
+        return 'gif'
+
+tests.append(test_gif)
+
+def test_pbm(h, f):
+    """PBM (portable bitmap)"""
+    # 'P1' (ASCII) or 'P4' (raw) followed by whitespace.
+    if len(h) >= 3 and \
+        h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
+        return 'pbm'
+
+tests.append(test_pbm)
+
+def test_pgm(h, f):
+    """PGM (portable graymap)"""
+    # 'P2' (ASCII) or 'P5' (raw) followed by whitespace.
+    if len(h) >= 3 and \
+        h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
+        return 'pgm'
+
+tests.append(test_pgm)
+
+def test_ppm(h, f):
+    """PPM (portable pixmap)"""
+    # 'P3' (ASCII) or 'P6' (raw) followed by whitespace.
+    if len(h) >= 3 and \
+        h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
+        return 'ppm'
+
+tests.append(test_ppm)
+
+def test_tiff(h, f):
+    """TIFF (can be in Motorola or Intel byte order)"""
+    # 'MM' = big-endian, 'II' = little-endian byte-order mark.
+    if h[:2] in ('MM', 'II'):
+        return 'tiff'
+
+tests.append(test_tiff)
+
+def test_rast(h, f):
+    """Sun raster file"""
+    # Sun raster magic number 0x59A66A95.
+    if h[:4] == '\x59\xA6\x6A\x95':
+        return 'rast'
+
+tests.append(test_rast)
+
+def test_xbm(h, f):
+    """X bitmap (X10 or X11)"""
+    # XBM files are C source beginning with a '#define ' line.
+    s = '#define '
+    if h[:len(s)] == s:
+        return 'xbm'
+
+tests.append(test_xbm)
+
+def test_jpeg(h, f):
+    """JPEG data in JFIF format"""
+    # The 'JFIF' marker sits at offset 6 in the APP0 segment.
+    if h[6:10] == 'JFIF':
+        return 'jpeg'
+
+tests.append(test_jpeg)
+
+def test_bmp(h, f):
+    # Windows/OS2 bitmap: 'BM' signature.
+    if h[:2] == 'BM':
+        return 'bmp'
+
+tests.append(test_bmp)
+
+def test_png(h, f):
+    # Eight-byte PNG signature: \x89 'PNG' CR LF \x1a LF.
+    if h[:8] == "\211PNG\r\n\032\n":
+        return 'png'
+
+tests.append(test_png)
+
+#--------------------#
+# Small test program #
+#--------------------#
+
+def test():
+    # Command-line driver: 'imghdr.py [-r] [file-or-dir ...]';
+    # -r recurses into subdirectories, default argument is '.'.
+    import sys
+    recursive = 0
+    if sys.argv[1:] and sys.argv[1] == '-r':
+        del sys.argv[1:2]
+        recursive = 1
+    try:
+        if sys.argv[1:]:
+            testall(sys.argv[1:], recursive, 1)
+        else:
+            testall(['.'], recursive, 1)
+    except KeyboardInterrupt:
+        sys.stderr.write('\n[Interrupted]\n')
+        sys.exit(1)
+
+def testall(list, recursive, toplevel):
+    # Print the detected image type for every file in 'list'.  Directories
+    # are expanded one level at the top, or fully when 'recursive' is set.
+    # NOTE: parameter 'list' shadows the list() builtin locally.
+    import sys
+    import os
+    for filename in list:
+        if os.path.isdir(filename):
+            print filename + '/:',
+            if recursive or toplevel:
+                print 'recursing down:'
+                import glob
+                names = glob.glob(os.path.join(filename, '*'))
+                testall(names, recursive, 0)
+            else:
+                print '*** directory (use -r) ***'
+        else:
+            print filename + ':',
+            sys.stdout.flush()
+            try:
+                print what(filename)
+            except IOError:
+                print '*** not found ***'
diff --git a/lib-python/2.2/imputil.py b/lib-python/2.2/imputil.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/imputil.py
@@ -0,0 +1,720 @@
+"""
+Import utilities
+
+Exported classes:
+    ImportManager   Manage the import process
+
+    Importer        Base class for replacing standard import functions
+    BuiltinImporter Emulate the import mechanism for builtin and frozen modules
+
+    DynLoadSuffixImporter
+"""
+
+# note: avoid importing non-builtin modules
+import imp                      ### not available in JPython?
+import sys
+import __builtin__
+
+# for the DirectoryImporter
+import struct
+import marshal
+
+__all__ = ["ImportManager","Importer","BuiltinImporter"]
+
+_StringType = type('')
+_ModuleType = type(sys)         ### doesn't work in JPython...
+
+class ImportManager:
+    "Manage the import process."
+
+    def install(self, namespace=vars(__builtin__)):
+        "Install this ImportManager into the specified namespace."
+
+        if isinstance(namespace, _ModuleType):
+            namespace = vars(namespace)
+
+        # Note: we have no notion of "chaining"
+
+        # Record the previous import hook, then install our own.
+        self.previous_importer = namespace['__import__']
+        self.namespace = namespace
+        namespace['__import__'] = self._import_hook
+
+        ### fix this
+        #namespace['reload'] = self._reload_hook
+
+    def uninstall(self):
+        "Restore the previous import mechanism."
+        self.namespace['__import__'] = self.previous_importer
+
+    def add_suffix(self, suffix, importFunc):
+        # Register 'importFunc' to handle files ending in 'suffix'.
+        assert callable(importFunc)
+        self.fs_imp.add_suffix(suffix, importFunc)
+
+    ######################################################################
+    #
+    # PRIVATE METHODS
+    #
+
+    # Subclasses may set this to override the filesystem importer class
+    # instantiated by __init__.
+    clsFilesystemImporter = None
+
+    def __init__(self, fs_imp=None):
+        # we're definitely going to be importing something in the future,
+        # so let's just load the OS-related facilities.
+        if not _os_stat:
+            _os_bootstrap()
+
+        # This is the Importer that we use for grabbing stuff from the
+        # filesystem. It defines one more method (import_from_dir) for our use.
+        if not fs_imp:
+            cls = self.clsFilesystemImporter or _FilesystemImporter
+            fs_imp = cls()
+        self.fs_imp = fs_imp
+
+        # Initialize the set of suffixes that we recognize and import.
+        # The default will import dynamic-load modules first, followed by
+        # .py files (or a .py file's cached bytecode)
+        for desc in imp.get_suffixes():
+            if desc[2] == imp.C_EXTENSION:
+                self.add_suffix(desc[0],
+                                DynLoadSuffixImporter(desc).import_file)
+        self.add_suffix('.py', py_suffix_importer)
+
+    def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
+        """Python calls this hook to locate and import a module."""
+
+        parts = fqname.split('.')
+
+        # determine the context of this import
+        parent = self._determine_import_context(globals)
+
+        # if there is a parent, then its importer should manage this import
+        if parent:
+            module = parent.__importer__._do_import(parent, parts, fromlist)
+            if module:
+                return module
+
+        # has the top module already been imported?
+        try:
+            top_module = sys.modules[parts[0]]
+        except KeyError:
+
+            # look for the topmost module
+            top_module = self._import_top_module(parts[0])
+            if not top_module:
+                # the topmost module wasn't found at all.
+                raise ImportError, 'No module named ' + fqname
+
+        # fast-path simple imports
+        if len(parts) == 1:
+            if not fromlist:
+                return top_module
+
+            if not top_module.__dict__.get('__ispkg__'):
+                # __ispkg__ isn't defined (the module was not imported by us),
+                # or it is zero.
+                #
+                # In the former case, there is no way that we could import
+                # sub-modules that occur in the fromlist (but we can't raise an
+                # error because it may just be names) because we don't know how
+                # to deal with packages that were imported by other systems.
+                #
+                # In the latter case (__ispkg__ == 0), there can't be any sub-
+                # modules present, so we can just return.
+                #
+                # In both cases, since len(parts) == 1, the top_module is also
+                # the "bottom" which is the defined return when a fromlist
+                # exists.
+                return top_module
+
+        importer = top_module.__dict__.get('__importer__')
+        if importer:
+            return importer._finish_import(top_module, parts[1:], fromlist)
+
+        # Grrr, some people "import os.path"
+        if len(parts) == 2 and hasattr(top_module, parts[1]):
+            return top_module
+
+        # If the importer does not exist, then we have to bail. A missing
+        # importer means that something else imported the module, and we have
+        # no knowledge of how to get sub-modules out of the thing.
+        raise ImportError, 'No module named ' + fqname
+
+    def _determine_import_context(self, globals):
+        """Returns the context in which a module should be imported.
+
+        The context could be a loaded (package) module and the imported module
+        will be looked for within that package. The context could also be None,
+        meaning there is no context -- the module should be looked for as a
+        "top-level" module.
+        """
+
+        if not globals or not globals.get('__importer__'):
+            # globals does not refer to one of our modules or packages. That
+            # implies there is no relative import context (as far as we are
+            # concerned), and it should just pick it off the standard path.
+            return None
+
+        # The globals refer to a module or package of ours. It will define
+        # the context of the new import. Get the module/package fqname.
+        parent_fqname = globals['__name__']
+
+        # if a package is performing the import, then return itself (imports
+        # refer to pkg contents)
+        if globals['__ispkg__']:
+            parent = sys.modules[parent_fqname]
+            assert globals is parent.__dict__
+            return parent
+
+        i = parent_fqname.rfind('.')
+
+        # a module outside of a package has no particular import context
+        if i == -1:
+            return None
+
+        # if a module in a package is performing the import, then return the
+        # package (imports refer to siblings)
+        parent_fqname = parent_fqname[:i]
+        parent = sys.modules[parent_fqname]
+        assert parent.__name__ == parent_fqname
+        return parent
+
+    def _import_top_module(self, name):
+        # scan sys.path looking for a location in the filesystem that contains
+        # the module, or an Importer object that can import the module.
+        for item in sys.path:
+            if isinstance(item, _StringType):
+                module = self.fs_imp.import_from_dir(item, name)
+            else:
+                module = item.import_top(name)
+            if module:
+                return module
+        return None
+
+    def _reload_hook(self, module):
+        "Python calls this hook to reload a module."
+
+        # reloading of a module may or may not be possible (depending on the
+        # importer), but at least we can validate that it's ours to reload
+        importer = module.__dict__.get('__importer__')
+        if not importer:
+            ### oops. now what...
+            pass
+
+        # okay. it is using the imputil system, and we must delegate it, but
+        # we don't know what to do (yet)
+        ### we should blast the module dict and do another get_code(). need to
+        ### flesh this out and add proper docco...
+        raise SystemError, "reload not yet implemented"
+
+
+class Importer:
+    "Base class for replacing standard import functions."
+
+    def import_top(self, name):
+        "Import a top-level module."
+        return self._import_one(None, name, name)
+
+    ######################################################################
+    #
+    # PRIVATE METHODS
+    #
+    def _finish_import(self, top, parts, fromlist):
+        # if "a.b.c" was provided, then load the ".b.c" portion down from
+        # below the top-level module.
+        bottom = self._load_tail(top, parts)
+
+        # if the form is "import a.b.c", then return "a"
+        if not fromlist:
+            # no fromlist: return the top of the import tree
+            return top
+
+        # the top module was imported by self.
+        #
+        # this means that the bottom module was also imported by self (just
+        # now, or in the past and we fetched it from sys.modules).
+        #
+        # since we imported/handled the bottom module, this means that we can
+        # also handle its fromlist (and reliably use __ispkg__).
+
+        # if the bottom node is a package, then (potentially) import some
+        # modules.
+        #
+        # note: if it is not a package, then "fromlist" refers to names in
+        #       the bottom module rather than modules.
+        # note: for a mix of names and modules in the fromlist, we will
+        #       import all modules and insert those into the namespace of
+        #       the package module. Python will pick up all fromlist names
+        #       from the bottom (package) module; some will be modules that
+        #       we imported and stored in the namespace, others are expected
+        #       to be present already.
+        if bottom.__ispkg__:
+            self._import_fromlist(bottom, fromlist)
+
+        # if the form is "from a.b import c, d" then return "b"
+        return bottom
+
+    def _import_one(self, parent, modname, fqname):
+        "Import a single module."
+
+        # has the module already been imported?
+        try:
+            return sys.modules[fqname]
+        except KeyError:
+            pass
+
+        # load the module's code, or fetch the module itself
+        result = self.get_code(parent, modname, fqname)
+        if result is None:
+            return None
+
+        module = self._process_result(result, fqname)
+
+        # insert the module into its parent
+        if parent:
+            setattr(parent, modname, module)
+        return module
+
+    def _process_result(self, (ispkg, code, values), fqname):
+        # Turn a get_code() result tuple into a registered module object.
+        # did get_code() return an actual module? (rather than a code object)
+        is_module = isinstance(code, _ModuleType)
+
+        # use the returned module, or create a new one to exec code into
+        if is_module:
+            module = code
+        else:
+            module = imp.new_module(fqname)
+
+        ### record packages a bit differently??
+        module.__importer__ = self
+        module.__ispkg__ = ispkg
+
+        # insert additional values into the module (before executing the code)
+        module.__dict__.update(values)
+
+        # the module is almost ready... make it visible
+        sys.modules[fqname] = module
+
+        # execute the code within the module's namespace
+        if not is_module:
+            exec code in module.__dict__
+
+        # fetch from sys.modules instead of returning module directly.
+        # also make module's __name__ agree with fqname, in case
+        # the "exec code in module.__dict__" played games on us.
+        module = sys.modules[fqname]
+        module.__name__ = fqname
+        return module
+
+    def _load_tail(self, m, parts):
+        """Import the rest of the modules, down from the top-level module.
+
+        Returns the last module in the dotted list of modules.
+        """
+        for part in parts:
+            fqname = "%s.%s" % (m.__name__, part)
+            m = self._import_one(m, part, fqname)
+            if not m:
+                raise ImportError, "No module named " + fqname
+        return m
+
+    def _import_fromlist(self, package, fromlist):
+        'Import any sub-modules in the "from" list.'
+
+        # if '*' is present in the fromlist, then look for the '__all__'
+        # variable to find additional items (modules) to import.
+        if '*' in fromlist:
+            fromlist = list(fromlist) + \
+                       list(package.__dict__.get('__all__', []))
+
+        for sub in fromlist:
+            # if the name is already present, then don't try to import it (it
+            # might not be a module!).
+            if sub != '*' and not hasattr(package, sub):
+                subname = "%s.%s" % (package.__name__, sub)
+                submod = self._import_one(package, sub, subname)
+                if not submod:
+                    raise ImportError, "cannot import name " + subname
+
+    def _do_import(self, parent, parts, fromlist):
+        """Attempt to import the module relative to parent.
+
+        This method is used when the import context specifies that <self>
+        imported the parent module.
+        """
+        top_name = parts[0]
+        top_fqname = parent.__name__ + '.' + top_name
+        top_module = self._import_one(parent, top_name, top_fqname)
+        if not top_module:
+            # this importer and parent could not find the module (relatively)
+            return None
+
+        return self._finish_import(top_module, parts[1:], fromlist)
+
+    ######################################################################
+    #
+    # METHODS TO OVERRIDE
+    #
+    def get_code(self, parent, modname, fqname):
+        """Find and retrieve the code for the given module.
+
+        parent specifies a parent module to define a context for importing. It
+        may be None, indicating no particular context for the search.
+
+        modname specifies a single module (not dotted) within the parent.
+
+        fqname specifies the fully-qualified module name. This is a
+        (potentially) dotted name from the "root" of the module namespace
+        down to the modname.
+        If there is no parent, then modname==fqname.
+
+        This method should return None, or a 3-tuple.
+
+        * If the module was not found, then None should be returned.
+
+        * The first item of the 2- or 3-tuple should be the integer 0 or 1,
+            specifying whether the module that was found is a package or not.
+
+        * The second item is the code object for the module (it will be
+            executed within the new module's namespace). This item can also
+            be a fully-loaded module object (e.g. loaded from a shared lib).
+
+        * The third item is a dictionary of name/value pairs that will be
+            inserted into new module before the code object is executed. This
+            is provided in case the module's code expects certain values (such
+            as where the module was found). When the second item is a module
+            object, then these names/values will be inserted *after* the module
+            has been loaded/initialized.
+        """
+        raise RuntimeError, "get_code not implemented"
+
+
+######################################################################
+#
+# Some handy stuff for the Importers
+#
+
+# byte-compiled file suffix character
+_suffix_char = __debug__ and 'c' or 'o'
+
+# byte-compiled file suffix
+_suffix = '.py' + _suffix_char
+
+def _compile(pathname, timestamp):
+    """Compile (and cache) a Python source file.
+
+    The file specified by <pathname> is compiled to a code object and
+    returned.
+
+    Presuming the appropriate privileges exist, the bytecodes will be
+    saved back to the filesystem for future imports. The source file's
+    modification timestamp must be provided as a Long value.
+    """
+    codestring = open(pathname, 'r').read()
+    if codestring and codestring[-1] != '\n':
+        codestring = codestring + '\n'
+    code = __builtin__.compile(codestring, pathname, 'exec')
+
+    # try to cache the compiled code
+    try:
+        f = open(pathname + _suffix_char, 'wb')
+    except IOError:
+        # Caching is best-effort: no write permission just means no cache.
+        pass
+    else:
+        # Write a zeroed magic number first and patch in the real magic only
+        # after the code has been marshalled, so that a partially-written
+        # file is never mistaken for a valid bytecode cache.
+        f.write('\0\0\0\0')
+        f.write(struct.pack('<I', timestamp))
+        marshal.dump(code, f)
+        f.flush()
+        f.seek(0, 0)
+        f.write(imp.get_magic())
+        f.close()
+
+    return code
+
+_os_stat = _os_path_join = None
+def _os_bootstrap():
+    "Set up 'os' module replacement functions for use during import bootstrap."
+
+    names = sys.builtin_module_names
+
+    join = None
+    if 'posix' in names:
+        sep = '/'
+        from posix import stat
+    elif 'nt' in names:
+        sep = '\\'
+        from nt import stat
+    elif 'dos' in names:
+        sep = '\\'
+        from dos import stat
+    elif 'os2' in names:
+        sep = '\\'
+        from os2 import stat
+    elif 'mac' in names:
+        from mac import stat
+        def join(a, b):
+            if a == '':
+                return b
+            path = s
+            if ':' not in a:
+                a = ':' + a
+            if a[-1:] != ':':
+                a = a + ':'
+            return a + b
+    else:
+        raise ImportError, 'no os specific module found'
+
+    if join is None:
+        def join(a, b, sep=sep):
+            if a == '':
+                return b
+            lastchar = a[-1:]
+            if lastchar == '/' or lastchar == sep:
+                return a + b
+            return a + sep + b
+
+    global _os_stat
+    _os_stat = stat
+
+    global _os_path_join
+    _os_path_join = join
+
+def _os_path_isdir(pathname):
+    "Local replacement for os.path.isdir()."
+    try:
+        s = _os_stat(pathname)
+    except OSError:
+        # Nonexistent/inaccessible path: report None rather than raising.
+        return None
+    # Hand-rolled S_ISDIR check (mode & S_IFMT == S_IFDIR) to avoid
+    # importing the stat module during bootstrap.
+    return (s[0] & 0170000) == 0040000
+
+def _timestamp(pathname):
+    "Return the file modification time as a Long."
+    try:
+        s = _os_stat(pathname)
+    except OSError:
+        return None
+    # s[8] is st_mtime in the stat tuple.
+    return long(s[8])
+
+
+######################################################################
+#
+# Emulate the import mechanism for builtin and frozen modules
+#
+class BuiltinImporter(Importer):
+    # Importer that emulates loading of builtin and frozen modules.
+
+    def get_code(self, parent, modname, fqname):
+        if parent:
+            # these modules definitely do not occur within a package context
+            return None
+
+        # look for the module
+        if imp.is_builtin(modname):
+            type = imp.C_BUILTIN
+        elif imp.is_frozen(modname):
+            type = imp.PY_FROZEN
+        else:
+            # not found
+            return None
+
+        # got it. now load and return it.
+        # Returns (ispkg=0, loaded module object, no extra values).
+        module = imp.load_module(modname, None, modname, ('', '', type))
+        return 0, module, { }
+
+
+######################################################################
+#
+# Internal importer used for importing from the filesystem
+#
+class _FilesystemImporter(Importer):
+    # Importer for modules/packages found on the filesystem; dispatches to
+    # per-suffix import functions registered via add_suffix().
+
+    def __init__(self):
+        # List of (suffix, importFunc) pairs, tried in registration order.
+        self.suffixes = [ ]
+
+    def add_suffix(self, suffix, importFunc):
+        assert callable(importFunc)
+        self.suffixes.append((suffix, importFunc))
+
+    def import_from_dir(self, dir, fqname):
+        # Import top-level module 'fqname' from directory 'dir'.
+        result = self._import_pathname(_os_path_join(dir, fqname), fqname)
+        if result:
+            return self._process_result(result, fqname)
+        return None
+
+    def get_code(self, parent, modname, fqname):
+        # This importer is never used with an empty parent. Its existence is
+        # private to the ImportManager. The ImportManager uses the
+        # import_from_dir() method to import top-level modules/packages.
+        # This method is only used when we look for a module within a package.
+        assert parent
+
+        return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
+                                     fqname)
+
+    def _import_pathname(self, pathname, fqname):
+        # A directory is a package: import its __init__ and mark it with
+        # __pkgdir__/__path__ so sub-imports can find their way back here.
+        if _os_path_isdir(pathname):
+            result = self._import_pathname(_os_path_join(pathname, '__init__'),
+                                           fqname)
+            if result:
+                values = result[2]
+                values['__pkgdir__'] = pathname
+                values['__path__'] = [ pathname ]
+                return 1, result[1], values
+            return None
+
+        # Plain module: try each registered suffix in order.
+        for suffix, importFunc in self.suffixes:
+            filename = pathname + suffix
+            try:
+                finfo = _os_stat(filename)
+            except OSError:
+                pass
+            else:
+                return importFunc(filename, finfo, fqname)
+        return None
+
+######################################################################
+#
+# SUFFIX-BASED IMPORTERS
+#
+
+def py_suffix_importer(filename, finfo, fqname):
+    # Import a '.py' file, using its cached bytecode ('.pyc'/'.pyo') when the
+    # magic number matches and the cache is not older than the source.
+    file = filename[:-3] + _suffix
+    t_py = long(finfo[8])           # source st_mtime
+    t_pyc = _timestamp(file)        # cache st_mtime, or None if absent
+
+    code = None
+    if t_pyc is not None and t_pyc >= t_py:
+        f = open(file, 'rb')
+        if f.read(4) == imp.get_magic():
+            t = struct.unpack('<I', f.read(4))[0]
+            # The embedded timestamp must match the source exactly.
+            if t == t_py:
+                code = marshal.load(f)
+        f.close()
+    if code is None:
+        # No usable cache: compile from source (and re-cache if possible).
+        file = filename
+        code = _compile(file, t_py)
+
+    return 0, code, { '__file__' : file }
+
+class DynLoadSuffixImporter:
+    # Suffix importer for dynamically-loaded (C extension) modules; 'desc'
+    # is a (suffix, mode, type) tuple as produced by imp.get_suffixes().
+
+    def __init__(self, desc):
+        self.desc = desc
+
+    def import_file(self, filename, finfo, fqname):
+        # Returns (ispkg=0, loaded module object, no extra values).
+        fp = open(filename, self.desc[1])
+        module = imp.load_module(fqname, fp, filename, self.desc)
+        module.__file__ = filename
+        return 0, module, { }
+
+
+######################################################################
+
+def _print_importers():
+    # Debug helper: list every entry in sys.modules with the importer
+    # (if any) that loaded it.
+    items = sys.modules.items()
+    items.sort()
+    for name, module in items:
+        if module:
+            print name, module.__dict__.get('__importer__', '-- no importer')
+        else:
+            print name, '-- non-existent module'
+
+def _test_revamp():
+    # Debug helper: install the revamped import mechanism and put a
+    # BuiltinImporter at the front of sys.path.
+    ImportManager().install()
+    sys.path.insert(0, BuiltinImporter())
+
+######################################################################
+
+#
+# TODO
+#
+# from Finn Bock:
+#   type(sys) is not a module in JPython. what to use instead?
+#   imp.C_EXTENSION is not in JPython. same for get_suffixes and new_module
+#
+#   given foo.py of:
+#      import sys
+#      sys.modules['foo'] = sys
+#
+#   ---- standard import mechanism
+#   >>> import foo
+#   >>> foo
+#   <module 'sys' (built-in)>
+#
+#   ---- revamped import mechanism
+#   >>> import imputil
+#   >>> imputil._test_revamp()
+#   >>> import foo
+#   >>> foo
+#   <module 'foo' from 'foo.py'>
+#
+#
+# from MAL:
+#   should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
+#   need __path__ processing
+#   performance
+#   move chaining to a subclass [gjs: it's been nuked]
+#   deinstall should be possible
+#   query mechanism needed: is a specific Importer installed?
+#   py/pyc/pyo piping hooks to filter/process these files
+#   wish list:
+#     distutils importer hooked to list of standard Internet repositories
+#     module->file location mapper to speed FS-based imports
+#     relative imports
+#     keep chaining so that it can play nice with other import hooks
+#
+# from Gordon:
+#   push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
+#
+# from Guido:
+#   need to change sys.* references for rexec environs
+#   need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
+#   watch out for sys.modules[...] is None
+#   flag to force absolute imports? (speeds _determine_import_context and
+#       checking for a relative module)
+#   insert names of archives into sys.path  (see quote below)
+#   note: reload does NOT blast module dict
+#   shift import mechanisms and policies around; provide for hooks, overrides
+#       (see quote below)
+#   add get_source stuff
+#   get_topcode and get_subcode
+#   CRLF handling in _compile
+#   race condition in _compile
+#   refactoring of os.py to deal with _os_bootstrap problem
+#   any special handling to do for importing a module with a SyntaxError?
+#       (e.g. clean up the traceback)
+#   implement "domain" for path-type functionality using pkg namespace
+#       (rather than FS-names like __path__)
+#   don't use the word "private"... maybe "internal"
+#
+#
+# Guido's comments on sys.path caching:
+#
+# We could cache this in a dictionary: the ImportManager can have a
+# cache dict mapping pathnames to importer objects, and a separate
+# method for coming up with an importer given a pathname that's not yet
+# in the cache.  The method should do a stat and/or look at the
+# extension to decide which importer class to use; you can register new
+# importer classes by registering a suffix or a Boolean function, plus a
+# class.  If you register a new importer class, the cache is zapped.
+# The cache is independent from sys.path (but maintained per
+# ImportManager instance) so that rearrangements of sys.path do the
+# right thing.  If a path is dropped from sys.path the corresponding
+# cache entry is simply no longer used.
+#
+# My/Guido's comments on factoring ImportManager and Importer:
+#
+# > However, we still have a tension occurring here:
+# >
+# > 1) implementing policy in ImportManager assists in single-point policy
+# >    changes for app/rexec situations
+# > 2) implementing policy in Importer assists in package-private policy
+# >    changes for normal, operating conditions
+# >
+# > I'll see if I can sort out a way to do this. Maybe the Importer class will
+# > implement the methods (which can be overridden to change policy) by
+# > delegating to ImportManager.
+#
+# Maybe also think about what kind of policies an Importer would be
+# likely to want to change.  I have a feeling that a lot of the code
+# there is actually not so much policy but a *necessity* to get things
+# working given the calling conventions for the __import__ hook: whether
+# to return the head or tail of a dotted name, or when to do the "finish
+# fromlist" stuff.
+#
diff --git a/lib-python/2.2/inspect.py b/lib-python/2.2/inspect.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/inspect.py
@@ -0,0 +1,785 @@
+"""Get useful information from live Python objects.
+
+This module encapsulates the interface provided by the internal special
+attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
+It also provides some help for examining source code and class layout.
+
+Here are some of the useful functions provided by this module:
+
+    ismodule(), isclass(), ismethod(), isfunction(), istraceback(),
+        isframe(), iscode(), isbuiltin(), isroutine() - check object types
+    getmembers() - get members of an object that satisfy a given condition
+
+    getfile(), getsourcefile(), getsource() - find an object's source code
+    getdoc(), getcomments() - get documentation on an object
+    getmodule() - determine the module that an object came from
+    getclasstree() - arrange classes so as to represent their hierarchy
+
+    getargspec(), getargvalues() - get info about function arguments
+    formatargspec(), formatargvalues() - format an argument spec
+    getouterframes(), getinnerframes() - get info about frames
+    currentframe() - get the current stack frame
+    stack(), trace() - get info about frames on the stack or in a traceback
+"""
+
+# This module is in the public domain.  No warranties.
+
+__author__ = 'Ka-Ping Yee <ping at lfw.org>'
+__date__ = '1 Jan 2001'
+
+import sys, os, types, string, re, dis, imp, tokenize
+
+# ----------------------------------------------------------- type-checking
+def ismodule(object):
+    """Return true if the object is a module.
+
+    Module objects provide these attributes:
+        __doc__         documentation string
+        __file__        filename (missing for built-in modules)"""
+    return isinstance(object, types.ModuleType)
+
+def isclass(object):
+    """Return true if the object is a class.
+
+    Class objects provide these attributes:
+        __doc__         documentation string
+        __module__      name of module in which this class was defined"""
+    return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
+
+def ismethod(object):
+    """Return true if the object is an instance method.
+
+    Instance method objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this method was defined
+        im_class        class object in which this method belongs
+        im_func         function object containing implementation of method
+        im_self         instance to which this method is bound, or None"""
+    return isinstance(object, types.MethodType)
+
+def ismethoddescriptor(object):
+    """Return true if the object is a method descriptor.
+
+    But not if ismethod() or isclass() or isfunction() are true.
+
+    This is new in Python 2.2, and, for example, is true of int.__add__.
+    An object passing this test has a __get__ attribute but not a __set__
+    attribute, but beyond that the set of attributes varies.  __name__ is
+    usually sensible, and __doc__ often is.
+
+    Methods implemented via descriptors that also pass one of the other
+    tests return false from the ismethoddescriptor() test, simply because
+    the other tests promise more -- you can, e.g., count on having the
+    im_func attribute (etc) when an object passes ismethod()."""
+    return (hasattr(object, "__get__")
+            and not hasattr(object, "__set__") # else it's a data descriptor
+            and not ismethod(object)           # mutual exclusion
+            and not isfunction(object)
+            and not isclass(object))
+
+def isfunction(object):
+    """Return true if the object is a user-defined function.
+
+    Function objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this function was defined
+        func_code       code object containing compiled function bytecode
+        func_defaults   tuple of any default values for arguments
+        func_doc        (same as __doc__)
+        func_globals    global namespace in which this function was defined
+        func_name       (same as __name__)"""
+    return isinstance(object, types.FunctionType)
+
+def istraceback(object):
+    """Return true if the object is a traceback.
+
+    Traceback objects provide these attributes:
+        tb_frame        frame object at this level
+        tb_lasti        index of last attempted instruction in bytecode
+        tb_lineno       current line number in Python source code
+        tb_next         next inner traceback object (called by this level)"""
+    return isinstance(object, types.TracebackType)
+
+def isframe(object):
+    """Return true if the object is a frame object.
+
+    Frame objects provide these attributes:
+        f_back          next outer frame object (this frame's caller)
+        f_builtins      built-in namespace seen by this frame
+        f_code          code object being executed in this frame
+        f_exc_traceback traceback if raised in this frame, or None
+        f_exc_type      exception type if raised in this frame, or None
+        f_exc_value     exception value if raised in this frame, or None
+        f_globals       global namespace seen by this frame
+        f_lasti         index of last attempted instruction in bytecode
+        f_lineno        current line number in Python source code
+        f_locals        local namespace seen by this frame
+        f_restricted    0 or 1 if frame is in restricted execution mode
+        f_trace         tracing function for this frame, or None"""
+    return isinstance(object, types.FrameType)
+
+def iscode(object):
+    """Return true if the object is a code object.
+
+    Code objects provide these attributes:
+        co_argcount     number of arguments (not including * or ** args)
+        co_code         string of raw compiled bytecode
+        co_consts       tuple of constants used in the bytecode
+        co_filename     name of file in which this code object was created
+        co_firstlineno  number of first line in Python source code
+        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+        co_lnotab       encoded mapping of line numbers to bytecode indices
+        co_name         name with which this code object was defined
+        co_names        tuple of names of local variables
+        co_nlocals      number of local variables
+        co_stacksize    virtual machine stack space required
+        co_varnames     tuple of names of arguments and local variables"""
+    return isinstance(object, types.CodeType)
+
+def isbuiltin(object):
+    """Return true if the object is a built-in function or method.
+
+    Built-in functions and methods provide these attributes:
+        __doc__         documentation string
+        __name__        original name of this function or method
+        __self__        instance to which a method is bound, or None"""
+    return isinstance(object, types.BuiltinFunctionType)
+
+def isroutine(object):
+    """Return true if the object is any kind of function or method."""
+    return (isbuiltin(object)
+            or isfunction(object)
+            or ismethod(object)
+            or ismethoddescriptor(object))
+
+def getmembers(object, predicate=None):
+    """Return all members of an object as (name, value) pairs sorted by name.
+    Optionally, only return members that satisfy a given predicate."""
+    results = []
+    for key in dir(object):
+        value = getattr(object, key)
+        if not predicate or predicate(value):
+            results.append((key, value))
+    results.sort()
+    return results
+
+def classify_class_attrs(cls):
+    """Return list of attribute-descriptor tuples.
+
+    For each name in dir(cls), the return list contains a 4-tuple
+    with these elements:
+
+        0. The name (a string).
+
+        1. The kind of attribute this is, one of these strings:
+               'class method'    created via classmethod()
+               'static method'   created via staticmethod()
+               'property'        created via property()
+               'method'          any other flavor of method
+               'data'            not a method
+
+        2. The class which defined this attribute (a class).
+
+        3. The object as obtained directly from the defining class's
+           __dict__, not via getattr.  This is especially important for
+           data attributes:  C.data is just a data object, but
+           C.__dict__['data'] may be a data descriptor with additional
+           info, like a __doc__ string.
+    """
+
+    mro = getmro(cls)
+    names = dir(cls)
+    result = []
+    for name in names:
+        # Get the object associated with the name.
+        # Getting an obj from the __dict__ sometimes reveals more than
+        # using getattr.  Static and class methods are dramatic examples.
+        if name in cls.__dict__:
+            obj = cls.__dict__[name]
+        else:
+            obj = getattr(cls, name)
+
+        # Figure out where it was defined.
+        homecls = getattr(obj, "__objclass__", None)
+        if homecls is None:
+            # search the dicts.
+            for base in mro:
+                if name in base.__dict__:
+                    homecls = base
+                    break
+
+        # Get the object again, in order to get it from the defining
+        # __dict__ instead of via getattr (if possible).
+        if homecls is not None and name in homecls.__dict__:
+            obj = homecls.__dict__[name]
+
+        # Also get the object via getattr.
+        obj_via_getattr = getattr(cls, name)
+
+        # Classify the object.
+        if isinstance(obj, staticmethod):
+            kind = "static method"
+        elif isinstance(obj, classmethod):
+            kind = "class method"
+        elif isinstance(obj, property):
+            kind = "property"
+        elif (ismethod(obj_via_getattr) or
+              ismethoddescriptor(obj_via_getattr)):
+            kind = "method"
+        else:
+            kind = "data"
+
+        result.append((name, kind, homecls, obj))
+
+    return result
+
+# ----------------------------------------------------------- class helpers
+def _searchbases(cls, accum):
+    # Simulate the "classic class" search order.
+    if cls in accum:
+        return
+    accum.append(cls)
+    for base in cls.__bases__:
+        _searchbases(base, accum)
+
+def getmro(cls):
+    "Return tuple of base classes (including cls) in method resolution order."
+    if hasattr(cls, "__mro__"):
+        return cls.__mro__
+    else:
+        result = []
+        _searchbases(cls, result)
+        return tuple(result)
+
+# -------------------------------------------------- source code extraction
+def indentsize(line):
+    """Return the indent size, in spaces, at the start of a line of text."""
+    expline = string.expandtabs(line)
+    return len(expline) - len(string.lstrip(expline))
+
+def getdoc(object):
+    """Get the documentation string for an object.
+
+    All tabs are expanded to spaces.  To clean up docstrings that are
+    indented to line up with blocks of code, any whitespace than can be
+    uniformly removed from the second line onwards is removed."""
+    try:
+        doc = object.__doc__
+    except AttributeError:
+        return None
+    if not isinstance(doc, (str, unicode)):
+        return None
+    try:
+        lines = string.split(string.expandtabs(doc), '\n')
+    except UnicodeError:
+        return None
+    else:
+        margin = None
+        for line in lines[1:]:
+            content = len(string.lstrip(line))
+            if not content: continue
+            indent = len(line) - content
+            if margin is None: margin = indent
+            else: margin = min(margin, indent)
+        if margin is not None:
+            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
+        return string.join(lines, '\n')
+
+def getfile(object):
+    """Work out which source or compiled file an object was defined in."""
+    if ismodule(object):
+        if hasattr(object, '__file__'):
+            return object.__file__
+        raise TypeError, 'arg is a built-in module'
+    if isclass(object):
+        object = sys.modules.get(object.__module__)
+        if hasattr(object, '__file__'):
+            return object.__file__
+        raise TypeError, 'arg is a built-in class'
+    if ismethod(object):
+        object = object.im_func
+    if isfunction(object):
+        object = object.func_code
+    if istraceback(object):
+        object = object.tb_frame
+    if isframe(object):
+        object = object.f_code
+    if iscode(object):
+        return object.co_filename
+    raise TypeError, 'arg is not a module, class, method, ' \
+                     'function, traceback, frame, or code object'
+
+def getmoduleinfo(path):
+    """Get the module name, suffix, mode, and module type for a given file."""
+    filename = os.path.basename(path)
+    suffixes = map(lambda (suffix, mode, mtype):
+                   (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
+    suffixes.sort() # try longest suffixes first, in case they overlap
+    for neglen, suffix, mode, mtype in suffixes:
+        if filename[neglen:] == suffix:
+            return filename[:neglen], suffix, mode, mtype
+
+def getmodulename(path):
+    """Return the module name for a given file, or None."""
+    info = getmoduleinfo(path)
+    if info: return info[0]
+
+def getsourcefile(object):
+    """Return the Python source file an object was defined in, if it exists."""
+    filename = getfile(object)
+    if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
+        filename = filename[:-4] + '.py'
+    for suffix, mode, kind in imp.get_suffixes():
+        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
+            # Looks like a binary file.  We want to only return a text file.
+            return None
+    if os.path.exists(filename):
+        return filename
+
+def getabsfile(object):
+    """Return an absolute path to the source or compiled file for an object.
+
+    The idea is for each object to have a unique origin, so this routine
+    normalizes the result as much as possible."""
+    return os.path.normcase(
+        os.path.abspath(getsourcefile(object) or getfile(object)))
+
+modulesbyfile = {}
+
+def getmodule(object):
+    """Return the module an object was defined in, or None if not found."""
+    if ismodule(object):
+        return object
+    if isclass(object):
+        return sys.modules.get(object.__module__)
+    try:
+        file = getabsfile(object)
+    except TypeError:
+        return None
+    if modulesbyfile.has_key(file):
+        return sys.modules[modulesbyfile[file]]
+    for module in sys.modules.values():
+        if hasattr(module, '__file__'):
+            modulesbyfile[getabsfile(module)] = module.__name__
+    if modulesbyfile.has_key(file):
+        return sys.modules[modulesbyfile[file]]
+    main = sys.modules['__main__']
+    if hasattr(main, object.__name__):
+        mainobject = getattr(main, object.__name__)
+        if mainobject is object:
+            return main
+    builtin = sys.modules['__builtin__']
+    if hasattr(builtin, object.__name__):
+        builtinobject = getattr(builtin, object.__name__)
+        if builtinobject is object:
+            return builtin
+
+def findsource(object):
+    """Return the entire source file and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a list of all the lines
+    in the file and the line number indexes a line in that list.  An IOError
+    is raised if the source code cannot be retrieved."""
+    try:
+        file = open(getsourcefile(object))
+    except (TypeError, IOError):
+        raise IOError, 'could not get source code'
+    lines = file.readlines()
+    file.close()
+
+    if ismodule(object):
+        return lines, 0
+
+    if isclass(object):
+        name = object.__name__
+        pat = re.compile(r'^\s*class\s*' + name + r'\b')
+        for i in range(len(lines)):
+            if pat.match(lines[i]): return lines, i
+        else: raise IOError, 'could not find class definition'
+
+    if ismethod(object):
+        object = object.im_func
+    if isfunction(object):
+        object = object.func_code
+    if istraceback(object):
+        object = object.tb_frame
+    if isframe(object):
+        object = object.f_code
+    if iscode(object):
+        if not hasattr(object, 'co_firstlineno'):
+            raise IOError, 'could not find function definition'
+        lnum = object.co_firstlineno - 1
+        pat = re.compile(r'^(\s*def\s)|(.*\slambda(:|\s))')
+        while lnum > 0:
+            if pat.match(lines[lnum]): break
+            lnum = lnum - 1
+        return lines, lnum
+    raise IOError, 'could not find code object'
+
+def getcomments(object):
+    """Get lines of comments immediately preceding an object's source code."""
+    try: lines, lnum = findsource(object)
+    except IOError: return None
+
+    if ismodule(object):
+        # Look for a comment block at the top of the file.
+        start = 0
+        if lines and lines[0][:2] == '#!': start = 1
+        while start < len(lines) and string.strip(lines[start]) in ['', '#']:
+            start = start + 1
+        if start < len(lines) and lines[start][:1] == '#':
+            comments = []
+            end = start
+            while end < len(lines) and lines[end][:1] == '#':
+                comments.append(string.expandtabs(lines[end]))
+                end = end + 1
+            return string.join(comments, '')
+
+    # Look for a preceding block of comments at the same indentation.
+    elif lnum > 0:
+        indent = indentsize(lines[lnum])
+        end = lnum - 1
+        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
+            indentsize(lines[end]) == indent:
+            comments = [string.lstrip(string.expandtabs(lines[end]))]
+            if end > 0:
+                end = end - 1
+                comment = string.lstrip(string.expandtabs(lines[end]))
+                while comment[:1] == '#' and indentsize(lines[end]) == indent:
+                    comments[:0] = [comment]
+                    end = end - 1
+                    if end < 0: break
+                    comment = string.lstrip(string.expandtabs(lines[end]))
+            while comments and string.strip(comments[0]) == '#':
+                comments[:1] = []
+            while comments and string.strip(comments[-1]) == '#':
+                comments[-1:] = []
+            return string.join(comments, '')
+
+class ListReader:
+    """Provide a readline() method to return lines from a list of strings."""
+    def __init__(self, lines):
+        self.lines = lines
+        self.index = 0
+
+    def readline(self):
+        i = self.index
+        if i < len(self.lines):
+            self.index = i + 1
+            return self.lines[i]
+        else: return ''
+
+class EndOfBlock(Exception): pass
+
+class BlockFinder:
+    """Provide a tokeneater() method to detect the end of a code block."""
+    def __init__(self):
+        self.indent = 0
+        self.started = 0
+        self.last = 0
+
+    def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
+        if not self.started:
+            if type == tokenize.NAME: self.started = 1
+        elif type == tokenize.NEWLINE:
+            self.last = srow
+        elif type == tokenize.INDENT:
+            self.indent = self.indent + 1
+        elif type == tokenize.DEDENT:
+            self.indent = self.indent - 1
+            if self.indent == 0: raise EndOfBlock, self.last
+        elif type == tokenize.NAME and scol == 0:
+            raise EndOfBlock, self.last
+
+def getblock(lines):
+    """Extract the block of code at the top of the given list of lines."""
+    try:
+        tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater)
+    except EndOfBlock, eob:
+        return lines[:eob.args[0]]
+    # Fooling the indent/dedent logic implies a one-line definition
+    return lines[:1]
+
+def getsourcelines(object):
+    """Return a list of source lines and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a list of the lines
+    corresponding to the object and the line number indicates where in the
+    original source file the first line of code was found.  An IOError is
+    raised if the source code cannot be retrieved."""
+    lines, lnum = findsource(object)
+
+    if ismodule(object): return lines, 0
+    else: return getblock(lines[lnum:]), lnum + 1
+
+def getsource(object):
+    """Return the text of the source code for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a single string.  An
+    IOError is raised if the source code cannot be retrieved."""
+    lines, lnum = getsourcelines(object)
+    return string.join(lines, '')
+
+# --------------------------------------------------- class tree extraction
+def walktree(classes, children, parent):
+    """Recursive helper function for getclasstree()."""
+    results = []
+    classes.sort(lambda a, b: cmp(a.__name__, b.__name__))
+    for c in classes:
+        results.append((c, c.__bases__))
+        if children.has_key(c):
+            results.append(walktree(children[c], children, c))
+    return results
+
+def getclasstree(classes, unique=0):
+    """Arrange the given list of classes into a hierarchy of nested lists.
+
+    Where a nested list appears, it contains classes derived from the class
+    whose entry immediately precedes the list.  Each entry is a 2-tuple
+    containing a class and a tuple of its base classes.  If the 'unique'
+    argument is true, exactly one entry appears in the returned structure
+    for each class in the given list.  Otherwise, classes using multiple
+    inheritance and their descendants will appear multiple times."""
+    children = {}
+    roots = []
+    for c in classes:
+        if c.__bases__:
+            for parent in c.__bases__:
+                if not children.has_key(parent):
+                    children[parent] = []
+                children[parent].append(c)
+                if unique and parent in classes: break
+        elif c not in roots:
+            roots.append(c)
+    for parent in children.keys():
+        if parent not in classes:
+            roots.append(parent)
+    return walktree(roots, children, None)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where 'args' is
+    a list of argument names (possibly containing nested lists), and
+    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
+    if not iscode(co): raise TypeError, 'arg is not a code object'
+
+    code = co.co_code
+    nargs = co.co_argcount
+    names = co.co_varnames
+    args = list(names[:nargs])
+    step = 0
+
+    # The following acrobatics are for anonymous (tuple) arguments.
+    for i in range(nargs):
+        if args[i][:1] in ['', '.']:
+            stack, remain, count = [], [], []
+            while step < len(code):
+                op = ord(code[step])
+                step = step + 1
+                if op >= dis.HAVE_ARGUMENT:
+                    opname = dis.opname[op]
+                    value = ord(code[step]) + ord(code[step+1])*256
+                    step = step + 2
+                    if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
+                        remain.append(value)
+                        count.append(value)
+                    elif opname == 'STORE_FAST':
+                        stack.append(names[value])
+                        remain[-1] = remain[-1] - 1
+                        while remain[-1] == 0:
+                            remain.pop()
+                            size = count.pop()
+                            stack[-size:] = [stack[-size:]]
+                            if not remain: break
+                            remain[-1] = remain[-1] - 1
+                        if not remain: break
+            args[i] = stack[0]
+
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return args, varargs, varkw
+
+def getargspec(func):
+    """Get the names and default values of a function's arguments.
+
+    A tuple of four things is returned: (args, varargs, varkw, defaults).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'defaults' is an n-tuple of the default values of the last n arguments."""
+    if ismethod(func):
+        func = func.im_func
+    if not isfunction(func): raise TypeError, 'arg is not a Python function'
+    args, varargs, varkw = getargs(func.func_code)
+    return args, varargs, varkw, func.func_defaults
+
+def getargvalues(frame):
+    """Get information about arguments passed into a particular frame.
+
+    A tuple of four things is returned: (args, varargs, varkw, locals).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'locals' is the locals dictionary of the given frame."""
+    args, varargs, varkw = getargs(frame.f_code)
+    return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+    if len(seq) == 1:
+        return '(' + seq[0] + ',)'
+    else:
+        return '(' + string.join(seq, ', ') + ')'
+
+def strseq(object, convert, join=joinseq):
+    """Recursively walk a sequence, stringifying each element."""
+    if type(object) in [types.ListType, types.TupleType]:
+        return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
+    else:
+        return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+                  formatarg=str,
+                  formatvarargs=lambda name: '*' + name,
+                  formatvarkw=lambda name: '**' + name,
+                  formatvalue=lambda value: '=' + repr(value),
+                  join=joinseq):
+    """Format an argument spec from the 4 values returned by getargspec.
+
+    The first four arguments are (args, varargs, varkw, defaults).  The
+    other four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings.  The ninth
+    argument is an optional function to format the sequence of arguments."""
+    specs = []
+    if defaults:
+        firstdefault = len(args) - len(defaults)
+    for i in range(len(args)):
+        spec = strseq(args[i], formatarg, join)
+        if defaults and i >= firstdefault:
+            spec = spec + formatvalue(defaults[i - firstdefault])
+        specs.append(spec)
+    if varargs:
+        specs.append(formatvarargs(varargs))
+    if varkw:
+        specs.append(formatvarkw(varkw))
+    return '(' + string.join(specs, ', ') + ')'
+
+def formatargvalues(args, varargs, varkw, locals,
+                    formatarg=str,
+                    formatvarargs=lambda name: '*' + name,
+                    formatvarkw=lambda name: '**' + name,
+                    formatvalue=lambda value: '=' + repr(value),
+                    join=joinseq):
+    """Format an argument spec from the 4 values returned by getargvalues.
+
+    The first four arguments are (args, varargs, varkw, locals).  The
+    next four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings.  The ninth
+    argument is an optional function to format the sequence of arguments."""
+    def convert(name, locals=locals,
+                formatarg=formatarg, formatvalue=formatvalue):
+        return formatarg(name) + formatvalue(locals[name])
+    specs = []
+    for i in range(len(args)):
+        specs.append(strseq(args[i], convert, join))
+    if varargs:
+        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+    if varkw:
+        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+    return '(' + string.join(specs, ', ') + ')'
+
+# -------------------------------------------------- stack frame extraction
+def getframeinfo(frame, context=1):
+    """Get information about a frame or traceback object.
+
+    A tuple of five things is returned: the filename, the line number of
+    the current line, the function name, a list of lines of context from
+    the source code, and the index of the current line within that list.
+    The optional second argument specifies the number of lines of context
+    to return, which are centered around the current line."""
+    if istraceback(frame):
+        frame = frame.tb_frame
+    if not isframe(frame):
+        raise TypeError, 'arg is not a frame or traceback object'
+
+    filename = getsourcefile(frame)
+    lineno = getlineno(frame)
+    if context > 0:
+        start = lineno - 1 - context//2
+        try:
+            lines, lnum = findsource(frame)
+        except IOError:
+            lines = index = None
+        else:
+            start = max(start, 1)
+            start = min(start, len(lines) - context)
+            lines = lines[start:start+context]
+            index = lineno - 1 - start
+    else:
+        lines = index = None
+
+    return (filename, lineno, frame.f_code.co_name, lines, index)
+
+def getlineno(frame):
+    """Get the line number from a frame object, allowing for optimization."""
+    # Written by Marc-André Lemburg; revised by Jim Hugunin and Fredrik Lundh.
+    lineno = frame.f_lineno
+    code = frame.f_code
+    if hasattr(code, 'co_lnotab'):
+        table = code.co_lnotab
+        lineno = code.co_firstlineno
+        addr = 0
+        for i in range(0, len(table), 2):
+            addr = addr + ord(table[i])
+            if addr > frame.f_lasti: break
+            lineno = lineno + ord(table[i+1])
+    return lineno
+
+def getouterframes(frame, context=1):
+    """Get a list of records for a frame and all higher (calling) frames.
+
+    Each record contains a frame object, filename, line number, function
+    name, a list of lines of context, and index within the context."""
+    framelist = []
+    while frame:
+        framelist.append((frame,) + getframeinfo(frame, context))
+        frame = frame.f_back
+    return framelist
+
+def getinnerframes(tb, context=1):
+    """Get a list of records for a traceback's frame and all lower frames.
+
+    Each record contains a frame object, filename, line number, function
+    name, a list of lines of context, and index within the context."""
+    framelist = []
+    while tb:
+        framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
+        tb = tb.tb_next
+    return framelist
+
+def currentframe():
+    """Return the frame object for the caller's stack frame."""
+    try:
+        raise 'catch me'
+    except:
+        return sys.exc_traceback.tb_frame.f_back
+
+if hasattr(sys, '_getframe'): currentframe = sys._getframe
+
+def stack(context=1):
+    """Return a list of records for the stack above the caller's frame."""
+    return getouterframes(currentframe().f_back, context)
+
+def trace(context=1):
+    """Return a list of records for the stack below the current exception."""
+    return getinnerframes(sys.exc_traceback, context)
diff --git a/lib-python/2.2/keyword.py b/lib-python/2.2/keyword.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/keyword.py
@@ -0,0 +1,97 @@
+#! /usr/bin/env python
+
+"""Keywords (from "graminit.c")
+
+This file is automatically generated; please don't muck it up!
+
+To update the symbols in this file, 'cd' to the top directory of
+the python source tree after building the interpreter and run:
+
+    python Lib/keyword.py
+"""
+
+__all__ = ["iskeyword", "kwlist"]
+
+kwlist = [
+#--start keywords--
+        'and',
+        'assert',
+        'break',
+        'class',
+        'continue',
+        'def',
+        'del',
+        'elif',
+        'else',
+        'except',
+        'exec',
+        'finally',
+        'for',
+        'from',
+        'global',
+        'if',
+        'import',
+        'in',
+        'is',
+        'lambda',
+        'not',
+        'or',
+        'pass',
+        'print',
+        'raise',
+        'return',
+        'try',
+        'while',
+        'yield',
+#--end keywords--
+        ]
+
+kwdict = {}
+for keyword in kwlist:
+    kwdict[keyword] = 1
+
+iskeyword = kwdict.has_key
+
+def main():
+    import sys, re
+
+    args = sys.argv[1:]
+    iptfile = args and args[0] or "Python/graminit.c"
+    if len(args) > 1: optfile = args[1]
+    else: optfile = "Lib/keyword.py"
+
+    # scan the source file for keywords
+    fp = open(iptfile)
+    strprog = re.compile('"([^"]+)"')
+    lines = []
+    while 1:
+        line = fp.readline()
+        if not line: break
+        if line.find('{1, "') > -1:
+            match = strprog.search(line)
+            if match:
+                lines.append("        '" + match.group(1) + "',\n")
+    fp.close()
+    lines.sort()
+
+    # load the output skeleton from the target
+    fp = open(optfile)
+    format = fp.readlines()
+    fp.close()
+
+    # insert the lines of keywords
+    try:
+        start = format.index("#--start keywords--\n") + 1
+        end = format.index("#--end keywords--\n")
+        format[start:end] = lines
+    except ValueError:
+        sys.stderr.write("target does not contain format markers\n")
+        sys.exit(1)
+
+    # write the output file
+    fp = open(optfile, 'w')
+    fp.write(''.join(format))
+    fp.close()
+
+if __name__ == "__main__":
+    main()
diff --git a/lib-python/2.2/knee.py b/lib-python/2.2/knee.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/knee.py
@@ -0,0 +1,126 @@
+"""A Python re-implementation of hierarchical module import.
+
+This code is intended to be read, not executed.  However, it does work
+-- all you need to do to enable it is "import knee".
+
+(The name is a pun on the klunkier predecessor of this module, "ni".)
+
+"""
+
+import sys, imp, __builtin__
+
+
+# Replacement for __import__()
+def import_hook(name, globals=None, locals=None, fromlist=None):
+    parent = determine_parent(globals)
+    q, tail = find_head_package(parent, name)
+    m = load_tail(q, tail)
+    if not fromlist:
+        return q
+    if hasattr(m, "__path__"):
+        ensure_fromlist(m, fromlist)
+    return m
+
+def determine_parent(globals):
+    if not globals or  not globals.has_key("__name__"):
+        return None
+    pname = globals['__name__']
+    if globals.has_key("__path__"):
+        parent = sys.modules[pname]
+        assert globals is parent.__dict__
+        return parent
+    if '.' in pname:
+        i = pname.rfind('.')
+        pname = pname[:i]
+        parent = sys.modules[pname]
+        assert parent.__name__ == pname
+        return parent
+    return None
+
+def find_head_package(parent, name):
+    if '.' in name:
+        i = name.find('.')
+        head = name[:i]
+        tail = name[i+1:]
+    else:
+        head = name
+        tail = ""
+    if parent:
+        qname = "%s.%s" % (parent.__name__, head)
+    else:
+        qname = head
+    q = import_module(head, qname, parent)
+    if q: return q, tail
+    if parent:
+        qname = head
+        parent = None
+        q = import_module(head, qname, parent)
+        if q: return q, tail
+    raise ImportError, "No module named " + qname
+
+def load_tail(q, tail):
+    m = q
+    while tail:
+        i = tail.find('.')
+        if i < 0: i = len(tail)
+        head, tail = tail[:i], tail[i+1:]
+        mname = "%s.%s" % (m.__name__, head)
+        m = import_module(head, mname, m)
+        if not m:
+            raise ImportError, "No module named " + mname
+    return m
+
+def ensure_fromlist(m, fromlist, recursive=0):
+    for sub in fromlist:
+        if sub == "*":
+            if not recursive:
+                try:
+                    all = m.__all__
+                except AttributeError:
+                    pass
+                else:
+                    ensure_fromlist(m, all, 1)
+            continue
+        if sub != "*" and not hasattr(m, sub):
+            subname = "%s.%s" % (m.__name__, sub)
+            submod = import_module(sub, subname, m)
+            if not submod:
+                raise ImportError, "No module named " + subname
+
+def import_module(partname, fqname, parent):
+    try:
+        return sys.modules[fqname]
+    except KeyError:
+        pass
+    try:
+        fp, pathname, stuff = imp.find_module(partname,
+                                              parent and parent.__path__)
+    except ImportError:
+        return None
+    try:
+        m = imp.load_module(fqname, fp, pathname, stuff)
+    finally:
+        if fp: fp.close()
+    if parent:
+        setattr(parent, partname, m)
+    return m
+
+
+# Replacement for reload()
+def reload_hook(module):
+    name = module.__name__
+    if '.' not in name:
+        return import_module(name, name, None)
+    i = name.rfind('.')
+    pname = name[:i]
+    parent = sys.modules[pname]
+    return import_module(name[i+1:], name, parent)
+
+
+# Save the original hooks
+original_import = __builtin__.__import__
+original_reload = __builtin__.reload
+
+# Now install our hooks
+__builtin__.__import__ = import_hook
+__builtin__.reload = reload_hook
diff --git a/lib-python/2.2/lib-old/Para.py b/lib-python/2.2/lib-old/Para.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/Para.py
@@ -0,0 +1,343 @@
+# Text formatting abstractions 
+# Note -- this module is obsolete, it's too slow anyway
+
+
+# Oft-used type object
+Int = type(0)
+
+
+# Represent a paragraph.  This is a list of words with associated
+# font and size information, plus indents and justification for the
+# entire paragraph.
+# Once the words have been added to a paragraph, it can be laid out
+# for different line widths.  Once laid out, it can be rendered at
+# different screen locations.  Once rendered, it can be queried
+# for mouse hits, and parts of the text can be highlighted
+class Para:
+	#
+	def __init__(self):
+		self.words = [] # The words
+		self.just = 'l' # Justification: 'l', 'r', 'lr' or 'c'
+		self.indent_left = self.indent_right = self.indent_hang = 0
+		# Final lay-out parameters, may change
+		self.left = self.top = self.right = self.bottom = \
+			self.width = self.height = self.lines = None
+	#
+	# Add a word, computing size information for it.
+	# Words may also be added manually by appending to self.words
+	# Each word should be a 7-tuple:
+	# (font, text, width, space, stretch, ascent, descent)
+	def addword(self, d, font, text, space, stretch):
+		if font is not None:
+			d.setfont(font)
+		width = d.textwidth(text)
+		ascent = d.baseline()
+		descent = d.lineheight() - ascent
+		spw = d.textwidth(' ')
+		space = space * spw
+		stretch = stretch * spw
+		tuple = (font, text, width, space, stretch, ascent, descent)
+		self.words.append(tuple)
+	#
+	# Hooks to begin and end anchors -- insert numbers in the word list!
+	def bgn_anchor(self, id):
+		self.words.append(id)
+	#
+	def end_anchor(self, id):
+		self.words.append(0)
+	#
+	# Return the total length (width) of the text added so far, in pixels
+	def getlength(self):
+		total = 0
+		for word in self.words:
+			if type(word) is not Int:
+				total = total + word[2] + word[3]
+		return total
+	#
+	# Tab to a given position (relative to the current left indent):
+	# remove all stretch, add fixed space up to the new indent.
+	# If the current position is already at the tab stop,
+	# don't add any new space (but still remove the stretch)
+	def tabto(self, tab):
+		total = 0
+		as, de = 1, 0
+		for i in range(len(self.words)):
+			word = self.words[i]
+			if type(word) is Int: continue
+			(fo, te, wi, sp, st, as, de) = word
+			self.words[i] = (fo, te, wi, sp, 0, as, de)
+			total = total + wi + sp
+		if total < tab:
+			self.words.append((None, '', 0, tab-total, 0, as, de))
+	#
+	# Make a hanging tag: tab to hang, increment indent_left by hang,
+	# and reset indent_hang to -hang
+	def makehangingtag(self, hang):
+		self.tabto(hang)
+		self.indent_left = self.indent_left + hang
+		self.indent_hang = -hang
+	#
+	# Decide where the line breaks will be given some screen width
+	def layout(self, linewidth):
+		self.width = linewidth
+		height = 0
+		self.lines = lines = []
+		avail1 = self.width - self.indent_left - self.indent_right
+		avail = avail1 - self.indent_hang
+		words = self.words
+		i = 0
+		n = len(words)
+		lastfont = None
+		while i < n:
+			firstfont = lastfont
+			charcount = 0
+			width = 0
+			stretch = 0
+			ascent = 0
+			descent = 0
+			lsp = 0
+			j = i
+			while i < n:
+				word = words[i]
+				if type(word) is Int:
+					if word > 0 and width >= avail:
+						break
+					i = i+1
+					continue
+				fo, te, wi, sp, st, as, de = word
+				if width + wi > avail and width > 0 and wi > 0:
+					break
+				if fo is not None:
+					lastfont = fo
+					if width == 0:
+						firstfont = fo
+				charcount = charcount + len(te) + (sp > 0)
+				width = width + wi + sp
+				lsp = sp
+				stretch = stretch + st
+				lst = st
+				ascent = max(ascent, as)
+				descent = max(descent, de)
+				i = i+1
+			while i > j and type(words[i-1]) is Int and \
+				words[i-1] > 0: i = i-1
+			width = width - lsp
+			if i < n:
+				stretch = stretch - lst
+			else:
+				stretch = 0
+			tuple = i-j, firstfont, charcount, width, stretch, \
+				ascent, descent
+			lines.append(tuple)
+			height = height + ascent + descent
+			avail = avail1
+		self.height = height
+	#
+	# Call a function for all words in a line
+	def visit(self, wordfunc, anchorfunc):
+		avail1 = self.width - self.indent_left - self.indent_right
+		avail = avail1 - self.indent_hang
+		v = self.top
+		i = 0
+		for tuple in self.lines:
+			wordcount, firstfont, charcount, width, stretch, \
+				ascent, descent = tuple
+			h = self.left + self.indent_left
+			if i == 0: h = h + self.indent_hang
+			extra = 0
+			if self.just == 'r': h = h + avail - width
+			elif self.just == 'c': h = h + (avail - width) / 2
+			elif self.just == 'lr' and stretch > 0:
+				extra = avail - width
+			v2 = v + ascent + descent
+			for j in range(i, i+wordcount):
+				word = self.words[j]
+				if type(word) is Int:
+					ok = anchorfunc(self, tuple, word, \
+							h, v)
+					if ok is not None: return ok
+					continue
+				fo, te, wi, sp, st, as, de = word
+				if extra > 0 and stretch > 0:
+					ex = extra * st / stretch
+					extra = extra - ex
+					stretch = stretch - st
+				else:
+					ex = 0
+				h2 = h + wi + sp + ex
+				ok = wordfunc(self, tuple, word, h, v, \
+					h2, v2, (j==i), (j==i+wordcount-1))
+				if ok is not None: return ok
+				h = h2
+			v = v2
+			i = i + wordcount
+			avail = avail1
+	#
+	# Render a paragraph in "drawing object" d, using the rectangle
+	# given by (left, top, right) with an unspecified bottom.
+	# Return the computed bottom of the text.
+	def render(self, d, left, top, right):
+		if self.width != right-left:
+			self.layout(right-left)
+		self.left = left
+		self.top = top
+		self.right = right
+		self.bottom = self.top + self.height
+		self.anchorid = 0
+		try:
+			self.d = d
+			self.visit(self.__class__._renderword, \
+				   self.__class__._renderanchor)
+		finally:
+			self.d = None
+		return self.bottom
+	#
+	def _renderword(self, tuple, word, h, v, h2, v2, isfirst, islast):
+		if word[0] is not None: self.d.setfont(word[0])
+		baseline = v + tuple[5]
+		self.d.text((h, baseline - word[5]), word[1])
+		if self.anchorid > 0:
+			self.d.line((h, baseline+2), (h2, baseline+2))
+	#
+	def _renderanchor(self, tuple, word, h, v):
+		self.anchorid = word
+	#
+	# Return which anchor(s) was hit by the mouse
+	def hitcheck(self, mouseh, mousev):
+		self.mouseh = mouseh
+		self.mousev = mousev
+		self.anchorid = 0
+		self.hits = []
+		self.visit(self.__class__._hitcheckword, \
+			   self.__class__._hitcheckanchor)
+		return self.hits
+	#
+	def _hitcheckword(self, tuple, word, h, v, h2, v2, isfirst, islast):
+		if self.anchorid > 0 and h <= self.mouseh <= h2 and \
+			v <= self.mousev <= v2:
+			self.hits.append(self.anchorid)
+	#
+	def _hitcheckanchor(self, tuple, word, h, v):
+		self.anchorid = word
+	#
+	# Return whether the given anchor id is present
+	def hasanchor(self, id):
+		return id in self.words or -id in self.words
+	#
+	# Extract the raw text from the word list, substituting one space
+	# for non-empty inter-word space, and terminating with '\n'
+	def extract(self):
+		text = ''
+		for w in self.words:
+			if type(w) is not Int:
+				word = w[1]
+				if w[3]: word = word + ' '
+				text = text + word
+		return text + '\n'
+	#
+	# Return which character position was hit by the mouse, as
+	# an offset in the entire text as returned by extract().
+	# Return None if the mouse was not in this paragraph
+	def whereis(self, d, mouseh, mousev):
+		if mousev < self.top or mousev > self.bottom:
+			return None
+		self.mouseh = mouseh
+		self.mousev = mousev
+		self.lastfont = None
+		self.charcount = 0
+		try:
+			self.d = d
+			return self.visit(self.__class__._whereisword, \
+					  self.__class__._whereisanchor)
+		finally:
+			self.d = None
+	#
+	def _whereisword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
+		fo, te, wi, sp, st, as, de = word
+		if fo is not None: self.lastfont = fo
+		h = h1
+		if isfirst: h1 = 0
+		if islast: h2 = 999999
+		if not (v1 <= self.mousev <= v2 and h1 <= self.mouseh <= h2):
+			self.charcount = self.charcount + len(te) + (sp > 0)
+			return
+		if self.lastfont is not None:
+			self.d.setfont(self.lastfont)
+		cc = 0
+		for c in te:
+			cw = self.d.textwidth(c)
+			if self.mouseh <= h + cw/2:
+				return self.charcount + cc
+			cc = cc+1
+			h = h+cw
+		self.charcount = self.charcount + cc
+		if self.mouseh <= (h+h2) / 2:
+			return self.charcount
+		else:
+			return self.charcount + 1
+	#
+	def _whereisanchor(self, tuple, word, h, v):
+		pass
+	#
+	# Return screen position corresponding to position in paragraph.
+	# Return tuple (h, vtop, vbaseline, vbottom).
+	# This is more or less the inverse of whereis()
+	def screenpos(self, d, pos):
+		if pos < 0:
+			ascent, descent = self.lines[0][5:7]
+			return self.left, self.top, self.top + ascent, \
+				self.top + ascent + descent
+		self.pos = pos
+		self.lastfont = None
+		try:
+			self.d = d
+			ok = self.visit(self.__class__._screenposword, \
+					self.__class__._screenposanchor)
+		finally:
+			self.d = None
+		if ok is None:
+			ascent, descent = self.lines[-1][5:7]
+			ok = self.right, self.bottom - ascent - descent, \
+				self.bottom - descent, self.bottom
+		return ok
+	#
+	def _screenposword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
+		fo, te, wi, sp, st, as, de = word
+		if fo is not None: self.lastfont = fo
+		cc = len(te) + (sp > 0)
+		if self.pos > cc:
+			self.pos = self.pos - cc
+			return
+		if self.pos < cc:
+			self.d.setfont(self.lastfont)
+			h = h1 + self.d.textwidth(te[:self.pos])
+		else:
+			h = h2
+		ascent, descent = tuple[5:7]
+		return h, v1, v1+ascent, v2
+	#
+	def _screenposanchor(self, tuple, word, h, v):
+		pass
+	#
+	# Invert the stretch of text between pos1 and pos2.
+	# If pos1 is None, the beginning is implied;
+	# if pos2 is None, the end is implied.
+	# Undoes its own effect when called again with the same arguments
+	def invert(self, d, pos1, pos2):
+		if pos1 is None:
+			pos1 = self.left, self.top, self.top, self.top
+		else:
+			pos1 = self.screenpos(d, pos1)
+		if pos2 is None:
+			pos2 = self.right, self.bottom,self.bottom,self.bottom
+		else:
+			pos2 = self.screenpos(d, pos2)
+		h1, top1, baseline1, bottom1 = pos1
+		h2, top2, baseline2, bottom2 = pos2
+		if bottom1 <= top2:
+			d.invert((h1, top1), (self.right, bottom1))
+			h1 = self.left
+			if bottom1 < top2:
+				d.invert((h1, bottom1), (self.right, top2))
+			top1, bottom1 = top2, bottom2
+		d.invert((h1, top1), (h2, bottom2))
diff --git a/lib-python/2.2/lib-old/addpack.py b/lib-python/2.2/lib-old/addpack.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/addpack.py
@@ -0,0 +1,67 @@
+# This module provides standard support for "packages".
+#
+# The idea is that large groups of related modules can be placed in
+# their own subdirectory, which can be added to the Python search path
+# in a relatively easy way.
+#
+# The current version takes a package name and searches the Python
+# search path for a directory by that name, and if found adds it to
+# the module search path (sys.path).  It maintains a list of packages
+# that have already been added so adding the same package many times
+# is OK.
+#
+# It is intended to be used in a fairly stylized manner: each module
+# that wants to use a particular package, say 'Foo', is supposed to
+# contain the following code:
+#
+#   from addpack import addpack
+#   addpack('Foo')
+#   <import modules from package Foo>
+#
+# Additional arguments, when present, provide additional places where
+# to look for the package before trying sys.path (these may be either
+# strings or lists/tuples of strings).  Also, if the package name is a
+# full pathname, first the last component is tried in the usual way,
+# then the full pathname is tried last.  If the package name is a
+# *relative* pathname (UNIX: contains a slash but doesn't start with
+# one), then nothing special is done.  The packages "/foo/bar/bletch"
+# and "bletch" are considered the same, but unrelated to "bar/bletch".
+#
+# If the algorithm finds more than one suitable subdirectory, all are
+# added to the search path -- this makes it possible to override part
+# of a package.  The same path will not be added more than once.
+#
+# If no directory is found, ImportError is raised.
+
+_packs = {}				# {pack: [pathname, ...], ...}
+
+def addpack(pack, *locations):
+	import os
+	if os.path.isabs(pack):
+		base = os.path.basename(pack)
+	else:
+		base = pack
+	if _packs.has_key(base):
+		return
+	import sys
+	path = []
+	for loc in _flatten(locations) + sys.path:
+		fn = os.path.join(loc, base)
+		if fn not in path and os.path.isdir(fn):
+			path.append(fn)
+	if pack != base and pack not in path and os.path.isdir(pack):
+		path.append(pack)
+	if not path: raise ImportError, 'package ' + pack + ' not found'
+	_packs[base] = path
+	for fn in path:
+		if fn not in sys.path:
+			sys.path.append(fn)
+
+def _flatten(locations):
+	locs = []
+	for loc in locations:
+		if type(loc) == type(''):
+			locs.append(loc)
+		else:
+			locs = locs + _flatten(loc)
+	return locs
diff --git a/lib-python/2.2/lib-old/cmp.py b/lib-python/2.2/lib-old/cmp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/cmp.py
@@ -0,0 +1,63 @@
+"""Efficiently compare files, boolean outcome only (equal / not equal).
+
+Tricks (used in this order):
+    - Files with identical type, size & mtime are assumed to be clones
+    - Files with different type or size cannot be identical
+    - We keep a cache of outcomes of earlier comparisons
+    - We don't fork a process to run 'cmp' but read the files ourselves
+"""
+
+import os
+
+cache = {}
+
+def cmp(f1, f2, shallow=1):
+    """Compare two files, use the cache if possible.
+    Return 1 for identical files, 0 for different.
+    Raise exceptions if either file could not be statted, read, etc."""
+    s1, s2 = sig(os.stat(f1)), sig(os.stat(f2))
+    if s1[0] != 8 or s2[0] != 8:
+        # Either is not a plain file -- always report as different
+        return 0
+    if shallow and s1 == s2:
+        # type, size & mtime match -- report same
+        return 1
+    if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
+        # types or sizes differ -- report different
+        return 0
+    # same type and size -- look in the cache
+    key = (f1, f2)
+    try:
+        cs1, cs2, outcome = cache[key]
+        # cache hit
+        if s1 == cs1 and s2 == cs2:
+            # cached signatures match
+            return outcome
+        # stale cached signature(s)
+    except KeyError:
+        # cache miss
+        pass
+    # really compare
+    outcome = do_cmp(f1, f2)
+    cache[key] = s1, s2, outcome
+    return outcome
+
+def sig(st):
+    """Return signature (i.e., type, size, mtime) from raw stat data
+    0-5: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid
+    6-9: st_size, st_atime, st_mtime, st_ctime"""
+    type = st[0] / 4096
+    size = st[6]
+    mtime = st[8]
+    return type, size, mtime
+
+def do_cmp(f1, f2):
+    """Compare two files, really."""
+    bufsize = 8*1024 # Could be tuned
+    fp1 = open(f1, 'rb')
+    fp2 = open(f2, 'rb')
+    while 1:
+        b1 = fp1.read(bufsize)
+        b2 = fp2.read(bufsize)
+        if b1 != b2: return 0
+        if not b1: return 1
diff --git a/lib-python/2.2/lib-old/cmpcache.py b/lib-python/2.2/lib-old/cmpcache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/cmpcache.py
@@ -0,0 +1,64 @@
+"""Efficiently compare files, boolean outcome only (equal / not equal).
+
+Tricks (used in this order):
+    - Use the statcache module to avoid statting files more than once
+    - Files with identical type, size & mtime are assumed to be clones
+    - Files with different type or size cannot be identical
+    - We keep a cache of outcomes of earlier comparisons
+    - We don't fork a process to run 'cmp' but read the files ourselves
+"""
+
+import os
+from stat import *
+import statcache
+
+
+# The cache.
+#
+cache = {}
+
+
+def cmp(f1, f2, shallow=1):
+    """Compare two files, use the cache if possible.
+    May raise os.error if a stat or open of either fails.
+    Return 1 for identical files, 0 for different.
+    Raise exceptions if either file could not be statted, read, etc."""
+    s1, s2 = sig(statcache.stat(f1)), sig(statcache.stat(f2))
+    if not S_ISREG(s1[0]) or not S_ISREG(s2[0]):
+        # Either is not a plain file -- always report as different
+        return 0
+    if shallow and s1 == s2:
+        # type, size & mtime match -- report same
+        return 1
+    if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
+        # types or sizes differ -- report different
+        return 0
+    # same type and size -- look in the cache
+    key = f1 + ' ' + f2
+    if cache.has_key(key):
+        cs1, cs2, outcome = cache[key]
+        # cache hit
+        if s1 == cs1 and s2 == cs2:
+            # cached signatures match
+            return outcome
+        # stale cached signature(s)
+    # really compare
+    outcome = do_cmp(f1, f2)
+    cache[key] = s1, s2, outcome
+    return outcome
+
+def sig(st):
+    """Return signature (i.e., type, size, mtime) from raw stat data."""
+    return S_IFMT(st[ST_MODE]), st[ST_SIZE], st[ST_MTIME]
+
+def do_cmp(f1, f2):
+    """Compare two files, really."""
+    #print '    cmp', f1, f2 # XXX remove when debugged
+    bufsize = 8*1024 # Could be tuned
+    fp1 = open(f1, 'rb')
+    fp2 = open(f2, 'rb')
+    while 1:
+        b1 = fp1.read(bufsize)
+        b2 = fp2.read(bufsize)
+        if b1 != b2: return 0
+        if not b1: return 1
diff --git a/lib-python/2.2/lib-old/codehack.py b/lib-python/2.2/lib-old/codehack.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/codehack.py
@@ -0,0 +1,81 @@
+# A subroutine for extracting a function name from a code object
+# (with cache)
+
+import sys
+from stat import *
+import string
+import os
+import linecache
+
+# XXX The functions getcodename() and getfuncname() are now obsolete
+# XXX as code and function objects now have a name attribute --
+# XXX co.co_name and f.func_name.
+# XXX getlineno() is now also obsolete because of the new attribute
+# XXX of code objects, co.co_firstlineno.
+
+# Extract the function or class name from a code object.
+# This is a bit of a hack, since a code object doesn't contain
+# the name directly.  So what do we do:
+# - get the filename (which *is* in the code object)
+# - look in the code string to find the first SET_LINENO instruction
+#   (this must be the first instruction)
+# - get the line from the file
+# - if the line starts with 'class' or 'def' (after possible whitespace),
+#   extract the following identifier
+#
+# This breaks apart when the function was read from <stdin>
+# or constructed by exec(), when the file is not accessible,
+# and also when the file has been modified or when a line is
+# continued with a backslash before the function or class name.
+#
+# Because this is a pretty expensive hack, a cache is kept.
+
+SET_LINENO = 127 # The opcode (see "opcode.h" in the Python source)
+identchars = string.ascii_letters + string.digits + '_' # Identifier characters
+
+_namecache = {} # The cache
+
+def getcodename(co):
+	try:
+		return co.co_name
+	except AttributeError:
+		pass
+	key = `co` # arbitrary but uniquely identifying string
+	if _namecache.has_key(key): return _namecache[key]
+	filename = co.co_filename
+	code = co.co_code
+	name = ''
+	if ord(code[0]) == SET_LINENO:
+		lineno = ord(code[1]) | ord(code[2]) << 8
+		line = linecache.getline(filename, lineno)
+		words = line.split()
+		if len(words) >= 2 and words[0] in ('def', 'class'):
+			name = words[1]
+			for i in range(len(name)):
+				if name[i] not in identchars:
+					name = name[:i]
+					break
+	_namecache[key] = name
+	return name
+
+# Use the above routine to find a function's name.
+
+def getfuncname(func):
+	try:
+		return func.func_name
+	except AttributeError:
+		pass
+	return getcodename(func.func_code)
+
+# A part of the above code to extract just the line number from a code object.
+
+def getlineno(co):
+	try:
+		return co.co_firstlineno
+	except AttributeError:
+		pass
+	code = co.co_code
+	if ord(code[0]) == SET_LINENO:
+		return ord(code[1]) | ord(code[2]) << 8
+	else:
+		return -1
diff --git a/lib-python/2.2/lib-old/dircmp.py b/lib-python/2.2/lib-old/dircmp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/dircmp.py
@@ -0,0 +1,202 @@
+"""A class to build directory diff tools on."""
+
+import os
+
+import dircache
+import cmpcache
+import statcache
+from stat import *
+
+class dircmp:
+    """Directory comparison class."""
+
+    def new(self, a, b):
+        """Initialize."""
+        self.a = a
+        self.b = b
+        # Properties that caller may change before calling self.run():
+        self.hide = [os.curdir, os.pardir] # Names never to be shown
+        self.ignore = ['RCS', 'tags'] # Names ignored in comparison
+
+        return self
+
+    def run(self):
+        """Compare everything except common subdirectories."""
+        self.a_list = filter(dircache.listdir(self.a), self.hide)
+        self.b_list = filter(dircache.listdir(self.b), self.hide)
+        self.a_list.sort()
+        self.b_list.sort()
+        self.phase1()
+        self.phase2()
+        self.phase3()
+
+    def phase1(self):
+        """Compute common names."""
+        self.a_only = []
+        self.common = []
+        for x in self.a_list:
+            if x in self.b_list:
+                self.common.append(x)
+            else:
+                self.a_only.append(x)
+
+        self.b_only = []
+        for x in self.b_list:
+            if x not in self.common:
+                self.b_only.append(x)
+
+    def phase2(self):
+        """Distinguish files, directories, funnies."""
+        self.common_dirs = []
+        self.common_files = []
+        self.common_funny = []
+
+        for x in self.common:
+            a_path = os.path.join(self.a, x)
+            b_path = os.path.join(self.b, x)
+
+            ok = 1
+            try:
+                a_stat = statcache.stat(a_path)
+            except os.error, why:
+                # print 'Can\'t stat', a_path, ':', why[1]
+                ok = 0
+            try:
+                b_stat = statcache.stat(b_path)
+            except os.error, why:
+                # print 'Can\'t stat', b_path, ':', why[1]
+                ok = 0
+
+            if ok:
+                a_type = S_IFMT(a_stat[ST_MODE])
+                b_type = S_IFMT(b_stat[ST_MODE])
+                if a_type != b_type:
+                    self.common_funny.append(x)
+                elif S_ISDIR(a_type):
+                    self.common_dirs.append(x)
+                elif S_ISREG(a_type):
+                    self.common_files.append(x)
+                else:
+                    self.common_funny.append(x)
+            else:
+                self.common_funny.append(x)
+
+    def phase3(self):
+        """Find out differences between common files."""
+        xx = cmpfiles(self.a, self.b, self.common_files)
+        self.same_files, self.diff_files, self.funny_files = xx
+
+    def phase4(self):
+        """Find out differences between common subdirectories.
+        A new dircmp object is created for each common subdirectory,
+        these are stored in a dictionary indexed by filename.
+        The hide and ignore properties are inherited from the parent."""
+        self.subdirs = {}
+        for x in self.common_dirs:
+            a_x = os.path.join(self.a, x)
+            b_x = os.path.join(self.b, x)
+            self.subdirs[x] = newdd = dircmp().new(a_x, b_x)
+            newdd.hide = self.hide
+            newdd.ignore = self.ignore
+            newdd.run()
+
+    def phase4_closure(self):
+        """Recursively call phase4() on subdirectories."""
+        self.phase4()
+        for x in self.subdirs.keys():
+            self.subdirs[x].phase4_closure()
+
+    def report(self):
+        """Print a report on the differences between a and b."""
+        # Assume that phases 1 to 3 have been executed
+        # Output format is purposely lousy
+        print 'diff', self.a, self.b
+        if self.a_only:
+            print 'Only in', self.a, ':', self.a_only
+        if self.b_only:
+            print 'Only in', self.b, ':', self.b_only
+        if self.same_files:
+            print 'Identical files :', self.same_files
+        if self.diff_files:
+            print 'Differing files :', self.diff_files
+        if self.funny_files:
+            print 'Trouble with common files :', self.funny_files
+        if self.common_dirs:
+            print 'Common subdirectories :', self.common_dirs
+        if self.common_funny:
+            print 'Common funny cases :', self.common_funny
+
+    def report_closure(self):
+        """Print reports on self and on subdirs.
+        If phase 4 hasn't been done, no subdir reports are printed."""
+        self.report()
+        try:
+            x = self.subdirs
+        except AttributeError:
+            return # No subdirectories computed
+        for x in self.subdirs.keys():
+            print
+            self.subdirs[x].report_closure()
+
+    def report_phase4_closure(self):
+        """Report and do phase 4 recursively."""
+        self.report()
+        self.phase4()
+        for x in self.subdirs.keys():
+            print
+            self.subdirs[x].report_phase4_closure()
+
+
+def cmpfiles(a, b, common):
+    """Compare common files in two directories.
+    Return:
+        - files that compare equal
+        - files that compare different
+        - funny cases (can't stat etc.)"""
+
+    res = ([], [], [])
+    for x in common:
+        res[cmp(os.path.join(a, x), os.path.join(b, x))].append(x)
+    return res
+
+
+def cmp(a, b):
+    """Compare two files.
+    Return:
+        0 for equal
+        1 for different
+        2 for funny cases (can't stat, etc.)"""
+
+    try:
+        if cmpcache.cmp(a, b): return 0
+        return 1
+    except os.error:
+        return 2
+
+
+def filter(list, skip):
+    """Return a copy with items that occur in skip removed."""
+
+    result = []
+    for item in list:
+        if item not in skip: result.append(item)
+    return result
+
+
+def demo():
+    """Demonstration and testing."""
+
+    import sys
+    import getopt
+    options, args = getopt.getopt(sys.argv[1:], 'r')
+    if len(args) != 2:
+        raise getopt.error, 'need exactly two args'
+    dd = dircmp().new(args[0], args[1])
+    dd.run()
+    if ('-r', '') in options:
+        dd.report_phase4_closure()
+    else:
+        dd.report()
+
+if __name__ == "__main__":
+    demo()
diff --git a/lib-python/2.2/lib-old/dump.py b/lib-python/2.2/lib-old/dump.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/dump.py
@@ -0,0 +1,63 @@
+# Module 'dump'
+#
+# Print python code that reconstructs a variable.
+# This only works in certain cases.
+#
+# It works fine for:
+# - ints and floats (except NaNs and other weird things)
+# - strings
+# - compounds and lists, provided it works for all their elements
+# - imported modules, provided their name is the module name
+#
+# It works for top-level dictionaries but not for dictionaries
+# contained in other objects (could be made to work with some hassle
+# though).
+#
+# It does not work for functions (all sorts), classes, class objects,
+# windows, files etc.
+#
+# Finally, objects referenced by more than one name or contained in more
+# than one other object lose their sharing property (this is bad for
+# strings used as exception identifiers, for instance).
+
+# Dump a whole symbol table
+#
+def dumpsymtab(dict):
+	for key in dict.keys():
+		dumpvar(key, dict[key])
+
+# Dump a single variable
+#
+def dumpvar(name, x):
+	import sys
+	t = type(x)
+	if t == type({}):
+		print name, '= {}'
+		for key in x.keys():
+			item = x[key]
+			if not printable(item):
+				print '#',
+			print name, '[', `key`, '] =', `item`
+	elif t in (type(''), type(0), type(0.0), type([]), type(())):
+		if not printable(x):
+			print '#',
+		print name, '=', `x`
+	elif t == type(sys):
+		print 'import', name, '#', x
+	else:
+		print '#', name, '=', x
+
+# check if a value is printable in a way that can be read back with input()
+#
+def printable(x):
+	t = type(x)
+	if t in (type(''), type(0), type(0.0)):
+		return 1
+	if t in (type([]), type(())):
+		for item in x:
+			if not printable(item):
+				return 0
+		return 1
+	if x == {}:
+		return 1
+	return 0
diff --git a/lib-python/2.2/lib-old/find.py b/lib-python/2.2/lib-old/find.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/find.py
@@ -0,0 +1,26 @@
+import fnmatch
+import os
+
+_debug = 0
+
+_prune = ['(*)']
+
+def find(pattern, dir = os.curdir):
+	list = []
+	names = os.listdir(dir)
+	names.sort()
+	for name in names:
+		if name in (os.curdir, os.pardir):
+			continue
+		fullname = os.path.join(dir, name)
+		if fnmatch.fnmatch(name, pattern):
+			list.append(fullname)
+		if os.path.isdir(fullname) and not os.path.islink(fullname):
+			for p in _prune:
+				if fnmatch.fnmatch(name, p):
+					if _debug: print "skip", `fullname`
+					break
+			else:
+				if _debug: print "descend into", `fullname`
+				list = list + find(pattern, fullname)
+	return list
diff --git a/lib-python/2.2/lib-old/fmt.py b/lib-python/2.2/lib-old/fmt.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/fmt.py
@@ -0,0 +1,623 @@
+# Text formatting abstractions
+# Note -- this module is obsolete, it's too slow anyway
+
+
+import string
+import Para
+
+
+# A formatter back-end object has one method that is called by the formatter:
+# addpara(p), where p is a paragraph object.  For example:
+
+
+# Formatter back-end to do nothing at all with the paragraphs
+class NullBackEnd:
+	#
+	def __init__(self):
+		pass
+	#
+	def addpara(self, p):
+		pass
+	#
+	def bgn_anchor(self, id):
+		pass
+	#
+	def end_anchor(self, id):
+		pass
+
+
+# Formatter back-end to collect the paragraphs in a list
+class SavingBackEnd(NullBackEnd):
+	#
+	def __init__(self):
+		self.paralist = []
+	#
+	def addpara(self, p):
+		self.paralist.append(p)
+	#
+	def hitcheck(self, h, v):
+		hits = []
+		for p in self.paralist:
+			if p.top <= v <= p.bottom:
+				for id in p.hitcheck(h, v):
+					if id not in hits:
+						hits.append(id)
+		return hits
+	#
+	def extract(self):
+		text = ''
+		for p in self.paralist:
+			text = text + (p.extract())
+		return text
+	#
+	def extractpart(self, long1, long2):
+		if long1 > long2: long1, long2 = long2, long1
+		para1, pos1 = long1
+		para2, pos2 = long2
+		text = ''
+		while para1 < para2:
+			ptext = self.paralist[para1].extract()
+			text = text + ptext[pos1:]
+			pos1 = 0
+			para1 = para1 + 1
+		ptext = self.paralist[para2].extract()
+		return text + ptext[pos1:pos2]
+	#
+	def whereis(self, d, h, v):
+		total = 0
+		for i in range(len(self.paralist)):
+			p = self.paralist[i]
+			result = p.whereis(d, h, v)
+			if result is not None:
+				return i, result
+		return None
+	#
+	def roundtowords(self, long1, long2):
+		i, offset = long1
+		text = self.paralist[i].extract()
+		while offset > 0 and text[offset-1] != ' ': offset = offset-1
+		long1 = i, offset
+		#
+		i, offset = long2
+		text = self.paralist[i].extract()
+		n = len(text)
+		while offset < n-1 and text[offset] != ' ': offset = offset+1
+		long2 = i, offset
+		#
+		return long1, long2
+	#
+	def roundtoparagraphs(self, long1, long2):
+		long1 = long1[0], 0
+		long2 = long2[0], len(self.paralist[long2[0]].extract())
+		return long1, long2
+
+
+# Formatter back-end to send the text directly to the drawing object
+class WritingBackEnd(NullBackEnd):
+	#
+	def __init__(self, d, width):
+		self.d = d
+		self.width = width
+		self.lineno = 0
+	#
+	def addpara(self, p):
+		self.lineno = p.render(self.d, 0, self.lineno, self.width)
+
+
+# A formatter receives a stream of formatting instructions and assembles
+# these into a stream of paragraphs on to a back-end.  The assembly is
+# parametrized by a text measurement object, which must match the output
+# operations of the back-end.  The back-end is responsible for splitting
+# paragraphs up in lines of a given maximum width.  (This is done because
+# in a windowing environment, when the window size changes, there is no
+# need to redo the assembly into paragraphs, but the splitting into lines
+# must be done taking the new window size into account.)
+
+
+# Formatter base class.  Initialize it with a text measurement object,
+# which is used for text measurements, and a back-end object,
+# which receives the completed paragraphs.  The formatting methods are:
+# setfont(font)
+# setleftindent(nspaces)
+# setjust(type) where type is 'l', 'c', 'r', or 'lr'
+# flush()
+# vspace(nlines)
+# needvspace(nlines)
+# addword(word, nspaces)
+class BaseFormatter:
+	#
+	def __init__(self, d, b):
+		# Drawing object used for text measurements
+		self.d = d
+		#
+		# BackEnd object receiving completed paragraphs
+		self.b = b
+		#
+		# Parameters of the formatting model
+		self.leftindent = 0
+		self.just = 'l'
+		self.font = None
+		self.blanklines = 0
+		#
+		# Parameters derived from the current font
+		self.space = d.textwidth(' ')
+		self.line = d.lineheight()
+		self.ascent = d.baseline()
+		self.descent = self.line - self.ascent
+		#
+		# Parameter derived from the default font
+		self.n_space = self.space
+		#
+		# Current paragraph being built
+		self.para = None
+		self.nospace = 1
+		#
+		# Font to set on the next word
+		self.nextfont = None
+	#
+	def newpara(self):
+		return Para.Para()
+	#
+	def setfont(self, font):
+		if font is None: return
+		self.font = self.nextfont = font
+		d = self.d
+		d.setfont(font)
+		self.space = d.textwidth(' ')
+		self.line = d.lineheight()
+		self.ascent = d.baseline()
+		self.descent = self.line - self.ascent
+	#
+	def setleftindent(self, nspaces):
+		self.leftindent = int(self.n_space * nspaces)
+		if self.para:
+			hang = self.leftindent - self.para.indent_left
+			if hang > 0 and self.para.getlength() <= hang:
+				self.para.makehangingtag(hang)
+				self.nospace = 1
+			else:
+				self.flush()
+	#
+	def setrightindent(self, nspaces):
+		self.rightindent = int(self.n_space * nspaces)
+		if self.para:
+			self.para.indent_right = self.rightindent
+			self.flush()
+	#
+	def setjust(self, just):
+		self.just = just
+		if self.para:
+			self.para.just = self.just
+	#
+	def flush(self):
+		if self.para:
+			self.b.addpara(self.para)
+			self.para = None
+			if self.font is not None:
+				self.d.setfont(self.font)
+		self.nospace = 1
+	#
+	def vspace(self, nlines):
+		self.flush()
+		if nlines > 0:
+			self.para = self.newpara()
+			tuple = None, '', 0, 0, 0, int(nlines*self.line), 0
+			self.para.words.append(tuple)
+			self.flush()
+			self.blanklines = self.blanklines + nlines
+	#
+	def needvspace(self, nlines):
+		self.flush() # Just to be sure
+		if nlines > self.blanklines:
+			self.vspace(nlines - self.blanklines)
+	#
+	def addword(self, text, space):
+		if self.nospace and not text:
+			return
+		self.nospace = 0
+		self.blanklines = 0
+		if not self.para:
+			self.para = self.newpara()
+			self.para.indent_left = self.leftindent
+			self.para.just = self.just
+			self.nextfont = self.font
+		space = int(space * self.space)
+		self.para.words.append((self.nextfont, text,
+			self.d.textwidth(text), space, space,
+			self.ascent, self.descent))
+		self.nextfont = None
+	#
+	def bgn_anchor(self, id):
+		if not self.para:
+			self.nospace = 0
+			self.addword('', 0)
+		self.para.bgn_anchor(id)
+	#
+	def end_anchor(self, id):
+		if not self.para:
+			self.nospace = 0
+			self.addword('', 0)
+		self.para.end_anchor(id)
+
+
+# Measuring object for measuring text as viewed on a tty
+class NullMeasurer:
+	#
+	def __init__(self):
+		pass
+	#
+	def setfont(self, font):
+		pass
+	#
+	def textwidth(self, text):
+		return len(text)
+	#
+	def lineheight(self):
+		return 1
+	#
+	def baseline(self):
+		return 0
+
+
+# Drawing object for writing plain ASCII text to a file
+class FileWriter:
+	#
+	def __init__(self, fp):
+		self.fp = fp
+		self.lineno, self.colno = 0, 0
+	#
+	def setfont(self, font):
+		pass
+	#
+	def text(self, (h, v), str):
+		if not str: return
+		if '\n' in str:
+			raise ValueError, 'can\'t write \\n'
+		while self.lineno < v:
+			self.fp.write('\n')
+			self.colno, self.lineno = 0, self.lineno + 1
+		while self.lineno > v:
+			# XXX This should never happen...
+			self.fp.write('\033[A') # ANSI up arrow
+			self.lineno = self.lineno - 1
+		if self.colno < h:
+			self.fp.write(' ' * (h - self.colno))
+		elif self.colno > h:
+			self.fp.write('\b' * (self.colno - h))
+		self.colno = h
+		self.fp.write(str)
+		self.colno = h + len(str)
+
+
+# Formatting class to do nothing at all with the data
+class NullFormatter(BaseFormatter):
+	#
+	def __init__(self):
+		d = NullMeasurer()
+		b = NullBackEnd()
+		BaseFormatter.__init__(self, d, b)
+
+
+# Formatting class to write directly to a file
+class WritingFormatter(BaseFormatter):
+	#
+	def __init__(self, fp, width):
+		dm = NullMeasurer()
+		dw = FileWriter(fp)
+		b = WritingBackEnd(dw, width)
+		BaseFormatter.__init__(self, dm, b)
+		self.blanklines = 1
+	#
+	# Suppress multiple blank lines
+	def needvspace(self, nlines):
+		BaseFormatter.needvspace(self, min(1, nlines))
+
+
+# A "FunnyFormatter" writes ASCII text with a twist: *bold words*,
+# _italic text_ and _underlined words_, and `quoted text'.
+# It assumes that the fonts are 'r', 'i', 'b', 'u', 'q': (roman,
+# italic, bold, underline, quote).
+# Moreover, if the font is in upper case, the text is converted to
+# UPPER CASE.
+class FunnyFormatter(WritingFormatter):
+	#
+	def flush(self):
+		if self.para: finalize(self.para)
+		WritingFormatter.flush(self)
+
+
+# Surrounds *bold words* and _italic text_ in a paragraph with
+# appropriate markers, fixing the size (assuming these characters'
+# width is 1).
+openchar = \
+    {'b':'*', 'i':'_', 'u':'_', 'q':'`', 'B':'*', 'I':'_', 'U':'_', 'Q':'`'}
+closechar = \
+    {'b':'*', 'i':'_', 'u':'_', 'q':'\'', 'B':'*', 'I':'_', 'U':'_', 'Q':'\''}
+def finalize(para):
+	oldfont = curfont = 'r'
+	para.words.append(('r', '', 0, 0, 0, 0)) # temporary, deleted at end
+	for i in range(len(para.words)):
+		fo, te, wi = para.words[i][:3]
+		if fo is not None: curfont = fo
+		if curfont != oldfont:
+			if closechar.has_key(oldfont):
+				c = closechar[oldfont]
+				j = i-1
+				while j > 0 and para.words[j][1] == '': j = j-1
+				fo1, te1, wi1 = para.words[j][:3]
+				te1 = te1 + c
+				wi1 = wi1 + len(c)
+				para.words[j] = (fo1, te1, wi1) + \
+					para.words[j][3:]
+			if openchar.has_key(curfont) and te:
+				c = openchar[curfont]
+				te = c + te
+				wi = len(c) + wi
+				para.words[i] = (fo, te, wi) + \
+					para.words[i][3:]
+			if te: oldfont = curfont
+			else: oldfont = 'r'
+		if curfont in string.uppercase:
+			te = string.upper(te)
+			para.words[i] = (fo, te, wi) + para.words[i][3:]
+	del para.words[-1]
+
+
+# Formatter back-end to draw the text in a window.
+# This has an option to draw while the paragraphs are being added,
+# to minimize the delay before the user sees anything.
+# This manages the entire "document" of the window.
+class StdwinBackEnd(SavingBackEnd):
+	#
+	def __init__(self, window, drawnow):
+		self.window = window
+		self.drawnow = drawnow
+		self.width = window.getwinsize()[0]
+		self.selection = None
+		self.height = 0
+		window.setorigin(0, 0)
+		window.setdocsize(0, 0)
+		self.d = window.begindrawing()
+		SavingBackEnd.__init__(self)
+	#
+	def finish(self):
+		self.d.close()
+		self.d = None
+		self.window.setdocsize(0, self.height)
+	#
+	def addpara(self, p):
+		self.paralist.append(p)
+		if self.drawnow:
+			self.height = \
+				p.render(self.d, 0, self.height, self.width)
+		else:
+			p.layout(self.width)
+			p.left = 0
+			p.top = self.height
+			p.right = self.width
+			p.bottom = self.height + p.height
+			self.height = p.bottom
+	#
+	def resize(self):
+		self.window.change((0, 0), (self.width, self.height))
+		self.width = self.window.getwinsize()[0]
+		self.height = 0
+		for p in self.paralist:
+			p.layout(self.width)
+			p.left = 0
+			p.top = self.height
+			p.right = self.width
+			p.bottom = self.height + p.height
+			self.height = p.bottom
+		self.window.change((0, 0), (self.width, self.height))
+		self.window.setdocsize(0, self.height)
+	#
+	def redraw(self, area):
+		d = self.window.begindrawing()
+		(left, top), (right, bottom) = area
+		d.erase(area)
+		d.cliprect(area)
+		for p in self.paralist:
+			if top < p.bottom and p.top < bottom:
+				v = p.render(d, p.left, p.top, p.right)
+		if self.selection:
+			self.invert(d, self.selection)
+		d.close()
+	#
+	def setselection(self, new):
+		if new:
+			long1, long2 = new
+			pos1 = long1[:3]
+			pos2 = long2[:3]
+			new = pos1, pos2
+		if new != self.selection:
+			d = self.window.begindrawing()
+			if self.selection:
+				self.invert(d, self.selection)
+			if new:
+				self.invert(d, new)
+			d.close()
+			self.selection = new
+	#
+	def getselection(self):
+		return self.selection
+	#
+	def extractselection(self):
+		if self.selection:
+			a, b = self.selection
+			return self.extractpart(a, b)
+		else:
+			return None
+	#
+	def invert(self, d, region):
+		long1, long2 = region
+		if long1 > long2: long1, long2 = long2, long1
+		para1, pos1 = long1
+		para2, pos2 = long2
+		while para1 < para2:
+			self.paralist[para1].invert(d, pos1, None)
+			pos1 = None
+			para1 = para1 + 1
+		self.paralist[para2].invert(d, pos1, pos2)
+	#
+	def search(self, prog):
+		import re, string
+		if type(prog) is type(''):
+			prog = re.compile(string.lower(prog))
+		if self.selection:
+			iold = self.selection[0][0]
+		else:
+			iold = -1
+		hit = None
+		for i in range(len(self.paralist)):
+			if i == iold or i < iold and hit:
+				continue
+			p = self.paralist[i]
+			text = string.lower(p.extract())
+			match = prog.search(text)
+			if match:
+				a, b = match.group(0)
+				long1 = i, a
+				long2 = i, b
+				hit = long1, long2
+				if i > iold:
+					break
+		if hit:
+			self.setselection(hit)
+			i = hit[0][0]
+			p = self.paralist[i]
+			self.window.show((p.left, p.top), (p.right, p.bottom))
+			return 1
+		else:
+			return 0
+	#
+	def showanchor(self, id):
+		for i in range(len(self.paralist)):
+			p = self.paralist[i]
+			if p.hasanchor(id):
+				long1 = i, 0
+				long2 = i, len(p.extract())
+				hit = long1, long2
+				self.setselection(hit)
+				self.window.show(
+					(p.left, p.top), (p.right, p.bottom))
+				break
+
+
+# GL extensions
+
+class GLFontCache:
+	#
+	def __init__(self):
+		self.reset()
+		self.setfont('')
+	#
+	def reset(self):
+		self.fontkey = None
+		self.fonthandle = None
+		self.fontinfo = None
+		self.fontcache = {}
+	#
+	def close(self):
+		self.reset()
+	#
+	def setfont(self, fontkey):
+		if fontkey == '':
+			fontkey = 'Times-Roman 12'
+		elif ' ' not in fontkey:
+			fontkey = fontkey + ' 12'
+		if fontkey == self.fontkey:
+			return
+		if self.fontcache.has_key(fontkey):
+			handle = self.fontcache[fontkey]
+		else:
+			import string
+			i = string.index(fontkey, ' ')
+			name, sizestr = fontkey[:i], fontkey[i:]
+			size = eval(sizestr)
+			key1 = name + ' 1'
+			key = name + ' ' + `size`
+			# NB key may differ from fontkey!
+			if self.fontcache.has_key(key):
+				handle = self.fontcache[key]
+			else:
+				if self.fontcache.has_key(key1):
+					handle = self.fontcache[key1]
+				else:
+					import fm
+					handle = fm.findfont(name)
+					self.fontcache[key1] = handle
+				handle = handle.scalefont(size)
+				self.fontcache[fontkey] = \
+					self.fontcache[key] = handle
+		self.fontkey = fontkey
+		if self.fonthandle != handle:
+			self.fonthandle = handle
+			self.fontinfo = handle.getfontinfo()
+			handle.setfont()
+
+
+class GLMeasurer(GLFontCache):
+	#
+	def textwidth(self, text):
+		return self.fonthandle.getstrwidth(text)
+	#
+	def baseline(self):
+		return self.fontinfo[6] - self.fontinfo[3]
+	#
+	def lineheight(self):
+		return self.fontinfo[6]
+
+
+class GLWriter(GLFontCache):
+	#
+	# NOTES:
+	# (1) Use gl.ortho2 to use X pixel coordinates!
+	#
+	def text(self, (h, v), text):
+		import gl, fm
+		gl.cmov2i(h, v + self.fontinfo[6] - self.fontinfo[3])
+		fm.prstr(text)
+	#
+	def setfont(self, fontkey):
+		oldhandle = self.fonthandle
+		GLFontCache.setfont(fontkey)
+		if self.fonthandle != oldhandle:
+			handle.setfont()
+
+
+class GLMeasurerWriter(GLMeasurer, GLWriter):
+	pass
+
+
+class GLBackEnd(SavingBackEnd):
+	#
+	def __init__(self, wid):
+		import gl
+		gl.winset(wid)
+		self.wid = wid
+		self.width = gl.getsize()[1]
+		self.height = 0
+		self.d = GLMeasurerWriter()
+		SavingBackEnd.__init__(self)
+	#
+	def finish(self):
+		pass
+	#
+	def addpara(self, p):
+		self.paralist.append(p)
+		self.height = p.render(self.d, 0, self.height, self.width)
+	#
+	def redraw(self):
+		import gl
+		gl.winset(self.wid)
+		width = gl.getsize()[1]
+		if width != self.width:
+			setdocsize = 1
+			self.width = width
+			for p in self.paralist:
+				p.top = p.bottom = None
+		d = self.d
+		v = 0
+		for p in self.paralist:
+			v = p.render(d, 0, v, width)
diff --git a/lib-python/2.2/lib-old/grep.py b/lib-python/2.2/lib-old/grep.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/grep.py
@@ -0,0 +1,79 @@
+# 'grep'
+
+import regex
+from regex_syntax import *
+
+opt_show_where = 0
+opt_show_filename = 0
+opt_show_lineno = 1
+
+def grep(pat, *files):
+	return ggrep(RE_SYNTAX_GREP, pat, files)
+
+def egrep(pat, *files):
+	return ggrep(RE_SYNTAX_EGREP, pat, files)
+
+def emgrep(pat, *files):
+	return ggrep(RE_SYNTAX_EMACS, pat, files)
+
+def ggrep(syntax, pat, files):
+	if len(files) == 1 and type(files[0]) == type([]):
+		files = files[0]
+	global opt_show_filename
+	opt_show_filename = (len(files) != 1)
+	syntax = regex.set_syntax(syntax)
+	try:
+		prog = regex.compile(pat)
+	finally:
+		syntax = regex.set_syntax(syntax)
+	for filename in files:
+		fp = open(filename, 'r')
+		lineno = 0
+		while 1:
+			line = fp.readline()
+			if not line: break
+			lineno = lineno + 1
+			if prog.search(line) >= 0:
+				showline(filename, lineno, line, prog)
+		fp.close()
+
+def pgrep(pat, *files):
+	if len(files) == 1 and type(files[0]) == type([]):
+		files = files[0]
+	global opt_show_filename
+	opt_show_filename = (len(files) != 1)
+	import re
+	prog = re.compile(pat)
+	for filename in files:
+		fp = open(filename, 'r')
+		lineno = 0
+		while 1:
+			line = fp.readline()
+			if not line: break
+			lineno = lineno + 1
+			if prog.search(line):
+				showline(filename, lineno, line, prog)
+		fp.close()
+
+def showline(filename, lineno, line, prog):
+	if line[-1:] == '\n': line = line[:-1]
+	if opt_show_lineno:
+		prefix = `lineno`.rjust(3) + ': '
+	else:
+		prefix = ''
+	if opt_show_filename:
+		prefix = filename + ': ' + prefix
+	print prefix + line
+	if opt_show_where:
+		start, end = prog.regs()[0]
+		line = line[:start]
+		if '\t' not in line:
+			prefix = ' ' * (len(prefix) + start)
+		else:
+			prefix = ' ' * len(prefix)
+			for c in line:
+				if c != '\t': c = ' '
+				prefix = prefix + c
+		if start == end: prefix = prefix + '\\'
+		else: prefix = prefix + '^'*(end-start)
+		print prefix
diff --git a/lib-python/2.2/lib-old/lockfile.py b/lib-python/2.2/lib-old/lockfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/lockfile.py
@@ -0,0 +1,15 @@
+import struct, fcntl
+
+def writelock(f):
+	_lock(f, fcntl.F_WRLCK)
+
+def readlock(f):
+	_lock(f, fcntl.F_RDLCK)
+
+def unlock(f):
+	_lock(f, fcntl.F_UNLCK)
+
+def _lock(f, op):
+	dummy = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW,
+			    struct.pack('2h8l', op,
+					0, 0, 0, 0, 0, 0, 0, 0, 0))
diff --git a/lib-python/2.2/lib-old/newdir.py b/lib-python/2.2/lib-old/newdir.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/newdir.py
@@ -0,0 +1,73 @@
+# New dir() function
+
+
+# This should be the new dir(), except that it should still list
+# the current local name space by default
+
+def listattrs(x):
+	try:
+		dictkeys = x.__dict__.keys()
+	except (AttributeError, TypeError):
+		dictkeys = []
+	#
+	try:
+		methods = x.__methods__
+	except (AttributeError, TypeError):
+		methods = []
+	#
+	try:
+		members = x.__members__
+	except (AttributeError, TypeError):
+		members = []
+	#
+	try:
+		the_class = x.__class__
+	except (AttributeError, TypeError):
+		the_class = None
+	#
+	try:
+		bases = x.__bases__
+	except (AttributeError, TypeError):
+		bases = ()
+	#
+	total = dictkeys + methods + members
+	if the_class:
+		# It's a class instance; add the class's attributes
+		# that are functions (methods)...
+		class_attrs = listattrs(the_class)
+		class_methods = []
+		for name in class_attrs:
+			if is_function(getattr(the_class, name)):
+				class_methods.append(name)
+		total = total + class_methods
+	elif bases:
+		# It's a derived class; add the base class attributes
+		for base in bases:
+			base_attrs = listattrs(base)
+			total = total + base_attrs
+	total.sort()
+	return total
+	i = 0
+	while i+1 < len(total):
+		if total[i] == total[i+1]:
+			del total[i+1]
+		else:
+			i = i+1
+	return total
+
+
+# Helper to recognize functions
+
+def is_function(x):
+	return type(x) == type(is_function)
+
+
+# Approximation of builtin dir(); but note that this lists the user's
+# variables by default, not the current local name space.
+
+def dir(x = None):
+	if x is not None:
+		return listattrs(x)
+	else:
+		import __main__
+		return listattrs(__main__)
diff --git a/lib-python/2.2/lib-old/ni.py b/lib-python/2.2/lib-old/ni.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/ni.py
@@ -0,0 +1,433 @@
+"""New import scheme with package support.
+
+Quick Reference
+---------------
+
+- To enable package support, execute "import ni" before importing any
+  packages.  Importing this module automatically installs the relevant
+  import hooks.
+
+- To create a package named spam containing sub-modules ham, bacon and
+  eggs, create a directory spam somewhere on Python's module search
+  path (i.e. spam's parent directory must be one of the directories in
+  sys.path or $PYTHONPATH); then create files ham.py, bacon.py and
+  eggs.py inside spam.
+
+- To import module ham from package spam and use function hamneggs()
+  from that module, you can either do
+
+    import spam.ham             # *not* "import spam" !!!
+    spam.ham.hamneggs()
+
+  or
+
+    from spam import ham
+    ham.hamneggs()
+
+  or
+
+    from spam.ham import hamneggs
+    hamneggs()
+
+- Importing just "spam" does not do what you expect: it creates an
+  empty package named spam if one does not already exist, but it does
+  not import spam's submodules.  The only submodule that is guaranteed
+  to be imported is spam.__init__, if it exists.  Note that
+  spam.__init__ is a submodule of package spam.  It can refer to
+  spam's namespace via the '__.' prefix, for instance
+
+    __.spam_inited = 1          # Set a package-level variable
+
+
+
+Theory of Operation
+-------------------
+
+A Package is a module that can contain other modules.  Packages can be
+nested.  Package introduce dotted names for modules, like P.Q.M, which
+could correspond to a file P/Q/M.py found somewhere on sys.path.  It
+is possible to import a package itself, though this makes little sense
+unless the package contains a module called __init__.
+
+A package has two variables that control the namespace used for
+packages and modules, both initialized to sensible defaults the first
+time the package is referenced.
+
+(1) A package's *module search path*, contained in the per-package
+variable __path__, defines a list of *directories* where submodules or
+subpackages of the package are searched.  It is initialized to the
+directory containing the package.  Setting this variable to None makes
+the module search path default to sys.path (this is not quite the same
+as setting it to sys.path, since the latter won't track later
+assignments to sys.path).
+
+(2) A package's *import domain*, contained in the per-package variable
+__domain__, defines a list of *packages* that are searched (using
+their respective module search paths) to satisfy imports.  It is
+initialized to the list consisting of the package itself, its parent
+package, its parent's parent, and so on, ending with the root package
+(the nameless package containing all top-level packages and modules,
+whose module search path is None, implying sys.path).
+
+The default domain implements a search algorithm called "expanding
+search".  An alternative search algorithm called "explicit search"
+fixes the import search path to contain only the root package,
+requiring the modules in the package to name all imported modules by
+their full name.  The convention of using '__' to refer to the current
+package (both as a per-module variable and in module names) can be
+used by packages using explicit search to refer to modules in the same
+package; this combination is known as "explicit-relative search".
+
+The PackageImporter and PackageLoader classes together implement the
+following policies:
+
+- There is a root package, whose name is ''.  It cannot be imported
+  directly but may be referenced, e.g. by using '__' from a top-level
+  module.
+
+- In each module or package, the variable '__' contains a reference to
+  the parent package; in the root package, '__' points to itself.
+
+- In the name for imported modules (e.g. M in "import M" or "from M
+  import ..."), a leading '__' refers to the current package (i.e.
+  the package containing the current module); leading '__.__' and so
+  on refer to the current package's parent, and so on.  The use of
+  '__' elsewhere in the module name is not supported.
+
+- Modules are searched using the "expanding search" algorithm by
+  virtue of the default value for __domain__.
+
+- If A.B.C is imported, A is searched using __domain__; then
+  subpackage B is searched in A using its __path__, and so on.
+
+- Built-in modules have priority: even if a file sys.py exists in a
+  package, "import sys" imports the built-in sys module.
+
+- The same holds for frozen modules, for better or for worse.
+
+- Submodules and subpackages are not automatically loaded when their
+  parent package is loaded.
+
+- The construct "from package import *" is illegal.  (It can still be
+  used to import names from a module.)
+
+- When "from package import module1, module2, ..." is used, those
+  modules are explicitly loaded.
+
+- When a package is loaded, if it has a submodule __init__, that
+  module is loaded.  This is the place where required submodules can
+  be loaded, the __path__ variable extended, etc.  The __init__ module
+  is loaded even if the package was loaded only in order to create a
+  stub for a sub-package: if "import P.Q.R" is the first reference to
+  P, and P has a submodule __init__, P.__init__ is loaded before P.Q
+  is even searched.
+
+Caveats:
+
+- It is possible to import a package that has no __init__ submodule;
+  this is not particularly useful but there may be useful applications
+  for it (e.g. to manipulate its search paths from the outside!).
+
+- There are no special provisions for os.chdir().  If you plan to use
+  os.chdir() before you have imported all your modules, it is better
+  not to have relative pathnames in sys.path.  (This could actually be
+  fixed by changing the implementation of path_join() in the hook to
+  absolutize paths.)
+
+- Packages and modules are introduced in sys.modules as soon as their
+  loading is started.  When the loading is terminated by an exception,
+  the sys.modules entries remain around.
+
+- There are no special measures to support mutually recursive modules,
+  but it will work under the same conditions where it works in the
+  flat module space system.
+
+- Sometimes dummy entries (whose value is None) are entered in
+  sys.modules, to indicate that a particular module does not exist --
+  this is done to speed up the expanding search algorithm when a
+  module residing at a higher level is repeatedly imported (Python
+  promises that importing a previously imported module is cheap!)
+
+- Although dynamically loaded extensions are allowed inside packages,
+  the current implementation (hardcoded in the interpreter) of their
+  initialization may cause problems if an extension invokes the
+  interpreter during its initialization.
+
+- reload() may find another version of the module only if it occurs on
+  the package search path.  Thus, it keeps the connection to the
+  package to which the module belongs, but may find a different file.
+
+XXX Need to have an explicit name for '', e.g. '__root__'.
+
+"""
+
+
+import imp
+import sys
+import __builtin__
+
+import ihooks
+from ihooks import ModuleLoader, ModuleImporter
+
+
+class PackageLoader(ModuleLoader):
+
+    """A subclass of ModuleLoader with package support.
+
+    find_module_in_dir() will succeed if there's a subdirectory with
+    the given name; load_module() will create a stub for a package and
+    load its __init__ module if it exists.
+
+    """
+
+    def find_module_in_dir(self, name, dir):
+        if dir is not None:
+            dirname = self.hooks.path_join(dir, name)
+            if self.hooks.path_isdir(dirname):
+                return None, dirname, ('', '', 'PACKAGE')
+        return ModuleLoader.find_module_in_dir(self, name, dir)
+
+    def load_module(self, name, stuff):
+        file, filename, info = stuff
+        suff, mode, type = info
+        if type == 'PACKAGE':
+            return self.load_package(name, stuff)
+        if sys.modules.has_key(name):
+            m = sys.modules[name]
+        else:
+            sys.modules[name] = m = imp.new_module(name)
+        self.set_parent(m)
+        if type == imp.C_EXTENSION and '.' in name:
+            return self.load_dynamic(name, stuff)
+        else:
+            return ModuleLoader.load_module(self, name, stuff)
+
+    def load_dynamic(self, name, stuff):
+        file, filename, (suff, mode, type) = stuff
+        # Hack around restriction in imp.load_dynamic()
+        i = name.rfind('.')
+        tail = name[i+1:]
+        if sys.modules.has_key(tail):
+            save = sys.modules[tail]
+        else:
+            save = None
+        sys.modules[tail] = imp.new_module(name)
+        try:
+            m = imp.load_dynamic(tail, filename, file)
+        finally:
+            if save:
+                sys.modules[tail] = save
+            else:
+                del sys.modules[tail]
+        sys.modules[name] = m
+        return m
+
+    def load_package(self, name, stuff):
+        file, filename, info = stuff
+        if sys.modules.has_key(name):
+            package = sys.modules[name]
+        else:
+            sys.modules[name] = package = imp.new_module(name)
+        package.__path__ = [filename]
+        self.init_package(package)
+        return package
+
+    def init_package(self, package):
+        self.set_parent(package)
+        self.set_domain(package)
+        self.call_init_module(package)
+
+    def set_parent(self, m):
+        name = m.__name__
+        if '.' in name:
+            name = name[:name.rfind('.')]
+        else:
+            name = ''
+        m.__ = sys.modules[name]
+
+    def set_domain(self, package):
+        name = package.__name__
+        package.__domain__ = domain = [name]
+        while '.' in name:
+            name = name[:name.rfind('.')]
+            domain.append(name)
+        if name:
+            domain.append('')
+
+    def call_init_module(self, package):
+        stuff = self.find_module('__init__', package.__path__)
+        if stuff:
+            m = self.load_module(package.__name__ + '.__init__', stuff)
+            package.__init__ = m
+
+
+class PackageImporter(ModuleImporter):
+
+    """Importer that understands packages and '__'."""
+
+    def __init__(self, loader = None, verbose = 0):
+        ModuleImporter.__init__(self,
+        loader or PackageLoader(None, verbose), verbose)
+
+    def import_module(self, name, globals={}, locals={}, fromlist=[]):
+        if globals.has_key('__'):
+            package = globals['__']
+        else:
+            # No calling context, assume in root package
+            package = sys.modules['']
+        if name[:3] in ('__.', '__'):
+            p = package
+            name = name[3:]
+            while name[:3] in ('__.', '__'):
+                p = p.__
+                name = name[3:]
+            if not name:
+                return self.finish(package, p, '', fromlist)
+            if '.' in name:
+                i = name.find('.')
+                name, tail = name[:i], name[i:]
+            else:
+                tail = ''
+            mname = p.__name__ and p.__name__+'.'+name or name
+            m = self.get1(mname)
+            return self.finish(package, m, tail, fromlist)
+        if '.' in name:
+            i = name.find('.')
+            name, tail = name[:i], name[i:]
+        else:
+            tail = ''
+        for pname in package.__domain__:
+            mname = pname and pname+'.'+name or name
+            m = self.get0(mname)
+            if m: break
+        else:
+            raise ImportError, "No such module %s" % name
+        return self.finish(m, m, tail, fromlist)
+
+    def finish(self, module, m, tail, fromlist):
+        # Got ....A; now get ....A.B.C.D
+        yname = m.__name__
+        if tail and sys.modules.has_key(yname + tail): # Fast path
+            yname, tail = yname + tail, ''
+            m = self.get1(yname)
+        while tail:
+            i = tail.find('.', 1)
+            if i > 0:
+                head, tail = tail[:i], tail[i:]
+            else:
+                head, tail = tail, ''
+            yname = yname + head
+            m = self.get1(yname)
+
+        # Got ....A.B.C.D; now finalize things depending on fromlist
+        if not fromlist:
+            return module
+        if '__' in fromlist:
+            raise ImportError, "Can't import __ from anywhere"
+        if not hasattr(m, '__path__'): return m
+        if '*' in fromlist:
+            raise ImportError, "Can't import * from a package"
+        for f in fromlist:
+            if hasattr(m, f): continue
+            fname = yname + '.' + f
+            self.get1(fname)
+        return m
+
+    def get1(self, name):
+        m = self.get(name)
+        if not m:
+            raise ImportError, "No module named %s" % name
+        return m
+
+    def get0(self, name):
+        m = self.get(name)
+        if not m:
+            sys.modules[name] = None
+        return m
+
+    def get(self, name):
+        # Internal routine to get or load a module when its parent exists
+        if sys.modules.has_key(name):
+            return sys.modules[name]
+        if '.' in name:
+            i = name.rfind('.')
+            head, tail = name[:i], name[i+1:]
+        else:
+            head, tail = '', name
+        path = sys.modules[head].__path__
+        stuff = self.loader.find_module(tail, path)
+        if not stuff:
+            return None
+        sys.modules[name] = m = self.loader.load_module(name, stuff)
+        if head:
+            setattr(sys.modules[head], tail, m)
+        return m
+
+    def reload(self, module):
+        name = module.__name__
+        if '.' in name:
+            i = name.rfind('.')
+            head, tail = name[:i], name[i+1:]
+            path = sys.modules[head].__path__
+        else:
+            tail = name
+            path = sys.modules[''].__path__
+        stuff = self.loader.find_module(tail, path)
+        if not stuff:
+            raise ImportError, "No module named %s" % name
+        return self.loader.load_module(name, stuff)
+
+    def unload(self, module):
+        if hasattr(module, '__path__'):
+            raise ImportError, "don't know how to unload packages yet"
+        PackageImporter.unload(self, module)
+
+    def install(self):
+        if not sys.modules.has_key(''):
+            sys.modules[''] = package = imp.new_module('')
+            package.__path__ = None
+            self.loader.init_package(package)
+            for m in sys.modules.values():
+                if not m: continue
+                if not hasattr(m, '__'):
+                    self.loader.set_parent(m)
+        ModuleImporter.install(self)
+
+
+def install(v = 0):
+    ihooks.install(PackageImporter(None, v))
+
+def uninstall():
+    ihooks.uninstall()
+
+def ni(v = 0):
+    install(v)
+
+def no():
+    uninstall()
+
+def test():
+    import pdb
+    try:
+        testproper()
+    except:
+        sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
+        print
+        print sys.last_type, ':', sys.last_value
+        print
+        pdb.pm()
+
+def testproper():
+    install(1)
+    try:
+        import mactest
+        print dir(mactest)
+        raw_input('OK?')
+    finally:
+        uninstall()
+
+
+if __name__ == '__main__':
+    test()
+else:
+    install()
diff --git a/lib-python/2.2/lib-old/packmail.py b/lib-python/2.2/lib-old/packmail.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/packmail.py
@@ -0,0 +1,111 @@
+# Module 'packmail' -- create a self-unpacking shell archive.
+
+# This module works on UNIX and on the Mac; the archives can unpack
+# themselves only on UNIX.
+
+import os
+from stat import ST_MTIME
+
+# Print help
+def help():
+	print 'All fns have a file open for writing as first parameter'
+	print 'pack(f, fullname, name): pack fullname as name'
+	print 'packsome(f, directory, namelist): selected files from directory'
+	print 'packall(f, directory): pack all files from directory'
+	print 'packnotolder(f, directory, name): pack all files from directory'
+	print '                        that are not older than a file there'
+	print 'packtree(f, directory): pack entire directory tree'
+
+# Pack one file
+def pack(outfp, file, name):
+	fp = open(file, 'r')
+	outfp.write('echo ' + name + '\n')
+	outfp.write('sed "s/^X//" >"' + name + '" <<"!"\n')
+	while 1:
+		line = fp.readline()
+		if not line: break
+		if line[-1:] != '\n':
+			line = line + '\n'
+		outfp.write('X' + line)
+	outfp.write('!\n')
+	fp.close()
+
+# Pack some files from a directory
+def packsome(outfp, dirname, names):
+	for name in names:
+		print name
+		file = os.path.join(dirname, name)
+		pack(outfp, file, name)
+
+# Pack all files from a directory
+def packall(outfp, dirname):
+	names = os.listdir(dirname)
+	try:
+	    names.remove('.')
+	except:
+	    pass
+	try:
+	    names.remove('..')
+	except:
+	    pass
+	names.sort()
+	packsome(outfp, dirname, names)
+
+# Pack all files from a directory that are not older than a given one
+def packnotolder(outfp, dirname, oldest):
+	names = os.listdir(dirname)
+	try:
+	    names.remove('.')
+	except:
+	    pass
+	try:
+	    names.remove('..')
+	except:
+	    pass
+	oldest = os.path.join(dirname, oldest)
+	st = os.stat(oldest)
+	mtime = st[ST_MTIME]
+	todo = []
+	for name in names:
+		print name, '...',
+		st = os.stat(os.path.join(dirname, name))
+		if st[ST_MTIME] >= mtime:
+			print 'Yes.'
+			todo.append(name)
+		else:
+			print 'No.'
+	todo.sort()
+	packsome(outfp, dirname, todo)
+
+# Pack a whole tree (no exceptions)
+def packtree(outfp, dirname):
+	print 'packtree', dirname
+	outfp.write('mkdir ' + unixfix(dirname) + '\n')
+	names = os.listdir(dirname)
+	try:
+	    names.remove('.')
+	except:
+	    pass
+	try:
+	    names.remove('..')
+	except:
+	    pass
+	subdirs = []
+	for name in names:
+		fullname = os.path.join(dirname, name)
+		if os.path.isdir(fullname):
+			subdirs.append(fullname)
+		else:
+			print 'pack', fullname
+			pack(outfp, fullname, unixfix(fullname))
+	for subdirname in subdirs:
+		packtree(outfp, subdirname)
+
+def unixfix(name):
+	comps = name.split(os.sep)
+	res = ''
+	for comp in comps:
+		if comp:
+			if res: res = res + '/'
+			res = res + comp
+	return res
diff --git a/lib-python/2.2/lib-old/poly.py b/lib-python/2.2/lib-old/poly.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/poly.py
@@ -0,0 +1,52 @@
+# module 'poly' -- Polynomials
+
+# A polynomial is represented by a list of coefficients, e.g.,
+# [1, 10, 5] represents 1*x**0 + 10*x**1 + 5*x**2 (or 1 + 10x + 5x**2).
+# There is no way to suppress internal zeros; trailing zeros are
+# taken out by normalize().
+
+def normalize(p): # Strip unnecessary zero coefficients
+	n = len(p)
+	while n:
+		if p[n-1]: return p[:n]
+		n = n-1
+	return []
+
+def plus(a, b):
+	if len(a) < len(b): a, b = b, a # make sure a is the longest
+	res = a[:] # make a copy
+	for i in range(len(b)):
+		res[i] = res[i] + b[i]
+	return normalize(res)
+
+def minus(a, b):
+	neg_b = map(lambda x: -x, b[:])
+	return plus(a, neg_b)
+
+def one(power, coeff): # Representation of coeff * x**power
+	res = []
+	for i in range(power): res.append(0)
+	return res + [coeff]
+
+def times(a, b):
+	res = []
+	for i in range(len(a)):
+		for j in range(len(b)):
+			res = plus(res, one(i+j, a[i]*b[j]))
+	return res
+
+def power(a, n): # Raise polynomial a to the positive integral power n
+	if n == 0: return [1]
+	if n == 1: return a
+	if n/2*2 == n:
+		b = power(a, n/2)
+		return times(b, b)
+	return times(power(a, n-1), a)
+
+def der(a): # First derivative
+	res = a[1:]
+	for i in range(len(res)):
+		res[i] = res[i] * (i+1)
+	return res
+
+# Computing a primitive function would require rational arithmetic...
diff --git a/lib-python/2.2/lib-old/rand.py b/lib-python/2.2/lib-old/rand.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/rand.py
@@ -0,0 +1,13 @@
+# Module 'rand'
+# Don't use unless you want compatibility with C's rand()!
+
+import whrandom
+
+def srand(seed):
+	whrandom.seed(seed%256, seed/256%256, seed/65536%256)
+
+def rand():
+	return int(whrandom.random() * 32768.0) % 32768
+
+def choice(seq):
+	return seq[rand() % len(seq)]
diff --git a/lib-python/2.2/lib-old/tb.py b/lib-python/2.2/lib-old/tb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/tb.py
@@ -0,0 +1,177 @@
+# Print tracebacks, with a dump of local variables.
+# Also an interactive stack trace browser.
+# Note -- this module is obsolete -- use pdb.pm() instead.
+
+import sys
+import os
+from stat import *
+import linecache
+
+def br(): browser(sys.last_traceback)
+
+def tb(): printtb(sys.last_traceback)
+
+def browser(tb):
+	if not tb:
+		print 'No traceback.'
+		return
+	tblist = []
+	while tb:
+		tblist.append(tb)
+		tb = tb.tb_next
+	ptr = len(tblist)-1
+	tb = tblist[ptr]
+	while 1:
+		if tb != tblist[ptr]:
+			tb = tblist[ptr]
+			print `ptr` + ':',
+			printtbheader(tb)
+		try:
+			line = raw_input('TB: ')
+		except KeyboardInterrupt:
+			print '\n[Interrupted]'
+			break
+		except EOFError:
+			print '\n[EOF]'
+			break
+		cmd = line.strip()
+		if cmd:
+			if cmd == 'quit':
+				break
+			elif cmd == 'list':
+				browserlist(tb)
+			elif cmd == 'up':
+				if ptr-1 >= 0: ptr = ptr-1
+				else: print 'Bottom of stack.'
+			elif cmd == 'down':
+				if ptr+1 < len(tblist): ptr = ptr+1
+				else: print 'Top of stack.'
+			elif cmd == 'locals':
+				printsymbols(tb.tb_frame.f_locals)
+			elif cmd == 'globals':
+				printsymbols(tb.tb_frame.f_globals)
+			elif cmd in ('?', 'help'):
+				browserhelp()
+			else:
+				browserexec(tb, cmd)
+
+def browserlist(tb):
+	filename = tb.tb_frame.f_code.co_filename
+	lineno = tb.tb_lineno
+	last = lineno
+	first = max(1, last-10)
+	for i in range(first, last+1):
+		if i == lineno: prefix = '***' + `i`.rjust(4) + ':'
+		else: prefix = `i`.rjust(7) + ':'
+		line = linecache.getline(filename, i)
+		if line[-1:] == '\n': line = line[:-1]
+		print prefix + line
+
+def browserexec(tb, cmd):
+	locals = tb.tb_frame.f_locals
+	globals = tb.tb_frame.f_globals
+	try:
+		exec cmd+'\n' in globals, locals
+	except:
+		t, v = sys.exc_info()[:2]
+		print '*** Exception:',
+		if type(t) is type(''):
+			print t,
+		else:
+			print t.__name__,
+		if v is not None:
+			print ':', v,
+		print
+		print 'Type help to get help.'
+
+def browserhelp():
+	print
+	print '    This is the traceback browser.  Commands are:'
+	print '        up      : move one level up in the call stack'
+	print '        down    : move one level down in the call stack'
+	print '        locals  : print all local variables at this level'
+	print '        globals : print all global variables at this level'
+	print '        list    : list source code around the failure'
+	print '        help    : print help (what you are reading now)'
+	print '        quit    : back to command interpreter'
+	print '    Typing any other 1-line statement will execute it'
+	print '    using the current level\'s symbol tables'
+	print
+
+def printtb(tb):
+	while tb:
+		print1tb(tb)
+		tb = tb.tb_next
+
+def print1tb(tb):
+	printtbheader(tb)
+	if tb.tb_frame.f_locals is not tb.tb_frame.f_globals:
+		printsymbols(tb.tb_frame.f_locals)
+
+def printtbheader(tb):
+	filename = tb.tb_frame.f_code.co_filename
+	lineno = tb.tb_lineno
+	info = '"' + filename + '"(' + `lineno` + ')'
+	line = linecache.getline(filename, lineno)
+	if line:
+		info = info + ': ' + line.strip()
+	print info
+
+def printsymbols(d):
+	keys = d.keys()
+	keys.sort()
+	for name in keys:
+		print '  ' + name.ljust(12) + ':',
+		printobject(d[name], 4)
+		print
+
+def printobject(v, maxlevel):
+	if v is None:
+		print 'None',
+	elif type(v) in (type(0), type(0.0)):
+		print v,
+	elif type(v) is type(''):
+		if len(v) > 20:
+			print `v[:17] + '...'`,
+		else:
+			print `v`,
+	elif type(v) is type(()):
+		print '(',
+		printlist(v, maxlevel)
+		print ')',
+	elif type(v) is type([]):
+		print '[',
+		printlist(v, maxlevel)
+		print ']',
+	elif type(v) is type({}):
+		print '{',
+		printdict(v, maxlevel)
+		print '}',
+	else:
+		print v,
+
+def printlist(v, maxlevel):
+	n = len(v)
+	if n == 0: return
+	if maxlevel <= 0:
+		print '...',
+		return
+	for i in range(min(6, n)):
+		printobject(v[i], maxlevel-1)
+		if i+1 < n: print ',',
+	if n > 6: print '...',
+
+def printdict(v, maxlevel):
+	keys = v.keys()
+	n = len(keys)
+	if n == 0: return
+	if maxlevel <= 0:
+		print '...',
+		return
+	keys.sort()
+	for i in range(min(6, n)):
+		key = keys[i]
+		print `key` + ':',
+		printobject(v[key], maxlevel-1)
+		if i+1 < n: print ',',
+	if n > 6: print '...',
diff --git a/lib-python/2.2/lib-old/util.py b/lib-python/2.2/lib-old/util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/util.py
@@ -0,0 +1,25 @@
+# Module 'util' -- some useful functions that don't fit elsewhere
+
+# NB: These are now built-in functions, but this module is provided
+# for compatibility.  Don't use in new programs unless you need backward
+# compatibility (i.e. need to run with old interpreters).
+
+
+# Remove an item from a list.
+# No complaints if it isn't in the list at all.
+# If it occurs more than once, remove the first occurrence.
+#
+def remove(item, list):
+	if item in list: list.remove(item)
+
+
+# Return a string containing a file's contents.
+#
+def readfile(fn):
+	return readopenfile(open(fn, 'r'))
+
+
+# Read an open file until EOF.
+#
+def readopenfile(fp):
+	return fp.read()
diff --git a/lib-python/2.2/lib-old/whatsound.py b/lib-python/2.2/lib-old/whatsound.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/whatsound.py
@@ -0,0 +1,1 @@
+from sndhdr import *
diff --git a/lib-python/2.2/lib-old/zmod.py b/lib-python/2.2/lib-old/zmod.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-old/zmod.py
@@ -0,0 +1,94 @@
+# module 'zmod'
+
+# Compute properties of mathematical "fields" formed by taking
+# Z/n (the whole numbers modulo some whole number n) and an 
+# irreducible polynomial (i.e., a polynomial with only complex zeros),
+# e.g., Z/5 and X**2 + 2.
+#
+# The field is formed by taking all possible linear combinations of
+# a set of d base vectors (where d is the degree of the polynomial).
+#
+# Note that this procedure doesn't yield a field for all combinations
+# of n and p: it may well be that some numbers have more than one
+# inverse and others have none.  This is what we check.
+#
+# Remember that a field is a ring where each element has an inverse.
+# A ring has commutative addition and multiplication, a zero and a one:
+# 0*x = x*0 = 0, 0+x = x+0 = x, 1*x = x*1 = x.  Also, the distributive
+# property holds: a*(b+c) = a*b + a*c.
+# (XXX I forget if this is an axiom or follows from the rules.)
+
+import poly
+
+
+# Example N and polynomial
+
+N = 5
+P = poly.plus(poly.one(0, 2), poly.one(2, 1)) # 2 + x**2
+
+
+# Return x modulo y.  Returns >= 0 even if x < 0.
+
+def mod(x, y):
+	return divmod(x, y)[1]
+
+
+# Normalize a polynomial modulo n and modulo p.
+
+def norm(a, n, p):
+	a = poly.modulo(a, p)
+	a = a[:]
+	for i in range(len(a)): a[i] = mod(a[i], n)
+	a = poly.normalize(a)
+	return a
+
+
+# Make a list of all n^d elements of the proposed field.
+
+def make_all(mat):
+	all = []
+	for row in mat:
+		for a in row:
+			all.append(a)
+	return all
+
+def make_elements(n, d):
+	if d == 0: return [poly.one(0, 0)]
+	sub = make_elements(n, d-1)
+	all = []
+	for a in sub:
+		for i in range(n):
+			all.append(poly.plus(a, poly.one(d-1, i)))
+	return all
+
+def make_inv(all, n, p):
+	x = poly.one(1, 1)
+	inv = []
+	for a in all:
+		inv.append(norm(poly.times(a, x), n, p))
+	return inv
+
+def checkfield(n, p):
+	all = make_elements(n, len(p)-1)
+	inv = make_inv(all, n, p)
+	all1 = all[:]
+	inv1 = inv[:]
+	all1.sort()
+	inv1.sort()
+	if all1 == inv1: print 'BINGO!'
+	else:
+		print 'Sorry:', n, p
+		print all
+		print inv
+
+def rj(s, width):
+	if type(s) is not type(''): s = `s`
+	n = len(s)
+	if n >= width: return s
+	return ' '*(width - n) + s
+
+def lj(s, width):
+	if type(s) is not type(''): s = `s`
+	n = len(s)
+	if n >= width: return s
+	return s + ' '*(width - n)
diff --git a/lib-python/2.2/lib-tk/Canvas.py b/lib-python/2.2/lib-tk/Canvas.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Canvas.py
@@ -0,0 +1,188 @@
+# This module exports classes for the various canvas item types
+
+# NOTE: This module was an experiment and is now obsolete.
+# It's best to use the Tkinter.Canvas class directly.
+
+from Tkinter import Canvas, _cnfmerge, _flatten
+
+
class CanvasItem:
    """Object-oriented wrapper around a single Tkinter Canvas item.

    NOTE: an obsolete experiment -- use Tkinter.Canvas directly.  The
    wrapper registers itself in canvas.items (keyed by the Tk item id)
    and forwards most operations to the underlying canvas.
    """
    def __init__(self, canvas, itemType, *args, **kw):
        self.canvas = canvas
        # canvas._create returns the integer item id Tk assigned.
        self.id = canvas._create(itemType, args, kw)
        if not hasattr(canvas, 'items'):
            canvas.items = {}
        canvas.items[self.id] = self
    def __str__(self):
        return str(self.id)
    def __repr__(self):
        return '<%s, id=%d>' % (self.__class__.__name__, self.id)
    def delete(self):
        # Drop the bookkeeping entry first, then the Tk item itself.
        del self.canvas.items[self.id]
        self.canvas.delete(self.id)
    def __getitem__(self, key):
        # 'itemconfigure -key' yields a 5-element option spec; element 4
        # is the current value.
        v = self.canvas.tk.split(self.canvas.tk.call(
                self.canvas._w, 'itemconfigure',
                self.id, '-' + key))
        return v[4]
    cget = __getitem__
    def __setitem__(self, key, value):
        self.canvas.itemconfig(self.id, {key: value})
    def keys(self):
        # Option names are computed once and cached in self._keys.
        if not hasattr(self, '_keys'):
            self._keys = map(lambda x, tk=self.canvas.tk:
                             tk.splitlist(x)[0][1:],
                             self.canvas.tk.splitlist(
                                     self.canvas._do(
                                             'itemconfigure',
                                             (self.id,))))
        return self._keys
    def has_key(self, key):
        return key in self.keys()
    def addtag(self, tag, option='withtag'):
        self.canvas.addtag(tag, option, self.id)
    def bbox(self):
        # Return ((x1, y1), (x2, y2)) corner pairs, not a flat 4-tuple.
        x1, y1, x2, y2 = self.canvas.bbox(self.id)
        return (x1, y1), (x2, y2)
    def bind(self, sequence=None, command=None, add=None):
        return self.canvas.tag_bind(self.id, sequence, command, add)
    def unbind(self, sequence, funcid=None):
        self.canvas.tag_unbind(self.id, sequence, funcid)
    def config(self, cnf={}, **kw):
        return self.canvas.itemconfig(self.id, _cnfmerge((cnf, kw)))
    def coords(self, pts = ()):
        # Accepts a sequence of (x, y) pairs and flattens it for Tk.
        flat = ()
        for x, y in pts: flat = flat + (x, y)
        return apply(self.canvas.coords, (self.id,) + flat)
    def dchars(self, first, last=None):
        self.canvas.dchars(self.id, first, last)
    def dtag(self, ttd):
        self.canvas.dtag(self.id, ttd)
    def focus(self):
        self.canvas.focus(self.id)
    def gettags(self):
        return self.canvas.gettags(self.id)
    def icursor(self, index):
        self.canvas.icursor(self.id, index)
    def index(self, index):
        return self.canvas.index(self.id, index)
    def insert(self, beforethis, string):
        self.canvas.insert(self.id, beforethis, string)
    def lower(self, belowthis=None):
        self.canvas.tag_lower(self.id, belowthis)
    def move(self, xamount, yamount):
        self.canvas.move(self.id, xamount, yamount)
    def tkraise(self, abovethis=None):
        self.canvas.tag_raise(self.id, abovethis)
    raise_ = tkraise # BW compat
    def scale(self, xorigin, yorigin, xscale, yscale):
        self.canvas.scale(self.id, xorigin, yorigin, xscale, yscale)
    def type(self):
        return self.canvas.type(self.id)
+
class Arc(CanvasItem):
    """Canvas 'arc' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'arc', *args, **kw)
+
class Bitmap(CanvasItem):
    """Canvas 'bitmap' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'bitmap', *args, **kw)
+
class ImageItem(CanvasItem):
    """Canvas 'image' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'image', *args, **kw)
+
class Line(CanvasItem):
    """Canvas 'line' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'line', *args, **kw)
+
class Oval(CanvasItem):
    """Canvas 'oval' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'oval', *args, **kw)
+
class Polygon(CanvasItem):
    """Canvas 'polygon' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'polygon', *args, **kw)
+
class Rectangle(CanvasItem):
    """Canvas 'rectangle' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'rectangle', *args, **kw)
+
+# XXX "Text" is taken by the Text widget...
class CanvasText(CanvasItem):
    """Canvas 'text' item (named CanvasText because Text is the widget)."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'text', *args, **kw)
+
class Window(CanvasItem):
    """Canvas 'window' item."""
    def __init__(self, canvas, *args, **kw):
        # Extended call syntax replaces the deprecated apply() builtin.
        CanvasItem.__init__(self, canvas, 'window', *args, **kw)
+
class Group:
    """Treat a canvas tag as a group of items.

    Every operation is issued against self.tag, so it applies to all
    canvas items currently carrying that tag.
    """
    def __init__(self, canvas, tag=None):
        if not tag:
            tag = 'Group%d' % id(self)   # generate a unique default tag
        self.tag = self.id = tag
        self.canvas = canvas
        self.canvas.dtag(self.tag)
    def str(self):
        return self.tag
    __str__ = str
    def _do(self, cmd, *args):
        # Forward a raw canvas subcommand with self.tag as first argument.
        return self.canvas._do(cmd, (self.tag,) + _flatten(args))
    def addtag_above(self, tagOrId):
        self._do('addtag', 'above', tagOrId)
    def addtag_all(self):
        self._do('addtag', 'all')
    def addtag_below(self, tagOrId):
        self._do('addtag', 'below', tagOrId)
    def addtag_closest(self, x, y, halo=None, start=None):
        self._do('addtag', 'closest', x, y, halo, start)
    def addtag_enclosed(self, x1, y1, x2, y2):
        self._do('addtag', 'enclosed', x1, y1, x2, y2)
    def addtag_overlapping(self, x1, y1, x2, y2):
        self._do('addtag', 'overlapping', x1, y1, x2, y2)
    def addtag_withtag(self, tagOrId):
        self._do('addtag', 'withtag', tagOrId)
    def bbox(self):
        return self.canvas._getints(self._do('bbox'))
    def bind(self, sequence=None, command=None, add=None):
        return self.canvas.tag_bind(self.id, sequence, command, add)
    def unbind(self, sequence, funcid=None):
        self.canvas.tag_unbind(self.id, sequence, funcid)
    def coords(self, *pts):
        return self._do('coords', pts)
    def dchars(self, first, last=None):
        self._do('dchars', first, last)
    def delete(self):
        self._do('delete')
    def dtag(self, tagToDelete=None):
        self._do('dtag', tagToDelete)
    def focus(self):
        self._do('focus')
    def gettags(self):
        return self.canvas.tk.splitlist(self._do('gettags', self.tag))
    def icursor(self, index):
        return self._do('icursor', index)
    def index(self, index):
        return self.canvas.tk.getint(self._do('index', index))
    def insert(self, beforeThis, string):
        self._do('insert', beforeThis, string)
    def config(self, cnf={}, **kw):
        return self.canvas.itemconfigure(self.tag, _cnfmerge((cnf,kw)))
    def lower(self, belowThis=None):
        self._do('lower', belowThis)
    def move(self, xAmount, yAmount):
        self._do('move', xAmount, yAmount)
    def tkraise(self, aboveThis=None):
        self._do('raise', aboveThis)
    lift = tkraise
    def scale(self, xOrigin, yOrigin, xScale, yScale):
        self._do('scale', xOrigin, yOrigin, xScale, yScale)
    def select_adjust(self, index):
        self.canvas._do('select', ('adjust', self.tag, index))
    def select_from(self, index):
        self.canvas._do('select', ('from', self.tag, index))
    def select_to(self, index):
        self.canvas._do('select', ('to', self.tag, index))
    def type(self):
        return self._do('type')
diff --git a/lib-python/2.2/lib-tk/Dialog.py b/lib-python/2.2/lib-tk/Dialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Dialog.py
@@ -0,0 +1,49 @@
+# Dialog.py -- Tkinter interface to the tk_dialog script.
+
+from Tkinter import *
+from Tkinter import _cnfmerge
+
# Choose the dialog bitmap by Tk version; 'questhead' is presumably not
# available before Tk 4.0, hence the 'warning' fallback -- TODO confirm.
if TkVersion <= 3.6:
    DIALOG_ICON = 'warning'
else:
    DIALOG_ICON = 'questhead'
+
+
class Dialog(Widget):
    """Modal dialog driven by Tk's tk_dialog script.

    cnf/kw must supply 'title', 'text', 'bitmap', 'default' and
    'strings'; after construction, self.num holds the index of the
    button the user pressed.
    """
    def __init__(self, master=None, cnf={}, **kw):
        cnf = _cnfmerge((cnf, kw))
        self.widgetName = '__dialog__'
        Widget._setup(self, master, cnf)
        # tk_dialog blocks until a button is pressed and returns its index.
        self.num = self.tk.getint(
                apply(self.tk.call,
                      ('tk_dialog', self._w,
                       cnf['title'], cnf['text'],
                       cnf['bitmap'], cnf['default'])
                      + cnf['strings']))
        try: Widget.destroy(self)
        except TclError: pass
    # The Tk widget was already destroyed in __init__; make any later
    # destroy() call a no-op.
    def destroy(self): pass
+
def _test():
    # Demo: pop up a modal question dialog and print the index of the
    # button the user pressed.
    d = Dialog(None, {'title': 'File Modified',
                      'text':
                      'File "Python.h" has been modified'
                      ' since the last time it was saved.'
                      ' Do you want to save it before'
                      ' exiting the application.',
                      'bitmap': DIALOG_ICON,
                      'default': 0,
                      'strings': ('Save File',
                                  'Discard Changes',
                                  'Return to Editor')})
    print d.num
+
+
if __name__ == '__main__':
    # Manual test harness: a Test button runs _test, Quit exits.
    t = Button(None, {'text': 'Test',
                      'command': _test,
                      Pack: {}})
    q = Button(None, {'text': 'Quit',
                      'command': t.quit,
                      Pack: {}})
    t.mainloop()
diff --git a/lib-python/2.2/lib-tk/FileDialog.py b/lib-python/2.2/lib-tk/FileDialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/FileDialog.py
@@ -0,0 +1,273 @@
+"""File selection dialog classes.
+
+Classes:
+
+- FileDialog
+- LoadFileDialog
+- SaveFileDialog
+
+"""
+
+from Tkinter import *
+from Dialog import Dialog
+
+import os
+import fnmatch
+
+
# Maps a caller-supplied key to the (directory, pattern) pair remembered
# from the previous dialog run with that key (see FileDialog.go).
dialogstates = {}
+
+
class FileDialog:

    """Standard file selection dialog -- no checks on selected file.

    Usage:

        d = FileDialog(master)
        file = d.go(dir_or_file, pattern, default, key)
        if file is None: ...canceled...
        else: ...open file...

    All arguments to go() are optional.

    The 'key' argument specifies a key in the global dictionary
    'dialogstates', which keeps track of the values for the directory
    and pattern arguments, overriding the values passed in (it does
    not keep track of the default argument!).  If no key is specified,
    the dialog keeps no memory of previous state.  Note that memory is
    kept even when the dialog is canceled.  (All this emulates the
    behavior of the Macintosh file selection dialogs.)

    """

    title = "File Selection Dialog"

    def __init__(self, master, title=None):
        # Build the widget tree: filter entry on top, directory and file
        # listboxes in the middle, selection entry and OK/Filter/Cancel
        # buttons at the bottom.
        if title is None: title = self.title
        self.master = master
        self.directory = None

        self.top = Toplevel(master)
        self.top.title(title)
        self.top.iconname(title)

        self.botframe = Frame(self.top)
        self.botframe.pack(side=BOTTOM, fill=X)

        self.selection = Entry(self.top)
        self.selection.pack(side=BOTTOM, fill=X)
        self.selection.bind('<Return>', self.ok_event)

        self.filter = Entry(self.top)
        self.filter.pack(side=TOP, fill=X)
        self.filter.bind('<Return>', self.filter_command)

        self.midframe = Frame(self.top)
        self.midframe.pack(expand=YES, fill=BOTH)

        self.filesbar = Scrollbar(self.midframe)
        self.filesbar.pack(side=RIGHT, fill=Y)
        self.files = Listbox(self.midframe, exportselection=0,
                             yscrollcommand=(self.filesbar, 'set'))
        self.files.pack(side=RIGHT, expand=YES, fill=BOTH)
        # Rotate bindtags so our ButtonRelease handlers run after the
        # Listbox class binding has updated the selection -- NOTE(review):
        # purpose inferred from the rotation; confirm against Tkinter docs.
        btags = self.files.bindtags()
        self.files.bindtags(btags[1:] + btags[:1])
        self.files.bind('<ButtonRelease-1>', self.files_select_event)
        self.files.bind('<Double-ButtonRelease-1>', self.files_double_event)
        self.filesbar.config(command=(self.files, 'yview'))

        self.dirsbar = Scrollbar(self.midframe)
        self.dirsbar.pack(side=LEFT, fill=Y)
        self.dirs = Listbox(self.midframe, exportselection=0,
                            yscrollcommand=(self.dirsbar, 'set'))
        self.dirs.pack(side=LEFT, expand=YES, fill=BOTH)
        self.dirsbar.config(command=(self.dirs, 'yview'))
        btags = self.dirs.bindtags()
        self.dirs.bindtags(btags[1:] + btags[:1])
        self.dirs.bind('<ButtonRelease-1>', self.dirs_select_event)
        self.dirs.bind('<Double-ButtonRelease-1>', self.dirs_double_event)

        self.ok_button = Button(self.botframe,
                                 text="OK",
                                 command=self.ok_command)
        self.ok_button.pack(side=LEFT)
        self.filter_button = Button(self.botframe,
                                    text="Filter",
                                    command=self.filter_command)
        self.filter_button.pack(side=LEFT, expand=YES)
        self.cancel_button = Button(self.botframe,
                                    text="Cancel",
                                    command=self.cancel_command)
        self.cancel_button.pack(side=RIGHT)

        self.top.protocol('WM_DELETE_WINDOW', self.cancel_command)
        # XXX Are the following okay for a general audience?
        self.top.bind('<Alt-w>', self.cancel_command)
        self.top.bind('<Alt-W>', self.cancel_command)

    def go(self, dir_or_file=os.curdir, pattern="*", default="", key=None):
        # Run the dialog modally; returns the chosen path or None.
        if key and dialogstates.has_key(key):
            # Remembered state overrides the dir/pattern arguments.
            self.directory, pattern = dialogstates[key]
        else:
            dir_or_file = os.path.expanduser(dir_or_file)
            if os.path.isdir(dir_or_file):
                self.directory = dir_or_file
            else:
                self.directory, default = os.path.split(dir_or_file)
        self.set_filter(self.directory, pattern)
        self.set_selection(default)
        self.filter_command()
        self.selection.focus_set()
        self.top.grab_set()
        self.how = None
        self.master.mainloop()          # Exited by self.quit(how)
        if key:
            # Remember directory and pattern for the next run.
            directory, pattern = self.get_filter()
            if self.how:
                directory = os.path.dirname(self.how)
            dialogstates[key] = directory, pattern
        self.top.destroy()
        return self.how

    def quit(self, how=None):
        # Store the result and leave the nested mainloop started by go().
        self.how = how
        self.master.quit()              # Exit mainloop()

    def dirs_double_event(self, event):
        self.filter_command()

    def dirs_select_event(self, event):
        # Clicking a directory updates the filter to point into it.
        dir, pat = self.get_filter()
        subdir = self.dirs.get('active')
        dir = os.path.normpath(os.path.join(self.directory, subdir))
        self.set_filter(dir, pat)

    def files_double_event(self, event):
        self.ok_command()

    def files_select_event(self, event):
        file = self.files.get('active')
        self.set_selection(file)

    def ok_event(self, event):
        self.ok_command()

    def ok_command(self):
        self.quit(self.get_selection())

    def filter_command(self, event=None):
        # Re-list the current directory, splitting entries into
        # subdirectories and files matching the glob pattern.
        dir, pat = self.get_filter()
        try:
            names = os.listdir(dir)
        except os.error:
            self.master.bell()
            return
        self.directory = dir
        self.set_filter(dir, pat)
        names.sort()
        subdirs = [os.pardir]
        matchingfiles = []
        for name in names:
            fullname = os.path.join(dir, name)
            if os.path.isdir(fullname):
                subdirs.append(name)
            elif fnmatch.fnmatch(name, pat):
                matchingfiles.append(name)
        self.dirs.delete(0, END)
        for name in subdirs:
            self.dirs.insert(END, name)
        self.files.delete(0, END)
        for name in matchingfiles:
            self.files.insert(END, name)
        head, tail = os.path.split(self.get_selection())
        if tail == os.curdir: tail = ''
        self.set_selection(tail)

    def get_filter(self):
        # Return (directory, pattern); a filter naming a directory is
        # treated as "everything in that directory".
        filter = self.filter.get()
        filter = os.path.expanduser(filter)
        if filter[-1:] == os.sep or os.path.isdir(filter):
            filter = os.path.join(filter, "*")
        return os.path.split(filter)

    def get_selection(self):
        file = self.selection.get()
        file = os.path.expanduser(file)
        return file

    def cancel_command(self, event=None):
        self.quit()

    def set_filter(self, dir, pat):
        # Normalize a relative dir against the cwd before displaying it.
        if not os.path.isabs(dir):
            try:
                pwd = os.getcwd()
            except os.error:
                pwd = None
            if pwd:
                dir = os.path.join(pwd, dir)
                dir = os.path.normpath(dir)
        self.filter.delete(0, END)
        self.filter.insert(END, os.path.join(dir or os.curdir, pat or "*"))

    def set_selection(self, file):
        self.selection.delete(0, END)
        self.selection.insert(END, os.path.join(self.directory, file))
+
+
class LoadFileDialog(FileDialog):

    """File selection dialog which checks that the file exists."""

    title = "Load File Selection Dialog"

    def ok_command(self):
        # Only accept an existing regular file; beep otherwise.
        chosen = self.get_selection()
        if os.path.isfile(chosen):
            self.quit(chosen)
        else:
            self.master.bell()
+
+
class SaveFileDialog(FileDialog):

    """File selection dialog which checks that the file may be created."""

    title = "Save File Selection Dialog"

    def ok_command(self):
        file = self.get_selection()
        if os.path.exists(file):
            if os.path.isdir(file):
                # Refuse directories outright.
                self.master.bell()
                return
            # Existing regular file: confirm overwrite before accepting.
            d = Dialog(self.top,
                       title="Overwrite Existing File Question",
                       text="Overwrite existing file %s?" % `file`,
                       bitmap='questhead',
                       default=1,
                       strings=("Yes", "Cancel"))
            if d.num != 0:
                return
        else:
            # New file: its parent directory must already exist.
            head, tail = os.path.split(file)
            if not os.path.isdir(head):
                self.master.bell()
                return
        self.quit(file)
+
+
def test():
    """Simple test program."""
    root = Tk()
    root.withdraw()
    fd = LoadFileDialog(root)
    loadfile = fd.go(key="test")
    # Same key, so the save dialog starts with the directory/pattern the
    # load dialog ended with.
    fd = SaveFileDialog(root)
    savefile = fd.go(key="test")
    print loadfile, savefile
+
+
# Run the interactive demo when executed directly.
if __name__ == '__main__':
    test()
diff --git a/lib-python/2.2/lib-tk/FixTk.py b/lib-python/2.2/lib-tk/FixTk.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/FixTk.py
@@ -0,0 +1,37 @@
"""Set TCL_LIBRARY, TK_LIBRARY and TIX_LIBRARY (when unset) by probing
<sys.prefix>/tcl before _tkinter is first imported -- presumably the
Windows install layout; TODO confirm."""
import sys, os

# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.

# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without import Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.

prefix = os.path.join(sys.prefix,"tcl")
# if this does not exist, no further search is needed
if os.path.exists(prefix):
    if not os.environ.has_key("TCL_LIBRARY"):
        # First directory named tcl* wins -- NOTE(review): order of
        # os.listdir() is platform dependent.
        for name in os.listdir(prefix):
            if name.startswith("tcl"):
                tcldir = os.path.join(prefix,name)
                if os.path.isdir(tcldir):
                    os.environ["TCL_LIBRARY"] = tcldir
    # Compute TK_LIBRARY, knowing that it has the same version
    # as Tcl
    import _tkinter
    ver = str(_tkinter.TCL_VERSION)
    if not os.environ.has_key("TK_LIBRARY"):
        v = os.path.join(prefix, 'tk'+ver)
        if os.path.exists(os.path.join(v, "tclIndex")):
            os.environ['TK_LIBRARY'] = v
    # We don't know the Tix version, so we must search the entire
    # directory
    if not os.environ.has_key("TIX_LIBRARY"):
        for name in os.listdir(prefix):
            if name.startswith("tix"):
                tixdir = os.path.join(prefix,name)
                if os.path.isdir(tixdir):
                    os.environ["TIX_LIBRARY"] = tixdir
diff --git a/lib-python/2.2/lib-tk/ScrolledText.py b/lib-python/2.2/lib-tk/ScrolledText.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/ScrolledText.py
@@ -0,0 +1,43 @@
+# A ScrolledText widget feels like a text widget but also has a
+# vertical scroll bar on its right.  (Later, options may be added to
+# add a horizontal bar as well, to make the bars disappear
+# automatically when not needed, to move them to the other side of the
+# window, etc.)
+#
+# Configuration options are passed to the Text widget.
+# A Frame widget is inserted between the master and the text, to hold
+# the Scrollbar widget.
+# Most methods calls are inherited from the Text widget; Pack methods
+# are redirected to the Frame widget however.
+
+from Tkinter import *
+from Tkinter import _cnfmerge
+
class ScrolledText(Text):
    """A Text widget with a vertical Scrollbar, both packed inside an
    intermediate Frame.  Geometry-manager methods are copied from the
    Frame onto the instance; everything else behaves like Text."""
    def __init__(self, master=None, cnf=None, **kw):
        if cnf is None:
            cnf = {}
        if kw:
            cnf = _cnfmerge((cnf, kw))
        fcnf = {}
        for k in cnf.keys():
            # Class-keyed options (e.g. Pack) and 'name' belong to the
            # Frame, not the Text widget.  NOTE(review): ClassType is
            # expected to arrive via the Tkinter star import -- confirm.
            if type(k) == ClassType or k == 'name':
                fcnf[k] = cnf[k]
                del cnf[k]
        self.frame = apply(Frame, (master,), fcnf)
        self.vbar = Scrollbar(self.frame, name='vbar')
        self.vbar.pack(side=RIGHT, fill=Y)
        cnf['name'] = 'text'
        apply(Text.__init__, (self, self.frame), cnf)
        self.pack(side=LEFT, fill=BOTH, expand=1)
        # Wire the text and the scrollbar to each other.
        self['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.yview

        # Copy geometry methods of self.frame -- hack!
        methods = Pack.__dict__.keys()
        methods = methods + Grid.__dict__.keys()
        methods = methods + Place.__dict__.keys()

        for m in methods:
            if m[0] != '_' and m != 'config' and m != 'configure':
                setattr(self, m, getattr(self.frame, m))
diff --git a/lib-python/2.2/lib-tk/SimpleDialog.py b/lib-python/2.2/lib-tk/SimpleDialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/SimpleDialog.py
@@ -0,0 +1,111 @@
+"""A simple but flexible modal dialog box."""
+
+
+from Tkinter import *
+
+
class SimpleDialog:
    """Modal message box with a configurable row of buttons.

    go() blocks until a button is pressed (or the dialog is closed)
    and returns the index of the chosen button.
    """

    def __init__(self, master,
                 text='', buttons=[], default=None, cancel=None,
                 title=None, class_=None):
        if class_:
            self.root = Toplevel(master, class_=class_)
        else:
            self.root = Toplevel(master)
        if title:
            self.root.title(title)
            self.root.iconname(title)
        self.message = Message(self.root, text=text, aspect=400)
        self.message.pack(expand=1, fill=BOTH)
        self.frame = Frame(self.root)
        self.frame.pack()
        self.num = default
        self.cancel = cancel
        self.default = default
        self.root.bind('<Return>', self.return_event)
        for num in range(len(buttons)):
            s = buttons[num]
            # Default arguments bind the current self/num at definition
            # time, avoiding the late-binding closure pitfall.
            b = Button(self.frame, text=s,
                       command=(lambda self=self, num=num: self.done(num)))
            if num == default:
                b.config(relief=RIDGE, borderwidth=8)
            b.pack(side=LEFT, fill=BOTH, expand=1)
        self.root.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
        self._set_transient(master)

    def _set_transient(self, master, relx=0.5, rely=0.3):
        # Position the dialog relative to the master window (or screen),
        # clamped to stay fully on screen.
        widget = self.root
        widget.withdraw() # Remain invisible while we figure out the geometry
        widget.transient(master)
        widget.update_idletasks() # Actualize geometry information
        if master.winfo_ismapped():
            m_width = master.winfo_width()
            m_height = master.winfo_height()
            m_x = master.winfo_rootx()
            m_y = master.winfo_rooty()
        else:
            m_width = master.winfo_screenwidth()
            m_height = master.winfo_screenheight()
            m_x = m_y = 0
        w_width = widget.winfo_reqwidth()
        w_height = widget.winfo_reqheight()
        x = m_x + (m_width - w_width) * relx
        y = m_y + (m_height - w_height) * rely
        if x+w_width > master.winfo_screenwidth():
            x = master.winfo_screenwidth() - w_width
        elif x < 0:
            x = 0
        if y+w_height > master.winfo_screenheight():
            y = master.winfo_screenheight() - w_height
        elif y < 0:
            y = 0
        widget.geometry("+%d+%d" % (x, y))
        widget.deiconify() # Become visible at the desired location

    def go(self):
        # Grab + nested mainloop make the dialog modal; done() calls
        # quit() to leave the loop, then the window is destroyed.
        self.root.grab_set()
        self.root.mainloop()
        self.root.destroy()
        return self.num

    def return_event(self, event):
        # <Return> activates the default button, if any.
        if self.default is None:
            self.root.bell()
        else:
            self.done(self.default)

    def wm_delete_window(self):
        # Window-manager close maps to the cancel button, if any.
        if self.cancel is None:
            self.root.bell()
        else:
            self.done(self.cancel)

    def done(self, num):
        self.num = num
        self.root.quit()
+
+
def test():
    root = Tk()
    def doit(root=root):
        # Pop up a three-button dialog and print the chosen index.
        d = SimpleDialog(root,
                         text="This is a test dialog.  "
                              "Would this have been an actual dialog, "
                              "the buttons below would have been glowing "
                              "in soft pink light.\n"
                              "Do you believe this?",
                         buttons=["Yes", "No", "Cancel"],
                         default=0,
                         cancel=2,
                         title="Test Dialog")
        print d.go()
    t = Button(root, text='Test', command=doit)
    t.pack()
    q = Button(root, text='Quit', command=t.quit)
    q.pack()
    t.mainloop()
+
+
# Run the interactive demo when executed directly.
if __name__ == '__main__':
    test()
diff --git a/lib-python/2.2/lib-tk/Tix.py b/lib-python/2.2/lib-tk/Tix.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Tix.py
@@ -0,0 +1,1626 @@
+# -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
+#
+# $Id$
+#
+# Tix.py -- Tix widget wrappers.
+#
+#	For Tix, see http://tix.sourceforge.net
+#
+#       - Sudhir Shenoy (sshenoy at gol.com), Dec. 1995.
+#         based on an idea of Jean-Marc Lugrin (lugrin at ms.com)
+#
+# NOTE: In order to minimize changes to Tkinter.py, some of the code here
+#       (TixWidget.__init__) has been taken from Tkinter (Widget.__init__)
+#       and will break if there are major changes in Tkinter.
+#
+# The Tix widgets are represented by a class hierarchy in python with proper
+# inheritance of base classes.
+#
+# As a result after creating a 'w = StdButtonBox', I can write
+#              w.ok['text'] = 'Who Cares'
+#    or              w.ok['bg'] = w['bg']
+# or even       w.ok.invoke()
+# etc.
+#
+# Compare the demo tixwidgets.py to the original Tcl program and you will
+# appreciate the advantages.
+#
+
+import string
+from Tkinter import *
+from Tkinter import _flatten, _cnfmerge, _default_root
+
+# WARNING - TkVersion is a limited precision floating point number
+if TkVersion < 3.999:
+    raise ImportError, "This version of Tix.py requires Tk 4.0 or higher"
+
+import _tkinter # If this fails your Python may not be configured for Tk
+# TixVersion = string.atof(tkinter.TIX_VERSION) # If this fails your Python may not be configured for Tix
+# WARNING - TixVersion is a limited precision floating point number
+
+# Some more constants (for consistency with Tkinter)
+# Display-item and option string values used by the Tix widgets below.
+WINDOW = 'window'
+TEXT = 'text'
+STATUS = 'status'
+IMMEDIATE = 'immediate'
+IMAGE = 'image'
+IMAGETEXT = 'imagetext'
+BALLOON = 'balloon'
+AUTO = 'auto'
+ACROSSTOP = 'acrosstop'
+
+# Some constants used by Tkinter dooneevent()
+# Bit flags selecting which event classes dooneevent() will process.
+TCL_DONT_WAIT     = 1 << 1
+TCL_WINDOW_EVENTS = 1 << 2
+TCL_FILE_EVENTS   = 1 << 3
+TCL_TIMER_EVENTS  = 1 << 4
+TCL_IDLE_EVENTS   = 1 << 5
+TCL_ALL_EVENTS    = 0
+
+# BEWARE - this is implemented by copying some code from the Widget class
+#          in Tkinter (to override Widget initialization) and is therefore
+#          liable to break.
+import Tkinter, os
+
+# Could probably add this to Tkinter.Misc
+class tixCommand:
+    """The tix commands provide access to miscellaneous elements
+    of Tix's internal state and the Tix application context.
+    Most of the information manipulated by these commands pertains
+    to the application as a whole, or to a screen or
+    display, rather than to a particular window.
+
+    This is a mixin class, assumed to be mixed to Tkinter.Tk
+    that supports the self.tk.call method.
+    """
+
+    def tix_addbitmapdir(self, directory):
+        """Tix maintains a list of directories under which
+        the tix_getimage and tix_getbitmap commands will
+        search for image files. The standard bitmap directory
+        is $TIX_LIBRARY/bitmaps. The addbitmapdir command
+        adds directory into this list. By using this
+        command, the image files of an application can
+        also be located using the tix_getimage or tix_getbitmap
+        command.
+        """
+        return self.tk.call('tix', 'addbitmapdir', directory)
+
+    def tix_cget(self, option):
+        """Returns the current value of the configuration
+        option given by option. Option may be any of the
+        options described in the CONFIGURATION OPTIONS section.
+        """
+        return self.tk.call('tix', 'cget', option)
+
+    def tix_configure(self, cnf=None, **kw):
+        """Query or modify the configuration options of the Tix application
+        context. If no option is specified, returns a dictionary all of the
+        available options.  If option is specified with no value, then the
+        command returns a list describing the one named option (this list
+        will be identical to the corresponding sublist of the value
+        returned if no option is specified).  If one or more option-value
+        pairs are specified, then the command modifies the given option(s)
+        to have the given value(s); in this case the command returns an
+        empty string. Option may be any of the configuration options.
+        """
+        # Copied from Tkinter.py
+        # NOTE(review): StringType is in scope via "from Tkinter import *"
+        # (Tkinter of this era did "from types import *") — verify.
+        if kw:
+            cnf = _cnfmerge((cnf, kw))
+        elif cnf:
+            cnf = _cnfmerge(cnf)
+        if cnf is None:
+            cnf = {}
+            for x in self.tk.split(self.tk.call('tix', 'configure')):
+                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+            return cnf
+        if isinstance(cnf, StringType):
+            x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
+            return (x[0][1:],) + x[1:]
+        return self.tk.call(('tix', 'configure') + self._options(cnf))
+
+    def tix_filedialog(self, dlgclass=None):
+        """Returns the file selection dialog that may be shared among
+        different calls from this application.  This command will create a
+        file selection dialog widget when it is called the first time. This
+        dialog will be returned by all subsequent calls to tix_filedialog.
+        An optional dlgclass parameter can be passed to specify what type
+        of file selection dialog widget is desired. Possible options are
+        tixFileSelectDialog or tixExFileSelectDialog.
+        """
+        if dlgclass is not None:
+            return self.tk.call('tix', 'filedialog', dlgclass)
+        else:
+            return self.tk.call('tix', 'filedialog')
+
+    def tix_getbitmap(self, name):
+        """Locates a bitmap file of the name name.xpm or name in one of the
+        bitmap directories (see the tix_addbitmapdir command above).  By
+        using tix_getbitmap, you can avoid hard coding the pathnames of the
+        bitmap files in your application. When successful, it returns the
+        complete pathname of the bitmap file, prefixed with the character
+        '@'.  The returned value can be used to configure the -bitmap
+        option of the Tk and Tix widgets.
+        """
+        return self.tk.call('tix', 'getbitmap', name)
+
+    def tix_getimage(self, name):
+        """Locates an image file of the name name.xpm, name.xbm or name.ppm
+        in one of the bitmap directories (see the addbitmapdir command
+        above). If more than one file with the same name (but different
+        extensions) exist, then the image type is chosen according to the
+        depth of the X display: xbm images are chosen on monochrome
+        displays and color images are chosen on color displays. By using
+        tix_getimage, you can avoid hard coding the pathnames of the
+        image files in your application. When successful, this command
+        returns the name of the newly created image, which can be used to
+        configure the -image option of the Tk and Tix widgets.
+        """
+        return self.tk.call('tix', 'getimage', name)
+
+    def tix_option_get(self, name):
+        """Gets the options maintained by the Tix
+        scheme mechanism. Available options include:
+
+            active_bg       active_fg      bg
+            bold_font       dark1_bg       dark1_fg
+            dark2_bg        dark2_fg       disabled_fg
+            fg              fixed_font     font
+            inactive_bg     inactive_fg    input1_bg
+            input2_bg       italic_font    light1_bg
+            light1_fg       light2_bg      light2_fg
+            menu_font       output1_bg     output2_bg
+            select_bg       select_fg      selector
+            """
+        # could use self.tk.globalgetvar('tixOption', name)
+        return self.tk.call('tix', 'option', 'get', name)
+
+    def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None):
+        """Resets the scheme and fontset of the Tix application to
+        newScheme and newFontSet, respectively.  This affects only those
+        widgets created after this call. Therefore, it is best to call the
+        resetoptions command before the creation of any widgets in a Tix
+        application.
+
+        The optional parameter newScmPrio can be given to reset the
+        priority level of the Tk options set by the Tix schemes.
+
+        Because of the way Tk handles the X option database, after Tix has
+        been imported and initialized, it is not possible to reset the
+        color schemes and font sets using the tix config command.  Instead,
+        the tix_resetoptions command must be used.
+        """
+        if newScmPrio is not None:
+            return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio)
+        else:
+            return self.tk.call('tix', 'resetoptions', newScheme, newFontSet)
+
+class Tk(Tkinter.Tk, tixCommand):
+    """Toplevel widget of Tix which represents mostly the main window
+    of an application. It has an associated Tcl interpreter."""
+    def __init__(self, screenName=None, baseName=None, className='Tix'):
+        Tkinter.Tk.__init__(self, screenName, baseName, className)
+        tixlib = os.environ.get('TIX_LIBRARY')
+        # Add the directory of the running executable to Tcl's package
+        # search path.
+        self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]')
+        if tixlib is not None:
+            # Honor $TIX_LIBRARY so a non-standard Tix install is found.
+            self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib)
+            self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib)
+        # Load Tix - this should work dynamically or statically
+        # If it's static, lib/tix8.1/pkgIndex.tcl should have
+        #		'load {} Tix'
+        # If it's dynamic under Unix, lib/tix8.1/pkgIndex.tcl should have
+        #		'load libtix8.1.8.3.so Tix'
+        self.tk.eval('package require Tix')
+
+
+# The Tix 'tixForm' geometry manager
+class Form:
+    """The Tix Form geometry manager
+
+    Widgets can be arranged by specifying attachments to other widgets.
+    See Tix documentation for complete details"""
+
+    def config(self, cnf={}, **kw):
+        # Apply form options to this widget via the tixForm command.
+        apply(self.tk.call, ('tixForm', self._w) + self._options(cnf, kw))
+
+    form = config
+
+    def __setitem__(self, key, value):
+        Form.form(self, {key: value})
+
+    def check(self):
+        # Ask tixForm whether the attachments contain circular dependencies.
+        return self.tk.call('tixForm', 'check', self._w)
+
+    def forget(self):
+        # Remove this widget from tixForm management.
+        self.tk.call('tixForm', 'forget', self._w)
+
+    def grid(self, xsize=0, ysize=0):
+        # With no arguments, query the grid size and return it as a
+        # tuple of ints; otherwise set the grid size.
+        if (not xsize) and (not ysize):
+            x = self.tk.call('tixForm', 'grid', self._w)
+            y = self.tk.splitlist(x)
+            z = ()
+            for x in y:
+                z = z + (self.tk.getint(x),)
+            return z
+        self.tk.call('tixForm', 'grid', self._w, xsize, ysize)
+
+    def info(self, option=None):
+        # Query all form options, or one option ('-' prefix added if missing).
+        if not option:
+            return self.tk.call('tixForm', 'info', self._w)
+        if option[0] != '-':
+            option = '-' + option
+        return self.tk.call('tixForm', 'info', self._w, option)
+
+    def slaves(self):
+        # Return the widgets managed by this form as widget objects.
+        return map(self._nametowidget,
+                   self.tk.splitlist(
+                       self.tk.call(
+                       'tixForm', 'slaves', self._w)))
+
+
+    
+
+# Bolt the Tix Form geometry manager onto every Tkinter widget by
+# injecting Form into Widget's base classes at import time.
+Tkinter.Widget.__bases__ = Tkinter.Widget.__bases__ + (Form,)
+
+class TixWidget(Tkinter.Widget):
+    """A TixWidget class is used to package all (or most) Tix widgets.
+
+    Widget initialization is extended in two ways:
+       1) It is possible to give a list of options which must be part of
+       the creation command (so called Tix 'static' options). These cannot be
+       given as a 'config' command later.
+       2) It is possible to give the name of an existing TK widget. These are
+       child widgets created automatically by a Tix mega-widget. The Tk call
+       to create these widgets is therefore bypassed in TixWidget.__init__
+
+    Both options are for use by subclasses only.
+    """
+    def __init__ (self, master=None, widgetName=None,
+                static_options=None, cnf={}, kw={}):
+       # Merge keywords and dictionary arguments
+       if kw:
+            cnf = _cnfmerge((cnf, kw))
+       else:
+           cnf = _cnfmerge(cnf)
+
+       # Move static options into extra. static_options must be
+       # a list of keywords (or None).
+       extra=()
+       if static_options:
+           for k,v in cnf.items()[:]:
+              if k in static_options:
+                  extra = extra + ('-' + k, v)
+                  del cnf[k]
+
+       self.widgetName = widgetName
+       # NOTE(review): Widget._setup registers self with master without
+       # issuing a Tk creation command — confirm against Tkinter.py.
+       Widget._setup(self, master, cnf)
+
+       # If widgetName is None, this is a dummy creation call where the
+       # corresponding Tk widget has already been created by Tix
+       if widgetName:
+           apply(self.tk.call, (widgetName, self._w) + extra)
+
+       # Non-static options - to be done via a 'config' command
+       if cnf:
+           Widget.config(self, cnf)
+
+       # Dictionary to hold subwidget names for easier access. We can't
+       # use the children list because the public Tix names may not be the
+       # same as the pathname component
+       self.subwidget_list = {}
+
+    # We set up an attribute access function so that it is possible to
+    # do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello'
+    # when w is a StdButtonBox.
+    # We can even do w.ok.invoke() because w.ok is subclassed from the
+    # Button class if you go through the proper constructors
+    def __getattr__(self, name):
+       if self.subwidget_list.has_key(name):
+           return self.subwidget_list[name]
+       raise AttributeError, name
+
+    def set_silent(self, value):
+       """Set a variable without calling its action routine"""
+       self.tk.call('tixSetSilent', self._w, value)
+
+    def subwidget(self, name):
+       """Return the named subwidget (which must have been created by
+       the sub-class)."""
+       n = self._subwidget_name(name)
+       if not n:
+           raise TclError, "Subwidget " + name + " not child of " + self._name
+       # Remove header of name and leading dot
+       n = n[len(self._w)+1:]
+       return self._nametowidget(n)
+
+    def subwidgets_all(self):
+       """Return all subwidgets."""
+       names = self._subwidget_names()
+       if not names:
+           return []
+       retlist = []
+       for name in names:
+           name = name[len(self._w)+1:]
+           try:
+              retlist.append(self._nametowidget(name))
+           except:
+              # some of the widgets are unknown e.g. border in LabelFrame
+              pass
+       return retlist
+
+    def _subwidget_name(self,name):
+       """Get a subwidget name (returns a String, not a Widget !)"""
+       try:
+           return self.tk.call(self._w, 'subwidget', name)
+       except TclError:
+           return None
+
+    def _subwidget_names(self):
+       """Return the name of all subwidgets."""
+       try:
+           x = self.tk.call(self._w, 'subwidgets', '-all')
+           return self.tk.split(x)
+       except TclError:
+           return None
+
+    def config_all(self, option, value):
+       """Set configuration options for all subwidgets (and self)."""
+       if option == '':
+           return
+       elif not isinstance(option, StringType):
+           option = `option`
+       if not isinstance(value, StringType):
+           value = `value`
+       names = self._subwidget_names()
+       for name in names:
+           self.tk.call(name, 'configure', '-' + option, value)
+
+# Subwidgets are child widgets created automatically by mega-widgets.
+# In python, we have to create these subwidgets manually to mirror their
+# existence in Tk/Tix.
+class TixSubWidget(TixWidget):
+    """Subwidget class.
+
+    This is used to mirror child widgets automatically created
+    by Tix/Tk as part of a mega-widget in Python (which is not informed
+    of this)"""
+
+    def __init__(self, master, name,
+               destroy_physically=1, check_intermediate=1):
+       # When check_intermediate is true, resolve the full Tk path of the
+       # subwidget so any intermediate path components can be mirrored too.
+       if check_intermediate:
+           path = master._subwidget_name(name)
+           try:
+              path = path[len(master._w)+1:]
+              plist = string.splitfields(path, '.')
+           except:
+              plist = []
+
+       if (not check_intermediate) or len(plist) < 2:
+           # immediate descendant
+           TixWidget.__init__(self, master, None, None, {'name' : name})
+       else:
+           # Ensure that the intermediate widgets exist
+           parent = master
+           for i in range(len(plist) - 1):
+              n = string.joinfields(plist[:i+1], '.')
+              try:
+                  w = master._nametowidget(n)
+                  parent = w
+              except KeyError:
+                  # Create the intermediate widget
+                  parent = TixSubWidget(parent, plist[i],
+                                     destroy_physically=0,
+                                     check_intermediate=0)
+           TixWidget.__init__(self, parent, None, None, {'name' : name})
+       self.destroy_physically = destroy_physically
+
+    def destroy(self):
+       # For some widgets e.g., a NoteBook, when we call destructors,
+       # we must be careful not to destroy the frame widget since this
+       # also destroys the parent NoteBook thus leading to an exception
+       # in Tkinter when it finally calls Tcl to destroy the NoteBook
+       for c in self.children.values(): c.destroy()
+       if self.master.children.has_key(self._name):
+           del self.master.children[self._name]
+       if self.master.subwidget_list.has_key(self._name):
+           del self.master.subwidget_list[self._name]
+       if self.destroy_physically:
+           # This is bypassed only for a few widgets
+           self.tk.call('destroy', self._w)
+
+
+# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
+def _lst2dict(lst):
+    """Convert a Tcl option list into a dict keyed by the option name
+    with its leading '-' stripped.  (The local name 'dict' shadows the
+    builtin; kept for fidelity with Tkinter.py.)"""
+    dict = {}
+    for x in lst:
+       dict[x[0][1:]] = (x[0][1:],) + x[1:]
+    return dict
+
+# Useful class to create a display style - later shared by many items.
+# Contributed by Steffen Kremser
+class DisplayStyle:
+    """DisplayStyle - handle configuration options shared by
+    (multiple) Display Items"""
+
+    def __init__(self, itemtype, cnf={}, **kw ):
+        # NOTE(review): when _default_root is set, any 'refwindow'
+        # argument is ignored (the elif chain never runs) — matches
+        # the upstream CPython Tix.py of this era.
+        master = _default_root              # global from Tkinter
+        if not master and cnf.has_key('refwindow'): master=cnf['refwindow']
+        elif not master and kw.has_key('refwindow'):  master= kw['refwindow']
+        elif not master: raise RuntimeError, "Too early to create display style: no root window"
+        self.tk = master.tk
+        self.stylename = apply(self.tk.call, ('tixDisplayStyle', itemtype) +
+                            self._options(cnf,kw) )
+
+    def __str__(self):
+       return self.stylename
+ 
+    def _options(self, cnf, kw ):
+       # Flatten cnf/kw into a ('-opt', value, ...) tuple for Tcl.
+       if kw and cnf:
+           cnf = _cnfmerge((cnf, kw))
+       elif kw:
+           cnf = kw
+       opts = ()
+       for k, v in cnf.items():
+           opts = opts + ('-'+k, v)
+       return opts
+ 
+    def delete(self):
+       self.tk.call(self.stylename, 'delete')
+       # NOTE(review): del(self) only unbinds the local name; it does
+       # not destroy the Python object.
+       del(self)
+ 
+    def __setitem__(self,key,value):
+       self.tk.call(self.stylename, 'configure', '-%s'%key, value)
+ 
+    def config(self, cnf={}, **kw):
+       return _lst2dict(
+           self.tk.split(
+              apply(self.tk.call,
+                    (self.stylename, 'configure') + self._options(cnf,kw))))
+ 
+    def __getitem__(self,key):
+       return self.tk.call(self.stylename, 'cget', '-%s'%key)
+
+
+######################################################
+### The Tix Widget classes - in alphabetical order ###
+######################################################
+
+class Balloon(TixWidget):
+    """Balloon help widget.
+
+    Subwidget       Class
+    ---------       -----
+    label           Label
+    message         Message"""
+
+    def __init__(self, master=None, cnf={}, **kw):
+        # static seem to be -installcolormap -initwait -statusbar -cursor
+       static = ['options', 'installcolormap', 'initwait', 'statusbar',
+                 'cursor']
+       TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw)
+       self.subwidget_list['label'] = _dummyLabel(self, 'label',
+                                                  destroy_physically=0)
+       self.subwidget_list['message'] = _dummyLabel(self, 'message',
+                                                    destroy_physically=0)
+
+    def bind_widget(self, widget, cnf={}, **kw):
+       """Bind balloon widget to another.
+       One balloon widget may be bound to several widgets at the same time"""
+       apply(self.tk.call, 
+             (self._w, 'bind', widget._w) + self._options(cnf, kw))
+
+    def unbind_widget(self, widget):
+       """Remove the balloon binding from the given widget."""
+       self.tk.call(self._w, 'unbind', widget._w)
+
+class ButtonBox(TixWidget):
+    """ButtonBox - A container for pushbuttons.
+    Subwidgets are the buttons added with the add method.
+    """
+    def __init__(self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixButtonBox',
+                          ['orientation', 'options'], cnf, kw)
+
+    def add(self, name, cnf={}, **kw):
+       """Add a button with given name to box."""
+
+       btn = apply(self.tk.call,
+                   (self._w, 'add', name) + self._options(cnf, kw))
+       self.subwidget_list[name] = _dummyButton(self, name)
+       return btn
+
+    def invoke(self, name):
+       """Invoke the named button, if it was added via add()."""
+       if self.subwidget_list.has_key(name):
+           self.tk.call(self._w, 'invoke', name)
+
+class ComboBox(TixWidget):
+    """ComboBox - an Entry field with a dropdown menu. The user can select a
+    choice by either typing in the entry subwidget or selecting from the
+    listbox subwidget.
+
+    Subwidget       Class
+    ---------       -----
+    entry       Entry
+    arrow       Button
+    slistbox    ScrolledListBox
+    tick        Button
+    cross       Button : present if created with the fancy option"""
+
+    def __init__ (self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixComboBox', 
+                          ['editable', 'dropdown', 'fancy', 'options'],
+                          cnf, kw)
+       self.subwidget_list['label'] = _dummyLabel(self, 'label')
+       self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
+       self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
+       self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
+                                                               'slistbox')
+       try:
+           self.subwidget_list['tick'] = _dummyButton(self, 'tick')
+           self.subwidget_list['cross'] = _dummyButton(self, 'cross')
+       except TypeError:
+           # unavailable when -fancy not specified
+           pass
+
+    def add_history(self, str):
+       self.tk.call(self._w, 'addhistory', str)
+
+    def append_history(self, str):
+       self.tk.call(self._w, 'appendhistory', str)
+
+    def insert(self, index, str):
+       self.tk.call(self._w, 'insert', index, str)
+
+    def pick(self, index):
+       self.tk.call(self._w, 'pick', index)
+
+class Control(TixWidget):
+    """Control - An entry field with value change arrows.  The user can
+    adjust the value by pressing the two arrow buttons or by entering
+    the value directly into the entry. The new value will be checked
+    against the user-defined upper and lower limits.
+
+    Subwidget       Class
+    ---------       -----
+    incr        Button
+    decr        Button
+    entry       Entry
+    label       Label"""
+
+    def __init__ (self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw)
+       self.subwidget_list['incr'] = _dummyButton(self, 'incr')
+       self.subwidget_list['decr'] = _dummyButton(self, 'decr')
+       self.subwidget_list['label'] = _dummyLabel(self, 'label')
+       self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
+
+    def decrement(self):
+       self.tk.call(self._w, 'decr')
+
+    def increment(self):
+       self.tk.call(self._w, 'incr')
+
+    def invoke(self):
+       self.tk.call(self._w, 'invoke')
+
+    def update(self):
+       self.tk.call(self._w, 'update')
+
+class DirList(TixWidget):
+    """DirList - displays a list view of a directory, its previous
+    directories and its sub-directories. The user can choose one of
+    the directories displayed in the list or change to another directory.
+
+    Subwidget       Class
+    ---------       -----
+    hlist       HList
+    hsb         Scrollbar
+    vsb         Scrollbar"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+    def chdir(self, dir):
+       # 'dir' shadows the builtin; kept for interface compatibility.
+       self.tk.call(self._w, 'chdir', dir)
+
+class DirTree(TixWidget):
+    """DirTree - Directory Listing in a hierarchical view.
+    Displays a tree view of a directory, its previous directories and its
+    sub-directories. The user can choose one of the directories displayed
+    in the list or change to another directory.
+
+    Subwidget       Class
+    ---------       -----
+    hlist       HList
+    hsb         Scrollbar
+    vsb         Scrollbar"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+    def chdir(self, dir):
+       # 'dir' shadows the builtin; kept for interface compatibility.
+       self.tk.call(self._w, 'chdir', dir)
+
+class DirSelectBox(TixWidget):
+    """DirSelectBox - Motif style directory select box.
+    Used for the user to choose a directory.
+
+    Subwidget       Class
+    ---------       -----
+    dirlist       DirList
+    dircbx        FileComboBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
+       self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
+       self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
+
+class ExFileSelectBox(TixWidget):
+    """ExFileSelectBox - MS Windows style file select box.
+    It provides a convenient method for the user to select files.
+
+    Subwidget       Class
+    ---------       -----
+    cancel       Button
+    ok           Button
+    hidden       Checkbutton
+    types        ComboBox
+    dir          ComboBox
+    file         ComboBox
+    dirlist      DirList
+    filelist     ScrolledListBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw)
+       self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
+       self.subwidget_list['ok'] = _dummyButton(self, 'ok')
+       self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
+       self.subwidget_list['types'] = _dummyComboBox(self, 'types')
+       self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
+       self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
+       self.subwidget_list['file'] = _dummyComboBox(self, 'file')
+       self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
+
+    def filter(self):
+       """Re-apply the current filter to the file list."""
+       self.tk.call(self._w, 'filter')
+
+    def invoke(self):
+       """Act on the currently selected file."""
+       self.tk.call(self._w, 'invoke')
+
+
+# Should inherit from a Dialog class
+class DirSelectDialog(TixWidget):
+    """The DirSelectDialog widget presents the directories in the file
+    system in a dialog window. The user can use this dialog window to
+    navigate through the file system to select the desired directory.
+
+    Subwidgets       Class
+    ----------       -----
+    dirbox       DirSelectBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixDirSelectDialog',
+                        ['options'], cnf, kw)
+       self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
+       # cancel and ok buttons are missing
+       
+    def popup(self):
+       self.tk.call(self._w, 'popup')
+
+    def popdown(self):
+       self.tk.call(self._w, 'popdown')
+
+
+# Should inherit from a Dialog class
+class ExFileSelectDialog(TixWidget):
+    """ExFileSelectDialog - MS Windows style file select dialog.
+    It provides a convenient method for the user to select files.
+
+    Subwidgets       Class
+    ----------       -----
+    fsbox       ExFileSelectBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixExFileSelectDialog',
+                        ['options'], cnf, kw)
+       self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox')
+
+    def popup(self):
+       self.tk.call(self._w, 'popup')
+
+    def popdown(self):
+       self.tk.call(self._w, 'popdown')
+
+class FileSelectBox(TixWidget):
+    """FileSelectBox - Motif style file select box.
+    It is generally used for
+    the user to choose a file. FileSelectBox stores the files mostly
+    recently selected into a ComboBox widget so that they can be quickly
+    selected again.
+
+    Subwidget       Class
+    ---------       -----
+    selection       ComboBox
+    filter          ComboBox
+    dirlist         ScrolledListBox
+    filelist        ScrolledListBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
+       self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
+       self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
+       self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
+       self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
+
+    def apply_filter(self):              # name of subwidget is same as command
+       self.tk.call(self._w, 'filter')
+
+    def invoke(self):
+       self.tk.call(self._w, 'invoke')
+
+# Should inherit from a Dialog class
+class FileSelectDialog(TixWidget):
+    """FileSelectDialog - Motif style file select dialog.
+
+    Subwidgets       Class
+    ----------       -----
+    btns        StdButtonBox
+    fsbox       FileSelectBox"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixFileSelectDialog',
+                        ['options'], cnf, kw)
+       self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns')
+       self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox')
+
+    def popup(self):
+       self.tk.call(self._w, 'popup')
+
+    def popdown(self):
+       self.tk.call(self._w, 'popdown')
+
+class FileEntry(TixWidget):
+    """FileEntry - Entry field with button that invokes a FileSelectDialog.
+    The user can type in the filename manually. Alternatively, the user can
+    press the button widget that sits next to the entry, which will bring
+    up a file selection dialog.
+
+    Subwidgets       Class
+    ----------       -----
+    button       Button
+    entry        Entry"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixFileEntry',
+                        ['dialogtype', 'options'], cnf, kw)
+       self.subwidget_list['button'] = _dummyButton(self, 'button')
+       self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
+
+    def invoke(self):
+       self.tk.call(self._w, 'invoke')
+
+    def file_dialog(self):
+       # XXX return python object
+       pass
+
+class HList(TixWidget):
+    """HList - Hierarchy display widget can be used to display any data
+    that have a hierarchical structure, for example, file system directory
+    trees. The list entries are indented and connected by branch lines
+    according to their places in the hierarchy.
+
+    Subwidgets - None"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixHList',
+                        ['columns', 'options'], cnf, kw)
+
+    # Entry creation -------------------------------------------------------
+    def add(self, entry, cnf={}, **kw):
+       return apply(self.tk.call,
+                   (self._w, 'add', entry) + self._options(cnf, kw))
+
+    def add_child(self, parent=None, cnf={}, **kw):
+       if not parent:
+           parent = ''
+       return apply(self.tk.call,
+                   (self._w, 'addchild', parent) + self._options(cnf, kw))
+
+    # Anchor (keyboard focus highlight) ------------------------------------
+    def anchor_set(self, entry):
+       self.tk.call(self._w, 'anchor', 'set', entry)
+
+    def anchor_clear(self):
+       self.tk.call(self._w, 'anchor', 'clear')
+
+    def column_width(self, col=0, width=None, chars=None):
+       if not chars:
+           return self.tk.call(self._w, 'column', 'width', col, width)
+       else:
+           return self.tk.call(self._w, 'column', 'width', col,
+                            '-char', chars)
+
+    # Entry deletion -------------------------------------------------------
+    def delete_all(self):
+       self.tk.call(self._w, 'delete', 'all')
+
+    def delete_entry(self, entry):
+       self.tk.call(self._w, 'delete', 'entry', entry)
+
+    def delete_offsprings(self, entry):
+       self.tk.call(self._w, 'delete', 'offsprings', entry)
+
+    def delete_siblings(self, entry):
+       self.tk.call(self._w, 'delete', 'siblings', entry)
+
+    # Drag-and-drop site indicators ----------------------------------------
+    def dragsite_set(self, index):
+       self.tk.call(self._w, 'dragsite', 'set', index)
+
+    def dragsite_clear(self):
+       self.tk.call(self._w, 'dragsite', 'clear')
+
+    def dropsite_set(self, index):
+       self.tk.call(self._w, 'dropsite', 'set', index)
+
+    def dropsite_clear(self):
+       self.tk.call(self._w, 'dropsite', 'clear')
+
+    # Column headers (Tix HList 'header' subcommand) -----------------------
+    def header_create(self, col, cnf={}, **kw):
+        apply(self.tk.call,
+              (self._w, 'header', 'create', col) + self._options(cnf, kw))
+ 
+    def header_configure(self, col, cnf={}, **kw):
+       if cnf is None:
+           return _lst2dict(
+              self.tk.split(
+                  self.tk.call(self._w, 'header', 'configure', col)))
+       apply(self.tk.call, (self._w, 'header', 'configure', col)
+             + self._options(cnf, kw))
+ 
+    def header_cget(self,  col, opt):
+       return self.tk.call(self._w, 'header', 'cget', col, opt)
+ 
+    def header_exists(self,  col):
+       return self.tk.call(self._w, 'header', 'exists', col)
+ 
+    def header_delete(self, col):
+        self.tk.call(self._w, 'header', 'delete', col)
+ 
+    def header_size(self, col):
+        return self.tk.call(self._w, 'header', 'size', col)
+ 
+    def hide_entry(self, entry):
+       self.tk.call(self._w, 'hide', 'entry', entry)
+
+    # Open/close indicators next to entries ('indicator' subcommand) -------
+    def indicator_create(self, entry, cnf={}, **kw):
+        apply(self.tk.call,
+              (self._w, 'indicator', 'create', entry) + self._options(cnf, kw))
+ 
+    def indicator_configure(self, entry, cnf={}, **kw):
+       if cnf is None:
+           return _lst2dict(
+              self.tk.split(
+                  self.tk.call(self._w, 'indicator', 'configure', entry)))
+       apply(self.tk.call,
+             (self._w, 'indicator', 'configure', entry) + self._options(cnf, kw))
+ 
+    def indicator_cget(self,  entry, opt):
+       return self.tk.call(self._w, 'indicator', 'cget', entry, opt)
+ 
+    def indicator_exists(self,  entry):
+       return self.tk.call (self._w, 'indicator', 'exists', entry)
+ 
+    def indicator_delete(self, entry):
+        self.tk.call(self._w, 'indicator', 'delete', entry)
+ 
+    def indicator_size(self, entry):
+        return self.tk.call(self._w, 'indicator', 'size', entry)
+
+    # Queries ('info' subcommand) ------------------------------------------
+    def info_anchor(self):
+       return self.tk.call(self._w, 'info', 'anchor')
+
+    def info_children(self, entry=None):
+       c = self.tk.call(self._w, 'info', 'children', entry)
+       return self.tk.splitlist(c)
+
+    def info_data(self, entry):
+       return self.tk.call(self._w, 'info', 'data', entry)
+
+    def info_exists(self, entry):
+       return self.tk.call(self._w, 'info', 'exists', entry)
+
+    def info_hidden(self, entry):
+       return self.tk.call(self._w, 'info', 'hidden', entry)
+
+    def info_next(self, entry):
+       return self.tk.call(self._w, 'info', 'next', entry)
+
+    def info_parent(self, entry):
+       return self.tk.call(self._w, 'info', 'parent', entry)
+
+    def info_prev(self, entry):
+       return self.tk.call(self._w, 'info', 'prev', entry)
+
+    def info_selection(self):
+       c = self.tk.call(self._w, 'info', 'selection')
+       return self.tk.splitlist(c)
+
+    # Per-cell items ('item' subcommand) -----------------------------------
+    def item_cget(self, entry, col, opt):
+       return self.tk.call(self._w, 'item', 'cget', entry, col, opt)
+ 
+    def item_configure(self, entry, col, cnf={}, **kw):
+       if cnf is None:
+           return _lst2dict(
+              self.tk.split(
+                  self.tk.call(self._w, 'item', 'configure', entry, col)))
+       apply(self.tk.call, (self._w, 'item', 'configure', entry, col) +
+             self._options(cnf, kw))
+
+    def item_create(self, entry, col, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'item', 'create', entry, col) + self._options(cnf, kw))
+
+    def item_exists(self, entry, col):
+       return self.tk.call(self._w, 'item', 'exists', entry, col)
+ 
+    def item_delete(self, entry, col):
+       self.tk.call(self._w, 'item', 'delete', entry, col)
+
+    def nearest(self, y):
+       return self.tk.call(self._w, 'nearest', y)
+
+    def see(self, entry):
+       self.tk.call(self._w, 'see', entry)
+
+    # Selection management -------------------------------------------------
+    def selection_clear(self, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'selection', 'clear') + self._options(cnf, kw))
+
+    def selection_includes(self, entry):
+       return self.tk.call(self._w, 'selection', 'includes', entry)
+
+    def selection_set(self, first, last=None):
+       self.tk.call(self._w, 'selection', 'set', first, last)
+
+    def show_entry(self, entry):
+       return self.tk.call(self._w, 'show', 'entry', entry)
+
+    def xview(self, *args):
+       apply(self.tk.call, (self._w, 'xview') + args)
+
+    def yview(self, *args):
+       apply(self.tk.call, (self._w, 'yview') + args)
+
+class InputOnly(TixWidget):
+    """InputOnly - Invisible widget.
+
+    Subwidgets - None"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)
+
+class LabelEntry(TixWidget):
+    """LabelEntry - Entry field with label. Packages an entry widget
+    and a label into one mega widget. It can be used to simplify
+    the creation of ``entry-form'' type of interface.
+
+    Subwidgets       Class
+    ----------       -----
+    label       Label
+    entry       Entry"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixLabelEntry',
+                        ['labelside','options'], cnf, kw)
+       self.subwidget_list['label'] = _dummyLabel(self, 'label')
+       self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
+
+class LabelFrame(TixWidget):
+    """LabelFrame - Labelled Frame container. Packages a frame widget
+    and a label into one mega widget. To create widgets inside a
+    LabelFrame widget, one creates the new widgets relative to the
+    frame subwidget and manage them inside the frame subwidget.
+
+    Subwidgets       Class
+    ----------       -----
+    label       Label
+    frame       Frame"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixLabelFrame',
+                        ['labelside','options'], cnf, kw)
+       self.subwidget_list['label'] = _dummyLabel(self, 'label')
+       self.subwidget_list['frame'] = _dummyFrame(self, 'frame')
+
+
+class ListNoteBook(TixWidget):
+    """A ListNoteBook widget is very similar to the TixNoteBook widget:
+    it can be used to display many windows in a limited space using a
+    notebook metaphor. The notebook is divided into a stack of pages
+    (windows). At one time only one of these pages can be shown.
+    The user can navigate through these pages by
+    choosing the name of the desired page in the hlist subwidget."""
+
+    def __init__(self, master, cnf={}, **kw):
+       # NOTE(review): 'tixDirList' looks like a copy/paste error -- the Tix
+       # widget class for this wrapper should be tixListNoteBook, and the
+       # 'shlist' subwidget name 'vsb' is suspect for the same reason (both
+       # were corrected in later CPython releases).  Left byte-identical
+       # here because this is a verbatim vendor copy of the 2.2 Lib.
+       TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'vsb')
+
+
+    def add(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', name) + self._options(cnf, kw))
+       self.subwidget_list[name] = TixSubWidget(self, name)
+       return self.subwidget_list[name]
+
+    def raise_page(self, name):              # raise is a python keyword
+       self.tk.call(self._w, 'raise', name)
+
+class Meter(TixWidget):
+    """The Meter widget can be used to show the progress of a background
+    job which may take a long time to execute.
+    """
+
+    def __init__(self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixMeter',
+                        ['options'], cnf, kw)
+
+class NoteBook(TixWidget):
+    """NoteBook - Multi-page container widget (tabbed notebook metaphor).
+
+    Subwidgets       Class
+    ----------       -----
+    nbframe       NoteBookFrame
+    <pages>       page widgets added dynamically with the add method"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw)
+       self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe',
+                                                destroy_physically=0)
+
+    def add(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', name) + self._options(cnf, kw))
+       self.subwidget_list[name] = TixSubWidget(self, name)
+       return self.subwidget_list[name]
+
+    def delete(self, name):
+       self.tk.call(self._w, 'delete', name)
+
+    def page(self, name):
+       return self.subwidget(name)
+
+    def pages(self):
+       # Can't call subwidgets_all directly because we don't want .nbframe
+       names = self.tk.split(self.tk.call(self._w, 'pages'))
+       ret = []
+       for x in names:
+           ret.append(self.subwidget(x))
+       return ret
+
+    def raise_page(self, name):              # raise is a python keyword
+       self.tk.call(self._w, 'raise', name)
+
+    def raised(self):
+       return self.tk.call(self._w, 'raised')
+
+class NoteBookFrame(TixWidget):
+    """Will be added when Tix documentation is available !!!"""
+    pass
+
+class OptionMenu(TixWidget):
+    """OptionMenu - creates a menu button of options.
+
+    Subwidget       Class
+    ---------       -----
+    menubutton       Menubutton
+    menu       Menu"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw)
+       self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
+       self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
+
+    def add_command(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', 'command', name) + self._options(cnf, kw))
+
+    def add_separator(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', 'separator', name) + self._options(cnf, kw))
+
+    def delete(self, name):
+       self.tk.call(self._w, 'delete', name)
+
+    def disable(self, name):
+       self.tk.call(self._w, 'disable', name)
+
+    def enable(self, name):
+       self.tk.call(self._w, 'enable', name)
+
+class PanedWindow(TixWidget):
+    """PanedWindow - Multi-pane container widget
+    allows the user to interactively manipulate the sizes of several
+    panes. The panes can be arranged either vertically or horizontally. The
+    user changes the sizes of the panes by dragging the resize handle
+    between two panes.
+
+    Subwidgets       Class
+    ----------       -----
+    <panes>       g/p widgets added dynamically with the add method."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw)
+
+    def add(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', name) + self._options(cnf, kw))
+       self.subwidget_list[name] = TixSubWidget(self, name,
+                                           check_intermediate=0)
+       return self.subwidget_list[name]
+
+    def panes(self):
+       # NOTE(review): 'names' is the raw Tcl result and is iterated
+       # directly; later CPython versions pass it through
+       # self.tk.splitlist() first.  If tk.call returns a string this
+       # would iterate characters -- verify before relying on it.
+       names = self.tk.call(self._w, 'panes')
+       ret = []
+       for x in names:
+           ret.append(self.subwidget(x))
+       return ret
+
+class PopupMenu(TixWidget):
+    """PopupMenu widget can be used as a replacement of the tk_popup command.
+    The advantage of the Tix PopupMenu widget is it requires less application
+    code to manipulate.
+
+
+    Subwidgets       Class
+    ----------       -----
+    menubutton       Menubutton
+    menu       Menu"""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw)
+       self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
+       self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
+
+    def bind_widget(self, widget):
+       self.tk.call(self._w, 'bind', widget._w)
+
+    def unbind_widget(self, widget):
+       self.tk.call(self._w, 'unbind', widget._w)
+
+    def post_widget(self, widget, x, y):
+       self.tk.call(self._w, 'post', widget._w, x, y)
+
+class ResizeHandle(TixWidget):
+    """Internal widget to draw resize handles on Scrolled widgets."""
+
+    def __init__(self, master, cnf={}, **kw):
+       # There seems to be a Tix bug rejecting the configure method
+       # Let's try making the flags -static
+       flags = ['options', 'command', 'cursorfg', 'cursorbg',
+                'handlesize', 'hintcolor', 'hintwidth',
+                'x', 'y']
+       # In fact, x y height width are configurable
+       TixWidget.__init__(self, master, 'tixResizeHandle',
+                           flags, cnf, kw)
+
+    def attach_widget(self, widget):
+       self.tk.call(self._w, 'attachwidget', widget._w)
+
+    def detach_widget(self, widget):
+       self.tk.call(self._w, 'detachwidget', widget._w)
+
+    def hide(self, widget):
+       self.tk.call(self._w, 'hide', widget._w)
+
+    def show(self, widget):
+       self.tk.call(self._w, 'show', widget._w)
+
+class ScrolledHList(TixWidget):
+    """ScrolledHList - HList with automatic scrollbars."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
+                        cnf, kw)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class ScrolledListBox(TixWidget):
+    """ScrolledListBox - Listbox with automatic scrollbars."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixScrolledListBox', ['options'], cnf, kw)
+       self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class ScrolledText(TixWidget):
+    """ScrolledText - Text with automatic scrollbars."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw)
+       self.subwidget_list['text'] = _dummyText(self, 'text')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class ScrolledTList(TixWidget):
+    """ScrolledTList - TList with automatic scrollbars."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixScrolledTList', ['options'],
+                        cnf, kw)
+       self.subwidget_list['tlist'] = _dummyTList(self, 'tlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class ScrolledWindow(TixWidget):
+    """ScrolledWindow - Window with automatic scrollbars."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw)
+       self.subwidget_list['window'] = _dummyFrame(self, 'window')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class Select(TixWidget):
+    """Select - Container of button subwidgets. It can be used to provide
+    radio-box or check-box style of selection options for the user.
+
+    Subwidgets are buttons added dynamically using the add method."""
+
+    def __init__(self, master, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixSelect',
+                        ['allowzero', 'radio', 'orientation', 'labelside',
+                         'options'],
+                        cnf, kw)
+       self.subwidget_list['label'] = _dummyLabel(self, 'label')
+
+    def add(self, name, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'add', name) + self._options(cnf, kw))
+       self.subwidget_list[name] = _dummyButton(self, name)
+       return self.subwidget_list[name]
+
+    def invoke(self, name):
+       self.tk.call(self._w, 'invoke', name)
+
+class StdButtonBox(TixWidget):
+    """StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """
+
+    def __init__(self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixStdButtonBox',
+                        ['orientation', 'options'], cnf, kw)
+       self.subwidget_list['ok'] = _dummyButton(self, 'ok')
+       self.subwidget_list['apply'] = _dummyButton(self, 'apply')
+       self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
+       self.subwidget_list['help'] = _dummyButton(self, 'help')
+
+    def invoke(self, name):
+       # Silently ignores unknown button names; has_key is the Python 2
+       # membership idiom (this is vendored 2.2-era code).
+       if self.subwidget_list.has_key(name):
+           self.tk.call(self._w, 'invoke', name)
+
+class TList(TixWidget):
+    """TList - Hierarchy display widget which can be
+    used to display data in a tabular format. The list entries of a TList
+    widget are similar to the entries in the Tk listbox widget. The main
+    differences are (1) the TList widget can display the list entries in a
+    two dimensional format and (2) you can use graphical images as well as
+    multiple colors and fonts for the list entries.
+
+    Subwidgets - None"""
+
+    def __init__ (self,master=None,cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw)
+
+    def active_set(self, index):
+       self.tk.call(self._w, 'active', 'set', index)
+
+    def active_clear(self):
+       self.tk.call(self._w, 'active', 'clear')
+
+    def anchor_set(self, index):
+       self.tk.call(self._w, 'anchor', 'set', index)
+
+    def anchor_clear(self):
+       self.tk.call(self._w, 'anchor', 'clear')
+
+    def delete(self, from_, to=None):
+       self.tk.call(self._w, 'delete', from_, to)
+
+    def dragsite_set(self, index):
+       self.tk.call(self._w, 'dragsite', 'set', index)
+
+    def dragsite_clear(self):
+       self.tk.call(self._w, 'dragsite', 'clear')
+
+    def dropsite_set(self, index):
+       self.tk.call(self._w, 'dropsite', 'set', index)
+
+    def dropsite_clear(self):
+       self.tk.call(self._w, 'dropsite', 'clear')
+
+    def insert(self, index, cnf={}, **kw):
+       apply(self.tk.call,
+              (self._w, 'insert', index) + self._options(cnf, kw))
+
+    def info_active(self):
+       return self.tk.call(self._w, 'info', 'active')
+
+    def info_anchor(self):
+       return self.tk.call(self._w, 'info', 'anchor')
+
+    def info_down(self, index):
+       return self.tk.call(self._w, 'info', 'down', index)
+
+    def info_left(self, index):
+       return self.tk.call(self._w, 'info', 'left', index)
+
+    def info_right(self, index):
+       return self.tk.call(self._w, 'info', 'right', index)
+
+    def info_selection(self):
+       c = self.tk.call(self._w, 'info', 'selection')
+       return self.tk.splitlist(c)
+
+    def info_size(self):
+       return self.tk.call(self._w, 'info', 'size')
+
+    def info_up(self, index):
+       return self.tk.call(self._w, 'info', 'up', index)
+
+    def nearest(self, x, y):
+       return self.tk.call(self._w, 'nearest', x, y)
+
+    def see(self, index):
+       self.tk.call(self._w, 'see', index)
+
+    def selection_clear(self, cnf={}, **kw):
+       apply(self.tk.call,
+             (self._w, 'selection', 'clear') + self._options(cnf, kw))
+
+    def selection_includes(self, index):
+       return self.tk.call(self._w, 'selection', 'includes', index)
+
+    def selection_set(self, first, last=None):
+       self.tk.call(self._w, 'selection', 'set', first, last)
+
+    def xview(self, *args):
+       apply(self.tk.call, (self._w, 'xview') + args)
+
+    def yview(self, *args):
+       apply(self.tk.call, (self._w, 'yview') + args)
+
+class Tree(TixWidget):
+    """Tree - The tixTree widget can be used to display hierarchical
+    data in a tree form. The user can adjust
+    the view of the tree by opening or closing parts of the tree."""
+
+    def __init__(self, master=None, cnf={}, **kw):
+       TixWidget.__init__(self, master, 'tixTree',
+                        ['options'], cnf, kw)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+    def autosetmode(self):
+       # Let Tix derive each entry's open/close mode from its children.
+       self.tk.call(self._w, 'autosetmode')
+
+    def close(self, entrypath):
+       self.tk.call(self._w, 'close', entrypath)
+
+    def getmode(self, entrypath):
+       return self.tk.call(self._w, 'getmode', entrypath)
+
+    def open(self, entrypath):
+       self.tk.call(self._w, 'open', entrypath)
+
+    def setmode(self, entrypath, mode='none'):
+       self.tk.call(self._w, 'setmode', entrypath, mode)
+
+
+# Could try subclassing Tree for CheckList - would need another arg to init
+class CheckList(TixWidget):
+    """The CheckList widget
+    displays a list of items to be selected by the user. CheckList acts
+    similarly to the Tk checkbutton or radiobutton widgets, except it is
+    capable of handling many more items than checkbuttons or radiobuttons.
+    """
+
+    def __init__(self, master=None, cnf={}, **kw):
+        TixWidget.__init__(self, master, 'tixCheckList',
+                           ['options'], cnf, kw)
+        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+        self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+        self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+        
+    def autosetmode(self):
+        self.tk.call(self._w, 'autosetmode')
+
+    def close(self, entrypath):
+        self.tk.call(self._w, 'close', entrypath)
+
+    def getmode(self, entrypath):
+        return self.tk.call(self._w, 'getmode', entrypath)
+
+    def open(self, entrypath):
+        self.tk.call(self._w, 'open', entrypath)
+
+    def getselection(self, mode='on'):
+        '''Mode can be on, off, default'''
+        # NOTE(review): the tk.call result is discarded, so this always
+        # returns None; later CPython returns self.tk.splitlist(...) here.
+        # Likely a missing 'return' -- left as-is in this vendored copy.
+        self.tk.call(self._w, 'getselection', mode)
+
+    def getstatus(self, entrypath):
+        # NOTE(review): same issue as getselection -- result is discarded.
+        self.tk.call(self._w, 'getstatus', entrypath)
+
+    def setstatus(self, entrypath, mode='on'):
+        self.tk.call(self._w, 'setstatus', entrypath, mode)
+
+
+###########################################################################
+### The subclassing below is used to instantiate the subwidgets in each ###
+### mega widget. This allows us to access their methods directly.       ###
+###########################################################################
+
+class _dummyButton(Button, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyCheckbutton(Checkbutton, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyEntry(Entry, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyFrame(Frame, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyLabel(Label, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyListbox(Listbox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyMenu(Menu, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyMenubutton(Menubutton, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyScrollbar(Scrollbar, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyText(Text, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyScrolledListBox(ScrolledListBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class _dummyHList(HList, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyScrolledHList(ScrolledHList, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class _dummyTList(TList, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+class _dummyComboBox(ComboBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
+       self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
+       # I'm not sure about this destroy_physically=0 in all cases;
+       # it may depend on if -dropdown is true; I've added as a trial
+       self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
+                                                        'slistbox',
+                                                        destroy_physically=0)
+       self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox',
+                                                 destroy_physically=0)
+
+class _dummyDirList(DirList, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
+       self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
+       self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
+
+class _dummyDirSelectBox(DirSelectBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
+       self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
+
+class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
+       self.subwidget_list['ok'] = _dummyButton(self, 'ok')
+       self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
+       self.subwidget_list['types'] = _dummyComboBox(self, 'types')
+       self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
+       self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
+       self.subwidget_list['file'] = _dummyComboBox(self, 'file')
+       self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
+
+class _dummyFileSelectBox(FileSelectBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
+       self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
+       self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
+       self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
+
+class _dummyFileComboBox(ComboBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx')
+
+class _dummyStdButtonBox(StdButtonBox, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=1):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+       self.subwidget_list['ok'] = _dummyButton(self, 'ok')
+       self.subwidget_list['apply'] = _dummyButton(self, 'apply')
+       self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
+       self.subwidget_list['help'] = _dummyButton(self, 'help')
+
+class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget):
+    def __init__(self, master, name, destroy_physically=0):
+       TixSubWidget.__init__(self, master, name, destroy_physically)
+
+########################
+### Utility Routines ###
+########################
+
+# Returns the qualified path name for the widget. Normally used to set
+# default options for subwidgets. See tixwidgets.py
+def OptionName(widget):
+    return widget.tk.call('tixOptionName', widget._w)
+
+# Called with a dictionary argument of the form
+# {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'}
+# returns a string which can be used to configure the fsbox file types
+# in an ExFileSelectBox. i.e.,
+# '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}'
+def FileTypeList(dict):
+    # Builds the Tcl list string described in the comment block above.
+    # NOTE(review): the parameter 'dict' and loop variable 'type' shadow
+    # builtins; kept verbatim for fidelity with upstream CPython 2.2.
+    s = ''
+    for type in dict.keys():
+       s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} '
+    return s
+
+# Still to be done:
+class CObjView(TixWidget):
+    """This file implements the Canvas Object View widget. This is a base
+    class of IconView. It implements automatic placement/adjustment of the
+    scrollbars according to the canvas objects inside the canvas subwidget.
+    The scrollbars are adjusted so that the canvas is just large enough
+    to see all the objects.
+    """
+    pass
+
diff --git a/lib-python/2.2/lib-tk/Tkconstants.py b/lib-python/2.2/lib-tk/Tkconstants.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Tkconstants.py
@@ -0,0 +1,103 @@
# Symbolic constants for Tk.
# Pure data module: every name is a plain constant mirroring a Tk
# option value, so "from Tkconstants import *" is safe.

# Booleans
NO = FALSE = OFF = 0
YES = TRUE = ON = 1

# -anchor and -sticky
N = 'n'
S = 's'
W = 'w'
E = 'e'
NW = 'nw'
SW = 'sw'
NE = 'ne'
SE = 'se'
NS = 'ns'
EW = 'ew'
NSEW = 'nsew'
CENTER = 'center'

# -fill
NONE = 'none'
X = 'x'
Y = 'y'
BOTH = 'both'

# -side
LEFT = 'left'
TOP = 'top'
RIGHT = 'right'
BOTTOM = 'bottom'

# -relief
RAISED = 'raised'
SUNKEN = 'sunken'
FLAT = 'flat'
RIDGE = 'ridge'
GROOVE = 'groove'
SOLID = 'solid'

# -orient
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'

# -tabs
NUMERIC = 'numeric'

# -wrap
CHAR = 'char'
WORD = 'word'

# -align
BASELINE = 'baseline'

# -bordermode
INSIDE = 'inside'
OUTSIDE = 'outside'

# Special tags, marks and insert positions
SEL = 'sel'
SEL_FIRST = 'sel.first'
SEL_LAST = 'sel.last'
END = 'end'
INSERT = 'insert'
CURRENT = 'current'
ANCHOR = 'anchor'
ALL = 'all'  # e.g. Canvas.delete(ALL)

# Text widget and button states
NORMAL = 'normal'
DISABLED = 'disabled'
ACTIVE = 'active'

# Menu item types
CASCADE = 'cascade'
CHECKBUTTON = 'checkbutton'
COMMAND = 'command'
RADIOBUTTON = 'radiobutton'
SEPARATOR = 'separator'

# Selection modes for list boxes
SINGLE = 'single'
BROWSE = 'browse'
MULTIPLE = 'multiple'
EXTENDED = 'extended'

# Various canvas styles
PIESLICE = 'pieslice'
CHORD = 'chord'
ARC = 'arc'
FIRST = 'first'
LAST = 'last'
BUTT = 'butt'
PROJECTING = 'projecting'
ROUND = 'round'
BEVEL = 'bevel'
MITER = 'miter'

# Arguments to xview/yview
MOVETO = 'moveto'
SCROLL = 'scroll'
UNITS = 'units'
PAGES = 'pages'
diff --git a/lib-python/2.2/lib-tk/Tkdnd.py b/lib-python/2.2/lib-tk/Tkdnd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Tkdnd.py
@@ -0,0 +1,321 @@
+"""Drag-and-drop support for Tkinter.
+
+This is very preliminary.  I currently only support dnd *within* one
+application, between different windows (or within the same window).
+
+I am trying to make this as generic as possible -- not dependent on
+the use of a particular widget or icon type, etc.  I also hope that
+this will work with Pmw.
+
+To enable an object to be dragged, you must create an event binding
+for it that starts the drag-and-drop process. Typically, you should
+bind <ButtonPress> to a callback function that you write. The function
+should call Tkdnd.dnd_start(source, event), where 'source' is the
+object to be dragged, and 'event' is the event that invoked the call
+(the argument to your callback function).  Even though this is a class
+instantiation, the returned instance should not be stored -- it will
+be kept alive automatically for the duration of the drag-and-drop.
+
+When a drag-and-drop is already in process for the Tk interpreter, the
+call is *ignored*; this normally averts starting multiple simultaneous
+dnd processes, e.g. because different button callbacks all call
+dnd_start().
+
+The object is *not* necessarily a widget -- it can be any
+application-specific object that is meaningful to potential
+drag-and-drop targets.
+
+Potential drag-and-drop targets are discovered as follows.  Whenever
+the mouse moves, and at the start and end of a drag-and-drop move, the
+Tk widget directly under the mouse is inspected.  This is the target
+widget (not to be confused with the target object, yet to be
+determined).  If there is no target widget, there is no dnd target
+object.  If there is a target widget, and it has an attribute
+dnd_accept, this should be a function (or any callable object).  The
+function is called as dnd_accept(source, event), where 'source' is the
+object being dragged (the object passed to dnd_start() above), and
+'event' is the most recent event object (generally a <Motion> event;
+it can also be <ButtonPress> or <ButtonRelease>).  If the dnd_accept()
+function returns something other than None, this is the new dnd target
+object.  If dnd_accept() returns None, or if the target widget has no
+dnd_accept attribute, the target widget's parent is considered as the
+target widget, and the search for a target object is repeated from
+there.  If necessary, the search is repeated all the way up to the
+root widget.  If none of the target widgets can produce a target
+object, there is no target object (the target object is None).
+
+The target object thus produced, if any, is called the new target
+object.  It is compared with the old target object (or None, if there
+was no old target widget).  There are several cases ('source' is the
+source object, and 'event' is the most recent event object):
+
+- Both the old and new target objects are None.  Nothing happens.
+
+- The old and new target objects are the same object.  Its method
+dnd_motion(source, event) is called.
+
+- The old target object was None, and the new target object is not
+None.  The new target object's method dnd_enter(source, event) is
+called.
+
+- The new target object is None, and the old target object is not
+None.  The old target object's method dnd_leave(source, event) is
+called.
+
+- The old and new target objects differ and neither is None.  The old
+target object's method dnd_leave(source, event), and then the new
+target object's method dnd_enter(source, event) is called.
+
+Once this is done, the new target object replaces the old one, and the
+Tk mainloop proceeds.  The return value of the methods mentioned above
+is ignored; if they raise an exception, the normal exception handling
+mechanisms take over.
+
+The drag-and-drop processes can end in two ways: a final target object
+is selected, or no final target object is selected.  When a final
+target object is selected, it will always have been notified of the
+potential drop by a call to its dnd_enter() method, as described
+above, and possibly one or more calls to its dnd_motion() method; its
+dnd_leave() method has not been called since the last call to
+dnd_enter().  The target is notified of the drop by a call to its
+method dnd_commit(source, event).
+
+If no final target object is selected, and there was an old target
+object, its dnd_leave(source, event) method is called to complete the
+dnd sequence.
+
+Finally, the source object is notified that the drag-and-drop process
+is over, by a call to source.dnd_end(target, event), specifying either
+the selected target object, or None if no target object was selected.
+The source object can use this to implement the commit action; this is
+sometimes simpler than to do it in the target's dnd_commit().  The
+target's dnd_commit() method could then simply be aliased to
+dnd_leave().
+
+At any time during a dnd sequence, the application can cancel the
+sequence by calling the cancel() method on the object returned by
+dnd_start().  This will call dnd_leave() if a target is currently
+active; it will never call dnd_commit().
+
+"""
+
+
+import Tkinter
+
+
+# The factory function
+
def dnd_start(source, event):
    """Factory function: begin a drag-and-drop of SOURCE.

    Returns the DndHandler keeping the drag alive, or None when a
    drag is already in progress for this Tk interpreter (the handler
    then leaves its 'root' attribute unset).
    """
    handler = DndHandler(source, event)
    if not handler.root:
        return None
    return handler
+
+
+# The class that does the work
+
class DndHandler:
    """Internal helper tracking one drag-and-drop sequence.

    An instance stores itself on the root widget (as the name-mangled
    attribute _DndHandler__dnd) so that at most one drag runs per Tk
    interpreter at a time; 'root' stays None when another drag is active.
    """

    # None until __init__ successfully claims the root; dnd_start() checks it.
    root = None

    def __init__(self, source, event):
        if event.num > 5:
            # Not a plain mouse-button event; leave root as None.
            return
        root = event.widget._root()
        try:
            root.__dnd
            return # Don't start recursive dnd
        except AttributeError:
            # Claim the interpreter-wide dnd slot (mangled attribute).
            root.__dnd = self
            self.root = root
        self.source = source
        self.target = None
        self.initial_button = button = event.num
        self.initial_widget = widget = event.widget
        # Release of the same button that started the drag ends it.
        self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button)
        self.save_cursor = widget['cursor'] or ""
        widget.bind(self.release_pattern, self.on_release)
        widget.bind("<Motion>", self.on_motion)
        widget['cursor'] = "hand2"

    def __del__(self):
        # Best-effort release of the dnd slot if finish() never ran.
        root = self.root
        self.root = None
        if root:
            try:
                del root.__dnd
            except AttributeError:
                pass

    def on_motion(self, event):
        # Walk up from the widget under the pointer until some widget's
        # dnd_accept() yields a target object (or we run out of parents).
        x, y = event.x_root, event.y_root
        target_widget = self.initial_widget.winfo_containing(x, y)
        source = self.source
        new_target = None
        while target_widget:
            try:
                attr = target_widget.dnd_accept
            except AttributeError:
                pass
            else:
                new_target = attr(source, event)
                if new_target:
                    break
            target_widget = target_widget.master
        old_target = self.target
        if old_target is new_target:
            if old_target:
                old_target.dnd_motion(source, event)
        else:
            # Leave the old target (if any) before entering the new one.
            if old_target:
                self.target = None
                old_target.dnd_leave(source, event)
            if new_target:
                new_target.dnd_enter(source, event)
                self.target = new_target

    def on_release(self, event):
        # Button released: commit the drop on the current target.
        self.finish(event, 1)

    def cancel(self, event=None):
        # Abort the drag without committing.
        self.finish(event, 0)

    def finish(self, event, commit=0):
        # Tear down bindings/cursor and notify target and source; the
        # finally clause guarantees source.dnd_end() always runs.
        target = self.target
        source = self.source
        widget = self.initial_widget
        root = self.root
        try:
            del root.__dnd
            self.initial_widget.unbind(self.release_pattern)
            self.initial_widget.unbind("<Motion>")
            widget['cursor'] = self.save_cursor
            self.target = self.source = self.initial_widget = self.root = None
            if target:
                if commit:
                    target.dnd_commit(source, event)
                else:
                    target.dnd_leave(source, event)
        finally:
            source.dnd_end(target, event)
+
+
+
+# ----------------------------------------------------------------------
+# The rest is here for testing and demonstration purposes only!
+
class Icon:
    """Demo draggable item: a raised Label shown on a Canvas."""

    def __init__(self, name):
        self.name = name
        self.canvas = self.label = self.id = None

    def attach(self, canvas, x=10, y=10):
        """Place this icon on CANVAS at (x, y), moving it if needed."""
        if canvas is self.canvas:
            # Already on this canvas -- just reposition the window item.
            self.canvas.coords(self.id, x, y)
            return
        if self.canvas:
            self.detach()
        if not canvas:
            return
        lbl = Tkinter.Label(canvas, text=self.name,
                            borderwidth=2, relief="raised")
        win = canvas.create_window(x, y, window=lbl, anchor="nw")
        self.canvas = canvas
        self.label = lbl
        self.id = win
        lbl.bind("<ButtonPress>", self.press)

    def detach(self):
        """Remove this icon from its canvas and destroy the label."""
        canvas = self.canvas
        if not canvas:
            return
        win = self.id
        lbl = self.label
        self.canvas = self.label = self.id = None
        canvas.delete(win)
        lbl.destroy()

    def press(self, event):
        """<ButtonPress> handler: try to start dragging this icon."""
        if dnd_start(self, event):
            # where the pointer is relative to the label widget:
            self.x_off = event.x
            self.y_off = event.y
            # where the widget is relative to the canvas:
            self.x_orig, self.y_orig = self.canvas.coords(self.id)

    def move(self, event):
        x, y = self.where(self.canvas, event)
        self.canvas.coords(self.id, x, y)

    def putback(self):
        # Restore the position recorded when the drag started.
        self.canvas.coords(self.id, self.x_orig, self.y_orig)

    def where(self, canvas, event):
        """Return the pointer position in CANVAS coordinates, compensated
        for the initial grab offset inside the label."""
        # where the corner of the canvas is relative to the screen:
        x_org = canvas.winfo_rootx()
        y_org = canvas.winfo_rooty()
        # pointer relative to the canvas, minus the initial offset:
        return (event.x_root - x_org - self.x_off,
                event.y_root - y_org - self.y_off)

    def dnd_end(self, target, event):
        # Nothing to clean up when the drag finishes.
        pass
+
class Tester:
    """Demo drop target: a Toplevel holding a 100x100 canvas."""

    def __init__(self, root):
        self.top = Tkinter.Toplevel(root)
        self.canvas = Tkinter.Canvas(self.top, width=100, height=100)
        self.canvas.pack(fill="both", expand=1)
        self.canvas.dnd_accept = self.dnd_accept

    def dnd_accept(self, source, event):
        # Accept every drag; the Tester itself acts as the target object.
        return self

    def dnd_enter(self, source, event):
        self.canvas.focus_set() # Show highlight border
        x, y = source.where(self.canvas, event)
        left, top, right, bottom = source.canvas.bbox(source.id)
        # Outline rectangle the same size as the dragged icon.
        self.dndid = self.canvas.create_rectangle(
            x, y, x + (right - left), y + (bottom - top))
        self.dnd_motion(source, event)

    def dnd_motion(self, source, event):
        x, y = source.where(self.canvas, event)
        left, top = self.canvas.bbox(self.dndid)[:2]
        self.canvas.move(self.dndid, x - left, y - top)

    def dnd_leave(self, source, event):
        self.top.focus_set() # Hide highlight border
        self.canvas.delete(self.dndid)
        self.dndid = None

    def dnd_commit(self, source, event):
        self.dnd_leave(source, event)
        x, y = source.where(self.canvas, event)
        source.attach(self.canvas, x, y)
+
def test():
    """Interactive demo: three drop-target windows and three icons."""
    root = Tkinter.Tk()
    root.geometry("+1+1")
    Tkinter.Button(command=root.quit, text="Quit").pack()
    targets = []
    for geometry in ("+1+60", "+120+60", "+240+60"):
        tester = Tester(root)
        tester.top.geometry(geometry)
        targets.append(tester)
    icons = [Icon("ICON1"), Icon("ICON2"), Icon("ICON3")]
    for icon, tester in zip(icons, targets):
        icon.attach(tester.canvas)
    root.mainloop()
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/lib-tk/Tkinter.py b/lib-python/2.2/lib-tk/Tkinter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/Tkinter.py
@@ -0,0 +1,3141 @@
+"""Wrapper functions for Tcl/Tk.
+
+Tkinter provides classes which allow the display, positioning and
+control of widgets. Toplevel widgets are Tk and Toplevel. Other
+widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
+Checkbutton, Scale, Listbox, Scrollbar, OptionMenu. Properties of the
+widgets are specified with keyword arguments.  Keyword arguments have
+the same name as the corresponding resource under Tk.
+
+Widgets are positioned with one of the geometry managers Place, Pack
+or Grid. These managers can be called with methods place, pack, grid
+available in every Widget.
+
+Actions are bound to events by resources (e.g. keyword argument
+command) or with the method bind.
+
+Example (Hello, World):
+import Tkinter
+from Tkconstants import *
+tk = Tkinter.Tk()
+frame = Tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
+frame.pack(fill=BOTH,expand=1)
+label = Tkinter.Label(frame, text="Hello, World")
+label.pack(fill=X, expand=1)
+button = Tkinter.Button(frame,text="Exit",command=tk.destroy)
+button.pack(side=BOTTOM)
+tk.mainloop()
+"""
+
+__version__ = "$Revision$"
+
+import sys
+if sys.platform == "win32":
+    import FixTk # Attempt to configure Tcl/Tk without requiring PATH
+import _tkinter # If this fails your Python may not be configured for Tk
+tkinter = _tkinter # b/w compat for export
+TclError = _tkinter.TclError
+from types import *
+from Tkconstants import *
+try:
+    import MacOS; _MacOS = MacOS; del MacOS
+except ImportError:
+    _MacOS = None
+
+TkVersion = float(_tkinter.TK_VERSION)
+TclVersion = float(_tkinter.TCL_VERSION)
+
+READABLE = _tkinter.READABLE
+WRITABLE = _tkinter.WRITABLE
+EXCEPTION = _tkinter.EXCEPTION
+
+# These are not always defined, e.g. not on Win32 with Tk 8.0 :-(
+try: _tkinter.createfilehandler
+except AttributeError: _tkinter.createfilehandler = None
+try: _tkinter.deletefilehandler
+except AttributeError: _tkinter.deletefilehandler = None
+
+
+def _flatten(tuple):
+    """Internal function."""
+    res = ()
+    for item in tuple:
+        if type(item) in (TupleType, ListType):
+            res = res + _flatten(item)
+        elif item is not None:
+            res = res + (item,)
+    return res
+
+try: _flatten = _tkinter._flatten
+except AttributeError: pass
+
def _cnfmerge(cnfs):
    """Internal function.  Merge Tk configuration arguments.

    CNFS may be a dict, None or a string (all returned unchanged), or
    a (possibly nested) sequence of dicts, which are flattened and
    merged into one new dict.
    """
    if type(cnfs) is DictionaryType:
        return cnfs
    elif type(cnfs) in (NoneType, StringType):
        return cnfs
    else:
        cnf = {}
        for c in _flatten(cnfs):
            try:
                cnf.update(c)
            except (AttributeError, TypeError), msg:
                # c was not dict-like enough for update(); copy key by key.
                print "_cnfmerge: fallback due to:", msg
                for k, v in c.items():
                    cnf[k] = v
        return cnf
+
+try: _cnfmerge = _tkinter._cnfmerge
+except AttributeError: pass
+
class Event:
    """Container for the properties of an event.

    Instances of this type are generated if one of the following events occurs:

    KeyPress, KeyRelease - for keyboard events
    ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
    Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
    Colormap, Gravity, Reparent, Property, Destroy, Activate,
    Deactivate - for window events.

    If a callback function for one of these events is registered
    using bind, bind_all, bind_class, or tag_bind, the callback is
    called with an Event as first argument. It will have the
    following attributes (in braces are the event types for which
    the attribute is valid):

        serial - serial number of event
    num - mouse button pressed (ButtonPress, ButtonRelease)
    focus - whether the window has the focus (Enter, Leave)
    height - height of the exposed window (Configure, Expose)
    width - width of the exposed window (Configure, Expose)
    keycode - keycode of the pressed key (KeyPress, KeyRelease)
    state - state of the event as a number (ButtonPress, ButtonRelease,
                            Enter, KeyPress, KeyRelease,
                            Leave, Motion)
    state - state as a string (Visibility)
    time - when the event occurred
    x - x-position of the mouse
    y - y-position of the mouse
    x_root - x-position of the mouse on the screen
             (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
    y_root - y-position of the mouse on the screen
             (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
    char - pressed character (KeyPress, KeyRelease)
    send_event - see X/Windows documentation
    keysym - keysym of the event as a string (KeyPress, KeyRelease)
    keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
    type - type of the event as a number
    widget - widget in which the event occurred
    delta - delta of wheel movement (MouseWheel)
    """
    # Attributes are filled in dynamically by the binding machinery;
    # the class body is intentionally empty.
    pass
+
+_support_default_root = 1
+_default_root = None
+
def NoDefaultRoot():
    """Inhibit setting of default root window.

    After this call, the first Tk instance is no longer installed as
    the implicit parent for widgets created without an explicit master.
    """
    global _support_default_root, _default_root
    _support_default_root = 0
    # Ensure the name is unbound afterwards (bind then delete).
    _default_root = None
    del _default_root
+
+def _tkerror(err):
+    """Internal function."""
+    pass
+
+def _exit(code='0'):
+    """Internal function. Calling it will throw the exception SystemExit."""
+    raise SystemExit, code
+
# Counter used to generate unique Tcl variable names (PY_VAR0, PY_VAR1, ...).
_varnum = 0
class Variable:
    """Internal class. Base class to define value holders for e.g. buttons.

    Each instance allocates a fresh Tcl variable named PY_VAR<number>
    on the master's Tcl interpreter and unsets it again on deletion.
    """
    # Initial value pushed to Tcl by __init__; subclasses override it.
    _default = ""
    def __init__(self, master=None):
        """Construct a variable with an optional MASTER as master widget.
        The variable is named PY_VAR_number in Tcl.
        """
        global _varnum
        if not master:
            master = _default_root
        self._master = master
        self._tk = master.tk
        # repr() instead of the Python-2-only backquote syntax; for an
        # int the result is identical.
        self._name = 'PY_VAR' + repr(_varnum)
        _varnum = _varnum + 1
        self.set(self._default)
    def __del__(self):
        """Unset the variable in Tcl."""
        self._tk.globalunsetvar(self._name)
    def __str__(self):
        """Return the name of the variable in Tcl."""
        return self._name
    def set(self, value):
        """Set the variable to VALUE."""
        return self._tk.globalsetvar(self._name, value)
    def trace_variable(self, mode, callback):
        """Define a trace callback for the variable.

        MODE is one of "r", "w", "u" for read, write, undefine.
        CALLBACK must be a function which is called when
        the variable is read, written or undefined.

        Return the name of the callback.
        """
        cbname = self._master._register(callback)
        self._tk.call("trace", "variable", self._name, mode, cbname)
        return cbname
    trace = trace_variable
    def trace_vdelete(self, mode, cbname):
        """Delete the trace callback for a variable.

        MODE is one of "r", "w", "u" for read, write, undefine.
        CBNAME is the name of the callback returned from trace_variable or trace.
        """
        self._tk.call("trace", "vdelete", self._name, mode, cbname)
        self._master.deletecommand(cbname)
    def trace_vinfo(self):
        """Return all trace callback information."""
        return map(self._tk.split, self._tk.splitlist(
            self._tk.call("trace", "vinfo", self._name)))
+
class StringVar(Variable):
    """Value holder for string variables."""
    # Default pushed to Tcl by Variable.__init__.
    _default = ""
    def __init__(self, master=None):
        """Construct a string variable.

        MASTER can be given as master widget."""
        Variable.__init__(self, master)

    def get(self):
        """Return value of variable as string."""
        return self._tk.globalgetvar(self._name)
+
class IntVar(Variable):
    """Value holder for integer variables."""
    # Default pushed to Tcl by Variable.__init__.
    _default = 0
    def __init__(self, master=None):
        """Construct an integer variable.

        MASTER can be given as master widget."""
        Variable.__init__(self, master)

    def get(self):
        """Return the value of the variable as an integer."""
        # getint is the module-level alias for int().
        return getint(self._tk.globalgetvar(self._name))
+
class DoubleVar(Variable):
    """Value holder for float variables."""
    # Default pushed to Tcl by Variable.__init__.
    _default = 0.0
    def __init__(self, master=None):
        """Construct a float variable.

        MASTER can be given as a master widget."""
        Variable.__init__(self, master)

    def get(self):
        """Return the value of the variable as a float."""
        # getdouble is the module-level alias for float().
        return getdouble(self._tk.globalgetvar(self._name))
+
class BooleanVar(Variable):
    """Value holder for boolean variables."""
    # Tcl spells booleans as strings; Variable.__init__ pushes this default.
    _default = "false"
    def __init__(self, master=None):
        """Construct a boolean variable.

        MASTER can be given as a master widget."""
        Variable.__init__(self, master)

    def get(self):
        """Return the value of the variable as 0 or 1."""
        return self._tk.getboolean(self._tk.globalgetvar(self._name))
+
def mainloop(n=0):
    """Run the main loop of Tcl.

    Delegates to the default root window's tk.mainloop(n)."""
    _default_root.tk.mainloop(n)
+
+getint = int
+
+getdouble = float
+
def getboolean(s):
    """Convert true and false to integer values 1 and 0.

    Delegates to the Tcl interpreter of the default root window."""
    return _default_root.tk.getboolean(s)
+
+# Methods defined on both toplevel and interior widgets
+class Misc:
+    """Internal class.
+
+    Base class which defines methods common for interior widgets."""
+
    # XXX font command?
    # Names of Tcl commands created for this widget; None until the
    # first command is registered (see destroy/deletecommand below).
    _tclCommands = None
    def destroy(self):
        """Internal function.

        Delete all Tcl commands created for
        this widget in the Tcl interpreter."""
        if self._tclCommands is not None:
            for name in self._tclCommands:
                #print '- Tkinter: deleted command', name
                self.tk.deletecommand(name)
            self._tclCommands = None
    def deletecommand(self, name):
        """Internal function.

        Delete the Tcl command provided in NAME."""
        #print '- Tkinter: deleted command', name
        self.tk.deletecommand(name)
        try:
            self._tclCommands.remove(name)
        except ValueError:
            # NAME was not tracked in _tclCommands; nothing to forget.
            pass
    def tk_strictMotif(self, boolean=None):
        """Set Tcl internal variable, whether the look and feel
        should adhere to Motif.

        A parameter of 1 means adhere to Motif (e.g. no color
        change if mouse passes over slider).
        Returns the set value."""
        # 'set tk_strictMotif ?value?' both reads and writes the Tcl variable.
        return self.tk.getboolean(self.tk.call(
            'set', 'tk_strictMotif', boolean))
    def tk_bisque(self):
        """Change the color scheme to light brown as used in Tk 3.6 and before."""
        self.tk.call('tk_bisque')
    def tk_setPalette(self, *args, **kw):
        """Set a new color scheme for all widget elements.

        A single color as argument will cause that all colors of Tk
        widget elements are derived from this.
        Alternatively several keyword parameters and its associated
        colors can be given. The following keywords are valid:
        activeBackground, foreground, selectColor,
        activeForeground, highlightBackground, selectBackground,
        background, highlightColor, selectForeground,
        disabledForeground, insertBackground, troughColor."""
        self.tk.call(('tk_setPalette',)
              + _flatten(args) + _flatten(kw.items()))
    def tk_menuBar(self, *args):
        """Do not use. Needed in Tk 3.6 and earlier."""
        pass # obsolete since Tk 4.0
    def wait_variable(self, name='PY_VAR'):
        """Wait until the variable is modified.

        A parameter of type IntVar, StringVar, DoubleVar or
        BooleanVar must be given."""
        self.tk.call('tkwait', 'variable', name)
    waitvar = wait_variable # XXX b/w compat
    def wait_window(self, window=None):
        """Wait until a WIDGET is destroyed.

        If no parameter is given self is used."""
        if window is None:
            window = self
        self.tk.call('tkwait', 'window', window._w)
    def wait_visibility(self, window=None):
        """Wait until the visibility of a WIDGET changes
        (e.g. it appears).

        If no parameter is given self is used."""
        if window is None:
            window = self
        self.tk.call('tkwait', 'visibility', window._w)
    def setvar(self, name='PY_VAR', value='1'):
        """Set Tcl variable NAME to VALUE."""
        self.tk.setvar(name, value)
    def getvar(self, name='PY_VAR'):
        """Return value of Tcl variable NAME."""
        return self.tk.getvar(name)
    # Class-level aliases mirroring the module-level conversion helpers.
    getint = int
    getdouble = float
    def getboolean(self, s):
        """Return 0 or 1 for Tcl boolean values true and false given as parameter."""
        return self.tk.getboolean(s)
    def focus_set(self):
        """Direct input focus to this widget.

        If the application currently does not have the focus
        this widget will get the focus if the application gets
        the focus through the window manager."""
        self.tk.call('focus', self._w)
    focus = focus_set # XXX b/w compat?
    def focus_force(self):
        """Direct input focus to this widget even if the
        application does not have the focus. Use with
        caution!"""
        self.tk.call('focus', '-force', self._w)
    def focus_get(self):
        """Return the widget which has currently the focus in the
        application.

        Use focus_displayof to allow working with several
        displays. Return None if application does not have
        the focus."""
        name = self.tk.call('focus')
        if name == 'none' or not name: return None
        return self._nametowidget(name)
    def focus_displayof(self):
        """Return the widget which has currently the focus on the
        display where this widget is located.

        Return None if the application does not have the focus."""
        name = self.tk.call('focus', '-displayof', self._w)
        if name == 'none' or not name: return None
        return self._nametowidget(name)
    def focus_lastfor(self):
        """Return the widget which would have the focus if top level
        for this widget gets the focus from the window manager."""
        name = self.tk.call('focus', '-lastfor', self._w)
        if name == 'none' or not name: return None
        return self._nametowidget(name)
    def tk_focusFollowsMouse(self):
        """The widget under the mouse will automatically get the focus.
        Cannot be disabled easily."""
        self.tk.call('tk_focusFollowsMouse')
    def tk_focusNext(self):
        """Return the next widget in the focus order which follows
        widget which has currently the focus.

        The focus order first goes to the next child, then to
        the children of the child recursively and then to the
        next sibling which is higher in the stacking order.  A
        widget is omitted if it has the takefocus resource set
        to 0."""
        name = self.tk.call('tk_focusNext', self._w)
        if not name: return None
        return self._nametowidget(name)
    def tk_focusPrev(self):
        """Return previous widget in the focus order. See tk_focusNext for details."""
        name = self.tk.call('tk_focusPrev', self._w)
        if not name: return None
        return self._nametowidget(name)
    def after(self, ms, func=None, *args):
        """Call function once after given time.

        MS specifies the time in milliseconds. FUNC gives the
        function which shall be called. Additional parameters
        are given as parameters to the function call.  Return
        identifier to cancel scheduling with after_cancel."""
        if not func:
            # No callback: plain 'after ms' just blocks for MS milliseconds.
            # I'd rather use time.sleep(ms*0.001)
            self.tk.call('after', ms)
        else:
            # XXX Disgusting hack to clean up after calling func
            # The wrapper deletes its own Tcl command once it has run;
            # tmp carries the command name into the closure via default args.
            tmp = []
            def callit(func=func, args=args, self=self, tmp=tmp):
                try:
                    apply(func, args)
                finally:
                    try:
                        self.deletecommand(tmp[0])
                    except TclError:
                        pass
            name = self._register(callit)
            tmp.append(name)
            return self.tk.call('after', ms, name)
    def after_idle(self, func, *args):
        """Call FUNC once if the Tcl main loop has no event to
        process.

        Return an identifier to cancel the scheduling with
        after_cancel."""
        return apply(self.after, ('idle', func) + args)
    def after_cancel(self, id):
        """Cancel scheduling of function identified with ID.

        Identifier returned by after or after_idle must be
        given as first parameter."""
        try:
            # Also drop the registered Tcl command for the pending callback.
            (script, type) = self.tk.splitlist(
                self.tk.call('after', 'info', id))
            self.deletecommand(script)
        except TclError:
            pass
        self.tk.call('after', 'cancel', id)
    def bell(self, displayof=0):
        """Ring a display's bell."""
        self.tk.call(('bell',) + self._displayof(displayof))
    # Clipboard handling:
    def clipboard_clear(self, **kw):
        """Clear the data in the Tk clipboard.

        A widget specified for the optional displayof keyword
        argument specifies the target display."""
        # Default to this widget's display when none was given.
        if not kw.has_key('displayof'): kw['displayof'] = self._w
        self.tk.call(('clipboard', 'clear') + self._options(kw))
    def clipboard_append(self, string, **kw):
        """Append STRING to the Tk clipboard.

        A widget specified at the optional displayof keyword
        argument specifies the target display. The clipboard
        can be retrieved with selection_get."""
        if not kw.has_key('displayof'): kw['displayof'] = self._w
        # '--' marks the end of options so STRING may start with a dash.
        self.tk.call(('clipboard', 'append') + self._options(kw)
              + ('--', string))
    # XXX grab current w/o window argument
    def grab_current(self):
        """Return widget which has currently the grab in this application
        or None."""
        name = self.tk.call('grab', 'current', self._w)
        if not name: return None
        return self._nametowidget(name)
    def grab_release(self):
        """Release grab for this widget if currently set."""
        self.tk.call('grab', 'release', self._w)
    def grab_set(self):
        """Set grab for this widget.

        A grab directs all events to this and descendant
        widgets in the application."""
        self.tk.call('grab', 'set', self._w)
    def grab_set_global(self):
        """Set global grab for this widget.

        A global grab directs all events to this and
        descendant widgets on the display. Use with caution -
        other applications do not get events anymore."""
        self.tk.call('grab', 'set', '-global', self._w)
    def grab_status(self):
        """Return None, "local" or "global" if this widget has
        no, a local or a global grab."""
        status = self.tk.call('grab', 'status', self._w)
        # Tcl reports 'none'; map that to Python None.
        if status == 'none': status = None
        return status
+    def lower(self, belowThis=None):
+        """Lower this widget in the stacking order."""
+        # NOTE(review): 'lower' is defined a second time further down in
+        # this class with an identical body; that later definition is the
+        # one that takes effect. Kept as-is (verbatim CPython 2.2 copy).
+        self.tk.call('lower', self._w, belowThis)
+    def option_add(self, pattern, value, priority = None):
+        """Set a VALUE (second parameter) for an option
+        PATTERN (first parameter).
+
+        An optional third parameter gives the numeric priority
+        (defaults to 80)."""
+        # priority=None is passed through; Tk then applies its default.
+        self.tk.call('option', 'add', pattern, value, priority)
+    def option_clear(self):
+        """Clear the option database.
+
+        It will be reloaded if option_add is called."""
+        self.tk.call('option', 'clear')
+    def option_get(self, name, className):
+        """Return the value for an option NAME for this widget
+        with CLASSNAME.
+
+        Values with higher priority override lower values."""
+        return self.tk.call('option', 'get', self._w, name, className)
+    def option_readfile(self, fileName, priority = None):
+        """Read file FILENAME into the option database.
+
+        An optional second parameter gives the numeric
+        priority."""
+        self.tk.call('option', 'readfile', fileName, priority)
+    def selection_clear(self, **kw):
+        """Clear the current X selection."""
+        # Default the target display to this widget's own window.
+        if not kw.has_key('displayof'): kw['displayof'] = self._w
+        self.tk.call(('selection', 'clear') + self._options(kw))
+    def selection_get(self, **kw):
+        """Return the contents of the current X selection.
+
+        A keyword parameter selection specifies the name of
+        the selection and defaults to PRIMARY.  A keyword
+        parameter displayof specifies a widget on the display
+        to use."""
+        if not kw.has_key('displayof'): kw['displayof'] = self._w
+        return self.tk.call(('selection', 'get') + self._options(kw))
+    def selection_handle(self, command, **kw):
+        """Specify a function COMMAND to call if the X
+        selection owned by this widget is queried by another
+        application.
+
+        This function must return the contents of the
+        selection. The function will be called with the
+        arguments OFFSET and LENGTH which allows the chunking
+        of very long selections. The following keyword
+        parameters can be provided:
+        selection - name of the selection (default PRIMARY),
+        type - type of the selection (e.g. STRING, FILE_NAME)."""
+        # Register COMMAND as a Tcl command so Tk can invoke it.
+        name = self._register(command)
+        self.tk.call(('selection', 'handle') + self._options(kw)
+              + (self._w, name))
+    def selection_own(self, **kw):
+        """Become owner of X selection.
+
+        A keyword parameter selection specifies the name of
+        the selection (default PRIMARY)."""
+        self.tk.call(('selection', 'own') +
+                 self._options(kw) + (self._w,))
+    def selection_own_get(self, **kw):
+        """Return owner of X selection.
+
+        The following keyword parameter can
+        be provided:
+        selection - name of the selection (default PRIMARY),
+        type - type of the selection (e.g. STRING, FILE_NAME)."""
+        if not kw.has_key('displayof'): kw['displayof'] = self._w
+        name = self.tk.call(('selection', 'own') + self._options(kw))
+        if not name: return None
+        # Map the Tcl window path back to its Tkinter widget object.
+        return self._nametowidget(name)
+    def send(self, interp, cmd, *args):
+        """Send Tcl command CMD to different interpreter INTERP to be executed."""
+        return self.tk.call(('send', interp, cmd) + args)
+    def lower(self, belowThis=None):
+        """Lower this widget in the stacking order."""
+        # NOTE(review): duplicate of the earlier 'lower' definition in this
+        # class; being later, this is the one Python actually keeps.
+        self.tk.call('lower', self._w, belowThis)
+    def tkraise(self, aboveThis=None):
+        """Raise this widget in the stacking order."""
+        # Named tkraise (not raise) because 'raise' is a Python keyword.
+        self.tk.call('raise', self._w, aboveThis)
+    # Alias so callers can use the more natural name.
+    lift = tkraise
+    def colormodel(self, value=None):
+        """Useless. Not implemented in Tk."""
+        return self.tk.call('tk', 'colormodel', self._w, value)
+    # Window-information queries: thin wrappers around Tk's 'winfo ...'.
+    def winfo_atom(self, name, displayof=0):
+        """Return integer which represents atom NAME."""
+        args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
+        return getint(self.tk.call(args))
+    def winfo_atomname(self, id, displayof=0):
+        """Return name of atom with identifier ID."""
+        args = ('winfo', 'atomname') \
+               + self._displayof(displayof) + (id,)
+        return self.tk.call(args)
+    def winfo_cells(self):
+        """Return number of cells in the colormap for this widget."""
+        return getint(
+            self.tk.call('winfo', 'cells', self._w))
+    def winfo_children(self):
+        """Return a list of all widgets which are children of this widget."""
+        result = []
+        for child in self.tk.splitlist(
+            self.tk.call('winfo', 'children', self._w)):
+            try:
+                # Tcl sometimes returns extra windows, e.g. for
+                # menus; those need to be skipped
+                result.append(self._nametowidget(child))
+            except KeyError:
+                pass
+        return result
+
+    def winfo_class(self):
+        """Return window class name of this widget."""
+        return self.tk.call('winfo', 'class', self._w)
+    def winfo_colormapfull(self):
+        """Return true if at the last color request the colormap was full."""
+        return self.tk.getboolean(
+            self.tk.call('winfo', 'colormapfull', self._w))
+    def winfo_containing(self, rootX, rootY, displayof=0):
+        """Return the widget which is at the root coordinates ROOTX, ROOTY."""
+        args = ('winfo', 'containing') \
+               + self._displayof(displayof) + (rootX, rootY)
+        name = self.tk.call(args)
+        if not name: return None
+        return self._nametowidget(name)
+    def winfo_depth(self):
+        """Return the number of bits per pixel."""
+        return getint(self.tk.call('winfo', 'depth', self._w))
+    def winfo_exists(self):
+        """Return true if this widget exists."""
+        return getint(
+            self.tk.call('winfo', 'exists', self._w))
+    def winfo_fpixels(self, number):
+        """Return the number of pixels for the given distance NUMBER
+        (e.g. "3c") as float."""
+        return getdouble(self.tk.call(
+            'winfo', 'fpixels', self._w, number))
+    def winfo_geometry(self):
+        """Return geometry string for this widget in the form "widthxheight+X+Y"."""
+        return self.tk.call('winfo', 'geometry', self._w)
+    def winfo_height(self):
+        """Return height of this widget."""
+        return getint(
+            self.tk.call('winfo', 'height', self._w))
+    def winfo_id(self):
+        """Return identifier ID for this widget."""
+        # NOTE(review): uses self.tk.getint here, unlike the module-level
+        # getint used by the sibling winfo_* wrappers. Kept verbatim.
+        return self.tk.getint(
+            self.tk.call('winfo', 'id', self._w))
+    def winfo_interps(self, displayof=0):
+        """Return the name of all Tcl interpreters for this display."""
+        args = ('winfo', 'interps') + self._displayof(displayof)
+        return self.tk.splitlist(self.tk.call(args))
+    def winfo_ismapped(self):
+        """Return true if this widget is mapped."""
+        return getint(
+            self.tk.call('winfo', 'ismapped', self._w))
+    def winfo_manager(self):
+        """Return the window manager name for this widget."""
+        return self.tk.call('winfo', 'manager', self._w)
+    def winfo_name(self):
+        """Return the name of this widget."""
+        return self.tk.call('winfo', 'name', self._w)
+    def winfo_parent(self):
+        """Return the name of the parent of this widget."""
+        return self.tk.call('winfo', 'parent', self._w)
+    def winfo_pathname(self, id, displayof=0):
+        """Return the pathname of the widget given by ID."""
+        args = ('winfo', 'pathname') \
+               + self._displayof(displayof) + (id,)
+        return self.tk.call(args)
+    # More 'winfo' wrappers: pointer, size, color and screen queries.
+    def winfo_pixels(self, number):
+        """Rounded integer value of winfo_fpixels."""
+        return getint(
+            self.tk.call('winfo', 'pixels', self._w, number))
+    def winfo_pointerx(self):
+        """Return the x coordinate of the pointer on the root window."""
+        return getint(
+            self.tk.call('winfo', 'pointerx', self._w))
+    def winfo_pointerxy(self):
+        """Return a tuple of x and y coordinates of the pointer on the root window."""
+        return self._getints(
+            self.tk.call('winfo', 'pointerxy', self._w))
+    def winfo_pointery(self):
+        """Return the y coordinate of the pointer on the root window."""
+        return getint(
+            self.tk.call('winfo', 'pointery', self._w))
+    def winfo_reqheight(self):
+        """Return requested height of this widget."""
+        return getint(
+            self.tk.call('winfo', 'reqheight', self._w))
+    def winfo_reqwidth(self):
+        """Return requested width of this widget."""
+        return getint(
+            self.tk.call('winfo', 'reqwidth', self._w))
+    def winfo_rgb(self, color):
+        """Return tuple of decimal values for red, green, blue for
+        COLOR in this widget."""
+        return self._getints(
+            self.tk.call('winfo', 'rgb', self._w, color))
+    def winfo_rootx(self):
+        """Return x coordinate of upper left corner of this widget on the
+        root window."""
+        return getint(
+            self.tk.call('winfo', 'rootx', self._w))
+    def winfo_rooty(self):
+        """Return y coordinate of upper left corner of this widget on the
+        root window."""
+        return getint(
+            self.tk.call('winfo', 'rooty', self._w))
+    def winfo_screen(self):
+        """Return the screen name of this widget."""
+        return self.tk.call('winfo', 'screen', self._w)
+    def winfo_screencells(self):
+        """Return the number of the cells in the colormap of the screen
+        of this widget."""
+        return getint(
+            self.tk.call('winfo', 'screencells', self._w))
+    def winfo_screendepth(self):
+        """Return the number of bits per pixel of the root window of the
+        screen of this widget."""
+        return getint(
+            self.tk.call('winfo', 'screendepth', self._w))
+    def winfo_screenheight(self):
+        """Return the number of pixels of the height of the screen of this widget
+        in pixel."""
+        return getint(
+            self.tk.call('winfo', 'screenheight', self._w))
+    def winfo_screenmmheight(self):
+        """Return the number of pixels of the height of the screen of
+        this widget in mm."""
+        return getint(
+            self.tk.call('winfo', 'screenmmheight', self._w))
+    def winfo_screenmmwidth(self):
+        """Return the number of pixels of the width of the screen of
+        this widget in mm."""
+        return getint(
+            self.tk.call('winfo', 'screenmmwidth', self._w))
+    def winfo_screenvisual(self):
+        """Return one of the strings directcolor, grayscale, pseudocolor,
+        staticcolor, staticgray, or truecolor for the default
+        colormodel of this screen."""
+        return self.tk.call('winfo', 'screenvisual', self._w)
+    def winfo_screenwidth(self):
+        """Return the number of pixels of the width of the screen of
+        this widget in pixel."""
+        return getint(
+            self.tk.call('winfo', 'screenwidth', self._w))
+    def winfo_server(self):
+        """Return information of the X-Server of the screen of this widget in
+        the form "XmajorRminor vendor vendorVersion"."""
+        return self.tk.call('winfo', 'server', self._w)
+    # Final group of 'winfo' wrappers: toplevel, visuals and virtual root.
+    def winfo_toplevel(self):
+        """Return the toplevel widget of this widget."""
+        return self._nametowidget(self.tk.call(
+            'winfo', 'toplevel', self._w))
+    def winfo_viewable(self):
+        """Return true if the widget and all its higher ancestors are mapped."""
+        return getint(
+            self.tk.call('winfo', 'viewable', self._w))
+    def winfo_visual(self):
+        """Return one of the strings directcolor, grayscale, pseudocolor,
+        staticcolor, staticgray, or truecolor for the
+        colormodel of this widget."""
+        return self.tk.call('winfo', 'visual', self._w)
+    def winfo_visualid(self):
+        """Return the X identifier for the visual for this widget."""
+        return self.tk.call('winfo', 'visualid', self._w)
+    def winfo_visualsavailable(self, includeids=0):
+        """Return a list of all visuals available for the screen
+        of this widget.
+
+        Each item in the list consists of a visual name (see winfo_visual), a
+        depth and if INCLUDEIDS=1 is given also the X identifier."""
+        data = self.tk.split(
+            self.tk.call('winfo', 'visualsavailable', self._w,
+                     includeids and 'includeids' or None))
+        # A single visual comes back as a bare string; normalize to a list.
+        if type(data) is StringType:
+            data = [self.tk.split(data)]
+        return map(self.__winfo_parseitem, data)
+    def __winfo_parseitem(self, t):
+        """Internal function."""
+        # Keep the visual name, convert the remaining fields to ints.
+        return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
+    def __winfo_getint(self, x):
+        """Internal function."""
+        # Base 0: honors 0x.../0... prefixes (X ids are often hex).
+        return int(x, 0)
+    def winfo_vrootheight(self):
+        """Return the height of the virtual root window associated with this
+        widget in pixels. If there is no virtual root window return the
+        height of the screen."""
+        return getint(
+            self.tk.call('winfo', 'vrootheight', self._w))
+    def winfo_vrootwidth(self):
+        """Return the width of the virtual root window associated with this
+        widget in pixel. If there is no virtual root window return the
+        width of the screen."""
+        return getint(
+            self.tk.call('winfo', 'vrootwidth', self._w))
+    def winfo_vrootx(self):
+        """Return the x offset of the virtual root relative to the root
+        window of the screen of this widget."""
+        return getint(
+            self.tk.call('winfo', 'vrootx', self._w))
+    def winfo_vrooty(self):
+        """Return the y offset of the virtual root relative to the root
+        window of the screen of this widget."""
+        return getint(
+            self.tk.call('winfo', 'vrooty', self._w))
+    def winfo_width(self):
+        """Return the width of this widget."""
+        return getint(
+            self.tk.call('winfo', 'width', self._w))
+    def winfo_x(self):
+        """Return the x coordinate of the upper left corner of this widget
+        in the parent."""
+        return getint(
+            self.tk.call('winfo', 'x', self._w))
+    def winfo_y(self):
+        """Return the y coordinate of the upper left corner of this widget
+        in the parent."""
+        return getint(
+            self.tk.call('winfo', 'y', self._w))
+    def update(self):
+        """Enter event loop until all pending events have been processed by Tcl."""
+        self.tk.call('update')
+    def update_idletasks(self):
+        """Enter event loop until all idle callbacks have been called. This
+        will update the display of windows but not process events caused by
+        the user."""
+        self.tk.call('update', 'idletasks')
+    def bindtags(self, tagList=None):
+        """Set or get the list of bindtags for this widget.
+
+        With no argument return the list of all bindtags associated with
+        this widget. With a list of strings as argument the bindtags are
+        set to this list. The bindtags determine in which order events are
+        processed (see bind)."""
+        if tagList is None:
+            # Query mode: return the current tag list.
+            return self.tk.splitlist(
+                self.tk.call('bindtags', self._w))
+        else:
+            # Set mode: replace the tag list (returns nothing).
+            self.tk.call('bindtags', self._w, tagList)
+    def _bind(self, what, sequence, func, add, needcleanup=1):
+        """Internal function."""
+        if type(func) is StringType:
+            # FUNC is already a Tcl script: install it verbatim.
+            self.tk.call(what + (sequence, func))
+        elif func:
+            # Wrap the Python callback as a Tcl command.
+            funcid = self._register(func, self._substitute,
+                        needcleanup)
+            # Generated script aborts further bindings when the Python
+            # callback returns the string "break"; a leading '+' appends
+            # to (rather than replaces) any existing binding.
+            cmd = ('%sif {"[%s %s]" == "break"} break\n'
+                   %
+                   (add and '+' or '',
+                funcid, self._subst_format_str))
+            self.tk.call(what + (sequence, cmd))
+            return funcid
+        elif sequence:
+            # No FUNC: query the script currently bound to SEQUENCE.
+            return self.tk.call(what + (sequence,))
+        else:
+            # Neither FUNC nor SEQUENCE: list all bound events.
+            return self.tk.splitlist(self.tk.call(what))
+    def bind(self, sequence=None, func=None, add=None):
+        """Bind to this widget at event SEQUENCE a call to function FUNC.
+
+        SEQUENCE is a string of concatenated event
+        patterns. An event pattern is of the form
+        <MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
+        of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
+        Button1, B1, Mod5, M5 Button2, B2, Meta, M, Button3,
+        B3, Alt, Button4, B4, Double, Button5, B5 Triple,
+        Mod1, M1. TYPE is one of Activate, Enter, Map,
+        ButtonPress, Button, Expose, Motion, ButtonRelease
+        FocusIn, MouseWheel, Circulate, FocusOut, Property,
+        Colormap, Gravity Reparent, Configure, KeyPress, Key,
+        Unmap, Deactivate, KeyRelease Visibility, Destroy,
+        Leave and DETAIL is the button number for ButtonPress,
+        ButtonRelease and DETAIL is the Keysym for KeyPress and
+        KeyRelease. Examples are
+        <Control-Button-1> for pressing Control and mouse button 1 or
+        <Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
+        An event pattern can also be a virtual event of the form
+        <<AString>> where AString can be arbitrary. This
+        event can be generated by event_generate.
+        If events are concatenated they must appear shortly
+        after each other.
+
+        FUNC will be called if the event sequence occurs with an
+        instance of Event as argument. If the return value of FUNC is
+        "break" no further bound function is invoked.
+
+        An additional boolean parameter ADD specifies whether FUNC will
+        be called additionally to the other bound function or whether
+        it will replace the previous function.
+
+        Bind will return an identifier to allow deletion of the bound function with
+        unbind without memory leak.
+
+        If FUNC or SEQUENCE is omitted the bound function or list
+        of bound events are returned."""
+
+        # All per-widget bindings go through the shared _bind helper.
+        return self._bind(('bind', self._w), sequence, func, add)
+    def unbind(self, sequence, funcid=None):
+        """Unbind for this widget for event SEQUENCE  the
+        function identified with FUNCID."""
+        # Clear the Tcl binding, then drop the registered command so the
+        # Python callback is not leaked.
+        self.tk.call('bind', self._w, sequence, '')
+        if funcid:
+            self.deletecommand(funcid)
+    def bind_all(self, sequence=None, func=None, add=None):
+        """Bind to all widgets at an event SEQUENCE a call to function FUNC.
+        An additional boolean parameter ADD specifies whether FUNC will
+        be called additionally to the other bound function or whether
+        it will replace the previous function. See bind for the return value."""
+        # needcleanup=0: application-wide bindings outlive this widget.
+        return self._bind(('bind', 'all'), sequence, func, add, 0)
+    def unbind_all(self, sequence):
+        """Unbind for all widgets for event SEQUENCE all functions."""
+        self.tk.call('bind', 'all' , sequence, '')
+    def bind_class(self, className, sequence=None, func=None, add=None):
+
+        """Bind to widgets with bindtag CLASSNAME at event
+        SEQUENCE a call of function FUNC. An additional
+        boolean parameter ADD specifies whether FUNC will be
+        called additionally to the other bound function or
+        whether it will replace the previous function. See bind for
+        the return value."""
+
+        return self._bind(('bind', className), sequence, func, add, 0)
+    def unbind_class(self, className, sequence):
+        """Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
+        all functions."""
+        self.tk.call('bind', className , sequence, '')
+    def mainloop(self, n=0):
+        """Call the mainloop of Tk."""
+        self.tk.mainloop(n)
+    def quit(self):
+        """Quit the Tcl interpreter. All widgets will be destroyed."""
+        self.tk.quit()
+    def _getints(self, string):
+        """Internal function."""
+        # Falls through to an implicit None return for an empty string.
+        if string:
+            return tuple(map(getint, self.tk.splitlist(string)))
+    def _getdoubles(self, string):
+        """Internal function."""
+        if string:
+            return tuple(map(getdouble, self.tk.splitlist(string)))
+    def _getboolean(self, string):
+        """Internal function."""
+        if string:
+            return self.tk.getboolean(string)
+    def _displayof(self, displayof):
+        """Internal function."""
+        # Truthy value: use it as the target display; None: use this
+        # widget's window; 0 (the default): no -displayof option at all.
+        if displayof:
+            return ('-displayof', displayof)
+        if displayof is None:
+            return ('-displayof', self._w)
+        return ()
+    def _options(self, cnf, kw = None):
+        """Internal function."""
+        if kw:
+            cnf = _cnfmerge((cnf, kw))
+        else:
+            cnf = _cnfmerge(cnf)
+        res = ()
+        for k, v in cnf.items():
+            if v is not None:
+                # Trailing '_' is stripped from keys (presumably written to
+                # dodge Python keywords such as 'from_' -- verify upstream).
+                if k[-1] == '_': k = k[:-1]
+                # Callable option values are auto-registered as Tcl commands.
+                if callable(v):
+                    v = self._register(v)
+                res = res + ('-'+k, v)
+        return res
+    def nametowidget(self, name):
+        """Return the Tkinter instance of a widget identified by
+        its Tcl name NAME."""
+        w = self
+        # An absolute path ('.foo.bar') starts resolution from the root.
+        if name[0] == '.':
+            w = w._root()
+            name = name[1:]
+        # Walk the path one '.'-separated component at a time through the
+        # children dictionaries; raises KeyError for unknown components.
+        while name:
+            i = name.find('.')
+            if i >= 0:
+                name, tail = name[:i], name[i+1:]
+            else:
+                tail = ''
+            w = w.children[name]
+            name = tail
+        return w
+    _nametowidget = nametowidget
+    def _register(self, func, subst=None, needcleanup=1):
+        """Return a newly created Tcl function. If this
+        function is called, the Python function FUNC will
+        be executed. An optional function SUBST can
+        be given which will be executed before FUNC."""
+        f = CallWrapper(func, subst, self).__call__
+        # Backticks are Python 2 repr(); id(f) makes the name unique.
+        name = `id(f)`
+        try:
+            # Unwrap bound methods so __name__ below is the function's.
+            func = func.im_func
+        except AttributeError:
+            pass
+        try:
+            # Append the function name for readability in Tcl traces.
+            name = name + func.__name__
+        except AttributeError:
+            pass
+        self.tk.createcommand(name, f)
+        if needcleanup:
+            # Remember the command so it can be deleted with the widget.
+            if self._tclCommands is None:
+                self._tclCommands = []
+            self._tclCommands.append(name)
+        #print '+ Tkinter created command', name
+        return name
+    register = _register
+    def _root(self):
+        """Internal function."""
+        # Follow master links up to the (masterless) root window.
+        w = self
+        while w.master: w = w.master
+        return w
+    # Tk percent substitutions passed to bound scripts; _substitute below
+    # unpacks them in exactly this order.
+    _subst_format = ('%#', '%b', '%f', '%h', '%k',
+             '%s', '%t', '%w', '%x', '%y',
+             '%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
+    _subst_format_str = " ".join(_subst_format)
+    def _substitute(self, *args):
+        """Internal function."""
+        # Pass through unchanged if the argument count does not match the
+        # substitution format (not a Tk-generated event call).
+        if len(args) != len(self._subst_format): return args
+        getboolean = self.tk.getboolean
+
+        getint = int
+        def getint_event(s):
+            """Tk changed behavior in 8.4.2, returning "??" rather more often."""
+            try:
+                return int(s)
+            except ValueError:
+                return s
+
+        nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
+        # Missing: (a, c, d, m, o, v, B, R)
+        e = Event()
+        # serial field: valid vor all events
+        # number of button: ButtonPress and ButtonRelease events only
+        # height field: Configure, ConfigureRequest, Create,
+        # ResizeRequest, and Expose events only
+        # keycode field: KeyPress and KeyRelease events only
+        # time field: "valid for events that contain a time field"
+        # width field: Configure, ConfigureRequest, Create, ResizeRequest,
+        # and Expose events only
+        # x field: "valid for events that contain a x field"
+        # y field: "valid for events that contain a y field"
+        # keysym as decimal: KeyPress and KeyRelease events only
+        # x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
+        # KeyRelease,and Motion events
+        e.serial = getint(nsign)
+        e.num = getint_event(b)
+        try: e.focus = getboolean(f)
+        except TclError: pass
+        e.height = getint_event(h)
+        e.keycode = getint_event(k)
+        e.state = getint_event(s)
+        e.time = getint_event(t)
+        e.width = getint_event(w)
+        e.x = getint_event(x)
+        e.y = getint_event(y)
+        e.char = A
+        try: e.send_event = getboolean(E)
+        except TclError: pass
+        e.keysym = K
+        e.keysym_num = getint_event(N)
+        e.type = T
+        try:
+            e.widget = self._nametowidget(W)
+        except KeyError:
+            # Window path is not a known Tkinter widget; keep the raw name.
+            e.widget = W
+        e.x_root = getint_event(X)
+        e.y_root = getint_event(Y)
+        try:
+            e.delta = getint(D)
+        except ValueError:
+            e.delta = 0
+        # Callbacks receive the populated Event as their single argument.
+        return (e,)
+    def _report_exception(self):
+        """Internal function."""
+        import sys
+        # sys.exc_type/exc_value/exc_traceback: legacy (pre-exc_info) API,
+        # current at the Python 2.2 vintage of this file.
+        exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
+        root = self._root()
+        root.report_callback_exception(exc, val, tb)
+    # These used to be defined in Widget:
+    def configure(self, cnf=None, **kw):
+        """Configure resources of a widget.
+
+        The values for resources are specified as keyword
+        arguments. To get an overview about
+        the allowed keyword arguments call the method keys.
+        """
+        # XXX ought to generalize this so tag_config etc. can use it
+        if kw:
+            cnf = _cnfmerge((cnf, kw))
+        elif cnf:
+            cnf = _cnfmerge(cnf)
+        if cnf is None:
+            # No arguments: return a dict of all options, keyed by option
+            # name with the leading '-' stripped.
+            cnf = {}
+            for x in self.tk.split(
+                self.tk.call(self._w, 'configure')):
+                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+            return cnf
+        if type(cnf) is StringType:
+            # Single option name: return just that option's tuple.
+            x = self.tk.split(self.tk.call(
+                self._w, 'configure', '-'+cnf))
+            return (x[0][1:],) + x[1:]
+        # Otherwise set the given options (returns nothing).
+        self.tk.call((self._w, 'configure')
+              + self._options(cnf))
+    config = configure
+    def cget(self, key):
+        """Return the resource value for a KEY given as string."""
+        return self.tk.call(self._w, 'cget', '-' + key)
+    __getitem__ = cget
+    def __setitem__(self, key, value):
+        # widget['key'] = value is sugar for configure.
+        self.configure({key: value})
+    def keys(self):
+        """Return a list of all resource names of this widget."""
+        return map(lambda x: x[0][1:],
+               self.tk.split(self.tk.call(self._w, 'configure')))
+    def __str__(self):
+        """Return the window path name of this widget."""
+        return self._w
+    # Pack methods that apply to the master
+    # Sentinel default: distinguishes "no argument" from any real flag
+    # value (including None/0).
+    _noarg_ = ['_noarg_']
+    def pack_propagate(self, flag=_noarg_):
+        """Set or get the status for propagation of geometry information.
+
+        A boolean argument specifies whether the geometry information
+        of the slaves will determine the size of this widget. If no argument
+        is given the current setting will be returned.
+        """
+        if flag is Misc._noarg_:
+            return self._getboolean(self.tk.call(
+                'pack', 'propagate', self._w))
+        else:
+            self.tk.call('pack', 'propagate', self._w, flag)
+    propagate = pack_propagate
+    def pack_slaves(self):
+        """Return a list of all slaves of this widget
+        in its packing order."""
+        return map(self._nametowidget,
+               self.tk.splitlist(
+                   self.tk.call('pack', 'slaves', self._w)))
+    slaves = pack_slaves
+    # Place method that applies to the master
+    def place_slaves(self):
+        """Return a list of all slaves of this widget
+        in its packing order."""
+        return map(self._nametowidget,
+               self.tk.splitlist(
+                   self.tk.call(
+                       'place', 'slaves', self._w)))
+    # Grid methods that apply to the master
+    def grid_bbox(self, column=None, row=None, col2=None, row2=None):
+        """Return a tuple of integer coordinates for the bounding
+        box of this widget controlled by the geometry manager grid.
+
+        If COLUMN, ROW is given the bounding box applies from
+        the cell with row and column 0 to the specified
+        cell. If COL2 and ROW2 are given the bounding box
+        starts at that cell.
+
+        The returned integers specify the offset of the upper left
+        corner in the master widget and the width and height.
+        """
+        args = ('grid', 'bbox', self._w)
+        if column is not None and row is not None:
+            args = args + (column, row)
+        if col2 is not None and row2 is not None:
+            args = args + (col2, row2)
+        # _getints returns None for an empty result; 'or None' makes that
+        # explicit for the () case too.
+        return self._getints(apply(self.tk.call, args)) or None
+
+    bbox = grid_bbox
+    def _grid_configure(self, command, index, cnf, kw):
+        """Internal function."""
+        if type(cnf) is StringType and not kw:
+            # A bare option-name string: normalize to '-name' form.
+            if cnf[-1:] == '_':
+                cnf = cnf[:-1]
+            if cnf[:1] != '-':
+                cnf = '-'+cnf
+            options = (cnf,)
+        else:
+            options = self._options(cnf, kw)
+        if not options:
+            # No options: query all settings and return them as a dict of
+            # option name (leading '-' stripped) -> numeric value or None.
+            res = self.tk.call('grid',
+                       command, self._w, index)
+            words = self.tk.splitlist(res)
+            dict = {}
+            for i in range(0, len(words), 2):
+                key = words[i][1:]
+                value = words[i+1]
+                if not value:
+                    value = None
+                elif '.' in value:
+                    value = getdouble(value)
+                else:
+                    value = getint(value)
+                dict[key] = value
+            return dict
+        res = self.tk.call(
+                  ('grid', command, self._w, index)
+                  + options)
+        if len(options) == 1:
+            # Single-option query: convert the scalar result.
+            if not res: return None
+            # In Tk 7.5, -width can be a float
+            if '.' in res: return getdouble(res)
+            return getint(res)
+    def grid_columnconfigure(self, index, cnf={}, **kw):
+        """Configure column INDEX of a grid.
+
+        Valid resources are minsize (minimum size of the column),
+        weight (how much does additional space propagate to this column)
+        and pad (how much space to let additionally)."""
+        return self._grid_configure('columnconfigure', index, cnf, kw)
+    # Short alias kept for backwards compatibility.
+    columnconfigure = grid_columnconfigure
+    def grid_location(self, x, y):
+        """Return a tuple of column and row which identify the cell
+        at which the pixel at position X and Y inside the master
+        widget is located."""
+        return self._getints(
+            self.tk.call(
+                'grid', 'location', self._w, x, y)) or None
+    def grid_propagate(self, flag=_noarg_):
+        """Set or get the status for propagation of geometry information.
+
+        A boolean argument specifies whether the geometry information
+        of the slaves will determine the size of this widget. If no argument
+        is given, the current setting will be returned.
+        """
+        # _noarg_ is a sentinel (defined on Misc, outside this hunk) that
+        # distinguishes "no argument given" from a false flag value.
+        if flag is Misc._noarg_:
+            return self._getboolean(self.tk.call(
+                'grid', 'propagate', self._w))
+        else:
+            self.tk.call('grid', 'propagate', self._w, flag)
+    def grid_rowconfigure(self, index, cnf={}, **kw):
+        """Configure row INDEX of a grid.
+
+        Valid resources are minsize (minimum size of the row),
+        weight (how much does additional space propagate to this row)
+        and pad (how much space to let additionally)."""
+        return self._grid_configure('rowconfigure', index, cnf, kw)
+    # Short alias kept for backwards compatibility.
+    rowconfigure = grid_rowconfigure
+    def grid_size(self):
+        """Return a tuple of the number of columns and rows in the grid."""
+        return self._getints(
+            self.tk.call('grid', 'size', self._w)) or None
+    size = grid_size
+    def grid_slaves(self, row=None, column=None):
+        """Return a list of all slaves of this widget
+        in its packing order."""
+        args = ()
+        if row is not None:
+            args = args + ('-row', row)
+        if column is not None:
+            args = args + ('-column', column)
+        return map(self._nametowidget,
+               self.tk.splitlist(self.tk.call(
+                   ('grid', 'slaves', self._w) + args)))
+
+    # Support for the "event" command, new in Tk 4.2.
+    # By Case Roole.
+
+    def event_add(self, virtual, *sequences):
+        """Bind a virtual event VIRTUAL (of the form <<Name>>)
+        to an event SEQUENCE such that the virtual event is triggered
+        whenever SEQUENCE occurs."""
+        args = ('event', 'add', virtual) + sequences
+        self.tk.call(args)
+
+    def event_delete(self, virtual, *sequences):
+        """Unbind a virtual event VIRTUAL from SEQUENCE."""
+        args = ('event', 'delete', virtual) + sequences
+        self.tk.call(args)
+
+    def event_generate(self, sequence, **kw):
+        """Generate an event SEQUENCE. Additional
+        keyword arguments specify parameters of the event
+        (e.g. x, y, rootx, rooty)."""
+        args = ('event', 'generate', self._w, sequence)
+        for k, v in kw.items():
+            # Each keyword becomes a Tcl '-option value' pair.
+            args = args + ('-%s' % k, str(v))
+        self.tk.call(args)
+
+    def event_info(self, virtual=None):
+        """Return a list of all virtual events or the information
+        about the SEQUENCE bound to the virtual event VIRTUAL."""
+        return self.tk.splitlist(
+            self.tk.call('event', 'info', virtual))
+
+    # Image related commands
+
+    def image_names(self):
+        """Return a list of all existing image names."""
+        return self.tk.call('image', 'names')
+
+    def image_types(self):
+        """Return a list of all available image types (e.g. photo bitmap)."""
+        return self.tk.call('image', 'types')
+
+
+class CallWrapper:
+    """Internal class. Stores function to call when some user
+    defined Tcl function is called e.g. after an event occurred."""
+    def __init__(self, func, subst, widget):
+        """Store FUNC, SUBST and WIDGET as members."""
+        self.func = func
+        self.subst = subst
+        self.widget = widget
+    def __call__(self, *args):
+        """Apply first function SUBST to arguments, then FUNC."""
+        try:
+            if self.subst:
+                args = apply(self.subst, args)
+            return apply(self.func, args)
+        except SystemExit, msg:
+            # Let an explicit exit propagate out of the callback.
+            raise SystemExit, msg
+        except:
+            # Any other error is reported via the widget instead of
+            # aborting the Tk mainloop.
+            self.widget._report_exception()
+
+
+class Wm:
+    """Provides functions for the communication with the window manager."""
+    # Every command is available both as wm_<name> and under the short
+    # alias assigned after each method (backwards compatibility).
+    def wm_aspect(self,
+              minNumer=None, minDenom=None,
+              maxNumer=None, maxDenom=None):
+        """Instruct the window manager to set the aspect ratio (width/height)
+        of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
+        of the actual values if no argument is given."""
+        return self._getints(
+            self.tk.call('wm', 'aspect', self._w,
+                     minNumer, minDenom,
+                     maxNumer, maxDenom))
+    aspect = wm_aspect
+    def wm_client(self, name=None):
+        """Store NAME in WM_CLIENT_MACHINE property of this widget. Return
+        current value."""
+        return self.tk.call('wm', 'client', self._w, name)
+    client = wm_client
+    def wm_colormapwindows(self, *wlist):
+        """Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
+        of this widget. This list contains windows whose colormaps differ from their
+        parents. Return current list of widgets if WLIST is empty."""
+        if len(wlist) > 1:
+            wlist = (wlist,) # Tk needs a list of windows here
+        args = ('wm', 'colormapwindows', self._w) + wlist
+        return map(self._nametowidget, self.tk.call(args))
+    colormapwindows = wm_colormapwindows
+    def wm_command(self, value=None):
+        """Store VALUE in WM_COMMAND property. It is the command
+        which shall be used to invoke the application. Return current
+        command if VALUE is None."""
+        return self.tk.call('wm', 'command', self._w, value)
+    command = wm_command
+    def wm_deiconify(self):
+        """Deiconify this widget. If it was never mapped it will not be mapped.
+        On Windows it will raise this widget and give it the focus."""
+        return self.tk.call('wm', 'deiconify', self._w)
+    deiconify = wm_deiconify
+    def wm_focusmodel(self, model=None):
+        """Set focus model to MODEL. "active" means that this widget will claim
+        the focus itself, "passive" means that the window manager shall give
+        the focus. Return current focus model if MODEL is None."""
+        return self.tk.call('wm', 'focusmodel', self._w, model)
+    focusmodel = wm_focusmodel
+    def wm_frame(self):
+        """Return identifier for decorative frame of this widget if present."""
+        return self.tk.call('wm', 'frame', self._w)
+    frame = wm_frame
+    def wm_geometry(self, newGeometry=None):
+        """Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
+        current value if None is given."""
+        return self.tk.call('wm', 'geometry', self._w, newGeometry)
+    geometry = wm_geometry
+    def wm_grid(self,
+         baseWidth=None, baseHeight=None,
+         widthInc=None, heightInc=None):
+        """Instruct the window manager that this widget shall only be
+        resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
+        height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
+        number of grid units requested in Tk_GeometryRequest."""
+        return self._getints(self.tk.call(
+            'wm', 'grid', self._w,
+            baseWidth, baseHeight, widthInc, heightInc))
+    grid = wm_grid
+    def wm_group(self, pathName=None):
+        """Set the group leader widgets for related widgets to PATHNAME. Return
+        the group leader of this widget if None is given."""
+        return self.tk.call('wm', 'group', self._w, pathName)
+    group = wm_group
+    def wm_iconbitmap(self, bitmap=None):
+        """Set bitmap for the iconified widget to BITMAP. Return
+        the bitmap if None is given."""
+        return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
+    iconbitmap = wm_iconbitmap
+    def wm_iconify(self):
+        """Display widget as icon."""
+        return self.tk.call('wm', 'iconify', self._w)
+    iconify = wm_iconify
+    def wm_iconmask(self, bitmap=None):
+        """Set mask for the icon bitmap of this widget. Return the
+        mask if None is given."""
+        return self.tk.call('wm', 'iconmask', self._w, bitmap)
+    iconmask = wm_iconmask
+    def wm_iconname(self, newName=None):
+        """Set the name of the icon for this widget. Return the name if
+        None is given."""
+        return self.tk.call('wm', 'iconname', self._w, newName)
+    iconname = wm_iconname
+    def wm_iconposition(self, x=None, y=None):
+        """Set the position of the icon of this widget to X and Y. Return
+        a tuple of the current values of X and Y if None is given."""
+        return self._getints(self.tk.call(
+            'wm', 'iconposition', self._w, x, y))
+    iconposition = wm_iconposition
+    def wm_iconwindow(self, pathName=None):
+        """Set widget PATHNAME to be displayed instead of icon. Return the current
+        value if None is given."""
+        return self.tk.call('wm', 'iconwindow', self._w, pathName)
+    iconwindow = wm_iconwindow
+    def wm_maxsize(self, width=None, height=None):
+        """Set max WIDTH and HEIGHT for this widget. If the window is gridded
+        the values are given in grid units. Return the current values if None
+        is given."""
+        return self._getints(self.tk.call(
+            'wm', 'maxsize', self._w, width, height))
+    maxsize = wm_maxsize
+    def wm_minsize(self, width=None, height=None):
+        """Set min WIDTH and HEIGHT for this widget. If the window is gridded
+        the values are given in grid units. Return the current values if None
+        is given."""
+        return self._getints(self.tk.call(
+            'wm', 'minsize', self._w, width, height))
+    minsize = wm_minsize
+    def wm_overrideredirect(self, boolean=None):
+        """Instruct the window manager to ignore this widget
+        if BOOLEAN is given with 1. Return the current value if None
+        is given."""
+        return self._getboolean(self.tk.call(
+            'wm', 'overrideredirect', self._w, boolean))
+    overrideredirect = wm_overrideredirect
+    def wm_positionfrom(self, who=None):
+        """Instruct the window manager that the position of this widget shall
+        be defined by the user if WHO is "user", and by its own policy if WHO is
+        "program"."""
+        return self.tk.call('wm', 'positionfrom', self._w, who)
+    positionfrom = wm_positionfrom
+    def wm_protocol(self, name=None, func=None):
+        """Bind function FUNC to command NAME for this widget.
+        Return the function bound to NAME if None is given. NAME could be
+        e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
+        # Callables are registered as Tcl commands; a plain string (or
+        # None) is passed straight through to the 'wm protocol' command.
+        if callable(func):
+            command = self._register(func)
+        else:
+            command = func
+        return self.tk.call(
+            'wm', 'protocol', self._w, name, command)
+    protocol = wm_protocol
+    def wm_resizable(self, width=None, height=None):
+        """Instruct the window manager whether this widget can be resized
+        in WIDTH or HEIGHT. Both values are boolean values."""
+        return self.tk.call('wm', 'resizable', self._w, width, height)
+    resizable = wm_resizable
+    def wm_sizefrom(self, who=None):
+        """Instruct the window manager that the size of this widget shall
+        be defined by the user if WHO is "user", and by its own policy if WHO is
+        "program"."""
+        return self.tk.call('wm', 'sizefrom', self._w, who)
+    sizefrom = wm_sizefrom
+    def wm_state(self, newstate=None):
+        """Query or set the state of this widget as one of normal, icon,
+        iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
+        return self.tk.call('wm', 'state', self._w, newstate)
+    state = wm_state
+    def wm_title(self, string=None):
+        """Set the title of this widget."""
+        return self.tk.call('wm', 'title', self._w, string)
+    title = wm_title
+    def wm_transient(self, master=None):
+        """Instruct the window manager that this widget is transient
+        with regard to widget MASTER."""
+        return self.tk.call('wm', 'transient', self._w, master)
+    transient = wm_transient
+    def wm_withdraw(self):
+        """Withdraw this widget from the screen such that it is unmapped
+        and forgotten by the window manager. Re-draw it with wm_deiconify."""
+        return self.tk.call('wm', 'withdraw', self._w)
+    withdraw = wm_withdraw
+
+
+class Tk(Misc, Wm):
+    """Toplevel widget of Tk which represents mostly the main window
+    of an application. It has an associated Tcl interpreter."""
+    # The root window is always the Tk path '.'.
+    _w = '.'
+    def __init__(self, screenName=None, baseName=None, className='Tk'):
+        """Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
+        be created. BASENAME will be used for the identification of the profile file (see
+        readprofile).
+        It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
+        is the name of the widget class."""
+        global _default_root
+        self.master = None
+        self.children = {}
+        if baseName is None:
+            # Derive the profile base name from the script name, keeping
+            # any extension that is not a Python source/bytecode suffix.
+            import sys, os
+            baseName = os.path.basename(sys.argv[0])
+            baseName, ext = os.path.splitext(baseName)
+            if ext not in ('.py', '.pyc', '.pyo'):
+                baseName = baseName + ext
+        self.tk = _tkinter.create(screenName, baseName, className)
+        if _MacOS and hasattr(_MacOS, 'SchedParams'):
+            # Disable event scanning except for Command-Period
+            _MacOS.SchedParams(1, 0)
+            # Work around nasty MacTk bug
+            # XXX Is this one still needed?
+            self.update()
+        # Version sanity checks
+        tk_version = self.tk.getvar('tk_version')
+        if tk_version != _tkinter.TK_VERSION:
+            raise RuntimeError, \
+            "tk.h version (%s) doesn't match libtk.a version (%s)" \
+            % (_tkinter.TK_VERSION, tk_version)
+        tcl_version = self.tk.getvar('tcl_version')
+        if tcl_version != _tkinter.TCL_VERSION:
+            raise RuntimeError, \
+            "tcl.h version (%s) doesn't match libtcl.a version (%s)" \
+            % (_tkinter.TCL_VERSION, tcl_version)
+        if TkVersion < 4.0:
+            raise RuntimeError, \
+            "Tk 4.0 or higher is required; found Tk %s" \
+            % str(TkVersion)
+        self.tk.createcommand('tkerror', _tkerror)
+        self.tk.createcommand('exit', _exit)
+        self.readprofile(baseName, className)
+        if _support_default_root and not _default_root:
+            _default_root = self
+        self.protocol("WM_DELETE_WINDOW", self.destroy)
+    def destroy(self):
+        """Destroy this and all descendants widgets. This will
+        end the application of this Tcl interpreter."""
+        for c in self.children.values(): c.destroy()
+        self.tk.call('destroy', self._w)
+        Misc.destroy(self)
+        global _default_root
+        if _support_default_root and _default_root is self:
+            _default_root = None
+    def readprofile(self, baseName, className):
+        """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
+        the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
+        such a file exists in the home directory."""
+        import os
+        if os.environ.has_key('HOME'): home = os.environ['HOME']
+        else: home = os.curdir
+        class_tcl = os.path.join(home, '.%s.tcl' % className)
+        class_py = os.path.join(home, '.%s.py' % className)
+        base_tcl = os.path.join(home, '.%s.tcl' % baseName)
+        base_py = os.path.join(home, '.%s.py' % baseName)
+        dir = {'self': self}
+        # Make the Tkinter namespace available to the profile scripts.
+        exec 'from Tkinter import *' in dir
+        if os.path.isfile(class_tcl):
+            self.tk.call('source', class_tcl)
+        if os.path.isfile(class_py):
+            execfile(class_py, dir)
+        if os.path.isfile(base_tcl):
+            self.tk.call('source', base_tcl)
+        if os.path.isfile(base_py):
+            execfile(base_py, dir)
+    def report_callback_exception(self, exc, val, tb):
+        """Internal function. It reports exception on sys.stderr."""
+        import traceback, sys
+        sys.stderr.write("Exception in Tkinter callback\n")
+        # Mirror the interpreter's convention so pdb.pm() etc. work.
+        sys.last_type = exc
+        sys.last_value = val
+        sys.last_traceback = tb
+        traceback.print_exception(exc, val, tb)
+
+# Ideally, the classes Pack, Place and Grid disappear, the
+# pack/place/grid methods are defined on the Widget class, and
+# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
+# ...), with pack(), place() and grid() being short for
+# pack_configure(), place_configure() and grid_columnconfigure(), and
+# forget() being short for pack_forget().  As a practical matter, I'm
+# afraid that there is too much code out there that may be using the
+# Pack, Place or Grid class, so I leave them intact -- but only as
+# backwards compatibility features.  Also note that those methods that
+# take a master as argument (e.g. pack_propagate) have been moved to
+# the Misc class (which now incorporates all methods common between
+# toplevel and interior widgets).  Again, for compatibility, these are
+# copied into the Pack, Place or Grid class.
+
+class Pack:
+    """Geometry manager Pack.
+
+    Base class to use the methods pack_* in every widget."""
+    def pack_configure(self, cnf={}, **kw):
+        """Pack a widget in the parent widget. Use as options:
+        after=widget - pack it after you have packed widget
+        anchor=NSEW (or subset) - position widget according to
+                                  given direction
+        before=widget - pack it before you will pack widget
+        expand=1 or 0 - expand widget if parent size grows
+        fill=NONE or X or Y or BOTH - fill widget if widget grows
+        in=master - use master to contain this widget
+        ipadx=amount - add internal padding in x direction
+        ipady=amount - add internal padding in y direction
+        padx=amount - add padding in x direction
+        pady=amount - add padding in y direction
+        side=TOP or BOTTOM or LEFT or RIGHT -  where to add this widget.
+        """
+        self.tk.call(
+              ('pack', 'configure', self._w)
+              + self._options(cnf, kw))
+    pack = configure = config = pack_configure
+    def pack_forget(self):
+        """Unmap this widget and do not use it for the packing order."""
+        self.tk.call('pack', 'forget', self._w)
+    forget = pack_forget
+    def pack_info(self):
+        """Return information about the packing options
+        for this widget."""
+        # Convert Tcl's flat "-key value ..." list into a dict,
+        # resolving window path names to widget objects.
+        words = self.tk.splitlist(
+            self.tk.call('pack', 'info', self._w))
+        dict = {}
+        for i in range(0, len(words), 2):
+            key = words[i][1:]
+            value = words[i+1]
+            if value[:1] == '.':
+                value = self._nametowidget(value)
+            dict[key] = value
+        return dict
+    info = pack_info
+    # These methods live on Misc; copied here for backwards compatibility.
+    propagate = pack_propagate = Misc.pack_propagate
+    slaves = pack_slaves = Misc.pack_slaves
+
+class Place:
+    """Geometry manager Place.
+
+    Base class to use the methods place_* in every widget."""
+    def place_configure(self, cnf={}, **kw):
+        """Place a widget in the parent widget. Use as options:
+        in=master - master relative to which the widget is placed.
+        x=amount - locate anchor of this widget at position x of master
+        y=amount - locate anchor of this widget at position y of master
+        relx=amount - locate anchor of this widget between 0.0 and 1.0
+                      relative to width of master (1.0 is right edge)
+        rely=amount - locate anchor of this widget between 0.0 and 1.0
+                      relative to height of master (1.0 is bottom edge)
+        anchor=NSEW (or subset) - position anchor according to given direction
+        width=amount - width of this widget in pixel
+        height=amount - height of this widget in pixel
+        relwidth=amount - width of this widget between 0.0 and 1.0
+                          relative to width of master (1.0 is the same width
+                          as the master)
+        relheight=amount - height of this widget between 0.0 and 1.0
+                           relative to height of master (1.0 is the same
+                           height as the master)
+        bordermode="inside" or "outside" - whether to take border width of master widget
+                                           into account
+        """
+        # Rename the keyword-safe spelling 'in_' to the Tcl option 'in'.
+        for k in ['in_']:
+            if kw.has_key(k):
+                kw[k[:-1]] = kw[k]
+                del kw[k]
+        self.tk.call(
+              ('place', 'configure', self._w)
+              + self._options(cnf, kw))
+    place = configure = config = place_configure
+    def place_forget(self):
+        """Unmap this widget."""
+        self.tk.call('place', 'forget', self._w)
+    forget = place_forget
+    def place_info(self):
+        """Return information about the placing options
+        for this widget."""
+        # Convert Tcl's flat "-key value ..." list into a dict,
+        # resolving window path names to widget objects.
+        words = self.tk.splitlist(
+            self.tk.call('place', 'info', self._w))
+        dict = {}
+        for i in range(0, len(words), 2):
+            key = words[i][1:]
+            value = words[i+1]
+            if value[:1] == '.':
+                value = self._nametowidget(value)
+            dict[key] = value
+        return dict
+    info = place_info
+    # Lives on Misc; copied here for backwards compatibility.
+    slaves = place_slaves = Misc.place_slaves
+
+class Grid:
+    """Geometry manager Grid.
+
+    Base class to use the methods grid_* in every widget."""
+    # Thanks to Masazumi Yoshikawa (yosikawa at isi.edu)
+    def grid_configure(self, cnf={}, **kw):
+        """Position a widget in the parent widget in a grid. Use as options:
+        column=number - use cell identified with given column (starting with 0)
+        columnspan=number - this widget will span several columns
+        in=master - use master to contain this widget
+        ipadx=amount - add internal padding in x direction
+        ipady=amount - add internal padding in y direction
+        padx=amount - add padding in x direction
+        pady=amount - add padding in y direction
+        row=number - use cell identified with given row (starting with 0)
+        rowspan=number - this widget will span several rows
+        sticky=NSEW - if cell is larger on which sides will this
+                      widget stick to the cell boundary
+        """
+        self.tk.call(
+              ('grid', 'configure', self._w)
+              + self._options(cnf, kw))
+    grid = configure = config = grid_configure
+    # These methods live on Misc; copied here for backwards compatibility.
+    bbox = grid_bbox = Misc.grid_bbox
+    columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
+    def grid_forget(self):
+        """Unmap this widget."""
+        self.tk.call('grid', 'forget', self._w)
+    forget = grid_forget
+    def grid_remove(self):
+        """Unmap this widget but remember the grid options."""
+        self.tk.call('grid', 'remove', self._w)
+    def grid_info(self):
+        """Return information about the options
+        for positioning this widget in a grid."""
+        # Convert Tcl's flat "-key value ..." list into a dict,
+        # resolving window path names to widget objects.
+        words = self.tk.splitlist(
+            self.tk.call('grid', 'info', self._w))
+        dict = {}
+        for i in range(0, len(words), 2):
+            key = words[i][1:]
+            value = words[i+1]
+            if value[:1] == '.':
+                value = self._nametowidget(value)
+            dict[key] = value
+        return dict
+    info = grid_info
+    location = grid_location = Misc.grid_location
+    propagate = grid_propagate = Misc.grid_propagate
+    rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
+    size = grid_size = Misc.grid_size
+    slaves = grid_slaves = Misc.grid_slaves
+
+class BaseWidget(Misc):
+    """Internal class."""
+    def _setup(self, master, cnf):
+        """Internal function. Sets up information about children."""
+        if _support_default_root:
+            global _default_root
+            if not master:
+                # Create an implicit root window on first use.
+                if not _default_root:
+                    _default_root = Tk()
+                master = _default_root
+        self.master = master
+        self.tk = master.tk
+        name = None
+        if cnf.has_key('name'):
+            name = cnf['name']
+            del cnf['name']
+        if not name:
+            # Default widget name: the repr of id(self) (backquotes).
+            name = `id(self)`
+        self._name = name
+        if master._w=='.':
+            self._w = '.' + name
+        else:
+            self._w = master._w + '.' + name
+        self.children = {}
+        # An existing child with the same name is replaced.
+        if self.master.children.has_key(self._name):
+            self.master.children[self._name].destroy()
+        self.master.children[self._name] = self
+    def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
+        """Construct a widget with the parent widget MASTER, a name WIDGETNAME
+        and appropriate options."""
+        if kw:
+            cnf = _cnfmerge((cnf, kw))
+        self.widgetName = widgetName
+        BaseWidget._setup(self, master, cnf)
+        # Class-keyed entries in cnf are applied via <class>.configure
+        # after the widget has been created.
+        classes = []
+        for k in cnf.keys():
+            if type(k) is ClassType:
+                classes.append((k, cnf[k]))
+                del cnf[k]
+        self.tk.call(
+            (widgetName, self._w) + extra + self._options(cnf))
+        for k, v in classes:
+            k.configure(self, v)
+    def destroy(self):
+        """Destroy this and all descendants widgets."""
+        for c in self.children.values(): c.destroy()
+        if self.master.children.has_key(self._name):
+            del self.master.children[self._name]
+        self.tk.call('destroy', self._w)
+        Misc.destroy(self)
+    def _do(self, name, args=()):
+        # XXX Obsolete -- better use self.tk.call directly!
+        return self.tk.call((self._w, name) + args)
+
+class Widget(BaseWidget, Pack, Place, Grid):
+    """Internal class.
+
+    Base class for a widget which can be positioned with the geometry managers
+    Pack, Place or Grid."""
+    # Pure mixin combination; everything is inherited.
+    pass
+
+class Toplevel(BaseWidget, Wm):
+    """Toplevel widget, e.g. for dialogs."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a toplevel widget with the parent MASTER.
+
+        Valid resource names: background, bd, bg, borderwidth, class,
+        colormap, container, cursor, height, highlightbackground,
+        highlightcolor, highlightthickness, menu, relief, screen, takefocus,
+        use, visual, width."""
+        if kw:
+            cnf = _cnfmerge((cnf, kw))
+        # Creation-time-only options must be passed on the command line
+        # of the 'toplevel' command, not via configure afterwards.
+        extra = ()
+        for wmkey in ['screen', 'class_', 'class', 'visual',
+                  'colormap']:
+            if cnf.has_key(wmkey):
+                val = cnf[wmkey]
+                # TBD: a hack needed because some keys
+                # are not valid as keyword arguments
+                if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
+                else: opt = '-'+wmkey
+                extra = extra + (opt, val)
+                del cnf[wmkey]
+        BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
+        # Inherit icon name and title from the root window by default.
+        root = self._root()
+        self.iconname(root.iconname())
+        self.title(root.title())
+        self.protocol("WM_DELETE_WINDOW", self.destroy)
+
+class Button(Widget):
+    """Button widget."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a button widget with the parent MASTER.
+
+        Valid resource names: activebackground, activeforeground, anchor,
+        background, bd, bg, bitmap, borderwidth, command, cursor, default,
+        disabledforeground, fg, font, foreground, height,
+        highlightbackground, highlightcolor, highlightthickness, image,
+        justify, padx, pady, relief, state, takefocus, text, textvariable,
+        underline, width, wraplength."""
+        Widget.__init__(self, master, 'button', cnf, kw)
+    # The tkButton* methods invoke the corresponding internal Tk
+    # procedures that implement the button's default bindings.
+    def tkButtonEnter(self, *dummy):
+        self.tk.call('tkButtonEnter', self._w)
+    def tkButtonLeave(self, *dummy):
+        self.tk.call('tkButtonLeave', self._w)
+    def tkButtonDown(self, *dummy):
+        self.tk.call('tkButtonDown', self._w)
+    def tkButtonUp(self, *dummy):
+        self.tk.call('tkButtonUp', self._w)
+    def tkButtonInvoke(self, *dummy):
+        self.tk.call('tkButtonInvoke', self._w)
+    def flash(self):
+        """Flash the button."""
+        self.tk.call(self._w, 'flash')
+    def invoke(self):
+        """Invoke the command associated with the button."""
+        return self.tk.call(self._w, 'invoke')
+
+# Indices:
+# XXX I don't like these -- take them away
+def AtEnd():
+    """Return the Tk index string for the end of the widget contents."""
+    return 'end'
+def AtInsert(*args):
+    """Return the Tk index string for the insertion cursor, with any
+    additional (truthy) modifiers from ARGS appended."""
+    s = 'insert'
+    for a in args:
+        if a: s = s + (' ' + a)
+    return s
+def AtSelFirst():
+    """Return the Tk index string for the start of the selection."""
+    return 'sel.first'
+def AtSelLast():
+    """Return the Tk index string for the end of the selection."""
+    return 'sel.last'
+def At(x, y=None):
+    """Return a Tk '@x' or '@x,y' pixel-position index string."""
+    if y is None:
+        return '@' + `x`
+    else:
+        return '@' + `x` + ',' + `y`
+
+class Canvas(Widget):
+    """Canvas widget to display graphical elements like lines or text."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a canvas widget with the parent MASTER.
+
+        Valid resource names: background, bd, bg, borderwidth, closeenough,
+        confine, cursor, height, highlightbackground, highlightcolor,
+        highlightthickness, insertbackground, insertborderwidth,
+        insertofftime, insertontime, insertwidth, offset, relief,
+        scrollregion, selectbackground, selectborderwidth, selectforeground,
+        state, takefocus, width, xscrollcommand, xscrollincrement,
+        yscrollcommand, yscrollincrement."""
+        Widget.__init__(self, master, 'canvas', cnf, kw)
+    def addtag(self, *args):
+        """Internal function.  Forward an 'addtag' subcommand to Tk."""
+        self.tk.call((self._w, 'addtag') + args)
+    def addtag_above(self, newtag, tagOrId):
+        """Add tag NEWTAG to all items above TAGORID."""
+        self.addtag(newtag, 'above', tagOrId)
+    def addtag_all(self, newtag):
+        """Add tag NEWTAG to all items."""
+        self.addtag(newtag, 'all')
+    def addtag_below(self, newtag, tagOrId):
+        """Add tag NEWTAG to all items below TAGORID."""
+        self.addtag(newtag, 'below', tagOrId)
+    def addtag_closest(self, newtag, x, y, halo=None, start=None):
+        """Add tag NEWTAG to item which is closest to pixel at X, Y.
+        If several match take the top-most.
+        All items closer than HALO are considered overlapping (all are
+        closest). If START is specified the next below this tag is taken."""
+        self.addtag(newtag, 'closest', x, y, halo, start)
+    def addtag_enclosed(self, newtag, x1, y1, x2, y2):
+        """Add tag NEWTAG to all items in the rectangle defined
+        by X1,Y1,X2,Y2."""
+        self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
+    def addtag_overlapping(self, newtag, x1, y1, x2, y2):
+        """Add tag NEWTAG to all items which overlap the rectangle
+        defined by X1,Y1,X2,Y2."""
+        self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
+    def addtag_withtag(self, newtag, tagOrId):
+        """Add tag NEWTAG to all items with TAGORID."""
+        self.addtag(newtag, 'withtag', tagOrId)
+    def bbox(self, *args):
+        """Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
+        which encloses all items with tags specified as arguments."""
+        return self._getints(
+            self.tk.call((self._w, 'bbox') + args)) or None
+    def tag_unbind(self, tagOrId, sequence, funcid=None):
+        """Unbind for all items with TAGORID for event SEQUENCE  the
+        function identified with FUNCID."""
+        self.tk.call(self._w, 'bind', tagOrId, sequence, '')
+        if funcid:
+            self.deletecommand(funcid)
+    def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
+        """Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.
+
+        An additional boolean parameter ADD specifies whether FUNC will be
+        called additionally to the other bound function or whether it will
+        replace the previous function. See bind for the return value."""
+        return self._bind((self._w, 'bind', tagOrId),
+                  sequence, func, add)
+    def canvasx(self, screenx, gridspacing=None):
+        """Return the canvas x coordinate of pixel position SCREENX rounded
+        to nearest multiple of GRIDSPACING units."""
+        return getdouble(self.tk.call(
+            self._w, 'canvasx', screenx, gridspacing))
+    def canvasy(self, screeny, gridspacing=None):
+        """Return the canvas y coordinate of pixel position SCREENY rounded
+        to nearest multiple of GRIDSPACING units."""
+        return getdouble(self.tk.call(
+            self._w, 'canvasy', screeny, gridspacing))
+    def coords(self, *args):
+        """Return a list of coordinates for the item given in ARGS."""
+        # XXX Should use _flatten on args
+        return map(getdouble,
+                           self.tk.splitlist(
+                   self.tk.call((self._w, 'coords') + args)))
+    def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
+        """Internal function.
+
+        Create a canvas item of type ITEMTYPE.  The last positional
+        argument may be a configuration dict (or tuple); it is merged
+        with the keyword arguments KW into the item options.  Returns
+        the integer id of the new item."""
+        args = _flatten(args)
+        cnf = args[-1]
+        # A trailing dict/tuple is item configuration, not a coordinate.
+        if type(cnf) in (DictionaryType, TupleType):
+            args = args[:-1]
+        else:
+            cnf = {}
+        return getint(apply(
+            self.tk.call,
+            (self._w, 'create', itemType)
+            + args + self._options(cnf, kw)))
+    def create_arc(self, *args, **kw):
+        """Create arc shaped region with coordinates x1,y1,x2,y2."""
+        return self._create('arc', args, kw)
+    def create_bitmap(self, *args, **kw):
+        """Create bitmap with coordinates x1,y1."""
+        return self._create('bitmap', args, kw)
+    def create_image(self, *args, **kw):
+        """Create image item with coordinates x1,y1."""
+        return self._create('image', args, kw)
+    def create_line(self, *args, **kw):
+        """Create line with coordinates x1,y1,...,xn,yn."""
+        return self._create('line', args, kw)
+    def create_oval(self, *args, **kw):
+        """Create oval with coordinates x1,y1,x2,y2."""
+        return self._create('oval', args, kw)
+    def create_polygon(self, *args, **kw):
+        """Create polygon with coordinates x1,y1,...,xn,yn."""
+        return self._create('polygon', args, kw)
+    def create_rectangle(self, *args, **kw):
+        """Create rectangle with coordinates x1,y1,x2,y2."""
+        return self._create('rectangle', args, kw)
+    def create_text(self, *args, **kw):
+        """Create text with coordinates x1,y1."""
+        return self._create('text', args, kw)
+    def create_window(self, *args, **kw):
+        """Create window with coordinates x1,y1,x2,y2."""
+        return self._create('window', args, kw)
+    def dchars(self, *args):
+        """Delete characters of text items identified by tag or id in ARGS (possibly
+        several times) from FIRST to LAST character (including)."""
+        self.tk.call((self._w, 'dchars') + args)
+    def delete(self, *args):
+        """Delete items identified by all tag or ids contained in ARGS."""
+        self.tk.call((self._w, 'delete') + args)
+    def dtag(self, *args):
+        """Delete tag or id given as last arguments in ARGS from items
+        identified by first argument in ARGS."""
+        self.tk.call((self._w, 'dtag') + args)
+    def find(self, *args):
+        """Internal function."""
+        return self._getints(
+            self.tk.call((self._w, 'find') + args)) or ()
+    def find_above(self, tagOrId):
+        """Return items above TAGORID."""
+        return self.find('above', tagOrId)
+    def find_all(self):
+        """Return all items."""
+        return self.find('all')
+    def find_below(self, tagOrId):
+        """Return all items below TAGORID."""
+        return self.find('below', tagOrId)
+    def find_closest(self, x, y, halo=None, start=None):
+        """Return item which is closest to pixel at X, Y.
+        If several match take the top-most.
+        All items closer than HALO are considered overlapping (all are
+        closest). If START is specified the next below this tag is taken."""
+        return self.find('closest', x, y, halo, start)
+    def find_enclosed(self, x1, y1, x2, y2):
+        """Return all items in rectangle defined
+        by X1,Y1,X2,Y2."""
+        return self.find('enclosed', x1, y1, x2, y2)
+    def find_overlapping(self, x1, y1, x2, y2):
+        """Return all items which overlap the rectangle
+        defined by X1,Y1,X2,Y2."""
+        return self.find('overlapping', x1, y1, x2, y2)
+    def find_withtag(self, tagOrId):
+        """Return all items with TAGORID."""
+        return self.find('withtag', tagOrId)
+    def focus(self, *args):
+        """Set focus to the first item specified in ARGS."""
+        return self.tk.call((self._w, 'focus') + args)
+    def gettags(self, *args):
+        """Return tags associated with the first item specified in ARGS."""
+        return self.tk.splitlist(
+            self.tk.call((self._w, 'gettags') + args))
+    def icursor(self, *args):
+        """Set cursor at position POS in the item identified by TAGORID.
+        In ARGS TAGORID must be first."""
+        self.tk.call((self._w, 'icursor') + args)
+    def index(self, *args):
+        """Return position of cursor as integer in item specified in ARGS."""
+        return getint(self.tk.call((self._w, 'index') + args))
+    def insert(self, *args):
+        """Insert TEXT in item TAGORID at position POS. ARGS must
+        be TAGORID POS TEXT."""
+        self.tk.call((self._w, 'insert') + args)
+    def itemcget(self, tagOrId, option):
+        """Return the resource value for an OPTION for item TAGORID."""
+        return self.tk.call(
+            (self._w, 'itemcget') + (tagOrId, '-'+option))
+    def itemconfigure(self, tagOrId, cnf=None, **kw):
+        """Configure resources of an item TAGORID.
+
+        The values for resources are specified as keyword
+        arguments. To get an overview about
+        the allowed keyword arguments call the method without arguments.
+        """
+        # No arguments: return a dict describing all item options.
+        if cnf is None and not kw:
+            cnf = {}
+            for x in self.tk.split(
+                self.tk.call(self._w,
+                         'itemconfigure', tagOrId)):
+                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+            return cnf
+        # A single option name as string: return that option's description.
+        if type(cnf) == StringType and not kw:
+            x = self.tk.split(self.tk.call(
+                self._w, 'itemconfigure', tagOrId, '-'+cnf))
+            return (x[0][1:],) + x[1:]
+        # Otherwise set the given options on the item.
+        self.tk.call((self._w, 'itemconfigure', tagOrId) +
+                 self._options(cnf, kw))
+    itemconfig = itemconfigure
+    # lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
+    # so the preferred name for them is tag_lower, tag_raise
+    # (similar to tag_bind, and similar to the Text widget);
+    # unfortunately can't delete the old ones yet (maybe in 1.6)
+    def tag_lower(self, *args):
+        """Lower an item TAGORID given in ARGS
+        (optional below another item)."""
+        self.tk.call((self._w, 'lower') + args)
+    lower = tag_lower
+    def move(self, *args):
+        """Move an item TAGORID given in ARGS."""
+        self.tk.call((self._w, 'move') + args)
+    def postscript(self, cnf={}, **kw):
+        """Print the contents of the canvas to a postscript
+        file. Valid options: colormap, colormode, file, fontmap,
+        height, pageanchor, pageheight, pagewidth, pagex, pagey,
+        rotate, width, x, y."""
+        return self.tk.call((self._w, 'postscript') +
+                    self._options(cnf, kw))
+    def tag_raise(self, *args):
+        """Raise an item TAGORID given in ARGS
+        (optional above another item)."""
+        self.tk.call((self._w, 'raise') + args)
+    lift = tkraise = tag_raise
+    def scale(self, *args):
+        """Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
+        self.tk.call((self._w, 'scale') + args)
+    def scan_mark(self, x, y):
+        """Remember the current X, Y coordinates."""
+        self.tk.call(self._w, 'scan', 'mark', x, y)
+    def scan_dragto(self, x, y):
+        """Adjust the view of the canvas to 10 times the
+        difference between X and Y and the coordinates given in
+        scan_mark."""
+        self.tk.call(self._w, 'scan', 'dragto', x, y)
+    def select_adjust(self, tagOrId, index):
+        """Adjust the end of the selection near the cursor of an item TAGORID to index."""
+        self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
+    def select_clear(self):
+        """Clear the selection if it is in this widget."""
+        self.tk.call(self._w, 'select', 'clear')
+    def select_from(self, tagOrId, index):
+        """Set the fixed end of a selection in item TAGORID to INDEX."""
+        self.tk.call(self._w, 'select', 'from', tagOrId, index)
+    def select_item(self):
+        """Return the item which has the selection."""
+        return self.tk.call(self._w, 'select', 'item') or None
+    def select_to(self, tagOrId, index):
+        """Set the variable end of a selection in item TAGORID to INDEX."""
+        self.tk.call(self._w, 'select', 'to', tagOrId, index)
+    def type(self, tagOrId):
+        """Return the type of the item TAGORID."""
+        return self.tk.call(self._w, 'type', tagOrId) or None
+    def xview(self, *args):
+        """Query and change horizontal position of the view."""
+        if not args:
+            return self._getdoubles(self.tk.call(self._w, 'xview'))
+        self.tk.call((self._w, 'xview') + args)
+    def xview_moveto(self, fraction):
+        """Adjusts the view in the window so that FRACTION of the
+        total width of the canvas is off-screen to the left."""
+        self.tk.call(self._w, 'xview', 'moveto', fraction)
+    def xview_scroll(self, number, what):
+        """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
+        self.tk.call(self._w, 'xview', 'scroll', number, what)
+    def yview(self, *args):
+        """Query and change vertical position of the view."""
+        if not args:
+            return self._getdoubles(self.tk.call(self._w, 'yview'))
+        self.tk.call((self._w, 'yview') + args)
+    def yview_moveto(self, fraction):
+        """Adjusts the view in the window so that FRACTION of the
+        total height of the canvas is off-screen to the top."""
+        self.tk.call(self._w, 'yview', 'moveto', fraction)
+    def yview_scroll(self, number, what):
+        """Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
+        self.tk.call(self._w, 'yview', 'scroll', number, what)
+
+class Checkbutton(Widget):
+    """Checkbutton widget which is either in on- or off-state."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a checkbutton widget with the parent MASTER.
+
+        Valid resource names: activebackground, activeforeground, anchor,
+        background, bd, bg, bitmap, borderwidth, command, cursor,
+        disabledforeground, fg, font, foreground, height,
+        highlightbackground, highlightcolor, highlightthickness, image,
+        indicatoron, justify, offvalue, onvalue, padx, pady, relief,
+        selectcolor, selectimage, state, takefocus, text, textvariable,
+        underline, variable, width, wraplength."""
+        Widget.__init__(self, master, 'checkbutton', cnf, kw)
+    def deselect(self):
+        """Put the button in off-state."""
+        self.tk.call(self._w, 'deselect')
+    def flash(self):
+        """Flash the button."""
+        self.tk.call(self._w, 'flash')
+    def invoke(self):
+        """Toggle the button and invoke a command if given as resource."""
+        return self.tk.call(self._w, 'invoke')
+    def select(self):
+        """Put the button in on-state."""
+        self.tk.call(self._w, 'select')
+    def toggle(self):
+        """Toggle the button."""
+        self.tk.call(self._w, 'toggle')
+
+class Entry(Widget):
+    """Entry widget which allows to display simple text."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct an entry widget with the parent MASTER.
+
+        Valid resource names: background, bd, bg, borderwidth, cursor,
+        exportselection, fg, font, foreground, highlightbackground,
+        highlightcolor, highlightthickness, insertbackground,
+        insertborderwidth, insertofftime, insertontime, insertwidth,
+        invalidcommand, invcmd, justify, relief, selectbackground,
+        selectborderwidth, selectforeground, show, state, takefocus,
+        textvariable, validate, validatecommand, vcmd, width,
+        xscrollcommand."""
+        Widget.__init__(self, master, 'entry', cnf, kw)
+    def delete(self, first, last=None):
+        """Delete text from FIRST to LAST (not included)."""
+        self.tk.call(self._w, 'delete', first, last)
+    def get(self):
+        """Return the text."""
+        return self.tk.call(self._w, 'get')
+    def icursor(self, index):
+        """Insert cursor at INDEX."""
+        self.tk.call(self._w, 'icursor', index)
+    def index(self, index):
+        """Return position of cursor."""
+        return getint(self.tk.call(
+            self._w, 'index', index))
+    def insert(self, index, string):
+        """Insert STRING at INDEX."""
+        self.tk.call(self._w, 'insert', index, string)
+    def scan_mark(self, x):
+        """Remember the current X coordinate."""
+        self.tk.call(self._w, 'scan', 'mark', x)
+    def scan_dragto(self, x):
+        """Adjust the view of the entry to 10 times the
+        difference between X and the coordinate given in
+        scan_mark."""
+        self.tk.call(self._w, 'scan', 'dragto', x)
+    def selection_adjust(self, index):
+        """Adjust the end of the selection near the cursor to INDEX."""
+        self.tk.call(self._w, 'selection', 'adjust', index)
+    select_adjust = selection_adjust
+    def selection_clear(self):
+        """Clear the selection if it is in this widget."""
+        self.tk.call(self._w, 'selection', 'clear')
+    select_clear = selection_clear
+    def selection_from(self, index):
+        """Set the fixed end of a selection to INDEX."""
+        self.tk.call(self._w, 'selection', 'from', index)
+    select_from = selection_from
+    def selection_present(self):
+        """Return whether the widget has the selection."""
+        return self.tk.getboolean(
+            self.tk.call(self._w, 'selection', 'present'))
+    select_present = selection_present
+    def selection_range(self, start, end):
+        """Set the selection from START to END (not included)."""
+        self.tk.call(self._w, 'selection', 'range', start, end)
+    select_range = selection_range
+    def selection_to(self, index):
+        """Set the variable end of a selection to INDEX."""
+        self.tk.call(self._w, 'selection', 'to', index)
+    select_to = selection_to
+    def xview(self, index):
+        """Query and change horizontal position of the view."""
+        self.tk.call(self._w, 'xview', index)
+    def xview_moveto(self, fraction):
+        """Adjust the view in the window so that FRACTION of the
+        total width of the entry is off-screen to the left."""
+        self.tk.call(self._w, 'xview', 'moveto', fraction)
+    def xview_scroll(self, number, what):
+        """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
+        self.tk.call(self._w, 'xview', 'scroll', number, what)
+
+class Frame(Widget):
+    """Frame widget which may contain other widgets and can have a 3D border."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a frame widget with the parent MASTER.
+
+        Valid resource names: background, bd, bg, borderwidth, class,
+        colormap, container, cursor, height, highlightbackground,
+        highlightcolor, highlightthickness, relief, takefocus, visual, width."""
+        cnf = _cnfmerge((cnf, kw))
+        extra = ()
+        # 'class' is a Python reserved word, so 'class_' is accepted
+        # as an alias; either is passed to Tk as the -class option.
+        if cnf.has_key('class_'):
+            extra = ('-class', cnf['class_'])
+            del cnf['class_']
+        elif cnf.has_key('class'):
+            extra = ('-class', cnf['class'])
+            del cnf['class']
+        Widget.__init__(self, master, 'frame', cnf, {}, extra)
+
+class Label(Widget):
+    """Label widget which can display text and bitmaps."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a label widget with the parent MASTER.
+
+        Valid resource names: anchor, background, bd, bg, bitmap,
+        borderwidth, cursor, fg, font, foreground, height,
+        highlightbackground, highlightcolor, highlightthickness, image,
+        justify, padx, pady, relief, takefocus, text, textvariable,
+        underline, width, wraplength."""
+        Widget.__init__(self, master, 'label', cnf, kw)
+
+class Listbox(Widget):
+    """Listbox widget which can display a list of strings."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a listbox widget with the parent MASTER.
+
+        Valid resource names: background, bd, bg, borderwidth, cursor,
+        exportselection, fg, font, foreground, height, highlightbackground,
+        highlightcolor, highlightthickness, relief, selectbackground,
+        selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
+        width, xscrollcommand, yscrollcommand, listvariable."""
+        Widget.__init__(self, master, 'listbox', cnf, kw)
+    def activate(self, index):
+        """Activate item identified by INDEX."""
+        self.tk.call(self._w, 'activate', index)
+    def bbox(self, *args):
+        """Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
+        which encloses the item identified by index in ARGS."""
+        return self._getints(
+            self.tk.call((self._w, 'bbox') + args)) or None
+    def curselection(self):
+        """Return list of indices of currently selected item."""
+        # XXX Ought to apply self._getints()...
+        return self.tk.splitlist(self.tk.call(
+            self._w, 'curselection'))
+    def delete(self, first, last=None):
+        """Delete items from FIRST to LAST (not included)."""
+        self.tk.call(self._w, 'delete', first, last)
+    def get(self, first, last=None):
+        """Get list of items from FIRST to LAST (not included)."""
+        if last:
+            return self.tk.splitlist(self.tk.call(
+                self._w, 'get', first, last))
+        else:
+            return self.tk.call(self._w, 'get', first)
+    def index(self, index):
+        """Return index of item identified with INDEX."""
+        i = self.tk.call(self._w, 'index', index)
+        if i == 'none': return None
+        return getint(i)
+    def insert(self, index, *elements):
+        """Insert ELEMENTS at INDEX."""
+        self.tk.call((self._w, 'insert', index) + elements)
+    def nearest(self, y):
+        """Get index of item which is nearest to y coordinate Y."""
+        return getint(self.tk.call(
+            self._w, 'nearest', y))
+    def scan_mark(self, x, y):
+        """Remember the current X, Y coordinates."""
+        self.tk.call(self._w, 'scan', 'mark', x, y)
+    def scan_dragto(self, x, y):
+        """Adjust the view of the listbox to 10 times the
+        difference between X and Y and the coordinates given in
+        scan_mark."""
+        self.tk.call(self._w, 'scan', 'dragto', x, y)
+    def see(self, index):
+        """Scroll such that INDEX is visible."""
+        self.tk.call(self._w, 'see', index)
+    def selection_anchor(self, index):
+        """Set the fixed end of the selection to INDEX."""
+        self.tk.call(self._w, 'selection', 'anchor', index)
+    select_anchor = selection_anchor
+    def selection_clear(self, first, last=None):
+        """Clear the selection from FIRST to LAST (not included)."""
+        self.tk.call(self._w,
+                 'selection', 'clear', first, last)
+    select_clear = selection_clear
+    def selection_includes(self, index):
+        """Return 1 if INDEX is part of the selection."""
+        return self.tk.getboolean(self.tk.call(
+            self._w, 'selection', 'includes', index))
+    select_includes = selection_includes
+    def selection_set(self, first, last=None):
+        """Set the selection from FIRST to LAST (not included) without
+        changing the currently selected elements."""
+        self.tk.call(self._w, 'selection', 'set', first, last)
+    select_set = selection_set
+    def size(self):
+        """Return the number of elements in the listbox."""
+        return getint(self.tk.call(self._w, 'size'))
+    def xview(self, *what):
+        """Query and change horizontal position of the view."""
+        if not what:
+            return self._getdoubles(self.tk.call(self._w, 'xview'))
+        self.tk.call((self._w, 'xview') + what)
+    def xview_moveto(self, fraction):
+        """Adjust the view in the window so that FRACTION of the
+        total width of the listbox is off-screen to the left."""
+        self.tk.call(self._w, 'xview', 'moveto', fraction)
+    def xview_scroll(self, number, what):
+        """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
+        self.tk.call(self._w, 'xview', 'scroll', number, what)
+    def yview(self, *what):
+        """Query and change vertical position of the view."""
+        if not what:
+            return self._getdoubles(self.tk.call(self._w, 'yview'))
+        self.tk.call((self._w, 'yview') + what)
+    def yview_moveto(self, fraction):
+        """Adjust the view in the window so that FRACTION of the
+        total height of the listbox is off-screen to the top."""
+        self.tk.call(self._w, 'yview', 'moveto', fraction)
+    def yview_scroll(self, number, what):
+        """Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
+        self.tk.call(self._w, 'yview', 'scroll', number, what)
+    def itemcget(self, index, option):
+        """Return the resource value for an ITEM and an OPTION."""
+        return self.tk.call(
+            (self._w, 'itemcget') + (index, '-'+option))
+    def itemconfigure(self, index, cnf=None, **kw):
+        """Configure resources of an ITEM.
+
+        The values for resources are specified as keyword arguments.
+        To get an overview about the allowed keyword arguments
+        call the method without arguments.
+        Valid resource names: background, bg, foreground, fg,
+        selectbackground, selectforeground."""
+        if cnf is None and not kw:
+            cnf = {}
+            for x in self.tk.split(
+                self.tk.call(self._w, 'itemconfigure', index)):
+                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+            return cnf
+        if type(cnf) == StringType and not kw:
+            x = self.tk.split(self.tk.call(
+                self._w, 'itemconfigure', index, '-'+cnf))
+            return (x[0][1:],) + x[1:]
+        self.tk.call((self._w, 'itemconfigure', index) +
+                     self._options(cnf, kw))
+    itemconfig = itemconfigure
+
+class Menu(Widget):
+    """Menu widget which allows to display menu bars, pull-down menus and pop-up menus."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct menu widget with the parent MASTER.
+
+        Valid resource names: activebackground, activeborderwidth,
+        activeforeground, background, bd, bg, borderwidth, cursor,
+        disabledforeground, fg, font, foreground, postcommand, relief,
+        selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
+        Widget.__init__(self, master, 'menu', cnf, kw)
+    def tk_bindForTraversal(self):
+        pass # obsolete since Tk 4.0
+    def tk_mbPost(self):
+        self.tk.call('tk_mbPost', self._w)
+    def tk_mbUnpost(self):
+        self.tk.call('tk_mbUnpost')
+    def tk_traverseToMenu(self, char):
+        self.tk.call('tk_traverseToMenu', self._w, char)
+    def tk_traverseWithinMenu(self, char):
+        self.tk.call('tk_traverseWithinMenu', self._w, char)
+    def tk_getMenuButtons(self):
+        return self.tk.call('tk_getMenuButtons', self._w)
+    def tk_nextMenu(self, count):
+        self.tk.call('tk_nextMenu', count)
+    def tk_nextMenuEntry(self, count):
+        self.tk.call('tk_nextMenuEntry', count)
+    def tk_invokeMenu(self):
+        self.tk.call('tk_invokeMenu', self._w)
+    def tk_firstMenu(self):
+        self.tk.call('tk_firstMenu', self._w)
+    def tk_mbButtonDown(self):
+        self.tk.call('tk_mbButtonDown', self._w)
+    def tk_popup(self, x, y, entry=""):
+        """Post the menu at position X,Y with entry ENTRY."""
+        self.tk.call('tk_popup', self._w, x, y, entry)
+    def activate(self, index):
+        """Activate entry at INDEX."""
+        self.tk.call(self._w, 'activate', index)
+    def add(self, itemType, cnf={}, **kw):
+        """Internal function."""
+        self.tk.call((self._w, 'add', itemType) +
+                 self._options(cnf, kw))
+    def add_cascade(self, cnf={}, **kw):
+        """Add hierarchical menu item."""
+        self.add('cascade', cnf or kw)
+    def add_checkbutton(self, cnf={}, **kw):
+        """Add checkbutton menu item."""
+        self.add('checkbutton', cnf or kw)
+    def add_command(self, cnf={}, **kw):
+        """Add command menu item."""
+        self.add('command', cnf or kw)
+    def add_radiobutton(self, cnf={}, **kw):
+        """Add radiobutton menu item."""
+        self.add('radiobutton', cnf or kw)
+    def add_separator(self, cnf={}, **kw):
+        """Add separator."""
+        self.add('separator', cnf or kw)
+    def insert(self, index, itemType, cnf={}, **kw):
+        """Internal function."""
+        self.tk.call((self._w, 'insert', index, itemType) +
+                 self._options(cnf, kw))
+    def insert_cascade(self, index, cnf={}, **kw):
+        """Add hierarchical menu item at INDEX."""
+        self.insert(index, 'cascade', cnf or kw)
+    def insert_checkbutton(self, index, cnf={}, **kw):
+        """Add checkbutton menu item at INDEX."""
+        self.insert(index, 'checkbutton', cnf or kw)
+    def insert_command(self, index, cnf={}, **kw):
+        """Add command menu item at INDEX."""
+        self.insert(index, 'command', cnf or kw)
+    def insert_radiobutton(self, index, cnf={}, **kw):
+        """Add radiobutton menu item at INDEX."""
+        self.insert(index, 'radiobutton', cnf or kw)
+    def insert_separator(self, index, cnf={}, **kw):
+        """Add separator at INDEX."""
+        self.insert(index, 'separator', cnf or kw)
+    def delete(self, index1, index2=None):
+        """Delete menu items between INDEX1 and INDEX2 (not included)."""
+        self.tk.call(self._w, 'delete', index1, index2)
+    def entrycget(self, index, option):
+        """Return the resource value of an menu item for OPTION at INDEX."""
+        return self.tk.call(self._w, 'entrycget', index, '-' + option)
+    def entryconfigure(self, index, cnf=None, **kw):
+        """Configure a menu item at INDEX."""
+        if cnf is None and not kw:
+            cnf = {}
+            for x in self.tk.split(self.tk.call(
+                (self._w, 'entryconfigure', index))):
+                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+            return cnf
+        if type(cnf) == StringType and not kw:
+            x = self.tk.split(self.tk.call(
+                (self._w, 'entryconfigure', index, '-'+cnf)))
+            return (x[0][1:],) + x[1:]
+        self.tk.call((self._w, 'entryconfigure', index)
+              + self._options(cnf, kw))
+    entryconfig = entryconfigure
+    def index(self, index):
+        """Return the index of a menu item identified by INDEX."""
+        i = self.tk.call(self._w, 'index', index)
+        if i == 'none': return None
+        return getint(i)
+    def invoke(self, index):
+        """Invoke a menu item identified by INDEX and execute
+        the associated command."""
+        return self.tk.call(self._w, 'invoke', index)
+    def post(self, x, y):
+        """Display a menu at position X,Y."""
+        self.tk.call(self._w, 'post', x, y)
+    def type(self, index):
+        """Return the type of the menu item at INDEX."""
+        return self.tk.call(self._w, 'type', index)
+    def unpost(self):
+        """Unmap a menu."""
+        self.tk.call(self._w, 'unpost')
+    def yposition(self, index):
+        """Return the y-position of the topmost pixel of the menu item at INDEX."""
+        return getint(self.tk.call(
+            self._w, 'yposition', index))
+
+class Menubutton(Widget):
+    """Menubutton widget, obsolete since Tk8.0."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a menubutton widget with the parent MASTER."""
+        Widget.__init__(self, master, 'menubutton', cnf, kw)
+
+class Message(Widget):
+    """Message widget to display multiline text. Obsolete since Label does it too."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a message widget with the parent MASTER."""
+        Widget.__init__(self, master, 'message', cnf, kw)
+
+class Radiobutton(Widget):
+    """Radiobutton widget which shows only one of several buttons in on-state."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a radiobutton widget with the parent MASTER.
+
+        Valid resource names: activebackground, activeforeground, anchor,
+        background, bd, bg, bitmap, borderwidth, command, cursor,
+        disabledforeground, fg, font, foreground, height,
+        highlightbackground, highlightcolor, highlightthickness, image,
+        indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
+        state, takefocus, text, textvariable, underline, value, variable,
+        width, wraplength."""
+        Widget.__init__(self, master, 'radiobutton', cnf, kw)
+    def deselect(self):
+        """Put the button in off-state."""
+
+        self.tk.call(self._w, 'deselect')
+    def flash(self):
+        """Flash the button."""
+        self.tk.call(self._w, 'flash')
+    def invoke(self):
+        """Toggle the button and invoke a command if given as resource."""
+        return self.tk.call(self._w, 'invoke')
+    def select(self):
+        """Put the button in on-state."""
+        self.tk.call(self._w, 'select')
+
+class Scale(Widget):
+    """Scale widget which can display a numerical scale."""
+    def __init__(self, master=None, cnf={}, **kw):
+        """Construct a scale widget with the parent MASTER.
+
+        Valid resource names: activebackground, background, bigincrement, bd,
+        bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
+        highlightbackground, highlightcolor, highlightthickness, label,
+        length, orient, relief, repeatdelay, repeatinterval, resolution,
+        showvalue, sliderlength, sliderrelief, state, takefocus,
+        tickinterval, to, troughcolor, variable, width."""
+        Widget.__init__(self, master, 'scale', cnf, kw)
+    def get(self):
+        """Get the current value as integer or float."""
+        value = self.tk.call(self._w, 'get')
+        # Tk returns the value as a string; prefer an int if it parses.
+        try:
+            return getint(value)
+        except ValueError:
+            return getdouble(value)
+    def set(self, value):
+        """Set the value to VALUE."""
+        self.tk.call(self._w, 'set', value)
+    def coords(self, value=None):
+        """Return a tuple (X,Y) of the point along the centerline of the
+        trough that corresponds to VALUE or the current value if None is
+        given."""
+
+        return self._getints(self.tk.call(self._w, 'coords', value))
+    def identify(self, x, y):
+        """Return where the point X,Y lies. Valid return values are "slider",
+        "trough1" and "trough2"."""
+        return self.tk.call(self._w, 'identify', x, y)
+
class Scrollbar(Widget):
    """Scrollbar widget which displays a slider at a certain position."""
    def __init__(self, master=None, cnf={}, **kw):
        """Construct a scrollbar widget with the parent MASTER.

        Valid resource names: activebackground, activerelief,
        background, bd, bg, borderwidth, command, cursor,
        elementborderwidth, highlightbackground,
        highlightcolor, highlightthickness, jump, orient,
        relief, repeatdelay, repeatinterval, takefocus,
        troughcolor, width."""
        Widget.__init__(self, master, 'scrollbar', cnf, kw)
    def activate(self, index):
        """Display the element at INDEX with activebackground and activerelief.
        INDEX can be "arrow1","slider" or "arrow2"."""
        self.tk.call(self._w, 'activate', index)
    def delta(self, deltax, deltay):
        """Return the fractional change of the scrollbar setting if it
        would be moved by DELTAX or DELTAY pixels."""
        return getdouble(
            self.tk.call(self._w, 'delta', deltax, deltay))
    def fraction(self, x, y):
        """Return the fractional value which corresponds to a slider
        position of X,Y."""
        return getdouble(self.tk.call(self._w, 'fraction', x, y))
    def identify(self, x, y):
        """Return the element under position X,Y as one of
        "arrow1","slider","arrow2" or ""."""
        return self.tk.call(self._w, 'identify', x, y)
    def get(self):
        """Return the current fractional values (upper and lower end)
        of the slider position."""
        # _getdoubles converts Tk's string result into a tuple of floats.
        return self._getdoubles(self.tk.call(self._w, 'get'))
    def set(self, *args):
        """Set the fractional values of the slider position (upper and
        lower ends as value between 0 and 1)."""
        # Passing a tuple as the first argument makes Tkinter flatten it
        # into a single Tcl command invocation.
        self.tk.call((self._w, 'set') + args)
+
class Text(Widget):
    """Text widget which can display text in various forms."""
    # XXX Add dump()
    def __init__(self, master=None, cnf={}, **kw):
        """Construct a text widget with the parent MASTER.

        Valid resource names: background, bd, bg, borderwidth, cursor,
        exportselection, fg, font, foreground, height,
        highlightbackground, highlightcolor, highlightthickness,
        insertbackground, insertborderwidth, insertofftime,
        insertontime, insertwidth, padx, pady, relief,
        selectbackground, selectborderwidth, selectforeground,
        setgrid, spacing1, spacing2, spacing3, state, tabs, takefocus,
        width, wrap, xscrollcommand, yscrollcommand."""
        Widget.__init__(self, master, 'text', cnf, kw)
    def bbox(self, *args):
        """Return a tuple of (x,y,width,height) which gives the bounding
        box of the visible part of the character at the index in ARGS."""
        # An empty Tk result (character not visible) is mapped to None.
        return self._getints(
            self.tk.call((self._w, 'bbox') + args)) or None
    def tk_textSelectTo(self, index):
        self.tk.call('tk_textSelectTo', self._w, index)
    def tk_textBackspace(self):
        self.tk.call('tk_textBackspace', self._w)
    def tk_textIndexCloser(self, a, b, c):
        self.tk.call('tk_textIndexCloser', self._w, a, b, c)
    def tk_textResetAnchor(self, index):
        self.tk.call('tk_textResetAnchor', self._w, index)
    def compare(self, index1, op, index2):
        """Return whether between index INDEX1 and index INDEX2 the
        relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
        return self.tk.getboolean(self.tk.call(
            self._w, 'compare', index1, op, index2))
    def debug(self, boolean=None):
        """Turn on the internal consistency checks of the B-Tree inside the text
        widget according to BOOLEAN."""
        return self.tk.getboolean(self.tk.call(
            self._w, 'debug', boolean))
    def delete(self, index1, index2=None):
        """Delete the characters between INDEX1 and INDEX2 (not included)."""
        self.tk.call(self._w, 'delete', index1, index2)
    def dlineinfo(self, index):
        """Return tuple (x,y,width,height,baseline) giving the bounding box
        and baseline position of the visible part of the line containing
        the character at INDEX."""
        return self._getints(self.tk.call(self._w, 'dlineinfo', index))
    def get(self, index1, index2=None):
        """Return the text from INDEX1 to INDEX2 (not included)."""
        return self.tk.call(self._w, 'get', index1, index2)
    # (Image commands are new in 8.0)
    def image_cget(self, index, option):
        """Return the value of OPTION of an embedded image at INDEX."""
        # Normalize OPTION: ensure a leading '-'; strip a trailing '_'
        # (used by callers to dodge Python keyword clashes).
        if option[:1] != "-":
            option = "-" + option
        if option[-1:] == "_":
            option = option[:-1]
        return self.tk.call(self._w, "image", "cget", index, option)
    def image_configure(self, index, cnf={}, **kw):
        """Configure an embedded image at INDEX."""
        if not cnf and not kw:
            # Query mode: return every option as a dict keyed by name.
            cnf = {}
            for x in self.tk.split(
                    self.tk.call(
                    self._w, "image", "configure", index)):
                cnf[x[0][1:]] = (x[0][1:],) + x[1:]
            return cnf
        apply(self.tk.call,
              (self._w, "image", "configure", index)
              + self._options(cnf, kw))
    def image_create(self, index, cnf={}, **kw):
        """Create an embedded image at INDEX."""
        return apply(self.tk.call,
                 (self._w, "image", "create", index)
                 + self._options(cnf, kw))
    def image_names(self):
        """Return all names of embedded images in this widget."""
        return self.tk.call(self._w, "image", "names")
    def index(self, index):
        """Return the index in the form line.char for INDEX."""
        return self.tk.call(self._w, 'index', index)
    def insert(self, index, chars, *args):
        """Insert CHARS before the characters at INDEX. An additional
        tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
        self.tk.call((self._w, 'insert', index, chars) + args)
    def mark_gravity(self, markName, direction=None):
        """Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
        Return the current value if None is given for DIRECTION."""
        return self.tk.call(
            (self._w, 'mark', 'gravity', markName, direction))
    def mark_names(self):
        """Return all mark names."""
        return self.tk.splitlist(self.tk.call(
            self._w, 'mark', 'names'))
    def mark_set(self, markName, index):
        """Set mark MARKNAME before the character at INDEX."""
        self.tk.call(self._w, 'mark', 'set', markName, index)
    def mark_unset(self, *markNames):
        """Delete all marks in MARKNAMES."""
        self.tk.call((self._w, 'mark', 'unset') + markNames)
    def mark_next(self, index):
        """Return the name of the next mark after INDEX."""
        # Tk returns '' when there is no further mark; map that to None.
        return self.tk.call(self._w, 'mark', 'next', index) or None
    def mark_previous(self, index):
        """Return the name of the previous mark before INDEX."""
        return self.tk.call(self._w, 'mark', 'previous', index) or None
    def scan_mark(self, x, y):
        """Remember the current X, Y coordinates."""
        self.tk.call(self._w, 'scan', 'mark', x, y)
    def scan_dragto(self, x, y):
        """Adjust the view of the text to 10 times the
        difference between X and Y and the coordinates given in
        scan_mark."""
        self.tk.call(self._w, 'scan', 'dragto', x, y)
    def search(self, pattern, index, stopindex=None,
           forwards=None, backwards=None, exact=None,
           regexp=None, nocase=None, count=None):
        """Search PATTERN beginning from INDEX until STOPINDEX.
        Return the index of the first character of a match or an empty string."""
        args = [self._w, 'search']
        if forwards: args.append('-forwards')
        if backwards: args.append('-backwards')
        if exact: args.append('-exact')
        if regexp: args.append('-regexp')
        if nocase: args.append('-nocase')
        if count: args.append('-count'); args.append(count)
        # A pattern starting with '-' would be parsed by Tk as a switch,
        # so insert '--' to end option processing.  Also guard against an
        # empty pattern, which previously raised IndexError here.
        if pattern and pattern[0] == '-': args.append('--')
        args.append(pattern)
        args.append(index)
        if stopindex: args.append(stopindex)
        return self.tk.call(tuple(args))
    def see(self, index):
        """Scroll such that the character at INDEX is visible."""
        self.tk.call(self._w, 'see', index)
    def tag_add(self, tagName, index1, *args):
        """Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
        Additional pairs of indices may follow in ARGS."""
        self.tk.call(
            (self._w, 'tag', 'add', tagName, index1) + args)
    def tag_unbind(self, tagName, sequence, funcid=None):
        """Unbind for all characters with TAGNAME for event SEQUENCE the
        function identified with FUNCID."""
        self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
        if funcid:
            self.deletecommand(funcid)
    def tag_bind(self, tagName, sequence, func, add=None):
        """Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.

        An additional boolean parameter ADD specifies whether FUNC will be
        called additionally to the other bound function or whether it will
        replace the previous function. See bind for the return value."""
        return self._bind((self._w, 'tag', 'bind', tagName),
                  sequence, func, add)
    def tag_cget(self, tagName, option):
        """Return the value of OPTION for tag TAGNAME."""
        # Same option normalization as image_cget.
        if option[:1] != '-':
            option = '-' + option
        if option[-1:] == '_':
            option = option[:-1]
        return self.tk.call(self._w, 'tag', 'cget', tagName, option)
    def tag_configure(self, tagName, cnf={}, **kw):
        """Configure a tag TAGNAME."""
        if type(cnf) == StringType:
            # A string CNF queries that single option.
            x = self.tk.split(self.tk.call(
                self._w, 'tag', 'configure', tagName, '-'+cnf))
            return (x[0][1:],) + x[1:]
        self.tk.call(
              (self._w, 'tag', 'configure', tagName)
              + self._options(cnf, kw))
    tag_config = tag_configure
    def tag_delete(self, *tagNames):
        """Delete all tags in TAGNAMES."""
        self.tk.call((self._w, 'tag', 'delete') + tagNames)
    def tag_lower(self, tagName, belowThis=None):
        """Change the priority of tag TAGNAME such that it is lower
        than the priority of BELOWTHIS."""
        self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
    def tag_names(self, index=None):
        """Return a list of all tag names."""
        return self.tk.splitlist(
            self.tk.call(self._w, 'tag', 'names', index))
    def tag_nextrange(self, tagName, index1, index2=None):
        """Return a list of start and end index for the first sequence of
        characters between INDEX1 and INDEX2 which all have tag TAGNAME.
        The text is searched forward from INDEX1."""
        return self.tk.splitlist(self.tk.call(
            self._w, 'tag', 'nextrange', tagName, index1, index2))
    def tag_prevrange(self, tagName, index1, index2=None):
        """Return a list of start and end index for the first sequence of
        characters between INDEX1 and INDEX2 which all have tag TAGNAME.
        The text is searched backwards from INDEX1."""
        return self.tk.splitlist(self.tk.call(
            self._w, 'tag', 'prevrange', tagName, index1, index2))
    def tag_raise(self, tagName, aboveThis=None):
        """Change the priority of tag TAGNAME such that it is higher
        than the priority of ABOVETHIS."""
        self.tk.call(
            self._w, 'tag', 'raise', tagName, aboveThis)
    def tag_ranges(self, tagName):
        """Return a list of ranges of text which have tag TAGNAME."""
        return self.tk.splitlist(self.tk.call(
            self._w, 'tag', 'ranges', tagName))
    def tag_remove(self, tagName, index1, index2=None):
        """Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
        self.tk.call(
            self._w, 'tag', 'remove', tagName, index1, index2)
    def window_cget(self, index, option):
        """Return the value of OPTION of an embedded window at INDEX."""
        # Same option normalization as image_cget.
        if option[:1] != '-':
            option = '-' + option
        if option[-1:] == '_':
            option = option[:-1]
        return self.tk.call(self._w, 'window', 'cget', index, option)
    def window_configure(self, index, cnf={}, **kw):
        """Configure an embedded window at INDEX."""
        if type(cnf) == StringType:
            # A string CNF queries that single option.
            x = self.tk.split(self.tk.call(
                self._w, 'window', 'configure',
                index, '-'+cnf))
            return (x[0][1:],) + x[1:]
        self.tk.call(
              (self._w, 'window', 'configure', index)
              + self._options(cnf, kw))
    window_config = window_configure
    def window_create(self, index, cnf={}, **kw):
        """Create a window at INDEX."""
        self.tk.call(
              (self._w, 'window', 'create', index)
              + self._options(cnf, kw))
    def window_names(self):
        """Return all names of embedded windows in this widget."""
        return self.tk.splitlist(
            self.tk.call(self._w, 'window', 'names'))
    def xview(self, *what):
        """Query and change horizontal position of the view."""
        if not what:
            return self._getdoubles(self.tk.call(self._w, 'xview'))
        self.tk.call((self._w, 'xview') + what)
    def xview_moveto(self, fraction):
        """Adjusts the view in the window so that FRACTION of the
        total width of the canvas is off-screen to the left."""
        self.tk.call(self._w, 'xview', 'moveto', fraction)
    def xview_scroll(self, number, what):
        """Shift the x-view according to NUMBER which is measured
        in "units" or "pages" (WHAT)."""
        self.tk.call(self._w, 'xview', 'scroll', number, what)
    def yview(self, *what):
        """Query and change vertical position of the view."""
        if not what:
            return self._getdoubles(self.tk.call(self._w, 'yview'))
        self.tk.call((self._w, 'yview') + what)
    def yview_moveto(self, fraction):
        """Adjusts the view in the window so that FRACTION of the
        total height of the canvas is off-screen to the top."""
        self.tk.call(self._w, 'yview', 'moveto', fraction)
    def yview_scroll(self, number, what):
        """Shift the y-view according to NUMBER which is measured
        in "units" or "pages" (WHAT)."""
        self.tk.call(self._w, 'yview', 'scroll', number, what)
    def yview_pickplace(self, *what):
        """Obsolete function, use see."""
        self.tk.call((self._w, 'yview', '-pickplace') + what)
+
class _setit:
    """Internal class. It wraps the command in the widget OptionMenu."""
    def __init__(self, var, value, callback=None):
        # Remember the variable to update, the value to store into it,
        # and an optional callback to invoke afterwards.
        self.__value = value
        self.__var = var
        self.__callback = callback
    def __call__(self, *args):
        # Invoked by the menu entry: first update the variable, then
        # forward the chosen value (plus any extra arguments) to the
        # callback if one was supplied.
        self.__var.set(self.__value)
        callback = self.__callback
        if callback:
            callback(self.__value, *args)
+
class OptionMenu(Menubutton):
    """OptionMenu which allows the user to select a value from a menu."""
    def __init__(self, master, variable, value, *values, **kwargs):
        """Construct an optionmenu widget with the parent MASTER, with
        the resource textvariable set to VARIABLE, the initially selected
        value VALUE, the other menu values VALUES and an additional
        keyword argument command."""
        # Default menubutton appearance mimicking Tk's tk_optionMenu.
        kw = {"borderwidth": 2, "textvariable": variable,
              "indicatoron": 1, "relief": RAISED, "anchor": "c",
              "highlightthickness": 2}
        Widget.__init__(self, master, "menubutton", kw)
        self.widgetName = 'tk_optionMenu'
        menu = self.__menu = Menu(self, name="menu", tearoff=0)
        self.menuname = menu._w
        # 'command' is the only supported keyword
        callback = kwargs.get('command')
        if kwargs.has_key('command'):
            del kwargs['command']
        if kwargs:
            raise TclError, 'unknown option -'+kwargs.keys()[0]
        # Each menu entry sets VARIABLE via _setit and triggers the
        # optional callback with the chosen value.
        menu.add_command(label=value,
                 command=_setit(variable, value, callback))
        for v in values:
            menu.add_command(label=v,
                     command=_setit(variable, v, callback))
        self["menu"] = menu

    def __getitem__(self, name):
        # Intercept 'menu' so callers get the Menu object rather than
        # the Tcl pathname stored in the widget option.
        if name == 'menu':
            return self.__menu
        return Widget.__getitem__(self, name)

    def destroy(self):
        """Destroy this widget and the associated menu."""
        Menubutton.destroy(self)
        # Drop the reference so the Menu can be garbage-collected.
        self.__menu = None
+
class Image:
    """Base class for images."""
    # Counter used to generate unique default image names.
    _last_id = 0
    def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
        self.name = None
        if not master:
            master = _default_root
            if not master:
                raise RuntimeError, 'Too early to create image'
        self.tk = master.tk
        if not name:
            Image._last_id += 1
            name = "pyimage" +`Image._last_id` # tk itself would use image<x>
            # The following is needed for systems where id(x)
            # can return a negative number, such as Linux/m68k:
            if name[0] == '-': name = '_' + name[1:]
        if kw and cnf: cnf = _cnfmerge((cnf, kw))
        elif kw: cnf = kw
        options = ()
        for k, v in cnf.items():
            # Python callables are registered as Tcl commands first.
            if callable(v):
                v = self._register(v)
            options = options + ('-'+k, v)
        self.tk.call(('image', 'create', imgtype, name,) + options)
        self.name = name
    def __str__(self): return self.name
    def __del__(self):
        if self.name:
            try:
                self.tk.call('image', 'delete', self.name)
            except TclError:
                # May happen if the root was destroyed
                pass
    def __setitem__(self, key, value):
        self.tk.call(self.name, 'configure', '-'+key, value)
    def __getitem__(self, key):
        return self.tk.call(self.name, 'configure', '-'+key)
    def configure(self, **kw):
        """Configure the image."""
        res = ()
        for k, v in _cnfmerge(kw).items():
            if v is not None:
                # A trailing underscore lets callers spell options that
                # clash with Python keywords (e.g. 'from_').
                if k[-1] == '_': k = k[:-1]
                if callable(v):
                    v = self._register(v)
                res = res + ('-'+k, v)
        self.tk.call((self.name, 'config') + res)
    config = configure
    def height(self):
        """Return the height of the image."""
        return getint(
            self.tk.call('image', 'height', self.name))
    def type(self):
        """Return the type of the image, e.g. "photo" or "bitmap"."""
        return self.tk.call('image', 'type', self.name)
    def width(self):
        """Return the width of the image."""
        return getint(
            self.tk.call('image', 'width', self.name))
+
class PhotoImage(Image):
    """Widget which can display colored images in GIF, PPM/PGM format."""
    def __init__(self, name=None, cnf={}, master=None, **kw):
        """Create an image with NAME.

        Valid resource names: data, format, file, gamma, height, palette,
        width."""
        apply(Image.__init__, (self, 'photo', name, cnf, master), kw)
    def blank(self):
        """Display a transparent image."""
        self.tk.call(self.name, 'blank')
    def cget(self, option):
        """Return the value of OPTION."""
        return self.tk.call(self.name, 'cget', '-' + option)
    # XXX config
    def __getitem__(self, key):
        return self.tk.call(self.name, 'cget', '-' + key)
    # XXX copy -from, -to, ...?
    def copy(self):
        """Return a new PhotoImage with the same image as this widget."""
        destImage = PhotoImage()
        self.tk.call(destImage, 'copy', self.name)
        return destImage
    def zoom(self,x,y=''):
        """Return a new PhotoImage with the same image as this widget
        but zoom it with X and Y."""
        destImage = PhotoImage()
        # If Y is not given, zoom uniformly by X in both directions.
        if y=='': y=x
        self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
        return destImage
    def subsample(self,x,y=''):
        """Return a new PhotoImage based on the same image as this widget
        but use only every Xth or Yth pixel."""
        destImage = PhotoImage()
        # If Y is not given, subsample uniformly by X in both directions.
        if y=='': y=x
        self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
        return destImage
    def get(self, x, y):
        """Return the color (red, green, blue) of the pixel at X,Y."""
        return self.tk.call(self.name, 'get', x, y)
    def put(self, data, to=None):
        """Put row formatted colors to image starting from
        position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
        args = (self.name, 'put', data)
        if to:
            # Accept both (x, y) and ('-to', x, y) forms for TO.
            if to[0] == '-to':
                to = to[1:]
            args = args + ('-to',) + tuple(to)
        self.tk.call(args)
    # XXX read
    def write(self, filename, format=None, from_coords=None):
        """Write image to file FILENAME in FORMAT starting from
        position FROM_COORDS."""
        args = (self.name, 'write', filename)
        if format:
            args = args + ('-format', format)
        if from_coords:
            args = args + ('-from',) + tuple(from_coords)
        self.tk.call(args)
+
class BitmapImage(Image):
    """Widget which can display a bitmap."""
    def __init__(self, name=None, cnf={}, master=None, **kw):
        """Create a bitmap with NAME.

        Valid resource names: background, data, file, foreground, maskdata, maskfile."""
        # Keyword options are forwarded to Image.__init__ unchanged.
        apply(Image.__init__, (self, 'bitmap', name, cnf, master), kw)
+
# Module-level helpers: ask the default root's Tk interpreter for all
# existing image names / all supported image types.
def image_names(): return _default_root.tk.call('image', 'names')
def image_types(): return _default_root.tk.call('image', 'types')
+
+######################################################################
+# Extensions:
+
class Studbutton(Button):
    # NOTE(review): relies on a 'studbutton' widget class which is not
    # part of stock Tk -- presumably provided by a Tk extension.
    def __init__(self, master=None, cnf={}, **kw):
        Widget.__init__(self, master, 'studbutton', cnf, kw)
        # Wire up the standard button event bindings by hand.
        self.bind('<Any-Enter>',       self.tkButtonEnter)
        self.bind('<Any-Leave>',       self.tkButtonLeave)
        self.bind('<1>',               self.tkButtonDown)
        self.bind('<ButtonRelease-1>', self.tkButtonUp)
+
class Tributton(Button):
    # NOTE(review): relies on a 'tributton' widget class which is not
    # part of stock Tk -- presumably provided by a Tk extension.
    def __init__(self, master=None, cnf={}, **kw):
        Widget.__init__(self, master, 'tributton', cnf, kw)
        # Wire up the standard button event bindings by hand.
        self.bind('<Any-Enter>',       self.tkButtonEnter)
        self.bind('<Any-Leave>',       self.tkButtonLeave)
        self.bind('<1>',               self.tkButtonDown)
        self.bind('<ButtonRelease-1>', self.tkButtonUp)
        # Draw the foreground in the background color.
        self['fg']               = self['bg']
        self['activebackground'] = self['bg']
+
+######################################################################
+# Test:
+
def _test():
    # Minimal interactive smoke test: show version info and two buttons.
    root = Tk()
    text = "This is Tcl/Tk version %s" % TclVersion
    if TclVersion >= 8.1:
        try:
            text = text + unicode("\nThis should be a cedilla: \347",
                                  "iso-8859-1")
        except NameError:
            pass # no unicode support
    label = Label(root, text=text)
    label.pack()
    # The default argument binds root at definition time, deliberately
    # avoiding the late-binding closure pitfall.
    test = Button(root, text="Click me!",
              command=lambda root=root: root.test.configure(
                  text="[%s]" % root.test['text']))
    test.pack()
    root.test = test
    quit = Button(root, text="QUIT", command=root.destroy)
    quit.pack()
    # The following three commands are needed so the window pops
    # up on top on Windows...
    root.iconify()
    root.update()
    root.deiconify()
    root.mainloop()

if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/lib-tk/tkColorChooser.py b/lib-python/2.2/lib-tk/tkColorChooser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkColorChooser.py
@@ -0,0 +1,74 @@
+#
+# Instant Python
+# $Id$
+#
+# tk common colour chooser dialogue
+#
+# this module provides an interface to the native color dialogue
+# available in Tk 4.2 and newer.
+#
+# written by Fredrik Lundh, May 1997
+#
+# fixed initialcolor handling in August 1998
+#
+
+#
+# options (all have default values):
+#
+# - initialcolor: colour to mark as selected when dialog is displayed
+#   (given as an RGB triplet or a Tk color string)
+#
+# - parent: which window to place the dialog on top of
+#
+# - title: dialog title
+#
+
+from tkCommonDialog import Dialog
+
+
+#
+# color chooser class
+
class Chooser(Dialog):
    "Ask for a color"

    command = "tk_chooseColor"

    def _fixoptions(self):
        try:
            # make sure initialcolor is a tk color string
            color = self.options["initialcolor"]
            if type(color) == type(()):
                # assume an RGB triplet
                self.options["initialcolor"] = "#%02x%02x%02x" % color
        except KeyError:
            # no initialcolor given; nothing to normalize
            pass

    def _fixresult(self, widget, result):
        # to simplify application code, the color chooser returns
        # an RGB tuple together with the Tk color string
        if not result:
            return None, None # canceled
        # winfo_rgb yields 16-bit components; scale them down to 0-255.
        r, g, b = widget.winfo_rgb(result)
        return (r/256, g/256, b/256), result
+
+
+#
+# convenience stuff
+
def askcolor(color=None, **options):
    """Display a color dialog and return the selection.

    A COLOR argument, if given, is passed on as the dialog's
    initialcolor option; the caller's keyword dict is not mutated.
    """
    if color:
        # Work on a copy so the caller's options are left untouched.
        options = options.copy()
        options["initialcolor"] = color
    return Chooser(**options).show()
+
+
+# --------------------------------------------------------------------
+# test stuff
+
if __name__ == "__main__":

    # Interactive smoke test: show the dialog and print the selection.
    print "color", askcolor()
diff --git a/lib-python/2.2/lib-tk/tkCommonDialog.py b/lib-python/2.2/lib-tk/tkCommonDialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkCommonDialog.py
@@ -0,0 +1,65 @@
+#
+# Instant Python
+# $Id$
+#
+# base class for tk common dialogues
+#
+# this module provides a base class for accessing the common
+# dialogues available in Tk 4.2 and newer.  use tkFileDialog,
+# tkColorChooser, and tkMessageBox to access the individual
+# dialogs.
+#
+# written by Fredrik Lundh, May 1997
+#
+
+from Tkinter import *
+import os
+
class Dialog:

    # Tcl command implementing the dialog; set by subclasses.
    command  = None

    def __init__(self, master=None, **options):

        # FIXME: should this be placed on the module level instead?
        if TkVersion < 4.2:
            raise TclError, "this module requires Tk 4.2 or newer"

        self.master  = master
        self.options = options
        # Fall back to the 'parent' option as the master widget.
        if not master and options.get('parent'):
            self.master = options['parent']

    def _fixoptions(self):
        pass # hook

    def _fixresult(self, widget, result):
        return result # hook

    def show(self, **options):
        """Display the dialog and return the (subclass-fixed) result."""

        # update instance options
        for k, v in options.items():
            self.options[k] = v

        self._fixoptions()

        # we need a dummy widget to properly process the options
        # (at least as long as we use Tkinter 1.63)
        w = Frame(self.master)

        try:

            s = apply(w.tk.call, (self.command,) + w._options(self.options))

            s = self._fixresult(w, s)

        finally:

            try:
                # get rid of the widget
                w.destroy()
            except:
                # best effort; ignore errors if the widget is already gone
                pass

        return s
diff --git a/lib-python/2.2/lib-tk/tkFileDialog.py b/lib-python/2.2/lib-tk/tkFileDialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkFileDialog.py
@@ -0,0 +1,129 @@
+#
+# Instant Python
+# $Id$
+#
+# tk common file dialogues
+#
+# this module provides interfaces to the native file dialogues
+# available in Tk 4.2 and newer, and the directory dialogue available
+# in Tk 8.3 and newer.
+#
+# written by Fredrik Lundh, May 1997.
+#
+
+#
+# options (all have default values):
+#
+# - defaultextension: added to filename if not explicitly given
+#
# - filetypes: sequence of (label, pattern) tuples.  the same label
#   may occur with several patterns.  use "*" as pattern to indicate
#   all files.
+#
+# - initialdir: initial directory.  preserved by dialog instance.
+#
+# - initialfile: initial file (ignored by the open dialog).  preserved
+#   by dialog instance.
+#
+# - parent: which window to place the dialog on top of
+#
+# - title: dialog title
+#
+# options for the directory chooser:
+#
+# - initialdir, parent, title: see above
+#
+# - mustexist: if true, user must pick an existing directory
+#
+
+from tkCommonDialog import Dialog
+
class _Dialog(Dialog):
    """Base class for the file dialogs: normalizes options and
    remembers the last directory/filename between invocations."""

    def _fixoptions(self):
        try:
            # make sure "filetypes" is a tuple
            self.options["filetypes"] = tuple(self.options["filetypes"])
        except KeyError:
            pass

    def _fixresult(self, widget, result):
        if result:
            # keep directory and filename until next time
            import os
            path, file = os.path.split(result)
            self.options["initialdir"] = path
            self.options["initialfile"] = file
        self.filename = result # compatibility
        return result
+
+
+#
+# file dialogs
+
class Open(_Dialog):
    "Ask for a filename to open"

    # Backed by Tk's native open-file dialog.
    command = "tk_getOpenFile"
+
class SaveAs(_Dialog):
    "Ask for a filename to save as"

    # Backed by Tk's native save-file dialog.
    command = "tk_getSaveFile"
+
+
+# the directory dialog has its own _fix routines.
class Directory(Dialog):
    "Ask for a directory"

    # Inherits Dialog._fixoptions (a no-op); only the result needs fixing.
    command = "tk_chooseDirectory"

    def _fixresult(self, widget, result):
        if result:
            # keep directory until next time
            self.options["initialdir"] = result
        self.directory = result # compatibility
        return result
+
+#
+# convenience stuff
+
def askopenfilename(**options):
    """Ask the user for an existing filename to open."""
    return Open(**options).show()

def asksaveasfilename(**options):
    """Ask the user for a filename to save as."""
    return SaveAs(**options).show()

# FIXME: are the following two perhaps a bit too convenient?

def askopenfile(mode = "r", **options):
    """Ask for a filename to open, and return the file opened in MODE
    (or None if the dialog was canceled)."""
    filename = Open(**options).show()
    if not filename:
        return None
    return open(filename, mode)

def asksaveasfile(mode = "w", **options):
    """Ask for a filename to save as, and return the file opened in MODE
    (or None if the dialog was canceled)."""
    filename = SaveAs(**options).show()
    if not filename:
        return None
    return open(filename, mode)

def askdirectory(**options):
    """Ask the user for a directory name."""
    return Directory(**options).show()
+
+# --------------------------------------------------------------------
+# test stuff
+
if __name__ == "__main__":

    # Interactive smoke test: exercise the open and save-as dialogs.
    print "open", askopenfilename(filetypes=[("all filez", "*")])
    print "saveas", asksaveasfilename()
diff --git a/lib-python/2.2/lib-tk/tkFont.py b/lib-python/2.2/lib-tk/tkFont.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkFont.py
@@ -0,0 +1,191 @@
+#
+# Tkinter
+# $Id$
+#
+# font wrapper
+#
+# written by Fredrik Lundh <fredrik at pythonware.com>, February 1998
+#
+# FIXME: should add 'displayof' option where relevant (actual, families,
+#        measure, and metrics)
+#
+# Copyright (c) Secret Labs AB 1998.
+#
+# info at pythonware.com
+# http://www.pythonware.com
+#
+
+__version__ = "0.9"
+
+import Tkinter
+
+# weight/slant
+NORMAL = "normal"
+BOLD   = "bold"
+ITALIC = "italic"
+
+class Font:
+
+    """Represents a named font.
+
+    Constructor options are:
+
+    font -- font specifier (name, system font, or (family, size, style)-tuple)
+
+       or any combination of
+
+    family -- font 'family', e.g. Courier, Times, Helvetica
+    size -- font size in points
+    weight -- font thickness: NORMAL, BOLD
+    slant -- font slant: NORMAL, ITALIC
+    underline -- font underlining: false (0), true (1)
+    overstrike -- font strikeout: false (0), true (1)
+    name -- name to use for this font configuration (defaults to a unique name)
+    """
+
+    def _set(self, kw):
+        # flatten a keyword dict into Tk option form:
+        # {"size": 10} -> ("-size", "10")
+        options = []
+        for k, v in kw.items():
+            options.append("-"+k)
+            options.append(str(v))
+        return tuple(options)
+
+    def _get(self, args):
+        # turn a sequence of option names into ("-name", ...) form
+        options = []
+        for k in args:
+            options.append("-"+k)
+        return tuple(options)
+
+    def _mkdict(self, args):
+        # convert a flat Tk ("-option", value, ...) list back to a
+        # dict, stripping the leading "-" from each option name
+        options = {}
+        for i in range(0, len(args), 2):
+            options[args[i][1:]] = args[i+1]
+        return options
+
+    def __init__(self, root=None, font=None, name=None, **options):
+        if not root:
+            root = Tkinter._default_root
+        if font:
+            # get actual settings corresponding to the given font
+            font = root.tk.splitlist(root.tk.call("font", "actual", font))
+        else:
+            font = self._set(options)
+        if not name:
+            # generate a name unique to this Font object
+            name = "font" + str(id(self))
+        self.name = name
+        # register the named font with the Tk interpreter
+        apply(root.tk.call, ("font", "create", name) + font)
+        # backlinks!
+        self._root  = root
+        self._split = root.tk.splitlist
+        self._call  = root.tk.call
+
+    def __str__(self):
+        # the object is usable wherever Tk expects a font name
+        return self.name
+
+    def __del__(self):
+        try:
+            self._call("font", "delete", self.name)
+        except (AttributeError, Tkinter.TclError):
+            # the interpreter (or this object) may already be torn down
+            pass
+
+    def copy(self):
+        "Return a distinct copy of the current font"
+        return apply(Font, (self._root,), self.actual())
+
+    def actual(self, option=None):
+        "Return actual font attributes"
+        if option:
+            # single attribute value
+            return self._call("font", "actual", self.name, "-"+option)
+        else:
+            # all attributes, as an option -> value dict
+            return self._mkdict(
+                self._split(self._call("font", "actual", self.name))
+                )
+
+    def cget(self, option):
+        "Get font attribute"
+        return self._call("font", "config", self.name, "-"+option)
+
+    def config(self, **options):
+        "Modify font attributes"
+        if options:
+            # with keyword arguments: set attributes (returns None)
+            apply(self._call, ("font", "config", self.name) +
+                  self._set(options))
+        else:
+            # without arguments: return all attributes as a dict
+            return self._mkdict(
+                self._split(self._call("font", "config", self.name))
+                )
+
+    configure = config
+
+    def measure(self, text):
+        "Return text width"
+        return int(self._call("font", "measure", self.name, text))
+
+    def metrics(self, *options):
+        """Return font metrics.
+
+        For best performance, create a dummy widget
+        using this font before calling this method."""
+
+        if options:
+            # a single requested metric is returned as an int
+            return int(
+                self._call("font", "metrics", self.name, self._get(options))
+                )
+        else:
+            # no arguments: return all metrics as a name -> int dict
+            res = self._split(self._call("font", "metrics", self.name))
+            options = {}
+            for i in range(0, len(res), 2):
+                options[res[i][1:]] = int(res[i+1])
+            return options
+
+def families(root=None):
+    "Get font families (as a tuple)"
+    # uses the default root window when none is supplied
+    if not root:
+        root = Tkinter._default_root
+    return root.tk.splitlist(root.tk.call("font", "families"))
+
+def names(root=None):
+    "Get names of defined fonts (as a tuple)"
+    # uses the default root window when none is supplied
+    if not root:
+        root = Tkinter._default_root
+    return root.tk.splitlist(root.tk.call("font", "names"))
+
+# --------------------------------------------------------------------
+# test stuff
+
+if __name__ == "__main__":
+    # manual smoke test -- requires an interactive Tk display
+
+    root = Tkinter.Tk()
+
+    # create a font
+    f = Font(family="times", size=30, weight=NORMAL)
+
+    print f.actual()
+    print f.actual("family")
+    print f.actual("weight")
+
+    print f.config()
+    print f.cget("family")
+    print f.cget("weight")
+
+    print names()
+
+    print f.measure("hello"), f.metrics("linespace")
+
+    print f.metrics()
+
+    # construct from a (family, size, style) tuple
+    f = Font(font=("Courier", 20, "bold"))
+    print f.measure("hello"), f.metrics("linespace")
+
+    w = Tkinter.Label(root, text="Hello, world", font=f)
+    w.pack()
+
+    w = Tkinter.Button(root, text="Quit!", command=root.destroy)
+    w.pack()
+
+    # copy the button's font and embolden the copy
+    fb = Font(font=w["font"]).copy()
+    fb.config(weight=BOLD)
+
+    w.config(font=fb)
+
+    Tkinter.mainloop()
diff --git a/lib-python/2.2/lib-tk/tkMessageBox.py b/lib-python/2.2/lib-tk/tkMessageBox.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkMessageBox.py
@@ -0,0 +1,120 @@
+#
+# Instant Python
+# $Id$
+#
+# tk common message boxes
+#
+# this module provides an interface to the native message boxes
+# available in Tk 4.2 and newer.
+#
+# written by Fredrik Lundh, May 1997
+#
+
+#
+# options (all have default values):
+#
+# - default: which button to make default (one of the reply codes)
+#
+# - icon: which icon to display (see below)
+#
+# - message: the message to display
+#
+# - parent: which window to place the dialog on top of
+#
+# - title: dialog title
+#
+# - type: dialog type; that is, which buttons to display (see below)
+#
+
+from tkCommonDialog import Dialog
+
+#
+# constants
+
+# icons
+ERROR = "error"
+INFO = "info"
+QUESTION = "question"
+WARNING = "warning"
+
+# types
+ABORTRETRYIGNORE = "abortretryignore"
+OK = "ok"
+OKCANCEL = "okcancel"
+RETRYCANCEL = "retrycancel"
+YESNO = "yesno"
+YESNOCANCEL = "yesnocancel"
+
+# replies
+ABORT = "abort"
+RETRY = "retry"
+IGNORE = "ignore"
+OK = "ok"
+CANCEL = "cancel"
+YES = "yes"
+NO = "no"
+
+
+#
+# message dialog class
+
+class Message(Dialog):
+    "A message box"
+
+    # Tk built-in command implementing the native message box
+    command  = "tk_messageBox"
+
+
+#
+# convenience stuff
+
+def _show(title=None, message=None, icon=None, type=None, **options):
+    # fold the common positional arguments into the option dict and
+    # run the dialog; the result is one of the reply-code strings
+    # defined above (compared against by the ask* helpers below)
+    if icon:    options["icon"] = icon
+    if type:    options["type"] = type
+    if title:   options["title"] = title
+    if message: options["message"] = message
+    return apply(Message, (), options).show()
+
+def showinfo(title=None, message=None, **options):
+    "Show an info message"
+    # INFO icon with a single OK button
+    return apply(_show, (title, message, INFO, OK), options)
+
+def showwarning(title=None, message=None, **options):
+    "Show a warning message"
+    # WARNING icon with a single OK button
+    return apply(_show, (title, message, WARNING, OK), options)
+
+def showerror(title=None, message=None, **options):
+    "Show an error message"
+    # ERROR icon with a single OK button
+    return apply(_show, (title, message, ERROR, OK), options)
+
+def askquestion(title=None, message=None, **options):
+    "Ask a question; return the reply code (unlike askyesno below)"
+    return apply(_show, (title, message, QUESTION, YESNO), options)
+
+def askokcancel(title=None, message=None, **options):
+    "Ask if operation should proceed; return true if the answer is ok"
+    s = apply(_show, (title, message, QUESTION, OKCANCEL), options)
+    # compare the dialog's reply code against the OK constant
+    return s == OK
+
+def askyesno(title=None, message=None, **options):
+    "Ask a question; return true if the answer is yes"
+    s = apply(_show, (title, message, QUESTION, YESNO), options)
+    # compare the dialog's reply code against the YES constant
+    return s == YES
+
+def askretrycancel(title=None, message=None, **options):
+    "Ask if operation should be retried; return true if the answer is retry"
+    s = apply(_show, (title, message, WARNING, RETRYCANCEL), options)
+    # compare the dialog's reply code against the RETRY constant
+    return s == RETRY
+
+
+# --------------------------------------------------------------------
+# test stuff
+
+if __name__ == "__main__":
+    # manual smoke test -- requires an interactive Tk display
+
+    print "info", showinfo("Spam", "Egg Information")
+    print "warning", showwarning("Spam", "Egg Warning")
+    print "error", showerror("Spam", "Egg Alert")
+    print "question", askquestion("Spam", "Question?")
+    print "proceed", askokcancel("Spam", "Proceed?")
+    print "yes/no", askyesno("Spam", "Got it?")
+    print "try again", askretrycancel("Spam", "Try again?")
diff --git a/lib-python/2.2/lib-tk/tkSimpleDialog.py b/lib-python/2.2/lib-tk/tkSimpleDialog.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/tkSimpleDialog.py
@@ -0,0 +1,313 @@
+#
+# An Introduction to Tkinter
+# tkSimpleDialog.py
+#
+# Copyright (c) 1997 by Fredrik Lundh
+#
+# fredrik at pythonware.com
+# http://www.pythonware.com
+#
+
+# --------------------------------------------------------------------
+# dialog base class
+
+'''Dialog boxes
+
+This module handles dialog boxes. It contains the following
+public symbols:
+
+Dialog -- a base class for dialogs
+
+askinteger -- get an integer from the user
+
+askfloat -- get a float from the user
+
+askstring -- get a string from the user
+'''
+
+from Tkinter import *
+import os
+
+class Dialog(Toplevel):
+
+    '''Class to open dialogs.
+
+    This class is intended as a base class for custom dialogs
+    '''
+
+    def __init__(self, parent, title = None):
+
+        '''Initialize a dialog.
+
+        Arguments:
+
+            parent -- a parent window (the application window)
+
+            title -- the dialog title
+        '''
+        Toplevel.__init__(self, parent)
+        # associate the dialog with its parent window
+        self.transient(parent)
+
+        if title:
+            self.title(title)
+
+        self.parent = parent
+
+        # subclasses store their outcome here; None means "cancelled"
+        self.result = None
+
+        body = Frame(self)
+        # body() returns the widget that should get initial focus
+        self.initial_focus = self.body(body)
+        body.pack(padx=5, pady=5)
+
+        self.buttonbox()
+
+        # route all input events to this dialog (modal behaviour)
+        self.grab_set()
+
+        if not self.initial_focus:
+            self.initial_focus = self
+
+        # closing via the window manager counts as cancel
+        self.protocol("WM_DELETE_WINDOW", self.cancel)
+
+        if self.parent is not None:
+            # place the dialog slightly inside the parent window
+            self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
+                                      parent.winfo_rooty()+50))
+
+        self.initial_focus.focus_set()
+
+        # block here until the dialog window is destroyed
+        self.wait_window(self)
+
+    def destroy(self):
+        '''Destroy the window'''
+        # drop the widget reference before tearing down the window
+        self.initial_focus = None
+        Toplevel.destroy(self)
+
+    #
+    # construction hooks
+
+    def body(self, master):
+        '''create dialog body.
+
+        return widget that should have initial focus.
+        This method should be overridden, and is called
+        by the __init__ method.
+        '''
+        pass
+
+    def buttonbox(self):
+        '''add standard button box.
+
+        override if you do not want the standard buttons
+        '''
+
+        box = Frame(self)
+
+        w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
+        w.pack(side=LEFT, padx=5, pady=5)
+        w = Button(box, text="Cancel", width=10, command=self.cancel)
+        w.pack(side=LEFT, padx=5, pady=5)
+
+        # keyboard shortcuts mirroring the two buttons
+        self.bind("<Return>", self.ok)
+        self.bind("<Escape>", self.cancel)
+
+        box.pack()
+
+    #
+    # standard button semantics
+
+    def ok(self, event=None):
+
+        # keep the dialog open if validation fails
+        if not self.validate():
+            self.initial_focus.focus_set() # put focus back
+            return
+
+        # hide the window before running apply()
+        self.withdraw()
+        self.update_idletasks()
+
+        self.apply()
+
+        self.cancel()
+
+    def cancel(self, event=None):
+
+        # put focus back to the parent window
+        if self.parent is not None:
+            self.parent.focus_set()
+        self.destroy()
+
+    #
+    # command hooks
+
+    def validate(self):
+        '''validate the data
+
+        This method is called automatically to validate the data before the
+        dialog is destroyed. By default, it always validates OK.
+        '''
+
+        return 1 # override
+
+    def apply(self):
+        '''process the data
+
+        This method is called automatically to process the data, *after*
+        the dialog is destroyed. By default, it does nothing.
+        '''
+
+        pass # override
+
+
+# --------------------------------------------------------------------
+# convenience dialogues
+
+class _QueryDialog(Dialog):
+
+    # Shared machinery for askinteger/askfloat/askstring: shows a
+    # prompt and an entry field, converts via the subclass-supplied
+    # getresult(), and range-checks against minvalue/maxvalue.
+
+    def __init__(self, title, prompt,
+                 initialvalue=None,
+                 minvalue = None, maxvalue = None,
+                 parent = None):
+
+        if not parent:
+            # fall back to the application's default root window
+            import Tkinter
+            parent = Tkinter._default_root
+
+        self.prompt   = prompt
+        self.minvalue = minvalue
+        self.maxvalue = maxvalue
+
+        self.initialvalue = initialvalue
+
+        Dialog.__init__(self, parent, title)
+
+    def destroy(self):
+        # drop the Entry reference before the base class tears down
+        self.entry = None
+        Dialog.destroy(self)
+
+    def body(self, master):
+        # dialog body: prompt label above an entry field
+
+        w = Label(master, text=self.prompt, justify=LEFT)
+        w.grid(row=0, padx=5, sticky=W)
+
+        self.entry = Entry(master, name="entry")
+        self.entry.grid(row=1, padx=5, sticky=W+E)
+
+        if self.initialvalue:
+            # pre-fill and select the initial value
+            self.entry.insert(0, self.initialvalue)
+            self.entry.select_range(0, END)
+
+        # the entry gets initial keyboard focus
+        return self.entry
+
+    def validate(self):
+        # convert and range-check the entry; on any problem pop up a
+        # warning and keep the dialog open (return 0)
+
+        import tkMessageBox
+
+        try:
+            # getresult() is supplied by the concrete subclass
+            result = self.getresult()
+        except ValueError:
+            tkMessageBox.showwarning(
+                "Illegal value",
+                self.errormessage + "\nPlease try again",
+                parent = self
+            )
+            return 0
+
+        if self.minvalue is not None and result < self.minvalue:
+            tkMessageBox.showwarning(
+                "Too small",
+                "The allowed minimum value is %s. "
+                "Please try again." % self.minvalue,
+                parent = self
+            )
+            return 0
+
+        if self.maxvalue is not None and result > self.maxvalue:
+            tkMessageBox.showwarning(
+                "Too large",
+                "The allowed maximum value is %s. "
+                "Please try again." % self.maxvalue,
+                parent = self
+            )
+            return 0
+
+        self.result = result
+
+        return 1
+
+
+class _QueryInteger(_QueryDialog):
+    errormessage = "Not an integer."
+    def getresult(self):
+        # int() raises ValueError on bad input; handled in validate()
+        return int(self.entry.get())
+
+def askinteger(title, prompt, **kw):
+    '''get an integer from the user
+
+    Arguments:
+
+        title -- the dialog title
+        prompt -- the label text
+        **kw -- see SimpleDialog class
+
+    Return value is an integer, or None if the dialog was cancelled
+    '''
+    d = apply(_QueryInteger, (title, prompt), kw)
+    return d.result
+
+class _QueryFloat(_QueryDialog):
+    errormessage = "Not a floating point value."
+    def getresult(self):
+        # float() raises ValueError on bad input; handled in validate()
+        return float(self.entry.get())
+
+def askfloat(title, prompt, **kw):
+    '''get a float from the user
+
+    Arguments:
+
+        title -- the dialog title
+        prompt -- the label text
+        **kw -- see SimpleDialog class
+
+    Return value is a float, or None if the dialog was cancelled
+    '''
+    d = apply(_QueryFloat, (title, prompt), kw)
+    return d.result
+
+class _QueryString(_QueryDialog):
+    def __init__(self, *args, **kw):
+        # peel off the optional "show" keyword; it is forwarded to the
+        # Entry widget in body() (e.g. show="*" masks password input)
+        if kw.has_key("show"):
+            self.__show = kw["show"]
+            del kw["show"]
+        else:
+            self.__show = None
+        _QueryDialog.__init__(self, *args, **kw)
+
+    def body(self, master):
+        entry = _QueryDialog.body(self, master)
+        if self.__show is not None:
+            entry.configure(show=self.__show)
+        return entry
+
+    def getresult(self):
+        # the raw entry string needs no conversion
+        return self.entry.get()
+
+def askstring(title, prompt, **kw):
+    '''get a string from the user
+
+    Arguments:
+
+        title -- the dialog title
+        prompt -- the label text
+        **kw -- see SimpleDialog class
+
+    Return value is a string, or None if the dialog was cancelled
+    '''
+    d = apply(_QueryString, (title, prompt), kw)
+    return d.result
+
+if __name__ == "__main__":
+    # manual smoke test -- requires an interactive Tk display
+
+    root = Tk()
+    root.update()
+
+    print askinteger("Spam", "Egg count", initialvalue=12*12)
+    print askfloat("Spam", "Egg weight\n(in tons)", minvalue=1, maxvalue=100)
+    print askstring("Spam", "Egg label")
diff --git a/lib-python/2.2/lib-tk/turtle.py b/lib-python/2.2/lib-tk/turtle.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/lib-tk/turtle.py
@@ -0,0 +1,385 @@
+# LogoMation-like turtle graphics
+
+from math import * # Also for export
+import Tkinter
+class Error(Exception):
+    """Exception raised by this module for bad pen arguments."""
+    pass
+
+class RawPen:
+    """A LogoMation-like turtle pen drawing on a given Tkinter canvas."""
+
+    def __init__(self, canvas):
+        self._canvas = canvas
+        # canvas item ids drawn so far (removed by clear())
+        self._items = []
+        # when true, movements are animated step by step
+        self._tracing = 1
+        # canvas id of the turtle arrow marker (0 = none drawn)
+        self._arrow = 0
+        self.degrees()
+        self.reset()
+
+    def degrees(self, fullcircle=360.0):
+        # set the angle unit: 'fullcircle' units per full turn;
+        # _invradian converts those units to radians
+        self._fullcircle = fullcircle
+        self._invradian = pi / (fullcircle * 0.5)
+
+    def radians(self):
+        # measure angles in radians instead of degrees
+        self.degrees(2.0*pi)
+
+    def reset(self):
+        # recenter the pen at the middle of the canvas, heading 0,
+        # pen down, width 1, black, not filling
+        canvas = self._canvas
+        self._canvas.update()
+        width = canvas.winfo_width()
+        height = canvas.winfo_height()
+        if width <= 1:
+            # window not mapped yet; fall back to the configured size
+            width = canvas['width']
+        if height <= 1:
+            height = canvas['height']
+        self._origin = float(width)/2.0, float(height)/2.0
+        self._position = self._origin
+        self._angle = 0.0
+        self._drawing = 1
+        self._width = 1
+        self._color = "black"
+        self._filling = 0
+        self._path = []
+        self._tofill = []
+        self.clear()
+        canvas._root().tkraise()
+
+    def clear(self):
+        # finish any fill in progress, then delete everything drawn
+        self.fill(0)
+        canvas = self._canvas
+        items = self._items
+        self._items = []
+        for item in items:
+            canvas.delete(item)
+        self._delete_turtle()
+        self._draw_turtle()
+
+
+    def tracer(self, flag):
+        # turn movement animation (and the turtle marker) on or off
+        self._tracing = flag
+        if not self._tracing:
+            self._delete_turtle()
+        self._draw_turtle()
+
+    def forward(self, distance):
+        # canvas y grows downward, hence the minus sign on y1
+        x0, y0 = start = self._position
+        x1 = x0 + distance * cos(self._angle*self._invradian)
+        y1 = y0 - distance * sin(self._angle*self._invradian)
+        self._goto(x1, y1)
+
+    def backward(self, distance):
+        self.forward(-distance)
+
+    def left(self, angle):
+        self._angle = (self._angle + angle) % self._fullcircle
+        self._draw_turtle()
+
+    def right(self, angle):
+        self.left(-angle)
+
+    def up(self):
+        # lift the pen: subsequent moves do not draw
+        self._drawing = 0
+
+    def down(self):
+        # lower the pen: subsequent moves draw
+        self._drawing = 1
+
+    def width(self, width):
+        self._width = float(width)
+
+    def color(self, *args):
+        # accepts a Tk color string, an (r, g, b) tuple, or three
+        # separate r, g, b values in the range 0..1
+        if not args:
+            raise Error, "no color arguments"
+        if len(args) == 1:
+            color = args[0]
+            if type(color) == type(""):
+                # Test the color first
+                try:
+                    id = self._canvas.create_line(0, 0, 0, 0, fill=color)
+                except Tkinter.TclError:
+                    raise Error, "bad color string: %s" % `color`
+                self._set_color(color)
+                return
+            try:
+                r, g, b = color
+            except:
+                raise Error, "bad color sequence: %s" % `color`
+        else:
+            try:
+                r, g, b = args
+            except:
+                raise Error, "bad color arguments: %s" % `args`
+        assert 0 <= r <= 1
+        assert 0 <= g <= 1
+        assert 0 <= b <= 1
+        # scale 0..1 components to 0..255 with rounding (+0.5)
+        x = 255.0
+        y = 0.5
+        self._set_color("#%02x%02x%02x" % (int(r*x+y), int(g*x+y), int(b*x+y)))
+
+    def _set_color(self,color):
+        self._color = color
+        self._draw_turtle()
+
+
+    def write(self, arg, move=0):
+        # draw str(arg) at the current position; with move=1 the pen
+        # jumps to the lower-right corner of the drawn text
+        x, y = start = self._position
+        x = x-1 # correction -- calibrated for Windows
+        item = self._canvas.create_text(x, y,
+                                        text=str(arg), anchor="sw",
+                                        fill=self._color)
+        self._items.append(item)
+        if move:
+            x0, y0, x1, y1 = self._canvas.bbox(item)
+            self._goto(x1, y1)
+        self._draw_turtle()
+
+    def fill(self, flag):
+        # fill(1) starts collecting the path; fill(0) closes it and
+        # paints the collected polygon
+        if self._filling:
+            path = tuple(self._path)
+            # a negative fill flag requests a smoothed polygon
+            smooth = self._filling < 0
+            if len(path) > 2:
+                item = self._canvas._create('polygon', path,
+                                            {'fill': self._color,
+                                             'smooth': smooth})
+                self._items.append(item)
+                self._canvas.lower(item)
+                if self._tofill:
+                    # recolor arc/oval items queued up by circle()
+                    for item in self._tofill:
+                        self._canvas.itemconfigure(item, fill=self._color)
+                        self._items.append(item)
+        self._path = []
+        self._tofill = []
+        self._filling = flag
+        if flag:
+            self._path.append(self._position)
+
+    def circle(self, radius, extent=None):
+        # draw a (partial) circle of the given radius; the center is
+        # 'radius' units left of the pen (negative radius = right)
+        if extent is None:
+            extent = self._fullcircle
+        x0, y0 = self._position
+        xc = x0 - radius * sin(self._angle * self._invradian)
+        yc = y0 - radius * cos(self._angle * self._invradian)
+        if radius >= 0.0:
+            start = self._angle - 90.0
+        else:
+            start = self._angle + 90.0
+            extent = -extent
+        if self._filling:
+            if abs(extent) >= self._fullcircle:
+                item = self._canvas.create_oval(xc-radius, yc-radius,
+                                                xc+radius, yc+radius,
+                                                width=self._width,
+                                                outline="")
+                self._tofill.append(item)
+            item = self._canvas.create_arc(xc-radius, yc-radius,
+                                           xc+radius, yc+radius,
+                                           style="chord",
+                                           start=start,
+                                           extent=extent,
+                                           width=self._width,
+                                           outline="")
+            self._tofill.append(item)
+        if self._drawing:
+            if abs(extent) >= self._fullcircle:
+                item = self._canvas.create_oval(xc-radius, yc-radius,
+                                                xc+radius, yc+radius,
+                                                width=self._width,
+                                                outline=self._color)
+                self._items.append(item)
+            item = self._canvas.create_arc(xc-radius, yc-radius,
+                                           xc+radius, yc+radius,
+                                           style="arc",
+                                           start=start,
+                                           extent=extent,
+                                           width=self._width,
+                                           outline=self._color)
+            self._items.append(item)
+        # move the pen to the end of the arc and turn it accordingly
+        angle = start + extent
+        x1 = xc + abs(radius) * cos(angle * self._invradian)
+        y1 = yc - abs(radius) * sin(angle * self._invradian)
+        self._angle = (self._angle + extent) % self._fullcircle
+        self._position = x1, y1
+        if self._filling:
+            self._path.append(self._position)
+        self._draw_turtle()
+
+    def goto(self, *args):
+        # accepts goto((x, y)) or goto(x, y); user coordinates are
+        # relative to the origin (canvas center), y pointing up
+        if len(args) == 1:
+            try:
+                x, y = args[0]
+            except:
+                raise Error, "bad point argument: %s" % `args[0]`
+        else:
+            try:
+                x, y = args
+            except:
+                raise Error, "bad coordinates: %s" % `args[0]`
+        x0, y0 = self._origin
+        self._goto(x0+x, y0-y)
+
+    def _goto(self, x1, y1):
+        # move to canvas coordinates (x1, y1), drawing a line if the
+        # pen is down; with tracing on, grow the line pixel by pixel
+        x0, y0 = start = self._position
+        self._position = map(float, (x1, y1))
+        if self._filling:
+            self._path.append(self._position)
+        if self._drawing:
+            if self._tracing:
+                dx = float(x1 - x0)
+                dy = float(y1 - y0)
+                # one animation hop per pixel of distance
+                distance = hypot(dx, dy)
+                nhops = int(distance)
+                item = self._canvas.create_line(x0, y0, x0, y0,
+                                                width=self._width,
+                                                capstyle="round",
+                                                fill=self._color)
+                try:
+                    for i in range(1, 1+nhops):
+                        x, y = x0 + dx*i/nhops, y0 + dy*i/nhops
+                        self._canvas.coords(item, x0, y0, x, y)
+                        self._draw_turtle((x,y))
+                        self._canvas.update()
+                        self._canvas.after(10)
+                    # in case nhops==0
+                    self._canvas.coords(item, x0, y0, x1, y1)
+                    self._canvas.itemconfigure(item, arrow="none")
+                except Tkinter.TclError:
+                    # Probably the window was closed!
+                    return
+            else:
+                item = self._canvas.create_line(x0, y0, x1, y1,
+                                                width=self._width,
+                                                capstyle="round",
+                                                fill=self._color)
+            self._items.append(item)
+        self._draw_turtle()
+
+    def _draw_turtle(self,position=[]):
+        # redraw the arrow marker at 'position' (default: the current
+        # pen position; the [] default is only compared, never mutated)
+        if not self._tracing:
+            return
+        if position == []:
+            position = self._position
+        x,y = position
+        distance = 8
+        dx = distance * cos(self._angle*self._invradian)
+        dy = distance * sin(self._angle*self._invradian)
+        self._delete_turtle()
+        self._arrow = self._canvas.create_line(x-dx,y+dy,x,y,
+                                          width=self._width,
+                                          arrow="last",
+                                          capstyle="round",
+                                          fill=self._color)
+        self._canvas.update()
+
+    def _delete_turtle(self):
+        # remove the arrow marker, if any
+        if self._arrow != 0:
+            self._canvas.delete(self._arrow)
+        self._arrow = 0
+
+
+
+# Process-wide singletons shared by Pen and the procedural interface.
+_root = None
+_canvas = None
+_pen = None
+
+class Pen(RawPen):
+    """RawPen bound to a lazily created, shared Tk root and canvas."""
+
+    def __init__(self):
+        global _root, _canvas
+        if _root is None:
+            _root = Tkinter.Tk()
+            _root.wm_protocol("WM_DELETE_WINDOW", self._destroy)
+        if _canvas is None:
+            # XXX Should have scroll bars
+            _canvas = Tkinter.Canvas(_root, background="white")
+            _canvas.pack(expand=1, fill="both")
+        RawPen.__init__(self, _canvas)
+
+    def _destroy(self):
+        # window-manager close handler: reset the singletons so that a
+        # new Pen can recreate the window later
+        global _root, _canvas, _pen
+        root = self._canvas._root()
+        if root is _root:
+            _pen = None
+            _root = None
+            _canvas = None
+        root.destroy()
+
+
+def _getpen():
+    # return the shared Pen instance, creating it on first use
+    global _pen
+    pen = _pen
+    if not pen:
+        _pen = pen = Pen()
+    return pen
+
+# Procedural interface: each function delegates to the shared Pen.
+def degrees(): _getpen().degrees()
+def radians(): _getpen().radians()
+def reset(): _getpen().reset()
+def clear(): _getpen().clear()
+def tracer(flag): _getpen().tracer(flag)
+def forward(distance): _getpen().forward(distance)
+def backward(distance): _getpen().backward(distance)
+def left(angle): _getpen().left(angle)
+def right(angle): _getpen().right(angle)
+def up(): _getpen().up()
+def down(): _getpen().down()
+def width(width): _getpen().width(width)
+def color(*args): apply(_getpen().color, args)
+def write(arg, move=0): _getpen().write(arg, move)
+def fill(flag): _getpen().fill(flag)
+def circle(radius, extent=None): _getpen().circle(radius, extent)
+def goto(*args): apply(_getpen().goto, args)
+
+def demo():
+    # visual demo exercising most pen operations; requires a display
+    reset()
+    tracer(1)
+    up()
+    backward(100)
+    down()
+    # draw 3 squares; the last filled
+    width(3)
+    for i in range(3):
+        if i == 2:
+            fill(1)
+        for j in range(4):
+            forward(20)
+            left(90)
+        if i == 2:
+            color("maroon")
+            fill(0)
+        up()
+        forward(30)
+        down()
+    width(1)
+    color("black")
+    # move out of the way
+    tracer(0)
+    up()
+    right(90)
+    forward(100)
+    right(90)
+    forward(100)
+    right(180)
+    down()
+    # some text
+    write("startstart", 1)
+    write("start", 1)
+    color("red")
+    # staircase
+    for i in range(5):
+        forward(20)
+        left(90)
+        forward(20)
+        right(90)
+    # filled staircase
+    fill(1)
+    for i in range(5):
+        forward(20)
+        left(90)
+        forward(20)
+        right(90)
+    fill(0)
+    # more text
+    write("end")
+    # only enter the event loop when run as a script
+    if __name__ == '__main__':
+        _root.mainloop()
+
+if __name__ == '__main__':
+    demo()
diff --git a/lib-python/2.2/linecache.py b/lib-python/2.2/linecache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/linecache.py
@@ -0,0 +1,101 @@
+"""Cache lines from files.
+
+This is intended to read lines from modules imported -- hence if a filename
+is not found, it will look down the module search path for a file by
+that name.
+"""
+
+import sys
+import os
+from stat import *
+
+__all__ = ["getline","clearcache","checkcache"]
+
+def getline(filename, lineno):
+    # lineno is 1-based; anything out of range yields the empty string
+    lines = getlines(filename)
+    if 1 <= lineno <= len(lines):
+        return lines[lineno-1]
+    else:
+        return ''
+
+
+# The cache
+
+# Maps filename -> (size, mtime, list_of_lines, fullname).
+cache = {} # The cache
+
+
+def clearcache():
+    """Clear the cache entirely."""
+
+    # rebind rather than mutate; drops every cached entry at once
+    global cache
+    cache = {}
+
+
+def getlines(filename):
+    """Get the lines for a file from the cache.
+    Update the cache if it doesn't contain an entry for this file already."""
+
+    if cache.has_key(filename):
+        # entry layout: (size, mtime, lines, fullname)
+        return cache[filename][2]
+    else:
+        return updatecache(filename)
+
+
+def checkcache():
+    """Discard cache entries that are out of date.
+    (This is not checked upon each call!)"""
+
+    for filename in cache.keys():
+        size, mtime, lines, fullname = cache[filename]
+        try:
+            stat = os.stat(fullname)
+        except os.error:
+            # file has vanished; drop the stale entry
+            del cache[filename]
+            continue
+        # invalidate when size or modification time changed on disk
+        if size != stat[ST_SIZE] or mtime != stat[ST_MTIME]:
+            del cache[filename]
+
+
+def updatecache(filename):
+    """Update a cache entry and return its list of lines.
+    If something's wrong, print a message, discard the cache entry,
+    and return an empty list."""
+
+    if cache.has_key(filename):
+        del cache[filename]
+    # ignore pseudo-filenames such as "<stdin>" and "<string>"
+    if not filename or filename[0] + filename[-1] == '<>':
+        return []
+    fullname = filename
+    try:
+        stat = os.stat(fullname)
+    except os.error, msg:
+        # Try looking through the module search path.
+        basename = os.path.split(filename)[1]
+        for dirname in sys.path:
+            # When using imputil, sys.path may contain things other than
+            # strings; ignore them when it happens.
+            try:
+                fullname = os.path.join(dirname, basename)
+            except (TypeError, AttributeError):
+                # Not sufficiently string-like to do anything useful with.
+                pass
+            else:
+                try:
+                    stat = os.stat(fullname)
+                    break
+                except os.error:
+                    pass
+        else:
+            # No luck
+##          print '*** Cannot stat', filename, ':', msg
+            return []
+    try:
+        fp = open(fullname, 'r')
+        lines = fp.readlines()
+        fp.close()
+    except IOError, msg:
+##      print '*** Cannot open', fullname, ':', msg
+        return []
+    # cache the file's stat signature alongside its lines so that
+    # checkcache() can detect on-disk changes later
+    size, mtime = stat[ST_SIZE], stat[ST_MTIME]
+    cache[filename] = size, mtime, lines, fullname
+    return lines
diff --git a/lib-python/2.2/locale.py b/lib-python/2.2/locale.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/locale.py
@@ -0,0 +1,743 @@
+""" Locale support.
+
+    The module provides low-level access to the C lib's locale APIs
+    and adds high level number formatting APIs as well as a locale
+    aliasing engine to complement these.
+
+    The aliasing engine includes support for many commonly used locale
+    names and maps them to values suitable for passing to the C lib's
+    setlocale() function. It also includes default encodings for all
+    supported locale names.
+
+"""
+
+import sys
+
+# Try importing the _locale module.
+#
+# If this fails, fall back on a basic 'C' locale emulation.
+
+# Yuck:  LC_MESSAGES is non-standard:  can't tell whether it exists before
+# trying the import.  So __all__ is also fiddled at the end of the file.
+__all__ = ["setlocale","Error","localeconv","strcoll","strxfrm",
+           "format","str","atof","atoi","LC_CTYPE","LC_COLLATE",
+           "LC_TIME","LC_MONETARY","LC_NUMERIC", "LC_ALL","CHAR_MAX"]
+
+try:
+
+    from _locale import *
+
+except ImportError:
+
+    # Locale emulation
+
+    # The _locale extension is unavailable: provide the C library's
+    # category numbers and a minimal 'C'-locale-only emulation below.
+    # CHAR_MAX doubles as the "value not available" sentinel.
+    CHAR_MAX = 127
+    LC_ALL = 6
+    LC_COLLATE = 3
+    LC_CTYPE = 0
+    LC_MESSAGES = 5
+    LC_MONETARY = 4
+    LC_NUMERIC = 1
+    LC_TIME = 2
+    Error = ValueError
+
+    def localeconv():
+        """ localeconv() -> dict.
+            Returns numeric and monetary locale-specific parameters.
+        """
+        # 'C' locale default values
+        # 127 (CHAR_MAX) and empty strings mean "not specified", as in C.
+        return {'grouping': [127],
+                'currency_symbol': '',
+                'n_sign_posn': 127,
+                'p_cs_precedes': 127,
+                'n_cs_precedes': 127,
+                'mon_grouping': [],
+                'n_sep_by_space': 127,
+                'decimal_point': '.',
+                'negative_sign': '',
+                'positive_sign': '',
+                'p_sep_by_space': 127,
+                'int_curr_symbol': '',
+                'p_sign_posn': 127,
+                'thousands_sep': '',
+                'mon_thousands_sep': '',
+                'frac_digits': 127,
+                'mon_decimal_point': '',
+                'int_frac_digits': 127}
+
+    def setlocale(category, value=None):
+        """ setlocale(integer,string=None) -> string.
+            Activates/queries locale processing.
+        """
+        # The emulation only supports (and always reports) the 'C' locale.
+        if value is not None and value != 'C':
+            raise Error, '_locale emulation only supports "C" locale'
+        return 'C'
+
<br>
+    def strcoll(a,b):
+        """ strcoll(string,string) -> int.
+            Compares two strings according to the locale.
+        """
+        # Emulation: plain ordinal comparison, as in the 'C' locale.
+        return cmp(a,b)
+
+    def strxfrm(s):
+        """ strxfrm(string) -> string.
+            Returns a string that behaves for cmp locale-aware.
+        """
+        # Emulation: identity transform ('C' locale collation).
+        return s
+
+### Number formatting APIs
+
+# Author: Martin von Loewis
+
+# Perform the thousands-grouping from right to left, following the
+# localeconv() 'grouping' list.  Returns (grouped string, number of
+# separators inserted).
+def _group(s):
+    conv=localeconv()
+    grouping=conv['grouping']
+    if not grouping:return (s, 0)
+    result=""
+    seps = 0
+    spaces = ""
+    # Split off trailing spaces (from %-width padding); they are
+    # re-appended untouched after grouping.
+    if s[-1] == ' ':
+        sp = s.find(' ')
+        spaces = s[sp:]
+        s = s[:sp]
+    while s and grouping:
+        # if grouping is -1 (CHAR_MAX), no further grouping is done
+        if grouping[0]==CHAR_MAX:
+            break
+        # 0: re-use last group ad infinitum
+        elif grouping[0]!=0:
+            #process last group
+            group=grouping[0]
+            grouping=grouping[1:]
+        # NOTE(review): if conv['grouping'] starts with 0, 'group' is
+        # used before assignment here -- assumes the first grouping
+        # entry is non-zero; TODO confirm against the C locale data.
+        if result:
+            result=s[-group:]+conv['thousands_sep']+result
+            seps += 1
+        else:
+            result=s[-group:]
+        s=s[:-group]
+        if s and s[-1] not in "0123456789":
+            # the leading string is only spaces and signs
+            return s+result+spaces,seps
+    if not result:
+        return s+spaces,seps
+    if s:
+        result=s+conv['thousands_sep']+result
+        seps += 1
+    return result+spaces,seps
+
+def format(f,val,grouping=0):
+    """Formats a value in the same way that the % formatting would use,
+    but takes the current locale into account.
+    Grouping is applied if the third parameter is true."""
+    result = f % val
+    # Split on '.' so the integer part can be grouped and the locale's
+    # decimal point substituted.
+    fields = result.split(".")
+    seps = 0
+    if grouping:
+        fields[0],seps=_group(fields[0])
+    if len(fields)==2:
+        result = fields[0]+localeconv()['decimal_point']+fields[1]
+    elif len(fields)==1:
+        result = fields[0]
+    else:
+        raise Error, "Too many decimal points in result string"
+
+    while seps:
+        # If the number was formatted for a specific width, then it
+        # might have been filled with spaces to the left or right. If
+        # so, remove as many spaces as there were separators inserted.
+        # Leading zeroes as fillers are not yet dealt with, as it is
+        # not clear how they should interact with grouping.
+        sp = result.find(" ")
+        if sp==-1:break
+        result = result[:sp]+result[sp+1:]
+        seps -= 1
+
+    return result
+
+def str(val):
+    """Convert float to string, taking the locale into account."""
+    # (Docstring fixed: this returns a string, not an integer.
+    # Shadows the builtin str -- kept for API compatibility.)
+    return format("%.12g",val)
+
+def atof(str,func=float):
+    "Parses a string as a float according to the locale settings."
+    # NOTE: the parameter shadows the builtin 'str'; kept for API
+    # compatibility with upstream CPython 2.2.
+    #First, get rid of the grouping (thousands separators)
+    ts = localeconv()['thousands_sep']
+    if ts:
+        s=str.split(ts)
+        str="".join(s)
+    #next, replace the decimal point with a dot
+    dd = localeconv()['decimal_point']
+    if dd:
+        s=str.split(dd)
+        str='.'.join(s)
+    #finally, parse the normalized string with func (float by default)
+    return func(str)
+
+def atoi(str):
+    "Converts a string to an integer according to the locale settings."
+    # Delegates to atof with int as the parser.
+    return atof(str, int)
+
+def _test():
+    """Smoke test: round-trip grouping and locale-aware float
+    formatting through atoi()/atof()."""
+    setlocale(LC_ALL, "")
+    #do grouping
+    s1=format("%d", 123456789,1)
+    print s1, "is", atoi(s1)
+    #standard formatting (module-level str(), not the builtin)
+    s1=str(3.14)
+    print s1, "is", atof(s1)
+
+### Locale name aliasing engine
+
+# Author: Marc-Andre Lemburg, mal@lemburg.com
+# Various tweaks by Fredrik Lundh <effbot@telia.com>
+
+# Store away the low-level version of setlocale; the name 'setlocale'
+# is rebound below to the high-level wrapper that accepts locale tuples.
+_setlocale = setlocale
+
+def normalize(localename):
+
+    """ Returns a normalized locale code for the given locale
+        name.
+
+        The returned locale code is formatted for use with
+        setlocale().
+
+        If normalization fails, the original name is returned
+        unchanged.
+
+        If the given encoding is not known, the function defaults to
+        the default encoding for the locale code just like setlocale()
+        does.
+
+    """
+    # Normalize the locale name and extract the encoding
+    fullname = localename.lower()
+    if ':' in fullname:
+        # ':' is sometimes used as encoding delimiter.
+        fullname = fullname.replace(':', '.')
+    if '.' in fullname:
+        langname, encoding = fullname.split('.')[:2]
+        fullname = langname + '.' + encoding
+    else:
+        langname = fullname
+        encoding = ''
+
+    # First lookup: fullname (possibly with encoding)
+    code = locale_alias.get(fullname, None)
+    if code is not None:
+        return code
+
+    # Second try: langname (without encoding)
+    code = locale_alias.get(langname, None)
+    if code is not None:
+        # Split the alias target into language and default encoding.
+        if '.' in code:
+            langname, defenc = code.split('.')
+        else:
+            langname = code
+            defenc = ''
+        # Prefer the caller's encoding (run through encoding_alias),
+        # falling back to the alias table's default encoding.
+        if encoding:
+            encoding = encoding_alias.get(encoding, encoding)
+        else:
+            encoding = defenc
+        if encoding:
+            return langname + '.' + encoding
+        else:
+            return langname
+
+    else:
+        # Normalization failed: hand back the input unchanged.
+        return localename
+
+def _parse_localename(localename):
+
+    """ Parses the locale code for localename and returns the
+        result as tuple (language code, encoding).
+
+        The localename is normalized and passed through the locale
+        alias engine. A ValueError is raised in case the locale name
+        cannot be parsed.
+
+        The language code corresponds to RFC 1766.  code and encoding
+        can be None in case the values cannot be determined or are
+        unknown to this implementation.
+
+    """
+    code = normalize(localename)
+    if '@' in localename:
+        # Deal with locale modifiers
+        code, modifier = code.split('@')
+        if modifier == 'euro' and '.' not in code:
+            # Assume Latin-9 for @euro locales. This is bogus,
+            # since some systems may use other encodings for these
+            # locales. Also, we ignore other modifiers.
+            return code, 'iso-8859-15'
+            
+    if '.' in code:
+        # NOTE(review): this returns a 2-element *list* (from split),
+        # not a tuple as the docstring says -- verbatim from upstream.
+        return code.split('.')[:2]
+    elif code == 'C':
+        return None, None
+    raise ValueError, 'unknown locale: %s' % localename
+
+def _build_localename(localetuple):
+
+    """ Builds a locale code from the given tuple (language code,
+        encoding).
+
+        No aliasing or normalizing takes place.
+
+    """
+    language, encoding = localetuple
+    # (None, ...) means the portable 'C' locale.
+    if language is None:
+        language = 'C'
+    if encoding is None:
+        return language
+    else:
+        return language + '.' + encoding
+
+def getdefaultlocale(envvars=('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')):
+
+    """ Tries to determine the default locale settings and returns
+        them as tuple (language code, encoding).
+
+        According to POSIX, a program which has not called
+        setlocale(LC_ALL, "") runs using the portable 'C' locale.
+        Calling setlocale(LC_ALL, "") lets it use the default locale as
+        defined by the LANG variable. Since we don't want to interfere
+        with the current locale setting we thus emulate the behavior
+        in the way described above.
+
+        To maintain compatibility with other platforms, not only the
+        LANG variable is tested, but a list of variables given as
+        envvars parameter. The first found to be defined will be
+        used. envvars defaults to the search path used in GNU gettext;
+        it must always contain the variable name 'LANG'.
+
+        Except for the code 'C', the language code corresponds to RFC
+        1766.  code and encoding can be None in case the values cannot
+        be determined.
+
+    """
+
+    try:
+        # check if it's supported by the _locale module
+        import _locale
+        code, encoding = _locale._getdefaultlocale()
+    except (ImportError, AttributeError):
+        # _locale missing or too old: fall through to the env lookup.
+        pass
+    else:
+        # make sure the code/encoding values are valid
+        if sys.platform == "win32" and code and code[:2] == "0x":
+            # map windows language identifier to language name
+            code = windows_locale.get(int(code, 0))
+        # ...add other platform-specific processing here, if
+        # necessary...
+        return code, encoding
+
+    # fall back on POSIX behaviour: first defined envvar wins.
+    import os
+    lookup = os.environ.get
+    for variable in envvars:
+        localename = lookup(variable,None)
+        if localename is not None:
+            break
+    else:
+        localename = 'C'
+    return _parse_localename(localename)
+
+
+def getlocale(category=LC_CTYPE):
+
+    """ Returns the current setting for the given locale category as
+        tuple (language code, encoding).
+
+        category may be one of the LC_* value except LC_ALL. It
+        defaults to LC_CTYPE.
+
+        Except for the code 'C', the language code corresponds to RFC
+        1766.  code and encoding can be None in case the values cannot
+        be determined.
+
+    """
+    localename = _setlocale(category)
+    # A ';' in the result means per-category settings, i.e. LC_ALL.
+    if category == LC_ALL and ';' in localename:
+        raise TypeError, 'category LC_ALL is not supported'
+    return _parse_localename(localename)
+
+def setlocale(category, locale=None):
+
+    """ Set the locale for the given category.  The locale can be
+        a string, a locale tuple (language code, encoding), or None.
+
+        Locale tuples are converted to strings using the locale
+        aliasing engine.  Locale strings are passed directly to the C
+        lib.
+
+        category may be given as one of the LC_* values.
+
+    """
+    if locale and type(locale) is not type(""):
+        # convert the (language, encoding) tuple to a string
+        locale = normalize(_build_localename(locale))
+    return _setlocale(category, locale)
+
+def resetlocale(category=LC_ALL):
+
+    """ Sets the locale for category to the default setting.
+
+        The default setting is determined by calling
+        getdefaultlocale(). category defaults to LC_ALL.
+
+        Any Error raised by the underlying setlocale() propagates.
+    """
+    _setlocale(category, _build_localename(getdefaultlocale()))
+
+### Database
+#
+# The following data was extracted from the locale.alias file which
+# comes with X11 and then hand edited removing the explicit encoding
+# definitions and adding some more aliases. The file is usually
+# available as /usr/lib/X11/locale/locale.alias.
+#
+
+#
+# The encoding_alias table maps lowercase encoding alias names to C
+# locale encoding names (case-sensitive).
+#
+encoding_alias = {
+        '437':                          'C',
+        'c':                            'C',
+        'iso8859':                      'ISO8859-1',
+        '8859':                         'ISO8859-1',
+        '88591':                        'ISO8859-1',
+        'ascii':                        'ISO8859-1',
+        'en':                           'ISO8859-1',
+        'iso88591':                     'ISO8859-1',
+        'iso_8859-1':                   'ISO8859-1',
+        '885915':                       'ISO8859-15',
+        'iso885915':                    'ISO8859-15',
+        'iso_8859-15':                  'ISO8859-15',
+        'iso8859-2':                    'ISO8859-2',
+        'iso88592':                     'ISO8859-2',
+        'iso_8859-2':                   'ISO8859-2',
+        'iso88595':                     'ISO8859-5',
+        'iso88596':                     'ISO8859-6',
+        'iso88597':                     'ISO8859-7',
+        'iso88598':                     'ISO8859-8',
+        'iso88599':                     'ISO8859-9',
+        'iso-2022-jp':                  'JIS7',
+        'jis':                          'JIS7',
+        'jis7':                         'JIS7',
+        'sjis':                         'SJIS',
+        'tis620':                       'TACTIS',
+        'ajec':                         'eucJP',
+        'eucjp':                        'eucJP',
+        'ujis':                         'eucJP',
+        'utf-8':                        'utf',
+        'utf8':                         'utf',
+        'utf8 at ucs4':                    'utf',
+}
+
+#
+# The locale_alias table maps lowercase alias names to C locale names
+# (case-sensitive). Encodings are always separated from the locale
+# name using a dot ('.'); they should only be given in case the
+# language name is needed to interpret the given encoding alias
+# correctly (CJK codes often have this need).
+#
+locale_alias = {
+        # NOTE(review): some targets use legacy/non-ISO country codes
+        # (e.g. 'en_EN', 'cs_CS', 'cz_CZ', 'sl_CS', 'sp_YU'); they are
+        # kept verbatim from the X11 locale.alias data this table was
+        # extracted from.
+        'american':                      'en_US.ISO8859-1',
+        'ar':                            'ar_AA.ISO8859-6',
+        'ar_aa':                         'ar_AA.ISO8859-6',
+        'ar_sa':                         'ar_SA.ISO8859-6',
+        'arabic':                        'ar_AA.ISO8859-6',
+        'bg':                            'bg_BG.ISO8859-5',
+        'bg_bg':                         'bg_BG.ISO8859-5',
+        'bulgarian':                     'bg_BG.ISO8859-5',
+        'c-french':                      'fr_CA.ISO8859-1',
+        'c':                             'C',
+        'c_c':                           'C',
+        'cextend':                       'en_US.ISO8859-1',
+        'chinese-s':                     'zh_CN.eucCN',
+        'chinese-t':                     'zh_TW.eucTW',
+        'croatian':                      'hr_HR.ISO8859-2',
+        'cs':                            'cs_CZ.ISO8859-2',
+        'cs_cs':                         'cs_CZ.ISO8859-2',
+        'cs_cz':                         'cs_CZ.ISO8859-2',
+        'cz':                            'cz_CZ.ISO8859-2',
+        'cz_cz':                         'cz_CZ.ISO8859-2',
+        'czech':                         'cs_CS.ISO8859-2',
+        'da':                            'da_DK.ISO8859-1',
+        'da_dk':                         'da_DK.ISO8859-1',
+        'danish':                        'da_DK.ISO8859-1',
+        'de':                            'de_DE.ISO8859-1',
+        'de_at':                         'de_AT.ISO8859-1',
+        'de_ch':                         'de_CH.ISO8859-1',
+        'de_de':                         'de_DE.ISO8859-1',
+        'dutch':                         'nl_BE.ISO8859-1',
+        'ee':                            'ee_EE.ISO8859-4',
+        'el':                            'el_GR.ISO8859-7',
+        'el_gr':                         'el_GR.ISO8859-7',
+        'en':                            'en_US.ISO8859-1',
+        'en_au':                         'en_AU.ISO8859-1',
+        'en_ca':                         'en_CA.ISO8859-1',
+        'en_gb':                         'en_GB.ISO8859-1',
+        'en_ie':                         'en_IE.ISO8859-1',
+        'en_nz':                         'en_NZ.ISO8859-1',
+        'en_uk':                         'en_GB.ISO8859-1',
+        'en_us':                         'en_US.ISO8859-1',
+        'eng_gb':                        'en_GB.ISO8859-1',
+        'english':                       'en_EN.ISO8859-1',
+        'english_uk':                    'en_GB.ISO8859-1',
+        'english_united-states':         'en_US.ISO8859-1',
+        'english_us':                    'en_US.ISO8859-1',
+        'es':                            'es_ES.ISO8859-1',
+        'es_ar':                         'es_AR.ISO8859-1',
+        'es_bo':                         'es_BO.ISO8859-1',
+        'es_cl':                         'es_CL.ISO8859-1',
+        'es_co':                         'es_CO.ISO8859-1',
+        'es_cr':                         'es_CR.ISO8859-1',
+        'es_ec':                         'es_EC.ISO8859-1',
+        'es_es':                         'es_ES.ISO8859-1',
+        'es_gt':                         'es_GT.ISO8859-1',
+        'es_mx':                         'es_MX.ISO8859-1',
+        'es_ni':                         'es_NI.ISO8859-1',
+        'es_pa':                         'es_PA.ISO8859-1',
+        'es_pe':                         'es_PE.ISO8859-1',
+        'es_py':                         'es_PY.ISO8859-1',
+        'es_sv':                         'es_SV.ISO8859-1',
+        'es_uy':                         'es_UY.ISO8859-1',
+        'es_ve':                         'es_VE.ISO8859-1',
+        'et':                            'et_EE.ISO8859-4',
+        'et_ee':                         'et_EE.ISO8859-4',
+        'fi':                            'fi_FI.ISO8859-1',
+        'fi_fi':                         'fi_FI.ISO8859-1',
+        'finnish':                       'fi_FI.ISO8859-1',
+        'fr':                            'fr_FR.ISO8859-1',
+        'fr_be':                         'fr_BE.ISO8859-1',
+        'fr_ca':                         'fr_CA.ISO8859-1',
+        'fr_ch':                         'fr_CH.ISO8859-1',
+        'fr_fr':                         'fr_FR.ISO8859-1',
+        'fre_fr':                        'fr_FR.ISO8859-1',
+        'french':                        'fr_FR.ISO8859-1',
+        'french_france':                 'fr_FR.ISO8859-1',
+        'ger_de':                        'de_DE.ISO8859-1',
+        'german':                        'de_DE.ISO8859-1',
+        'german_germany':                'de_DE.ISO8859-1',
+        'greek':                         'el_GR.ISO8859-7',
+        'hebrew':                        'iw_IL.ISO8859-8',
+        'hr':                            'hr_HR.ISO8859-2',
+        'hr_hr':                         'hr_HR.ISO8859-2',
+        'hu':                            'hu_HU.ISO8859-2',
+        'hu_hu':                         'hu_HU.ISO8859-2',
+        'hungarian':                     'hu_HU.ISO8859-2',
+        'icelandic':                     'is_IS.ISO8859-1',
+        'id':                            'id_ID.ISO8859-1',
+        'id_id':                         'id_ID.ISO8859-1',
+        'is':                            'is_IS.ISO8859-1',
+        'is_is':                         'is_IS.ISO8859-1',
+        'iso-8859-1':                    'en_US.ISO8859-1',
+        'iso-8859-15':                   'en_US.ISO8859-15',
+        'iso8859-1':                     'en_US.ISO8859-1',
+        'iso8859-15':                    'en_US.ISO8859-15',
+        'iso_8859_1':                    'en_US.ISO8859-1',
+        'iso_8859_15':                   'en_US.ISO8859-15',
+        'it':                            'it_IT.ISO8859-1',
+        'it_ch':                         'it_CH.ISO8859-1',
+        'it_it':                         'it_IT.ISO8859-1',
+        'italian':                       'it_IT.ISO8859-1',
+        'iw':                            'iw_IL.ISO8859-8',
+        'iw_il':                         'iw_IL.ISO8859-8',
+        'ja':                            'ja_JP.eucJP',
+        'ja.jis':                        'ja_JP.JIS7',
+        'ja.sjis':                       'ja_JP.SJIS',
+        'ja_jp':                         'ja_JP.eucJP',
+        'ja_jp.ajec':                    'ja_JP.eucJP',
+        'ja_jp.euc':                     'ja_JP.eucJP',
+        'ja_jp.eucjp':                   'ja_JP.eucJP',
+        'ja_jp.iso-2022-jp':             'ja_JP.JIS7',
+        'ja_jp.jis':                     'ja_JP.JIS7',
+        'ja_jp.jis7':                    'ja_JP.JIS7',
+        'ja_jp.mscode':                  'ja_JP.SJIS',
+        'ja_jp.sjis':                    'ja_JP.SJIS',
+        'ja_jp.ujis':                    'ja_JP.eucJP',
+        'japan':                         'ja_JP.eucJP',
+        'japanese':                      'ja_JP.SJIS',
+        'japanese-euc':                  'ja_JP.eucJP',
+        'japanese.euc':                  'ja_JP.eucJP',
+        'jp_jp':                         'ja_JP.eucJP',
+        'ko':                            'ko_KR.eucKR',
+        'ko_kr':                         'ko_KR.eucKR',
+        'ko_kr.euc':                     'ko_KR.eucKR',
+        'korean':                        'ko_KR.eucKR',
+        'lt':                            'lt_LT.ISO8859-4',
+        'lv':                            'lv_LV.ISO8859-4',
+        'mk':                            'mk_MK.ISO8859-5',
+        'mk_mk':                         'mk_MK.ISO8859-5',
+        'nl':                            'nl_NL.ISO8859-1',
+        'nl_be':                         'nl_BE.ISO8859-1',
+        'nl_nl':                         'nl_NL.ISO8859-1',
+        'no':                            'no_NO.ISO8859-1',
+        'no_no':                         'no_NO.ISO8859-1',
+        'norwegian':                     'no_NO.ISO8859-1',
+        'pl':                            'pl_PL.ISO8859-2',
+        'pl_pl':                         'pl_PL.ISO8859-2',
+        'polish':                        'pl_PL.ISO8859-2',
+        'portuguese':                    'pt_PT.ISO8859-1',
+        'portuguese_brazil':             'pt_BR.ISO8859-1',
+        'posix':                         'C',
+        'posix-utf2':                    'C',
+        'pt':                            'pt_PT.ISO8859-1',
+        'pt_br':                         'pt_BR.ISO8859-1',
+        'pt_pt':                         'pt_PT.ISO8859-1',
+        'ro':                            'ro_RO.ISO8859-2',
+        'ro_ro':                         'ro_RO.ISO8859-2',
+        'ru':                            'ru_RU.ISO8859-5',
+        'ru_ru':                         'ru_RU.ISO8859-5',
+        'rumanian':                      'ro_RO.ISO8859-2',
+        'russian':                       'ru_RU.ISO8859-5',
+        'serbocroatian':                 'sh_YU.ISO8859-2',
+        'sh':                            'sh_YU.ISO8859-2',
+        'sh_hr':                         'sh_HR.ISO8859-2',
+        'sh_sp':                         'sh_YU.ISO8859-2',
+        'sh_yu':                         'sh_YU.ISO8859-2',
+        'sk':                            'sk_SK.ISO8859-2',
+        'sk_sk':                         'sk_SK.ISO8859-2',
+        'sl':                            'sl_CS.ISO8859-2',
+        'sl_cs':                         'sl_CS.ISO8859-2',
+        'sl_si':                         'sl_SI.ISO8859-2',
+        'slovak':                        'sk_SK.ISO8859-2',
+        'slovene':                       'sl_CS.ISO8859-2',
+        'sp':                            'sp_YU.ISO8859-5',
+        'sp_yu':                         'sp_YU.ISO8859-5',
+        'spanish':                       'es_ES.ISO8859-1',
+        'spanish_spain':                 'es_ES.ISO8859-1',
+        'sr_sp':                         'sr_SP.ISO8859-2',
+        'sv':                            'sv_SE.ISO8859-1',
+        'sv_se':                         'sv_SE.ISO8859-1',
+        'swedish':                       'sv_SE.ISO8859-1',
+        'th_th':                         'th_TH.TACTIS',
+        'tr':                            'tr_TR.ISO8859-9',
+        'tr_tr':                         'tr_TR.ISO8859-9',
+        'turkish':                       'tr_TR.ISO8859-9',
+        'univ':                          'en_US.utf',
+        'universal':                     'en_US.utf',
+        'zh':                            'zh_CN.eucCN',
+        'zh_cn':                         'zh_CN.eucCN',
+        'zh_cn.big5':                    'zh_TW.eucTW',
+        'zh_cn.euc':                     'zh_CN.eucCN',
+        'zh_tw':                         'zh_TW.eucTW',
+        'zh_tw.euc':                     'zh_TW.eucTW',
+}
+
+#
+# this maps windows language identifiers (as used on Windows 95 and
+# earlier) to locale strings.
+#
+# NOTE: this mapping is incomplete.  If your language is missing, send
+# a note with the missing language identifier and the suggested locale
+# code to Fredrik Lundh <effbot@telia.com>.  Thanks /F
+
+windows_locale = {
+    0x0404: "zh_TW", # Chinese (Taiwan)
+    0x0804: "zh_CN", # Chinese (PRC)
+    0x0406: "da_DK", # Danish
+    0x0413: "nl_NL", # Dutch (Netherlands)
+    0x0409: "en_US", # English (United States)
+    0x0809: "en_UK", # English (United Kingdom) -- NOTE(review): "en_UK"
+                     # is not an ISO code; later CPython uses "en_GB".
+                     # Kept verbatim from upstream 2.2.
+    0x0c09: "en_AU", # English (Australian)
+    0x1009: "en_CA", # English (Canadian)
+    0x1409: "en_NZ", # English (New Zealand)
+    0x1809: "en_IE", # English (Ireland)
+    0x1c09: "en_ZA", # English (South Africa)
+    0x040b: "fi_FI", # Finnish
+    0x040c: "fr_FR", # French (Standard)
+    0x080c: "fr_BE", # French (Belgian)
+    0x0c0c: "fr_CA", # French (Canadian)
+    0x100c: "fr_CH", # French (Switzerland)
+    0x0407: "de_DE", # German (Standard)
+    0x0408: "el_GR", # Greek
+    0x040d: "iw_IL", # Hebrew
+    0x040f: "is_IS", # Icelandic
+    0x0410: "it_IT", # Italian (Standard)
+    0x0411: "ja_JA", # Japanese -- NOTE(review): looks like a typo for
+                     # "ja_JP" (fixed in later CPython); kept verbatim.
+    0x0414: "no_NO", # Norwegian (Bokmal)
+    0x0816: "pt_PT", # Portuguese (Standard)
+    0x0c0a: "es_ES", # Spanish (Modern Sort)
+    0x0441: "sw_KE", # Swahili (Kenya)
+    0x041d: "sv_SE", # Swedish
+    0x081d: "sv_FI", # Swedish (Finland)
+    0x041f: "tr_TR", # Turkish
+}
+
+def _print_locale():
+
+    """ Test function: dump default, startup and post-reset locale
+        settings for every LC_* category (except LC_ALL).
+    """
+    categories = {}
+    def _init_categories(categories=categories):
+        # Collect all LC_* category constants defined in this module.
+        for k,v in globals().items():
+            if k[:3] == 'LC_':
+                categories[k] = v
+    _init_categories()
+    # LC_ALL is a composite category; getlocale() rejects it.
+    del categories['LC_ALL']
+
+    print 'Locale defaults as determined by getdefaultlocale():'
+    print '-'*72
+    lang, enc = getdefaultlocale()
+    print 'Language: ', lang or '(undefined)'
+    print 'Encoding: ', enc or '(undefined)'
+    print
+
+    print 'Locale settings on startup:'
+    print '-'*72
+    for name,category in categories.items():
+        print name, '...'
+        lang, enc = getlocale(category)
+        print '   Language: ', lang or '(undefined)'
+        print '   Encoding: ', enc or '(undefined)'
+        print
+
+    print
+    print 'Locale settings after calling resetlocale():'
+    print '-'*72
+    resetlocale()
+    for name,category in categories.items():
+        print name, '...'
+        lang, enc = getlocale(category)
+        print '   Language: ', lang or '(undefined)'
+        print '   Encoding: ', enc or '(undefined)'
+        print
+
+    try:
+        setlocale(LC_ALL, "")
+    except:  # NOTE(review): bare except; deliberate best-effort here
+        print 'NOTE:'
+        print 'setlocale(LC_ALL, "") does not support the default locale'
+        print 'given in the OS environment variables.'
+    else:
+        print
+        print 'Locale settings after calling setlocale(LC_ALL, ""):'
+        print '-'*72
+        for name,category in categories.items():
+            print name, '...'
+            lang, enc = getlocale(category)
+            print '   Language: ', lang or '(undefined)'
+            print '   Encoding: ', enc or '(undefined)'
+            print
+
+###
+
+# LC_MESSAGES is non-standard: export it only when it was actually
+# provided (by _locale or the emulation above); see the __all__ note
+# at the top of the file.
+try:
+    LC_MESSAGES
+except:
+    pass
+else:
+    __all__.append("LC_MESSAGES")
+
+if __name__=='__main__':
+    # Manual smoke test: run 'python locale.py' to exercise the
+    # aliasing engine and the number-formatting APIs.
+    print 'Locale aliasing:'
+    print
+    _print_locale()
+    print
+    print 'Number formatting:'
+    print
+    _test()
diff --git a/lib-python/2.2/macpath.py b/lib-python/2.2/macpath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/macpath.py
@@ -0,0 +1,242 @@
+"""Pathname and path-related operations for the Macintosh."""
+
+import os
+from stat import *
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+           "basename","dirname","commonprefix","getsize","getmtime",
+           "getatime","islink","exists","isdir","isfile",
+           "walk","expanduser","expandvars","normpath","abspath",
+           "realpath"]
+
+# Normalize the case of a pathname.  Dummy in Posix, but <s>.lower() here.
+
+def normcase(path):
+    """Return *path* lower-cased; Mac filesystem names compare case-insensitively."""
+    return path.lower()
+
+
+def isabs(s):
+    """Return true if a path is absolute.
+    On the Mac, relative paths begin with a colon,
+    but as a special case, paths with no colons at all are also relative.
+    Anything else is absolute (the string up to the first colon is the
+    volume name)."""
+
+    # Absolute iff the path contains a colon and does not start with one.
+    return ':' in s and s[0] != ':'
+
+
+def join(s, *p):
+    """Join path components with colons, Macintosh style.
+
+    An absolute component (see isabs()) discards everything accumulated
+    so far; a relative component has its single leading colon stripped
+    before being appended.
+    """
+    path = s
+    for t in p:
+        # NOTE(review): this tests 's' (the original first argument),
+        # not 'path' -- matches the CPython 2.2 behaviour; confirm before
+        # "fixing".
+        if (not s) or isabs(t):
+            path = t
+            continue
+        # Relative components begin with ':'; drop it before appending.
+        if t[:1] == ':':
+            t = t[1:]
+        # Make sure the accumulated path is colon-terminated and marked
+        # relative if it contained no colon yet.
+        if ':' not in path:
+            path = ':' + path
+        if path[-1:] != ':':
+            path = path + ':'
+        path = path + t
+    return path
+
+
+def split(s):
+    """Split a pathname into two parts: the directory leading up to the final
+    bit, and the basename (the filename, without colons, in that directory).
+    The result (s, t) is such that join(s, t) yields the original argument."""
+
+    if ':' not in s: return '', s
+    # Find the position just past the last colon in the string.
+    colon = 0
+    for i in range(len(s)):
+        if s[i] == ':': colon = i + 1
+    path, file = s[:colon-1], s[colon:]
+    # A one-component directory must keep a trailing colon so it stays
+    # recognisable as a (relative or volume) directory path.
+    if path and not ':' in path:
+        path = path + ':'
+    return path, file
+
+
+def splitext(p):
+    """Split a path into root and extension.
+    The extension is everything starting at the last dot in the last
+    pathname component; the root is everything before that.
+    It is always true that root + ext == p."""
+
+    # Single left-to-right scan; 'ext' holds the candidate extension and
+    # is folded back into 'root' whenever a later ':' or '.' supersedes it.
+    root, ext = '', ''
+    for c in p:
+        if c == ':':
+            # Directory separator: whatever looked like an extension was
+            # part of a directory name, not the final component.
+            root, ext = root + ext + c, ''
+        elif c == '.':
+            if ext:
+                # A later dot restarts the extension.
+                root, ext = root + ext, c
+            else:
+                ext = c
+        elif ext:
+            ext = ext + c
+        else:
+            root = root + c
+    return root, ext
+
+
+def splitdrive(p):
+    """Split a pathname into a drive specification and the rest of the
+    path.  Useful on DOS/Windows/NT; on the Mac, the drive is always
+    empty (don't use the volume name -- it doesn't have the same
+    syntactic and semantic oddities as DOS drive letters, such as there
+    being a separate current directory per drive)."""
+
+    # No drive concept on the Mac: the whole input is the path.
+    return '', p
+
+
+# Short interfaces to split()
+
+def dirname(s): return split(s)[0]      # directory part (may be '')
+def basename(s): return split(s)[1]     # final component, without colons
+
+def ismount(s):
+	if not isabs(s):
+		return False
+	components = split(s)
+	return len(components) == 2 and components[1] == ''
+
+def isdir(s):
+    """Return true if the pathname refers to an existing directory."""
+
+    try:
+        st = os.stat(s)
+    except os.error:
+        # Nonexistent path (or stat failure) counts as "not a directory".
+        return 0
+    # Pre-2.2 style: index the stat tuple with stat.ST_MODE.
+    return S_ISDIR(st[ST_MODE])
+
+
+# Get size, mtime, atime of files.
+
+def getsize(filename):
+    """Return the size of a file, reported by os.stat()."""
+    # os.error from a missing file propagates to the caller.
+    st = os.stat(filename)
+    return st[ST_SIZE]
+
+def getmtime(filename):
+    """Return the last modification time of a file, reported by os.stat()."""
+    st = os.stat(filename)
+    return st[ST_MTIME]
+
+def getatime(filename):
+    """Return the last access time of a file, reported by os.stat()."""
+    st = os.stat(filename)
+    return st[ST_ATIME]
+
+
+def islink(s):
+    """Return true if the pathname refers to a symbolic link.
+    (Always false on the Mac, until we understand Aliases.)"""
+
+    return 0
+
+
+def isfile(s):
+    """Return true if the pathname refers to an existing regular file."""
+
+    try:
+        st = os.stat(s)
+    except os.error:
+        # Nonexistent path (or stat failure) counts as "not a file".
+        return 0
+    return S_ISREG(st[ST_MODE])
+
+
+def exists(s):
+    """Return true if the pathname refers to an existing file or directory."""
+
+    try:
+        # The stat result is unused; the call is made only to see
+        # whether it raises.
+        st = os.stat(s)
+    except os.error:
+        return 0
+    return 1
+
+# Return the longest prefix of all list elements.
+
+def commonprefix(m):
+    "Given a list of pathnames, returns the longest common leading component"
+    if not m: return ''
+    # Start from the first item and shrink the prefix character by
+    # character whenever another item disagrees.  Note this is a string
+    # prefix, not a path-component prefix.
+    prefix = m[0]
+    for item in m:
+        for i in range(len(prefix)):
+            if prefix[:i+1] != item[:i+1]:
+                prefix = prefix[:i]
+                if i == 0: return ''
+                break
+    return prefix
+
+def expandvars(path):
+    """Dummy to retain interface-compatibility with other operating systems."""
+    # No environment-variable syntax in classic Mac paths; return unchanged.
+    return path
+
+
+def expanduser(path):
+    """Dummy to retain interface-compatibility with other operating systems."""
+    # No '~' home-directory syntax in classic Mac paths; return unchanged.
+    return path
+
+norm_error = 'macpath.norm_error: path cannot be normalized'
+
+def normpath(s):
+    """Normalize a pathname.  Will return the same result for
+    equivalent paths."""
+
+    # Colon-free paths are relative; mark them explicitly.
+    if ":" not in s:
+        return ":"+s
+
+    comps = s.split(":")
+    i = 1
+    while i < len(comps)-1:
+        # An empty component between non-empty ones is '::', which means
+        # "parent directory": drop it together with the preceding component.
+        if comps[i] == "" and comps[i-1] != "":
+            if i > 1:
+                del comps[i-1:i+1]
+                i = i - 1
+            else:
+                # best way to handle this is to raise an exception
+                # (a volume name has no parent to pop)
+                raise norm_error, 'Cannot use :: immediately after volume name'
+        else:
+            i = i + 1
+
+    s = ":".join(comps)
+
+    # remove trailing ":" except for ":" and "Volume:"
+    if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
+        s = s[:-1]
+    return s
+
+
+def walk(top, func, arg):
+    """Directory tree walk with callback function.
+
+    For each directory in the directory tree rooted at top (including top
+    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
+    dirname is the name of the directory, and fnames a list of the names of
+    the files and subdirectories in dirname (excluding '.' and '..').  func
+    may modify the fnames list in-place (e.g. via del or slice assignment),
+    and walk will only recurse into the subdirectories whose names remain in
+    fnames; this can be used to implement a filter, or to impose a specific
+    order of visiting.  No semantics are defined for, or required of, arg,
+    beyond that arg is always passed to func.  It can be used, e.g., to pass
+    a filename pattern, or a mutable object designed to accumulate
+    statistics.  Passing None for arg is common."""
+
+    try:
+        names = os.listdir(top)
+    except os.error:
+        # Unreadable directory: silently skip this subtree.
+        return
+    # Call the callback before recursing so it can prune 'names' in place.
+    func(arg, top, names)
+    for name in names:
+        name = join(top, name)
+        if isdir(name):
+            walk(name, func, arg)
+
+
+def abspath(path):
+    """Return an absolute path."""
+    # Anchor relative paths at the current working directory, then
+    # normalize the result.
+    if not isabs(path):
+        path = join(os.getcwd(), path)
+    return normpath(path)
+
+# realpath is a no-op on systems without islink support
+realpath = abspath
diff --git a/lib-python/2.2/macurl2path.py b/lib-python/2.2/macurl2path.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/macurl2path.py
@@ -0,0 +1,95 @@
+"""Macintosh-specific module for conversion between pathnames and URLs.
+
+Do not import directly; use urllib instead."""
+
+import urllib
+import os
+
+__all__ = ["url2pathname","pathname2url"]
+
+def url2pathname(pathname):
+    "Convert /-delimited pathname to mac pathname"
+    #
+    # XXXX The .. handling should be fixed...
+    #
+    # Only plain paths and file: URLs can be converted.
+    tp = urllib.splittype(pathname)[0]
+    if tp and tp != 'file':
+        raise RuntimeError, 'Cannot convert non-local URL to pathname'
+    # Turn starting /// into /, an empty hostname means current host
+    if pathname[:3] == '///':
+        pathname = pathname[2:]
+    elif pathname[:2] == '//':
+        raise RuntimeError, 'Cannot convert non-local URL to pathname'
+    components = pathname.split('/')
+    # Remove . and embedded ..
+    i = 0
+    while i < len(components):
+        if components[i] == '.':
+            del components[i]
+        elif components[i] == '..' and i > 0 and \
+                                  components[i-1] not in ('', '..'):
+            # '..' cancels the preceding real component.
+            del components[i-1:i+1]
+            i = i-1
+        elif components[i] == '' and i > 0 and components[i-1] != '':
+            # Collapse empty components from '//' in the middle.
+            del components[i]
+        else:
+            i = i+1
+    if not components[0]:
+        # Absolute unix path, don't start with colon
+        rv = ':'.join(components[1:])
+    else:
+        # relative unix path, start with colon. First replace
+        # leading .. by empty strings (giving ::file)
+        i = 0
+        while i < len(components) and components[i] == '..':
+            components[i] = ''
+            i = i + 1
+        rv = ':' + ':'.join(components)
+    # and finally unquote slashes and other funny characters
+    return urllib.unquote(rv)
+
+def pathname2url(pathname):
+    "convert mac pathname to /-delimited pathname"
+    if '/' in pathname:
+        raise RuntimeError, "Cannot convert pathname containing slashes"
+    components = pathname.split(':')
+    # Remove empty first and/or last component
+    # NOTE(review): an empty or all-colon input leaves 'components' empty
+    # and the second test raises IndexError -- same as CPython 2.2.
+    if components[0] == '':
+        del components[0]
+    if components[-1] == '':
+        del components[-1]
+    # Replace empty string ('::') by .. (will result in '/../' later)
+    for i in range(len(components)):
+        if components[i] == '':
+            components[i] = '..'
+    # Truncate names longer than 31 bytes
+    components = map(_pncomp2url, components)
+
+    # Absolute Mac paths map to absolute URL paths.
+    if os.path.isabs(pathname):
+        return '/' + '/'.join(components)
+    else:
+        return '/'.join(components)
+
+def _pncomp2url(component):
+    # Truncate to 31 characters (the classic MacOS filename limit) and
+    # URL-quote everything, including slashes, which are ordinary
+    # characters inside Mac filenames.
+    component = urllib.quote(component[:31], safe='')  # We want to quote slashes
+    return component
+
+def test():
+    """Print sample conversions in both directions for eyeball checking."""
+    for url in ["index.html",
+                "bar/index.html",
+                "/foo/bar/index.html",
+                "/foo/bar/",
+                "/"]:
+        print `url`, '->', `url2pathname(url)`
+    for path in ["drive:",
+                 "drive:dir:",
+                 "drive:dir:file",
+                 "drive:file",
+                 "file",
+                 ":file",
+                 ":dir:",
+                 ":dir:file"]:
+        print `path`, '->', `pathname2url(path)`
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/mailbox.py b/lib-python/2.2/mailbox.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/mailbox.py
@@ -0,0 +1,313 @@
+#! /usr/bin/env python
+
+"""Classes to handle Unix style, MMDF style, and MH style mailboxes."""
+
+
+import rfc822
+import os
+
+__all__ = ["UnixMailbox","MmdfMailbox","MHMailbox","Maildir","BabylMailbox",
+           "PortableUnixMailbox"]
+
+class _Mailbox:
+    """Base class for single-file mailbox formats.
+
+    Subclasses must provide _search_start() (advance the file to the
+    start of the next message, raising EOFError at end of input) and
+    _search_end() (advance the file just past the current message).
+    """
+    def __init__(self, fp, factory=rfc822.Message):
+        self.fp = fp
+        # File offset at which the next scan resumes.
+        self.seekp = 0
+        # Called with a file-like object wrapping each message's bytes.
+        self.factory = factory
+
+    def __iter__(self):
+        # next() returns None at end of input, which terminates iter().
+        return iter(self.next, None)
+
+    def next(self):
+        """Return the next message as a factory object, or None at EOF."""
+        while 1:
+            self.fp.seek(self.seekp)
+            try:
+                self._search_start()
+            except EOFError:
+                self.seekp = self.fp.tell()
+                return None
+            start = self.fp.tell()
+            self._search_end()
+            self.seekp = stop = self.fp.tell()
+            # Skip zero-length matches and keep scanning.
+            if start != stop:
+                break
+        return self.factory(_Subfile(self.fp, start, stop))
+
+
+class _Subfile:
+    """Read-only file-like view onto the byte range [start, stop) of fp.
+
+    Offsets reported by tell()/seek() are relative to 'start'.
+    """
+    def __init__(self, fp, start, stop):
+        self.fp = fp
+        self.start = start
+        self.stop = stop
+        self.pos = self.start
+
+    def read(self, length = None):
+        """Read up to 'length' bytes, clipped to the window's end."""
+        if self.pos >= self.stop:
+            return ''
+        remaining = self.stop - self.pos
+        if length is None or length < 0:
+            length = remaining
+        elif length > remaining:
+            length = remaining
+        # Re-seek each time: the underlying fp is shared between views.
+        self.fp.seek(self.pos)
+        data = self.fp.read(length)
+        self.pos = self.fp.tell()
+        return data
+
+    def readline(self, length = None):
+        """Read one line, never reading past the window's end."""
+        if self.pos >= self.stop:
+            return ''
+        if length is None:
+            length = self.stop - self.pos
+        self.fp.seek(self.pos)
+        data = self.fp.readline(length)
+        self.pos = self.fp.tell()
+        return data
+
+    def readlines(self, sizehint = -1):
+        """Read lines until EOF or until roughly 'sizehint' bytes are read."""
+        lines = []
+        while 1:
+            line = self.readline()
+            if not line:
+                break
+            lines.append(line)
+            if sizehint >= 0:
+                sizehint = sizehint - len(line)
+                if sizehint <= 0:
+                    break
+        return lines
+
+    def tell(self):
+        # Position relative to the start of the window.
+        return self.pos - self.start
+
+    def seek(self, pos, whence=0):
+        """Seek within the window (0=absolute, 1=relative, 2=from end)."""
+        if whence == 0:
+            self.pos = self.start + pos
+        elif whence == 1:
+            self.pos = self.pos + pos
+        elif whence == 2:
+            self.pos = self.stop + pos
+
+    def close(self):
+        # Drop the reference only; the shared fp is owned by the mailbox.
+        del self.fp
+
+
+# Recommended to use PortableUnixMailbox instead!
+class UnixMailbox(_Mailbox):
+    """BSD ("From ") style mailbox with strict From-line matching."""
+
+    def _search_start(self):
+        # Advance to the next From_ delimiter line and leave the file
+        # positioned at its beginning.
+        while 1:
+            pos = self.fp.tell()
+            line = self.fp.readline()
+            if not line:
+                raise EOFError
+            if line[:5] == 'From ' and self._isrealfromline(line):
+                self.fp.seek(pos)
+                return
+
+    def _search_end(self):
+        # Advance past the current message, stopping just before the
+        # next From_ delimiter (or at end of file).
+        self.fp.readline()      # Throw away header line
+        while 1:
+            pos = self.fp.tell()
+            line = self.fp.readline()
+            if not line:
+                return
+            if line[:5] == 'From ' and self._isrealfromline(line):
+                self.fp.seek(pos)
+                return
+
+    # An overridable mechanism to test for From-line-ness.  You can either
+    # specify a different regular expression or define a whole new
+    # _isrealfromline() method.  Note that this only gets called for lines
+    # starting with the 5 characters "From ".
+    #
+    # BAW: According to
+    #http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
+    # the only portable, reliable way to find message delimiters in a BSD (i.e
+    # Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
+    # beginning of the file, "^From .*\n".  While _fromlinepattern below seems
+    # like a good idea, in practice, there are too many variations for more
+    # strict parsing of the line to be completely accurate.
+    #
+    # _strict_isrealfromline() is the old version which tries to do stricter
+    # parsing of the From_ line.  _portable_isrealfromline() simply returns
+    # true, since it's never called if the line doesn't already start with
+    # "From ".
+    #
+    # This algorithm, and the way it interacts with _search_start() and
+    # _search_end() may not be completely correct, because it doesn't check
+    # that the two characters preceding "From " are \n\n or the beginning of
+    # the file.  Fixing this would require a more extensive rewrite than is
+    # necessary.  For convenience, we've added a StrictUnixMailbox class which
+    # uses the older, more strict _fromlinepattern regular expression.
+
+    _fromlinepattern = r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+" \
+                       r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*$"
+    # Compiled lazily by _strict_isrealfromline() on first use.
+    _regexp = None
+
+    def _strict_isrealfromline(self, line):
+        if not self._regexp:
+            import re
+            self._regexp = re.compile(self._fromlinepattern)
+        return self._regexp.match(line)
+
+    def _portable_isrealfromline(self, line):
+        return 1
+
+    # Default policy: strict matching (see comment block above).
+    _isrealfromline = _strict_isrealfromline
+
+
+class PortableUnixMailbox(UnixMailbox):
+    """UnixMailbox variant that accepts any line starting with 'From '."""
+    _isrealfromline = UnixMailbox._portable_isrealfromline
+
+
+class MmdfMailbox(_Mailbox):
+    """MMDF-style mailbox: messages delimited by lines of four ^A characters."""
+
+    def _search_start(self):
+        # Skip forward past the next delimiter line.
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                raise EOFError
+            if line[:5] == '\001\001\001\001\n':
+                return
+
+    def _search_end(self):
+        # Stop just before the closing delimiter line (or at EOF).
+        while 1:
+            pos = self.fp.tell()
+            line = self.fp.readline()
+            if not line:
+                return
+            if line == '\001\001\001\001\n':
+                self.fp.seek(pos)
+                return
+
+
+class MHMailbox:
+    def __init__(self, dirname, factory=rfc822.Message):
+        import re
+        pat = re.compile('^[1-9][0-9]*$')
+        self.dirname = dirname
+        # the three following lines could be combined into:
+        # list = map(long, filter(pat.match, os.listdir(self.dirname)))
+        list = os.listdir(self.dirname)
+        list = filter(pat.match, list)
+        list = map(long, list)
+        list.sort()
+        # This only works in Python 1.6 or later;
+        # before that str() added 'L':
+        self.boxes = map(str, list)
+        self.factory = factory
+
+    def __iter__(self):
+        return iter(self.next, None)
+
+    def next(self):
+        if not self.boxes:
+            return None
+        fn = self.boxes[0]
+        del self.boxes[0]
+        fp = open(os.path.join(self.dirname, fn))
+        return self.factory(fp)
+
+
+class Maildir:
+    # Qmail directory mailbox
+
+    def __init__(self, dirname, factory=rfc822.Message):
+        self.dirname = dirname
+        self.factory = factory
+
+        # check for new mail
+        newdir = os.path.join(self.dirname, 'new')
+        # Dot-files are maildir bookkeeping, not messages.
+        boxes = [os.path.join(newdir, f)
+                 for f in os.listdir(newdir) if f[0] != '.']
+
+        # Now check for current mail in this maildir
+        curdir = os.path.join(self.dirname, 'cur')
+        boxes += [os.path.join(curdir, f)
+                  for f in os.listdir(curdir) if f[0] != '.']
+
+        self.boxes = boxes
+
+    def __iter__(self):
+        # next() returns None when the mailbox is exhausted.
+        return iter(self.next, None)
+
+    def next(self):
+        """Return the next message object, or None when exhausted."""
+        if not self.boxes:
+            return None
+        fn = self.boxes[0]
+        del self.boxes[0]
+        fp = open(fn)
+        return self.factory(fp)
+
+
+class BabylMailbox(_Mailbox):
+    """Babyl (Rmail) mailbox: messages start after an '*** EOOH ***' line
+    and end before a ^_^L line."""
+
+    def _search_start(self):
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                raise EOFError
+            if line == '*** EOOH ***\n':
+                return
+
+    def _search_end(self):
+        while 1:
+            pos = self.fp.tell()
+            line = self.fp.readline()
+            if not line:
+                return
+            if line == '\037\014\n':
+                self.fp.seek(pos)
+                return
+
+
+def _test():
+    """Command-line smoke test: locate a mailbox, count messages,
+    optionally print one message's body."""
+    import sys
+
+    args = sys.argv[1:]
+    if not args:
+        # Fall back on conventional environment variables.
+        for key in 'MAILDIR', 'MAIL', 'LOGNAME', 'USER':
+            if os.environ.has_key(key):
+                mbox = os.environ[key]
+                break
+        else:
+            # NOTE(review): message omits $MAILDIR, which is also checked.
+            print "$MAIL, $LOGNAME nor $USER set -- who are you?"
+            return
+    else:
+        mbox = args[0]
+    if mbox[:1] == '+':
+        # '+folder' is MH shorthand for ~/Mail/folder.
+        mbox = os.environ['HOME'] + '/Mail/' + mbox[1:]
+    elif not '/' in mbox:
+        mbox = '/usr/mail/' + mbox
+    # Pick the mailbox class from the on-disk layout.
+    if os.path.isdir(mbox):
+        if os.path.isdir(os.path.join(mbox, 'cur')):
+            mb = Maildir(mbox)
+        else:
+            mb = MHMailbox(mbox)
+    else:
+        fp = open(mbox, 'r')
+        mb = PortableUnixMailbox(fp)
+
+    msgs = []
+    while 1:
+        msg = mb.next()
+        if msg is None:
+            break
+        msgs.append(msg)
+        if len(args) <= 1:
+            msg.fp = None
+    if len(args) > 1:
+        num = int(args[1])
+        print 'Message %d body:'%num
+        msg = msgs[num-1]
+        msg.rewindbody()
+        sys.stdout.write(msg.fp.read())
+    else:
+        print 'Mailbox',mbox,'has',len(msgs),'messages:'
+        for msg in msgs:
+            f = msg.getheader('from') or ""
+            s = msg.getheader('subject') or ""
+            d = msg.getheader('date') or ""
+            print '-%20.20s   %20.20s   %-30.30s'%(f, d[5:], s)
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/mailcap.py b/lib-python/2.2/mailcap.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/mailcap.py
@@ -0,0 +1,255 @@
+"""Mailcap file handling.  See RFC 1524."""
+
+import os
+
+__all__ = ["getcaps","findmatch"]
+
+# Part 1: top-level interface.
+
+def getcaps():
+    """Return a dictionary containing the mailcap database.
+
+    The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
+    to a list of dictionaries corresponding to mailcap entries.  The list
+    collects all the entries for that MIME type from all available mailcap
+    files.  Each dictionary contains key-value pairs for that MIME type,
+    where the viewing command is stored with the key "view".
+
+    """
+    caps = {}
+    for mailcap in listmailcapfiles():
+        try:
+            fp = open(mailcap, 'r')
+        except IOError:
+            # Missing/unreadable mailcap file: skip it.
+            continue
+        morecaps = readmailcapfile(fp)
+        fp.close()
+        # Merge, preserving the order files were read in so that
+        # earlier files take precedence.
+        for key in morecaps.keys():
+            if not caps.has_key(key):
+                caps[key] = morecaps[key]
+            else:
+                caps[key] = caps[key] + morecaps[key]
+    return caps
+
+def listmailcapfiles():
+    """Return a list of all mailcap files found on the system."""
+    # XXX Actually, this is Unix-specific
+    if os.environ.has_key('MAILCAPS'):
+        str = os.environ['MAILCAPS']
+        mailcaps = str.split(':')
+    else:
+        if os.environ.has_key('HOME'):
+            home = os.environ['HOME']
+        else:
+            # Don't bother with getpwuid()
+            home = '.' # Last resort
+        mailcaps = [home + '/.mailcap', '/etc/mailcap',
+                '/usr/etc/mailcap', '/usr/local/etc/mailcap']
+    return mailcaps
+
+
+# Part 2: the parser.
+
+def readmailcapfile(fp):
+    """Read a mailcap file and return a dictionary keyed by MIME type.
+
+    Each MIME type is mapped to an entry consisting of a list of
+    dictionaries; the list will contain more than one such dictionary
+    if a given MIME type appears more than once in the mailcap file.
+    Each dictionary contains key-value pairs for that MIME type, where
+    the viewing command is stored with the key "view".
+    """
+    caps = {}
+    while 1:
+        line = fp.readline()
+        if not line: break
+        # Ignore comments and blank lines
+        if line[0] == '#' or line.strip() == '':
+            continue
+        nextline = line
+        # Join continuation lines
+        while nextline[-2:] == '\\\n':
+            nextline = fp.readline()
+            # EOF in the middle of a continuation: treat it as ended.
+            if not nextline: nextline = '\n'
+            line = line[:-2] + nextline
+        # Parse the line
+        key, fields = parseline(line)
+        if not (key and fields):
+            continue
+        # Normalize the key
+        types = key.split('/')
+        for j in range(len(types)):
+            types[j] = types[j].strip()
+        key = '/'.join(types).lower()
+        # Update the database
+        if caps.has_key(key):
+            caps[key].append(fields)
+        else:
+            caps[key] = [fields]
+    return caps
+
+def parseline(line):
+    """Parse one entry in a mailcap file and return a dictionary.
+
+    The viewing command is stored as the value with the key "view",
+    and the rest of the fields produce key-value pairs in the dict.
+    Returns (None, None) for lines with fewer than two fields.
+    """
+    fields = []
+    i, n = 0, len(line)
+    while i < n:
+        field, i = parsefield(line, i, n)
+        fields.append(field)
+        i = i+1 # Skip semicolon
+    if len(fields) < 2:
+        return None, None
+    # First field is the MIME type, second is the view command; the
+    # remaining fields are flags or key=value options.
+    key, view, rest = fields[0], fields[1], fields[2:]
+    fields = {'view': view}
+    for field in rest:
+        i = field.find('=')
+        if i < 0:
+            # A bare flag maps to the empty string.
+            fkey = field
+            fvalue = ""
+        else:
+            fkey = field[:i].strip()
+            fvalue = field[i+1:].strip()
+        if fields.has_key(fkey):
+            # Ignore it (first occurrence wins)
+            pass
+        else:
+            fields[fkey] = fvalue
+    return key, fields
+
+def parsefield(line, i, n):
+    """Separate one key-value pair in a mailcap entry.
+
+    Scans from position i up to n; returns (stripped field text, index of
+    the terminating ';' or n).
+    """
+    start = i
+    while i < n:
+        c = line[i]
+        if c == ';':
+            break
+        elif c == '\\':
+            # Backslash escapes the next character, including ';'.
+            i = i+2
+        else:
+            i = i+1
+    return line[start:i].strip(), i
+
+
+# Part 3: using the database.
+
+def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
+    """Find a match for a mailcap entry.
+
+    Return a tuple containing the command line, and the mailcap entry
+    used; (None, None) if no match is found.  This may invoke the
+    'test' command of several matching entries before deciding which
+    entry to use.
+
+    """
+    entries = lookup(caps, MIMEtype, key)
+    # XXX This code should somehow check for the needsterminal flag.
+    for e in entries:
+        if e.has_key('test'):
+            test = subst(e['test'], filename, plist)
+            if test and os.system(test) != 0:
+                continue
+        command = subst(e[key], MIMEtype, filename, plist)
+        return command, e
+    return None, None
+
+def lookup(caps, MIMEtype, key=None):
+    """Return all entries for MIMEtype (exact, then 'maintype/*' wildcard),
+    optionally restricted to entries that define 'key'."""
+    entries = []
+    if caps.has_key(MIMEtype):
+        entries = entries + caps[MIMEtype]
+    MIMEtypes = MIMEtype.split('/')
+    MIMEtype = MIMEtypes[0] + '/*'
+    if caps.has_key(MIMEtype):
+        entries = entries + caps[MIMEtype]
+    if key is not None:
+        entries = filter(lambda e, key=key: e.has_key(key), entries)
+    return entries
+
+def subst(field, MIMEtype, filename, plist=[]):
+    """Expand %-escapes in a mailcap command template.
+
+    %s -> filename, %t -> MIMEtype, %{param} -> value from plist,
+    %% -> literal %; backslash escapes the following character.
+    """
+    # XXX Actually, this is Unix-specific
+    res = ''
+    i, n = 0, len(field)
+    while i < n:
+        c = field[i]; i = i+1
+        if c != '%':
+            if c == '\\':
+                c = field[i:i+1]; i = i+1
+            res = res + c
+        else:
+            c = field[i]; i = i+1
+            if c == '%':
+                res = res + c
+            elif c == 's':
+                res = res + filename
+            elif c == 't':
+                res = res + MIMEtype
+            elif c == '{':
+                # Named parameter: %{name}, looked up in plist.
+                start = i
+                while i < n and field[i] != '}':
+                    i = i+1
+                name = field[start:i]
+                i = i+1
+                res = res + findparam(name, plist)
+            # XXX To do:
+            # %n == number of parts if type is multipart/*
+            # %F == list of alternating type and filename for parts
+            else:
+                # Unknown escape: pass it through unchanged.
+                res = res + '%' + c
+    return res
+
+def findparam(name, plist):
+    """Return the value of parameter 'name' from a list of 'key=value'
+    strings, matching the key case-insensitively; '' if absent."""
+    name = name.lower() + '='
+    n = len(name)
+    for p in plist:
+        if p[:n].lower() == name:
+            return p[n:]
+    return ''
+
+
+# Part 4: test program.
+
+def test():
+    import sys
+    caps = getcaps()
+    if not sys.argv[1:]:
+        show(caps)
+        return
+    for i in range(1, len(sys.argv), 2):
+        args = sys.argv[i:i+2]
+        if len(args) < 2:
+            print "usage: mailcap [MIMEtype file] ..."
+            return
+        MIMEtype = args[0]
+        file = args[1]
+        command, e = findmatch(caps, MIMEtype, 'view', file)
+        if not command:
+            print "No viewer found for", type
+        else:
+            print "Executing:", command
+            sts = os.system(command)
+            if sts:
+                print "Exit status:", sts
+
+def show(caps):
+    """Pretty-print the mailcap search path and every database entry."""
+    print "Mailcap files:"
+    for fn in listmailcapfiles(): print "\t" + fn
+    print
+    if not caps: caps = getcaps()
+    print "Mailcap entries:"
+    print
+    # Sorted for stable, readable output.
+    ckeys = caps.keys()
+    ckeys.sort()
+    for type in ckeys:
+        print type
+        entries = caps[type]
+        for e in entries:
+            keys = e.keys()
+            keys.sort()
+            for k in keys:
+                print "  %-15s" % k, e[k]
+            print
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/markupbase.py b/lib-python/2.2/markupbase.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/markupbase.py
@@ -0,0 +1,317 @@
+"""Shared support for scanning document type declarations in HTML and XHTML."""
+
+import re
+import string
+
+# Pre-bound match functions: a declaration name token, and a quoted
+# string literal (single- or double-quoted), each eating trailing space.
+_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
+_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
+
+# 're' was only needed to build the matchers above; delete it so it is
+# not exposed as an attribute of this module.
+del re
+
+class ParserBase:
+    """Parser base class which provides some common support methods used
+    by the SGML/HTML and XHTML parsers.
+
+    Subclasses must provide self.rawdata (the input buffer) and override
+    error(); the internal methods here use the convention of returning -1
+    to mean "buffer ends inside this construct -- feed me more data".
+    """
+
+    def __init__(self):
+        if self.__class__ is ParserBase:
+            raise RuntimeError(
+                "markupbase.ParserBase must be subclassed")
+
+    def error(self, message):
+        raise NotImplementedError(
+            "subclasses of ParserBase must override error()")
+
+    def reset(self):
+        # lineno is 1-based; offset is the 0-based column on that line.
+        self.lineno = 1
+        self.offset = 0
+
+    def getpos(self):
+        """Return current line number and offset."""
+        return self.lineno, self.offset
+
+    # Internal -- update line number and offset.  This should be
+    # called for each piece of data exactly once, in order -- in other
+    # words the concatenation of all the input strings to this
+    # function should be exactly the entire input.
+    def updatepos(self, i, j):
+        if i >= j:
+            return j
+        rawdata = self.rawdata
+        nlines = string.count(rawdata, "\n", i, j)
+        if nlines:
+            self.lineno = self.lineno + nlines
+            pos = string.rindex(rawdata, "\n", i, j) # Should not fail
+            self.offset = j-(pos+1)
+        else:
+            self.offset = self.offset + j-i
+        return j
+
+    _decl_otherchars = ''
+
+    # Internal -- parse declaration (for use by subclasses).
+    # Returns the index just past the closing '>' on success, -1 when the
+    # buffer ends inside the declaration; calls self.error() on bad input.
+    # handle_decl()/unknown_decl() are the subclass hooks that receive the
+    # declaration text.
+    def parse_declaration(self, i):
+        # This is some sort of declaration; in "HTML as
+        # deployed," this should only be the document type
+        # declaration ("<!DOCTYPE html...>").
+        rawdata = self.rawdata
+        j = i + 2
+        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
+        if rawdata[j:j+1] in ("-", ""):
+            # Start of comment followed by buffer boundary,
+            # or just a buffer boundary.
+            return -1
+        # in practice, this should look like: ((name|stringlit) S*)+ '>'
+        n = len(rawdata)
+        decltype, j = self._scan_name(j, i)
+        if j < 0:
+            return j
+        if decltype == "doctype":
+            self._decl_otherchars = ''
+        while j < n:
+            c = rawdata[j]
+            if c == ">":
+                # end of declaration syntax
+                data = rawdata[i+2:j]
+                if decltype == "doctype":
+                    self.handle_decl(data)
+                else:
+                    self.unknown_decl(data)
+                return j + 1
+            if c in "\"'":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1 # incomplete
+                j = m.end()
+            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+                name, j = self._scan_name(j, i)
+            elif c in self._decl_otherchars:
+                j = j + 1
+            elif c == "[":
+                if decltype == "doctype":
+                    j = self._parse_doctype_subset(j + 1, i)
+                else:
+                    self.error("unexpected '[' char in declaration")
+            else:
+                self.error(
+                    "unexpected %s char in declaration" % `rawdata[j]`)
+            if j < 0:
+                return j
+        return -1 # incomplete
+
+    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
+    # returning the index just past any whitespace following the trailing ']'.
+    def _parse_doctype_subset(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        j = i
+        while j < n:
+            c = rawdata[j]
+            if c == "<":
+                s = rawdata[j:j+2]
+                if s == "<":
+                    # end of buffer; incomplete
+                    return -1
+                if s != "<!":
+                    self.updatepos(declstartpos, j + 1)
+                    self.error("unexpected char in internal subset (in %s)"
+                               % `s`)
+                if (j + 2) == n:
+                    # end of buffer; incomplete
+                    return -1
+                if (j + 4) > n:
+                    # end of buffer; incomplete
+                    return -1
+                if rawdata[j:j+4] == "<!--":
+                    # parse_comment() is expected from the subclass
+                    # (e.g. the SGML/HTML parser) -- not defined here.
+                    j = self.parse_comment(j, report=0)
+                    if j < 0:
+                        return j
+                    continue
+                name, j = self._scan_name(j + 2, declstartpos)
+                if j == -1:
+                    return -1
+                if name not in ("attlist", "element", "entity", "notation"):
+                    self.updatepos(declstartpos, j + 2)
+                    self.error(
+                        "unknown declaration %s in internal subset" % `name`)
+                # handle the individual names
+                meth = getattr(self, "_parse_doctype_" + name)
+                j = meth(j, declstartpos)
+                if j < 0:
+                    return j
+            elif c == "%":
+                # parameter entity reference
+                if (j + 1) == n:
+                    # end of buffer; incomplete
+                    return -1
+                s, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                if rawdata[j] == ";":
+                    j = j + 1
+            elif c == "]":
+                j = j + 1
+                while j < n and rawdata[j] in string.whitespace:
+                    j = j + 1
+                if j < n:
+                    if rawdata[j] == ">":
+                        return j
+                    self.updatepos(declstartpos, j)
+                    self.error("unexpected char after internal subset")
+                else:
+                    return -1
+            elif c in string.whitespace:
+                j = j + 1
+            else:
+                self.updatepos(declstartpos, j)
+                self.error("unexpected char %s in internal subset" % `c`)
+        # end of buffer reached
+        return -1
+
+    # Internal -- scan past <!ELEMENT declarations
+    def _parse_doctype_element(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j == -1:
+            return -1
+        # style content model; just skip until '>'
+        rawdata = self.rawdata
+        if '>' in rawdata[j:]:
+            return string.find(rawdata, ">", j) + 1
+        return -1
+
+    # Internal -- scan past <!ATTLIST declarations
+    def _parse_doctype_attlist(self, i, declstartpos):
+        rawdata = self.rawdata
+        name, j = self._scan_name(i, declstartpos)
+        c = rawdata[j:j+1]
+        if c == "":
+            return -1
+        if c == ">":
+            return j + 1
+        while 1:
+            # scan a series of attribute descriptions; simplified:
+            #   name type [value] [#constraint]
+            name, j = self._scan_name(j, declstartpos)
+            if j < 0:
+                return j
+            c = rawdata[j:j+1]
+            if c == "":
+                return -1
+            if c == "(":
+                # an enumerated type; look for ')'
+                if ")" in rawdata[j:]:
+                    j = string.find(rawdata, ")", j) + 1
+                else:
+                    return -1
+                while rawdata[j:j+1] in string.whitespace:
+                    j = j + 1
+                if not rawdata[j:]:
+                    # end of buffer, incomplete
+                    return -1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+            c = rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == "#":
+                if rawdata[j:] == "#":
+                    # end of buffer
+                    return -1
+                name, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == '>':
+                # all done
+                return j + 1
+
+    # Internal -- scan past <!NOTATION declarations
+    def _parse_doctype_notation(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j < 0:
+            return j
+        rawdata = self.rawdata
+        while 1:
+            c = rawdata[j:j+1]
+            if not c:
+                # end of buffer; incomplete
+                return -1
+            if c == '>':
+                return j + 1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1
+                j = m.end()
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan past <!ENTITY declarations
+    def _parse_doctype_entity(self, i, declstartpos):
+        rawdata = self.rawdata
+        if rawdata[i:i+1] == "%":
+            # parameter entity declaration: skip whitespace after '%'
+            j = i + 1
+            while 1:
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+                if c in string.whitespace:
+                    j = j + 1
+                else:
+                    break
+        else:
+            j = i
+        name, j = self._scan_name(j, declstartpos)
+        if j < 0:
+            return j
+        while 1:
+            c = self.rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1    # incomplete
+            elif c == ">":
+                return j + 1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan a name token; return (lowercased-name, end-position),
+    # or (None, -1) if we've reached the end of the buffer.
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = _declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return string.lower(name), m.end()
+        else:
+            self.updatepos(declstartpos, i)
+            self.error("expected name token")
+
+    # To be overridden -- handlers for unknown objects
+    def unknown_decl(self, data):
+        pass
diff --git a/lib-python/2.2/mhlib.py b/lib-python/2.2/mhlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/mhlib.py
@@ -0,0 +1,1003 @@
+"""MH interface -- purely object-oriented (well, almost)
+
+Executive summary:
+
+import mhlib
+
+mh = mhlib.MH()         # use default mailbox directory and profile
+mh = mhlib.MH(mailbox)  # override mailbox location (default from profile)
+mh = mhlib.MH(mailbox, profile) # override mailbox and profile
+
+mh.error(format, ...)   # print error message -- can be overridden
+s = mh.getprofile(key)  # profile entry (None if not set)
+path = mh.getpath()     # mailbox pathname
+name = mh.getcontext()  # name of current folder
+mh.setcontext(name)     # set name of current folder
+
+list = mh.listfolders() # names of top-level folders
+list = mh.listallfolders() # names of all folders, including subfolders
+list = mh.listsubfolders(name) # direct subfolders of given folder
+list = mh.listallsubfolders(name) # all subfolders of given folder
+
+mh.makefolder(name)     # create new folder
+mh.deletefolder(name)   # delete folder -- must have no subfolders
+
+f = mh.openfolder(name) # new open folder object
+
+f.error(format, ...)    # same as mh.error(format, ...)
+path = f.getfullname()  # folder's full pathname
+path = f.getsequencesfilename() # full pathname of folder's sequences file
+path = f.getmessagefilename(n)  # full pathname of message n in folder
+
+list = f.listmessages() # list of messages in folder (as numbers)
+n = f.getcurrent()      # get current message
+f.setcurrent(n)         # set current message
+list = f.parsesequence(seq)     # parse msgs syntax into list of messages
+n = f.getlast()         # get last message (0 if no messages)
+f.setlast(n)            # set last message (internal use only)
+
+dict = f.getsequences() # dictionary of sequences in folder {name: list}
+f.putsequences(dict)    # write sequences back to folder
+
+f.createmessage(n, fp)  # add message from file fp as number n
+f.removemessages(list)  # remove messages in list from folder
+f.refilemessages(list, tofolder) # move messages in list to other folder
+f.movemessage(n, tofolder, ton)  # move one message to a given destination
+f.copymessage(n, tofolder, ton)  # copy one message to a given destination
+
+m = f.openmessage(n)    # new open message object (costs a file descriptor)
+m is a derived class of mimetools.Message(rfc822.Message), with:
+s = m.getheadertext()   # text of message's headers
+s = m.getheadertext(pred) # text of message's headers, filtered by pred
+s = m.getbodytext()     # text of message's body, decoded
+s = m.getbodytext(0)    # text of message's body, not decoded
+"""
+
+# XXX To do, functionality:
+# - annotate messages
+# - send messages
+#
+# XXX To do, organization:
+# - move IntSet to separate file
+# - move most Message functionality to module mimetools
+
+
+# Customizable defaults
+
+MH_PROFILE = '~/.mh_profile'
+PATH = '~/Mail'
+MH_SEQUENCES = '.mh_sequences'
+FOLDER_PROTECT = 0700
+# NOTE: FOLDER_PROTECT is a Python 2 octal literal (mode rwx------).
+
+
+# Imported modules
+
+import os
+import sys
+from stat import ST_NLINK
+import re
+import mimetools
+import multifile
+import shutil
+from bisect import bisect
+
+__all__ = ["MH","Error","Folder","Message"]
+
+# Exported constants
+
+# Base exception for all errors raised by this module.
+class Error(Exception):
+    pass
+
+
+class MH:
+    """Class representing a particular collection of folders.
+    Optional constructor arguments are the pathname for the directory
+    containing the collection, and the MH profile to use.
+    If either is omitted or empty a default is used; the default
+    directory is taken from the MH profile if it is specified there."""
+
+    def __init__(self, path = None, profile = None):
+        """Constructor."""
+        if not profile: profile = MH_PROFILE
+        self.profile = os.path.expanduser(profile)
+        if not path: path = self.getprofile('Path')
+        if not path: path = PATH
+        # A relative path from the profile is taken relative to $HOME.
+        if not os.path.isabs(path) and path[0] != '~':
+            path = os.path.join('~', path)
+        path = os.path.expanduser(path)
+        if not os.path.isdir(path): raise Error, 'MH() path not found'
+        self.path = path
+
+    def __repr__(self):
+        """String representation."""
+        return 'MH(%s, %s)' % (`self.path`, `self.profile`)
+
+    def error(self, msg, *args):
+        """Routine to print an error.  May be overridden by a derived class."""
+        sys.stderr.write('MH error: %s\n' % (msg % args))
+
+    def getprofile(self, key):
+        """Return a profile entry, None if not found."""
+        # pickline() is a module-level helper defined later in this file.
+        return pickline(self.profile, key)
+
+    def getpath(self):
+        """Return the path (the name of the collection's directory)."""
+        return self.path
+
+    def getcontext(self):
+        """Return the name of the current folder ('inbox' by default)."""
+        context = pickline(os.path.join(self.getpath(), 'context'),
+                  'Current-Folder')
+        if not context: context = 'inbox'
+        return context
+
+    def setcontext(self, context):
+        """Set the name of the current folder."""
+        # Overwrites the 'context' file in the collection directory.
+        fn = os.path.join(self.getpath(), 'context')
+        f = open(fn, "w")
+        f.write("Current-Folder: %s\n" % context)
+        f.close()
+
+    def listfolders(self):
+        """Return the names of the top-level folders."""
+        folders = []
+        path = self.getpath()
+        for name in os.listdir(path):
+            fullname = os.path.join(path, name)
+            if os.path.isdir(fullname):
+                folders.append(name)
+        folders.sort()
+        return folders
+
+    def listsubfolders(self, name):
+        """Return the names of the subfolders in a given folder
+        (prefixed with the given folder name)."""
+        fullname = os.path.join(self.path, name)
+        # Get the link count so we can avoid listing folders
+        # that have no subfolders.
+        # (On Unix a directory's link count is 2 + number of subdirs.)
+        st = os.stat(fullname)
+        nlinks = st[ST_NLINK]
+        if nlinks <= 2:
+            return []
+        subfolders = []
+        subnames = os.listdir(fullname)
+        for subname in subnames:
+            fullsubname = os.path.join(fullname, subname)
+            if os.path.isdir(fullsubname):
+                name_subname = os.path.join(name, subname)
+                subfolders.append(name_subname)
+                # Stop looking for subfolders when
+                # we've seen them all
+                nlinks = nlinks - 1
+                if nlinks <= 2:
+                    break
+        subfolders.sort()
+        return subfolders
+
+    def listallfolders(self):
+        """Return the names of all folders and subfolders, recursively."""
+        return self.listallsubfolders('')
+
+    def listallsubfolders(self, name):
+        """Return the names of subfolders in a given folder, recursively."""
+        fullname = os.path.join(self.path, name)
+        # Get the link count so we can avoid listing folders
+        # that have no subfolders.
+        st = os.stat(fullname)
+        nlinks = st[ST_NLINK]
+        if nlinks <= 2:
+            return []
+        subfolders = []
+        subnames = os.listdir(fullname)
+        for subname in subnames:
+            # Skip backup files (leading comma) and message files (numeric).
+            if subname[0] == ',' or isnumeric(subname): continue
+            fullsubname = os.path.join(fullname, subname)
+            if os.path.isdir(fullsubname):
+                name_subname = os.path.join(name, subname)
+                subfolders.append(name_subname)
+                # Don't recurse through symlinks (avoids cycles).
+                if not os.path.islink(fullsubname):
+                    subsubfolders = self.listallsubfolders(
+                              name_subname)
+                    subfolders = subfolders + subsubfolders
+                # Stop looking for subfolders when
+                # we've seen them all
+                nlinks = nlinks - 1
+                if nlinks <= 2:
+                    break
+        subfolders.sort()
+        return subfolders
+
+    def openfolder(self, name):
+        """Return a new Folder object for the named folder."""
+        return Folder(self, name)
+
+    def makefolder(self, name):
+        """Create a new folder (or raise os.error if it cannot be created)."""
+        protect = pickline(self.profile, 'Folder-Protect')
+        if protect and isnumeric(protect):
+            # Profile value is an octal mode string, e.g. '700'.
+            mode = int(protect, 8)
+        else:
+            mode = FOLDER_PROTECT
+        os.mkdir(os.path.join(self.getpath(), name), mode)
+
+    def deletefolder(self, name):
+        """Delete a folder.  This removes files in the folder but not
+        subdirectories.  Raise os.error if deleting the folder itself fails."""
+        fullname = os.path.join(self.getpath(), name)
+        for subname in os.listdir(fullname):
+            fullsubname = os.path.join(fullname, subname)
+            try:
+                os.unlink(fullsubname)
+            except os.error:
+                # Best effort: report and keep going so rmdir can be tried.
+                self.error('%s not deleted, continuing...' %
+                          fullsubname)
+        os.rmdir(fullname)
+
+
+numericprog = re.compile('^[1-9][0-9]*$')
+def isnumeric(str):
+    # True iff 'str' is a valid MH message number: all digits, no
+    # leading zero.  (Parameter name shadows the builtin 'str'; kept
+    # as-is for compatibility with existing callers.)
+    return numericprog.match(str) is not None
+
+class Folder:
+    """Class representing a particular folder."""
+
+    def __init__(self, mh, name):
+        """Constructor.  'mh' is the owning MH collection; 'name' is the
+        folder name relative to mh.path.  Raises Error if the folder's
+        directory does not exist."""
+        self.mh = mh
+        self.name = name
+        if not os.path.isdir(self.getfullname()):
+            raise Error, 'no folder %s' % name
+
+    def __repr__(self):
+        """String representation."""
+        return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
+
+    def error(self, *args):
+        """Error message handler -- delegates to the owning MH object."""
+        apply(self.mh.error, args)
+
+    def getfullname(self):
+        """Return the full pathname of the folder."""
+        return os.path.join(self.mh.path, self.name)
+
+    def getsequencesfilename(self):
+        """Return the full pathname of the folder's sequences file."""
+        return os.path.join(self.getfullname(), MH_SEQUENCES)
+
+    def getmessagefilename(self, n):
+        """Return the full pathname of a message in the folder.
+        'n' may be an int or a string (e.g. ',12' for a backup copy)."""
+        return os.path.join(self.getfullname(), str(n))
+
+    def listsubfolders(self):
+        """Return list of direct subfolders."""
+        return self.mh.listsubfolders(self.name)
+
+    def listallsubfolders(self):
+        """Return list of all subfolders."""
+        return self.mh.listallsubfolders(self.name)
+
+    def listmessages(self):
+        """Return the list of messages currently present in the folder.
+        As a side effect, set self.last to the last message (or 0)."""
+        messages = []
+        # Bind the hot callables to locals for speed in the listdir loop.
+        match = numericprog.match
+        append = messages.append
+        for name in os.listdir(self.getfullname()):
+            # Only purely-numeric filenames are messages.
+            if match(name):
+                append(name)
+        messages = map(int, messages)
+        messages.sort()
+        if messages:
+            self.last = messages[-1]
+        else:
+            self.last = 0
+        return messages
+
+    def getsequences(self):
+        """Return the set of sequences for the folder, as a dict
+        {name: sorted list of message numbers}.  Missing sequences file
+        yields an empty dict."""
+        sequences = {}
+        fullname = self.getsequencesfilename()
+        try:
+            f = open(fullname, 'r')
+        except IOError:
+            return sequences
+        while 1:
+            line = f.readline()
+            if not line: break
+            # Each line has the form "name: n m-p ..."
+            fields = line.split(':')
+            if len(fields) != 2:
+                self.error('bad sequence in %s: %s' %
+                          (fullname, line.strip()))
+            key = fields[0].strip()
+            # IntSet is a module-level helper defined later in this file.
+            value = IntSet(fields[1].strip(), ' ').tolist()
+            sequences[key] = value
+        return sequences
+
+    def putsequences(self, sequences):
+        """Write the set of sequences back to the folder.
+        If 'sequences' is empty the sequences file is removed instead."""
+        fullname = self.getsequencesfilename()
+        # Open lazily so an empty dict never creates the file.
+        f = None
+        for key in sequences.keys():
+            s = IntSet('', ' ')
+            s.fromlist(sequences[key])
+            if not f: f = open(fullname, 'w')
+            f.write('%s: %s\n' % (key, s.tostring()))
+        if not f:
+            try:
+                os.unlink(fullname)
+            except os.error:
+                pass
+        else:
+            f.close()
+
+    def getcurrent(self):
+        """Return the current message.  Raise Error when there is none."""
+        seqs = self.getsequences()
+        try:
+            # max() raises ValueError on an empty 'cur' sequence.
+            return max(seqs['cur'])
+        except (ValueError, KeyError):
+            raise Error, "no cur message"
+
+    def setcurrent(self, n):
+        """Set the current message (rewrites the 'cur' line in the
+        sequences file in place)."""
+        updateline(self.getsequencesfilename(), 'cur', str(n), 0)
+
+    def parsesequence(self, seq):
+        """Parse an MH sequence specification into a message list.
+        Attempt to mimic mh-sequence(5) as close as possible.
+        Also attempt to mimic observed behavior regarding which
+        conditions cause which error messages."""
+        # XXX Still not complete (see mh-format(5)).
+        # Missing are:
+        # - 'prev', 'next' as count
+        # - Sequence-Negation option
+        all = self.listmessages()
+        # Observed behavior: test for empty folder is done first
+        if not all:
+            raise Error, "no messages in %s" % self.name
+        # Common case first: all is frequently the default
+        if seq == 'all':
+            return all
+        # Test for X:Y before X-Y because 'seq:-n' matches both
+        i = seq.find(':')
+        if i >= 0:
+            # Anchor plus count, e.g. 'cur:3', 'last:-2'.
+            head, dir, tail = seq[:i], '', seq[i+1:]
+            if tail[:1] in '-+':
+                dir, tail = tail[:1], tail[1:]
+            if not isnumeric(tail):
+                raise Error, "bad message list %s" % seq
+            try:
+                count = int(tail)
+            except (ValueError, OverflowError):
+                # Can't use sys.maxint because of i+count below
+                count = len(all)
+            try:
+                anchor = self._parseindex(head, all)
+            except Error, msg:
+                # Not an index -- maybe a named sequence.
+                seqs = self.getsequences()
+                if not seqs.has_key(head):
+                    if not msg:
+                        msg = "bad message list %s" % seq
+                    raise Error, msg, sys.exc_info()[2]
+                msgs = seqs[head]
+                if not msgs:
+                    raise Error, "sequence %s empty" % head
+                if dir == '-':
+                    return msgs[-count:]
+                else:
+                    return msgs[:count]
+            else:
+                if not dir:
+                    # 'prev' and 'last' count backwards by default.
+                    if head in ('prev', 'last'):
+                        dir = '-'
+                if dir == '-':
+                    i = bisect(all, anchor)
+                    return all[max(0, i-count):i]
+                else:
+                    i = bisect(all, anchor-1)
+                    return all[i:i+count]
+        # Test for X-Y next
+        i = seq.find('-')
+        if i >= 0:
+            # Inclusive range of message numbers.
+            begin = self._parseindex(seq[:i], all)
+            end = self._parseindex(seq[i+1:], all)
+            i = bisect(all, begin-1)
+            j = bisect(all, end)
+            r = all[i:j]
+            if not r:
+                raise Error, "bad message list %s" % seq
+            return r
+        # Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
+        try:
+            n = self._parseindex(seq, all)
+        except Error, msg:
+            seqs = self.getsequences()
+            if not seqs.has_key(seq):
+                if not msg:
+                    msg = "bad message list %s" % seq
+                raise Error, msg
+            return seqs[seq]
+        else:
+            if n not in all:
+                if isnumeric(seq):
+                    raise Error, "message %d doesn't exist" % n
+                else:
+                    raise Error, "no %s message" % seq
+            else:
+                return [n]
+
+    def _parseindex(self, seq, all):
+        """Internal: parse a message number (or cur, first, etc.) into an
+        int, given 'all' as the sorted list of existing message numbers.
+        Raises Error(None) for anything unrecognized, so callers can fall
+        back to named-sequence lookup."""
+        if isnumeric(seq):
+            try:
+                return int(seq)
+            except (OverflowError, ValueError):
+                # Too large for an int -- clamp to the maximum.
+                return sys.maxint
+        if seq in ('cur', '.'):
+            return self.getcurrent()
+        if seq == 'first':
+            return all[0]
+        if seq == 'last':
+            return all[-1]
+        if seq == 'next':
+            n = self.getcurrent()
+            i = bisect(all, n)
+            try:
+                return all[i]
+            except IndexError:
+                raise Error, "no next message"
+        if seq == 'prev':
+            n = self.getcurrent()
+            i = bisect(all, n-1)
+            if i == 0:
+                raise Error, "no prev message"
+            try:
+                return all[i-1]
+            except IndexError:
+                raise Error, "no prev message"
+        raise Error, None
+
+    def openmessage(self, n):
+        """Open a message -- returns a Message object."""
+        return Message(self, n)
+
+    def removemessages(self, list):
+        """Remove one or more messages -- may raise os.error.
+        Messages are not unlinked but renamed to a leading-comma backup
+        name, MH style."""
+        errors = []
+        deleted = []
+        for n in list:
+            path = self.getmessagefilename(n)
+            commapath = self.getmessagefilename(',' + str(n))
+            # Drop any previous backup of this message first.
+            try:
+                os.unlink(commapath)
+            except os.error:
+                pass
+            try:
+                os.rename(path, commapath)
+            except os.error, msg:
+                errors.append(msg)
+            else:
+                deleted.append(n)
+        if deleted:
+            self.removefromallsequences(deleted)
+        if errors:
+            if len(errors) == 1:
+                raise os.error, errors[0]
+            else:
+                raise os.error, ('multiple errors:', errors)
+
+    def refilemessages(self, list, tofolder, keepsequences=0):
+        """Refile one or more messages -- may raise os.error.
+        'tofolder' is an open folder object.  When 'keepsequences' is
+        true, sequence membership is carried over to the destination."""
+        errors = []
+        refiled = {}
+        for n in list:
+            # Destination number: one past the target folder's last.
+            ton = tofolder.getlast() + 1
+            path = self.getmessagefilename(n)
+            topath = tofolder.getmessagefilename(ton)
+            try:
+                os.rename(path, topath)
+            except os.error:
+                # Try copying
+                # (rename fails across filesystems).
+                try:
+                    shutil.copy2(path, topath)
+                    os.unlink(path)
+                except (IOError, os.error), msg:
+                    errors.append(msg)
+                    # Clean up a possibly partial copy.
+                    try:
+                        os.unlink(topath)
+                    except os.error:
+                        pass
+                    continue
+            tofolder.setlast(ton)
+            refiled[n] = ton
+        if refiled:
+            if keepsequences:
+                tofolder._copysequences(self, refiled.items())
+            self.removefromallsequences(refiled.keys())
+        if errors:
+            if len(errors) == 1:
+                raise os.error, errors[0]
+            else:
+                raise os.error, ('multiple errors:', errors)
+
+    def _copysequences(self, fromfolder, refileditems):
+        """Helper for refilemessages() to copy sequences.
+        'refileditems' is a list of (from-number, to-number) pairs; for
+        every source sequence containing a refiled message, the new
+        number is appended to the same-named sequence here."""
+        fromsequences = fromfolder.getsequences()
+        tosequences = self.getsequences()
+        changed = 0
+        for name, seq in fromsequences.items():
+            try:
+                toseq = tosequences[name]
+                new = 0
+            except KeyError:
+                toseq = []
+                new = 1
+            for fromn, ton in refileditems:
+                if fromn in seq:
+                    toseq.append(ton)
+                    changed = 1
+            if new and toseq:
+                tosequences[name] = toseq
+        if changed:
+            # Only rewrite the sequences file if something was added.
+            self.putsequences(tosequences)
+
+    def movemessage(self, n, tofolder, ton):
+        """Move one message over a specific destination message,
+        which may or may not already exist.  An existing destination is
+        preserved as a leading-comma backup first."""
+        path = self.getmessagefilename(n)
+        # Open it to check that it exists
+        f = open(path)
+        f.close()
+        del f
+        topath = tofolder.getmessagefilename(ton)
+        backuptopath = tofolder.getmessagefilename(',%d' % ton)
+        try:
+            os.rename(topath, backuptopath)
+        except os.error:
+            pass
+        try:
+            os.rename(path, topath)
+        except os.error:
+            # Try copying
+            # (rename fails across filesystems); on failure, remove the
+            # partial copy so the destination isn't left corrupt.
+            ok = 0
+            try:
+                tofolder.setlast(None)
+                shutil.copy2(path, topath)
+                ok = 1
+            finally:
+                if not ok:
+                    try:
+                        os.unlink(topath)
+                    except os.error:
+                        pass
+            os.unlink(path)
+        self.removefromallsequences([n])
+
    def copymessage(self, n, tofolder, ton):
        """Copy one message over a specific destination message,
        which may or may not already exist."""
        path = self.getmessagefilename(n)
        # Open it to check that it exists
        f = open(path)
        f.close()
        del f
        topath = tofolder.getmessagefilename(ton)
        # Keep any existing destination message as a ',<n>' backup file.
        backuptopath = tofolder.getmessagefilename(',%d' % ton)
        try:
            os.rename(topath, backuptopath)
        except os.error:
            pass
        ok = 0
        try:
            # Invalidate the destination folder's cached last number.
            tofolder.setlast(None)
            shutil.copy2(path, topath)
            ok = 1
        finally:
            if not ok:
                # Copy failed: remove the partial destination file.
                try:
                    os.unlink(topath)
                except os.error:
                    pass
+
+    def createmessage(self, n, txt):
+        """Create a message, with text from the open file txt."""
+        path = self.getmessagefilename(n)
+        backuppath = self.getmessagefilename(',%d' % n)
+        try:
+            os.rename(path, backuppath)
+        except os.error:
+            pass
+        ok = 0
+        BUFSIZE = 16*1024
+        try:
+            f = open(path, "w")
+            while 1:
+                buf = txt.read(BUFSIZE)
+                if not buf:
+                    break
+                f.write(buf)
+            f.close()
+            ok = 1
+        finally:
+            if not ok:
+                try:
+                    os.unlink(path)
+                except os.error:
+                    pass
+
+    def removefromallsequences(self, list):
+        """Remove one or more messages from all sequences (including last)
+        -- but not from 'cur'!!!"""
+        if hasattr(self, 'last') and self.last in list:
+            del self.last
+        sequences = self.getsequences()
+        changed = 0
+        for name, seq in sequences.items():
+            if name == 'cur':
+                continue
+            for n in list:
+                if n in seq:
+                    seq.remove(n)
+                    changed = 1
+                    if not seq:
+                        del sequences[name]
+        if changed:
+            self.putsequences(sequences)
+
+    def getlast(self):
+        """Return the last message number."""
+        if not hasattr(self, 'last'):
+            self.listmessages() # Set self.last
+        return self.last
+
+    def setlast(self, last):
+        """Set the last message number."""
+        if last is None:
+            if hasattr(self, 'last'):
+                del self.last
+        else:
+            self.last = last
+
class Message(mimetools.Message):
    """A single MH message, with MIME-aware header and body access."""

    def __init__(self, f, n, fp = None):
        """Constructor.

        f is the containing folder, n the message number; fp, when
        given, is an open file positioned at the message start."""
        self.folder = f
        self.number = n
        if not fp:
            path = f.getmessagefilename(n)
            fp = open(path, 'r')
        mimetools.Message.__init__(self, fp)

    def __repr__(self):
        """String representation."""
        return 'Message(%s, %s)' % (repr(self.folder), self.number)

    def getheadertext(self, pred = None):
        """Return the message's header text as a string.  If an
        argument is specified, it is used as a filter predicate to
        decide which headers to return (its argument is the header
        name converted to lower case)."""
        if not pred:
            return ''.join(self.headers)
        headers = []
        hit = 0
        for line in self.headers:
            # A line starting with whitespace continues the previous
            # header, so it inherits the previous hit/miss decision.
            if not line[0].isspace():
                i = line.find(':')
                if i > 0:
                    hit = pred(line[:i].lower())
            if hit: headers.append(line)
        return ''.join(headers)

    def getbodytext(self, decode = 1):
        """Return the message's body text as string.  This undoes a
        Content-Transfer-Encoding, but does not interpret other MIME
        features (e.g. multipart messages).  To suppress decoding,
        pass 0 as an argument."""
        self.fp.seek(self.startofbody)
        encoding = self.getencoding()
        # Identity encodings need no decoding pass.
        if not decode or encoding in ('', '7bit', '8bit', 'binary'):
            return self.fp.read()
        from StringIO import StringIO
        output = StringIO()
        mimetools.decode(self.fp, output, encoding)
        return output.getvalue()

    def getbodyparts(self):
        """Only for multipart messages: return the message's body as a
        list of SubMessage objects.  Each submessage object behaves
        (almost) as a Message object."""
        if self.getmaintype() != 'multipart':
            raise Error, 'Content-Type is not multipart/*'
        bdry = self.getparam('boundary')
        if not bdry:
            raise Error, 'multipart/* without boundary param'
        self.fp.seek(self.startofbody)
        mf = multifile.MultiFile(self.fp)
        mf.push(bdry)
        parts = []
        while mf.next():
            # Parts are numbered '<msgno>.1', '<msgno>.2', ...
            n = str(self.number) + '.' + `1 + len(parts)`
            part = SubMessage(self.folder, n, mf)
            parts.append(part)
        mf.pop()
        return parts

    def getbody(self):
        """Return body, either a string or a list of messages."""
        if self.getmaintype() == 'multipart':
            return self.getbodyparts()
        else:
            return self.getbodytext()
+
+
class SubMessage(Message):
    """One part of a multipart Message; body is read and cached eagerly."""

    def __init__(self, f, n, fp):
        """Constructor.

        Unlike Message, the body (both decoded and raw) is consumed
        immediately, because fp is a shared MultiFile that will advance
        to the next part."""
        Message.__init__(self, f, n, fp)
        if self.getmaintype() == 'multipart':
            self.body = Message.getbodyparts(self)
        else:
            self.body = Message.getbodytext(self)
        self.bodyencoded = Message.getbodytext(self, decode=0)
            # XXX If this is big, should remember file pointers

    def __repr__(self):
        """String representation."""
        f, n, fp = self.folder, self.number, self.fp
        return 'SubMessage(%s, %s, %s)' % (f, n, fp)

    def getbodytext(self, decode = 1):
        # Returns None when the body is a parts list (multipart);
        # callers are expected to check the content type first.
        if not decode:
            return self.bodyencoded
        if type(self.body) == type(''):
            return self.body

    def getbodyparts(self):
        # Returns None when the body is a plain string (not multipart).
        if type(self.body) == type([]):
            return self.body

    def getbody(self):
        """Return the cached body: a string or a list of SubMessages."""
        return self.body
+
+
+class IntSet:
+    """Class implementing sets of integers.
+
+    This is an efficient representation for sets consisting of several
+    continuous ranges, e.g. 1-100,200-400,402-1000 is represented
+    internally as a list of three pairs: [(1,100), (200,400),
+    (402,1000)].  The internal representation is always kept normalized.
+
+    The constructor has up to three arguments:
+    - the string used to initialize the set (default ''),
+    - the separator between ranges (default ',')
+    - the separator between begin and end of a range (default '-')
+    The separators must be strings (not regexprs) and should be different.
+
+    The tostring() function yields a string that can be passed to another
+    IntSet constructor; __repr__() is a valid IntSet constructor itself.
+    """
+
+    # XXX The default begin/end separator means that negative numbers are
+    #     not supported very well.
+    #
+    # XXX There are currently no operations to remove set elements.
+
+    def __init__(self, data = None, sep = ',', rng = '-'):
+        self.pairs = []
+        self.sep = sep
+        self.rng = rng
+        if data: self.fromstring(data)
+
+    def reset(self):
+        self.pairs = []
+
+    def __cmp__(self, other):
+        return cmp(self.pairs, other.pairs)
+
+    def __hash__(self):
+        return hash(self.pairs)
+
+    def __repr__(self):
+        return 'IntSet(%s, %s, %s)' % (`self.tostring()`,
+                  `self.sep`, `self.rng`)
+
+    def normalize(self):
+        self.pairs.sort()
+        i = 1
+        while i < len(self.pairs):
+            alo, ahi = self.pairs[i-1]
+            blo, bhi = self.pairs[i]
+            if ahi >= blo-1:
+                self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
+            else:
+                i = i+1
+
+    def tostring(self):
+        s = ''
+        for lo, hi in self.pairs:
+            if lo == hi: t = `lo`
+            else: t = `lo` + self.rng + `hi`
+            if s: s = s + (self.sep + t)
+            else: s = t
+        return s
+
+    def tolist(self):
+        l = []
+        for lo, hi in self.pairs:
+            m = range(lo, hi+1)
+            l = l + m
+        return l
+
+    def fromlist(self, list):
+        for i in list:
+            self.append(i)
+
+    def clone(self):
+        new = IntSet()
+        new.pairs = self.pairs[:]
+        return new
+
+    def min(self):
+        return self.pairs[0][0]
+
+    def max(self):
+        return self.pairs[-1][-1]
+
+    def contains(self, x):
+        for lo, hi in self.pairs:
+            if lo <= x <= hi: return 1
+        return 0
+
+    def append(self, x):
+        for i in range(len(self.pairs)):
+            lo, hi = self.pairs[i]
+            if x < lo: # Need to insert before
+                if x+1 == lo:
+                    self.pairs[i] = (x, hi)
+                else:
+                    self.pairs.insert(i, (x, x))
+                if i > 0 and x-1 == self.pairs[i-1][1]:
+                    # Merge with previous
+                    self.pairs[i-1:i+1] = [
+                            (self.pairs[i-1][0],
+                             self.pairs[i][1])
+                          ]
+                return
+            if x <= hi: # Already in set
+                return
+        i = len(self.pairs) - 1
+        if i >= 0:
+            lo, hi = self.pairs[i]
+            if x-1 == hi:
+                self.pairs[i] = lo, x
+                return
+        self.pairs.append((x, x))
+
+    def addpair(self, xlo, xhi):
+        if xlo > xhi: return
+        self.pairs.append((xlo, xhi))
+        self.normalize()
+
+    def fromstring(self, data):
+        new = []
+        for part in data.split(self.sep):
+            list = []
+            for subp in part.split(self.rng):
+                s = subp.strip()
+                list.append(int(s))
+            if len(list) == 1:
+                new.append((list[0], list[0]))
+            elif len(list) == 2 and list[0] <= list[1]:
+                new.append((list[0], list[1]))
+            else:
+                raise ValueError, 'bad data passed to IntSet'
+        self.pairs = self.pairs + new
+        self.normalize()
+
+
+# Subroutines to read/write entries in .mh_profile and .mh_sequences
+
def pickline(file, key, casefold = 1):
    """Return the value of the 'key: value' entry in file, or None.

    Continuation lines (starting with whitespace) are folded into the
    value.  When casefold is true (the default) the key match is
    case-insensitive.  Returns None when the file cannot be opened or
    the key is absent.
    """
    try:
        f = open(file, 'r')
    except IOError:
        return None
    try:
        pat = re.escape(key) + ':'
        prog = re.compile(pat, casefold and re.IGNORECASE)
        while 1:
            line = f.readline()
            if not line: break
            if prog.match(line):
                text = line[len(key)+1:]
                while 1:
                    line = f.readline()
                    if not line or not line[0].isspace():
                        break
                    text = text + line
                return text.strip()
        return None
    finally:
        # Bug fix: the original never closed the file object.
        f.close()
+
def updateline(file, key, value, casefold = 1):
    """Set, replace or delete the 'key: value' line in file.

    A value of None deletes the line; otherwise the first matching line
    is replaced, or a new line is appended.  The file is rewritten via a
    'file~' temporary which is renamed over the original.  When casefold
    is true (default), key matching is case-insensitive.
    """
    try:
        f = open(file, 'r')
        lines = f.readlines()
        f.close()
    except IOError:
        lines = []
    prog = re.compile(re.escape(key) + ':(.*)\n',
                      casefold and re.IGNORECASE)
    if value is None:
        newline = None
    else:
        newline = '%s: %s\n' % (key, value)
    found = 0
    for i in range(len(lines)):
        if prog.match(lines[i]):
            found = 1
            if newline is None:
                del lines[i]
            else:
                lines[i] = newline
            break
    if not found and newline is not None:
        lines.append(newline)
    tempfile = file + "~"
    f = open(tempfile, 'w')
    f.writelines(lines)
    f.close()
    os.rename(tempfile, file)
+
+
+# Test program
+
def test():
    """Interactive smoke test for the MH classes.

    WARNING: destructive -- removes $HOME/Mail/@test with rm -rf, and
    requires an existing MH setup plus the external 'pick' command."""
    global mh, f
    os.system('rm -rf $HOME/Mail/@test')
    mh = MH()
    # Echo each expression before evaluating it, REPL-style.
    def do(s): print s; print eval(s)
    do('mh.listfolders()')
    do('mh.listallfolders()')
    testfolders = ['@test', '@test/test1', '@test/test2',
                   '@test/test1/test11', '@test/test1/test12',
                   '@test/test1/test11/test111']
    for t in testfolders: do('mh.makefolder(%s)' % `t`)
    do('mh.listsubfolders(\'@test\')')
    do('mh.listallsubfolders(\'@test\')')
    f = mh.openfolder('@test')
    do('f.listsubfolders()')
    do('f.listallsubfolders()')
    do('f.getsequences()')
    seqs = f.getsequences()
    seqs['foo'] = IntSet('1-10 12-20', ' ').tolist()
    print seqs
    f.putsequences(seqs)
    do('f.getsequences()')
    # Delete deepest folders first so the parents are empty.
    testfolders.reverse()
    for t in testfolders: do('mh.deletefolder(%s)' % `t`)
    do('mh.getcontext()')
    context = mh.getcontext()
    f = mh.openfolder(context)
    do('f.getcurrent()')
    # Compare parsesequence() against the output of MH's own 'pick'.
    for seq in ['first', 'last', 'cur', '.', 'prev', 'next',
                'first:3', 'last:3', 'cur:3', 'cur:-3',
                'prev:3', 'next:3',
                '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
                'all']:
        try:
            do('f.parsesequence(%s)' % `seq`)
        except Error, msg:
            print "Error:", msg
        stuff = os.popen("pick %s 2>/dev/null" % `seq`).read()
        list = map(int, stuff.split())
        print list, "<-- pick"
    do('f.listmessages()')
diff --git a/lib-python/2.2/mimetools.py b/lib-python/2.2/mimetools.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/mimetools.py
@@ -0,0 +1,226 @@
+"""Various tools used by MIME-reading or MIME-writing programs."""
+
+
+import os
+import rfc822
+import tempfile
+
+__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
+           "copybinary"]
+
class Message(rfc822.Message):
    """A derived class of rfc822.Message that knows about MIME headers and
    contains some hooks for decoding encoded and multipart messages."""

    def __init__(self, fp, seekable = 1):
        # Parse the rfc822 headers first, then derive the MIME attributes
        # from Content-Transfer-Encoding and Content-Type.
        rfc822.Message.__init__(self, fp, seekable)
        self.encodingheader = \
                self.getheader('content-transfer-encoding')
        self.typeheader = \
                self.getheader('content-type')
        self.parsetype()
        self.parseplist()

    def parsetype(self):
        """Split Content-Type into self.type/maintype/subtype.

        A missing header defaults to 'text/plain'; any ';parameters'
        tail is stashed in self.plisttext for parseplist()."""
        str = self.typeheader
        if str is None:
            str = 'text/plain'
        if ';' in str:
            i = str.index(';')
            self.plisttext = str[i:]
            str = str[:i]
        else:
            self.plisttext = ''
        fields = str.split('/')
        for i in range(len(fields)):
            fields[i] = fields[i].strip().lower()
        self.type = '/'.join(fields)
        self.maintype = fields[0]
        self.subtype = '/'.join(fields[1:])

    def parseplist(self):
        """Parse self.plisttext into self.plist, a list of
        'name=value' strings (names lowercased)."""
        str = self.plisttext
        self.plist = []
        while str[:1] == ';':
            str = str[1:]
            if ';' in str:
                # XXX Should parse quotes!
                end = str.index(';')
            else:
                end = len(str)
            f = str[:end]
            if '=' in f:
                i = f.index('=')
                f = f[:i].strip().lower() + \
                        '=' + f[i+1:].strip()
            self.plist.append(f.strip())
            str = str[end:]

    def getplist(self):
        """Return the Content-Type parameter list (see parseplist)."""
        return self.plist

    def getparam(self, name):
        """Return the unquoted value of parameter `name', or None."""
        name = name.lower() + '='
        n = len(name)
        for p in self.plist:
            if p[:n] == name:
                return rfc822.unquote(p[n:])
        return None

    def getparamnames(self):
        """Return the list of parameter names, lowercased."""
        result = []
        for p in self.plist:
            i = p.find('=')
            if i >= 0:
                result.append(p[:i].lower())
        return result

    def getencoding(self):
        """Return the transfer encoding, lowercased; default '7bit'."""
        if self.encodingheader is None:
            return '7bit'
        return self.encodingheader.lower()

    def gettype(self):
        """Return the full 'maintype/subtype' string."""
        return self.type

    def getmaintype(self):
        """Return the main content type (e.g. 'text')."""
        return self.maintype

    def getsubtype(self):
        """Return the sub content type (e.g. 'plain')."""
        return self.subtype
+
+
+
+
+# Utility functions
+# -----------------
+
+
# Cached 'hostip.uid.pid' prefix, computed once per process.
_prefix = None

def choose_boundary():
    """Return a random string usable as a multipart boundary.
    The method used is so that it is *very* unlikely that the same
    string of characters will every occur again in the Universe,
    so the caller needn't check the data it is packing for the
    occurrence of the boundary.

    The boundary contains dots so you have to quote it in the header."""

    global _prefix
    import time
    import random
    if _prefix is None:
        import socket
        import os
        hostid = socket.gethostbyname(socket.gethostname())
        # Deprecated backtick-repr replaced by repr(); identical output
        # for ints.  The bare excepts guard platforms without
        # getuid/getpid (e.g. Windows).
        try:
            uid = repr(os.getuid())
        except:
            uid = '1'
        try:
            pid = repr(os.getpid())
        except:
            pid = '1'
        _prefix = hostid + '.' + uid + '.' + pid
    timestamp = '%.3f' % time.time()
    seed = repr(random.randint(0, 32767))
    return _prefix + '.' + timestamp + '.' + seed
+
+
+# Subroutines for decoding some common content-transfer-types
+
def decode(input, output, encoding):
    """Decode common content-transfer-encodings (base64, quopri, uuencode).

    input and output are open file objects.  Encodings without a native
    decoder fall back to piping through an external command from
    decodetab; a completely unknown encoding raises ValueError.
    """
    if encoding == 'base64':
        import base64
        return base64.decode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        return quopri.decode(input, output)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.decode(input, output)
    if encoding in ('7bit', '8bit'):
        # Identity encodings: just copy the data through.
        return output.write(input.read())
    # has_key() replaced by the 'in' operator (supported since 2.2).
    if encoding in decodetab:
        pipethrough(input, decodetab[encoding], output)
    else:
        raise ValueError('unknown Content-Transfer-Encoding: %s' % encoding)
+
def encode(input, output, encoding):
    """Encode common content-transfer-encodings (base64, quopri, uuencode).

    input and output are open file objects.  Encodings without a native
    encoder fall back to piping through an external command from
    encodetab; a completely unknown encoding raises ValueError.
    """
    if encoding == 'base64':
        import base64
        return base64.encode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        # Third argument 0: do not encode tabs/spaces at line ends.
        return quopri.encode(input, output, 0)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.encode(input, output)
    if encoding in ('7bit', '8bit'):
        # Identity encodings: just copy the data through.
        return output.write(input.read())
    # has_key() replaced by the 'in' operator (supported since 2.2).
    if encoding in encodetab:
        pipethrough(input, encodetab[encoding], output)
    else:
        raise ValueError('unknown Content-Transfer-Encoding: %s' % encoding)
+
+# The following is no longer used for standard encodings
+
+# XXX This requires that uudecode and mmencode are in $PATH
+
# Shell pipeline that forces uudecode's output into a known temporary
# file, overriding the target filename embedded in the 'begin' line.
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''

# External fallback decoders, keyed by Content-Transfer-Encoding.
# Only consulted by decode() when no native codec applies.
decodetab = {
        'uuencode':             uudecode_pipe,
        'x-uuencode':           uudecode_pipe,
        'uue':                  uudecode_pipe,
        'x-uue':                uudecode_pipe,
        'quoted-printable':     'mmencode -u -q',
        'base64':               'mmencode -u -b',
}

# External fallback encoders; 'tempfile' is the name uuencode embeds in
# its 'begin' line, not an actual file.
encodetab = {
        'x-uuencode':           'uuencode tempfile',
        'uuencode':             'uuencode tempfile',
        'x-uue':                'uuencode tempfile',
        'uue':                  'uuencode tempfile',
        'quoted-printable':     'mmencode -q',
        'base64':               'mmencode -b',
}
+
def pipeto(input, command):
    """Copy input line by line to `command' run via a shell.

    NOTE(review): command is passed to os.popen and hence the shell;
    only the fixed, table-driven commands above should ever reach it."""
    pipe = os.popen(command, 'w')
    copyliteral(input, pipe)
    pipe.close()
+
def pipethrough(input, command, output):
    """Filter input through shell `command', copying its stdout to output.

    The input is spooled to a temporary file so the command can read it
    by name.  NOTE(review): tempfile.mktemp() has a well-known symlink
    race and the command runs via the shell; tolerable only for the
    fixed commands in decodetab/encodetab (mkstemp is not available in
    Python 2.2)."""
    tempname = tempfile.mktemp()
    temp = open(tempname, 'w')
    copyliteral(input, temp)
    temp.close()
    pipe = os.popen(command + ' <' + tempname, 'r')
    copybinary(pipe, output)
    pipe.close()
    os.unlink(tempname)
+
def copyliteral(input, output):
    """Copy input to output line by line until EOF."""
    line = input.readline()
    while line:
        output.write(line)
        line = input.readline()
+
def copybinary(input, output):
    """Copy input to output in fixed-size binary chunks until EOF."""
    BUFSIZE = 8192
    chunk = input.read(BUFSIZE)
    while chunk:
        output.write(chunk)
        chunk = input.read(BUFSIZE)
diff --git a/lib-python/2.2/mimetypes.py b/lib-python/2.2/mimetypes.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/mimetypes.py
@@ -0,0 +1,435 @@
+"""Guess the MIME type of a file.
+
+This module defines two useful functions:
+
+guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
+
+guess_extension(type, strict=1) -- guess the extension for a given MIME type.
+
+It also contains the following, for tuning the behavior:
+
+Data:
+
+knownfiles -- list of files to parse
+inited -- flag set when init() has been called
+suffix_map -- dictionary mapping suffixes to suffixes
+encodings_map -- dictionary mapping suffixes to encodings
+types_map -- dictionary mapping suffixes to types
+
+Functions:
+
+init([files]) -- parse a list of files, default knownfiles
+read_mime_types(file) -- parse one file, return a dictionary or None
+"""
+
+import os
+import posixpath
+import urllib
+
+__all__ = ["guess_type","guess_extension","read_mime_types","init"]
+
# Candidate mime.types files read by init().  The first entry is
# repeated below for Apache 1.2; parsing the same file twice is
# harmless since later entries simply overwrite identical mappings.
knownfiles = [
    "/usr/local/etc/httpd/conf/mime.types",
    "/usr/local/lib/netscape/mime.types",
    "/usr/local/etc/httpd/conf/mime.types",     # Apache 1.2
    "/usr/local/etc/mime.types",                # Apache 1.3
    ]

# Set to True by init(); lets MimeTypes() lazily trigger table loading.
inited = False
+
+
class MimeTypes:
    """MIME-types datastore.

    This datastore can handle information from mime.types-style files
    and supports basic determination of MIME type from a filename or
    URL, and can guess a reasonable extension given a MIME type.
    """

    def __init__(self, filenames=()):
        if not inited:
            init()
        # Per-instance copies of the module-wide tables, so additions
        # made via read()/readfp() stay local to this instance.
        self.encodings_map = encodings_map.copy()
        self.suffix_map = suffix_map.copy()
        self.types_map = types_map.copy()
        self.common_types = common_types.copy()
        for name in filenames:
            self.read(name)

    def guess_type(self, url, strict=1):
        """Guess the type of a file based on its URL.

        Return value is a tuple (type, encoding) where type is None if
        the type can't be guessed (no or unknown suffix) or a string
        of the form type/subtype, usable for a MIME Content-type
        header; and encoding is None for no encoding or the name of
        the program used to encode (e.g. compress or gzip).  The
        mappings are table driven.  Encoding suffixes are case
        sensitive; type suffixes are first tried case sensitive, then
        case insensitive.

        The suffixes .tgz, .taz and .tz (case sensitive!) are all
        mapped to '.tar.gz'.  (This is table-driven too, using the
        dictionary suffix_map.)

        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        scheme, url = urllib.splittype(url)
        if scheme == 'data':
            # syntax of data URLs:
            # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data      := *urlchar
            # parameter := attribute "=" value
            # type/subtype defaults to "text/plain"
            comma = url.find(',')
            if comma < 0:
                # bad data URL
                return None, None
            semi = url.find(';', 0, comma)
            if semi >= 0:
                type = url[:semi]
            else:
                type = url[:comma]
            if '=' in type or '/' not in type:
                type = 'text/plain'
            return type, None           # never compressed, so encoding is None
        base, ext = posixpath.splitext(url)
        # Resolve compound suffixes (.tgz -> .tar.gz) before anything else.
        # dict.has_key() calls replaced throughout by the 'in' operator
        # (supported for dicts since Python 2.2).
        while ext in self.suffix_map:
            base, ext = posixpath.splitext(base + self.suffix_map[ext])
        if ext in self.encodings_map:
            encoding = self.encodings_map[ext]
            base, ext = posixpath.splitext(base)
        else:
            encoding = None
        types_map = self.types_map
        common_types = self.common_types
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        elif strict:
            return None, encoding
        elif ext in common_types:
            return common_types[ext], encoding
        elif ext.lower() in common_types:
            return common_types[ext.lower()], encoding
        else:
            return None, encoding

    def guess_extension(self, type, strict=1):
        """Guess the extension for a file based on its MIME type.

        Return value is a string giving a filename extension,
        including the leading dot ('.').  The extension is not
        guaranteed to have been associated with any particular data
        stream, but would be mapped to the MIME type `type' by
        guess_type().  If no extension can be guessed for `type', None
        is returned.

        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        type = type.lower()
        for ext, stype in self.types_map.items():
            if type == stype:
                return ext
        if not strict:
            # NOTE: consults the module-level common_types table, not
            # the per-instance copy -- preserved from the original.
            for ext, stype in common_types.items():
                if type == stype:
                    return ext
        return None

    def read(self, filename):
        """Read a single mime.types-format file, specified by pathname."""
        fp = open(filename)
        self.readfp(fp)
        fp.close()

    def readfp(self, fp):
        """Read a single mime.types-format file from an open file object."""
        map = self.types_map
        while 1:
            line = fp.readline()
            if not line:
                break
            words = line.split()
            # Drop everything from the first word starting with '#'.
            for i in range(len(words)):
                if words[i][0] == '#':
                    del words[i:]
                    break
            if not words:
                continue
            type, suffixes = words[0], words[1:]
            for suff in suffixes:
                map['.' + suff] = type
+
+
def guess_type(url, strict=1):
    """Guess the type of a file based on its URL.

    Return value is a tuple (type, encoding) where type is None if the
    type can't be guessed (no or unknown suffix) or a string of the
    form type/subtype, usable for a MIME Content-type header; and
    encoding is None for no encoding or the name of the program used
    to encode (e.g. compress or gzip).  The mappings are table
    driven.  Encoding suffixes are case sensitive; type suffixes are
    first tried case sensitive, then case insensitive.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
    to ".tar.gz".  (This is table-driven too, using the dictionary
    suffix_map).

    Optional `strict' argument when false adds a bunch of commonly found, but
    non-standard types.
    """
    init()
    # Not an infinite recursion: init() rebinds the module-global name
    # 'guess_type' to the new MimeTypes instance's bound method, so this
    # call forwards to the real implementation, and subsequent callers
    # bypass this stub entirely.
    return guess_type(url, strict)
+
+
def guess_extension(type, strict=1):
    """Guess the extension for a file based on its MIME type.

    Return value is a string giving a filename extension, including the
    leading dot ('.').  The extension is not guaranteed to have been
    associated with any particular data stream, but would be mapped to the
    MIME type `type' by guess_type().  If no extension can be guessed for
    `type', None is returned.

    Optional `strict' argument when false adds a bunch of commonly found,
    but non-standard types.
    """
    init()
    # Not an infinite recursion: init() rebinds the module-global name
    # 'guess_extension' to the MimeTypes instance's bound method, so
    # this forwards to the real implementation (see guess_type above).
    return guess_extension(type, strict)
+
+
def init(files=None):
    """(Re)load the MIME tables and install the real API functions.

    Builds a MimeTypes database (augmented with the given files, or
    knownfiles by default), exports its tables as module globals, and
    replaces the guess_type/guess_extension stubs with the instance's
    bound methods."""
    global guess_extension, guess_type
    global suffix_map, types_map, encodings_map, common_types
    global inited
    # Set before building MimeTypes() so its __init__ doesn't re-enter init().
    inited = True
    db = MimeTypes()
    if files is None:
        files = knownfiles
    for file in files:
        if os.path.isfile(file):
            # NOTE(review): this file object is never explicitly closed.
            db.readfp(open(file))
    encodings_map = db.encodings_map
    suffix_map = db.suffix_map
    types_map = db.types_map
    guess_extension = db.guess_extension
    guess_type = db.guess_type
    common_types = db.common_types
+
+
def read_mime_types(file):
    """Parse one mime.types-style file, given by pathname.

    Returns its suffix -> type dictionary, or None when the file
    cannot be opened."""
    try:
        f = open(file)
    except IOError:
        return None
    try:
        db = MimeTypes()
        db.readfp(f)
    finally:
        # Bug fix: the original leaked the file object.
        f.close()
    return db.types_map
+
+
# Compound suffixes expanded before type lookup (.tgz -> .tar.gz).
suffix_map = {
    '.tgz': '.tar.gz',
    '.taz': '.tar.gz',
    '.tz': '.tar.gz',
    }

# Suffixes that denote a compression program rather than a content type.
encodings_map = {
    '.gz': 'gzip',
    '.Z': 'compress',
    }
+
+# Before adding new types, make sure they are either registered with IANA, at
+# http://www.isi.edu/in-notes/iana/assignments/media-types
+# or extensions, i.e. using the x- prefix
+
+# If you add to these, please keep them sorted!
+types_map = {
+    '.a'      : 'application/octet-stream',
+    '.ai'     : 'application/postscript',
+    '.aif'    : 'audio/x-aiff',
+    '.aifc'   : 'audio/x-aiff',
+    '.aiff'   : 'audio/x-aiff',
+    '.au'     : 'audio/basic',
+    '.avi'    : 'video/x-msvideo',
+    '.bat'    : 'text/plain',
+    '.bcpio'  : 'application/x-bcpio',
+    '.bin'    : 'application/octet-stream',
+    '.bmp'    : 'image/x-ms-bmp',
+    '.c'      : 'text/plain',
+    # Duplicates :(
+    '.cdf'    : 'application/x-cdf',
+    '.cdf'    : 'application/x-netcdf',
+    '.cpio'   : 'application/x-cpio',
+    '.csh'    : 'application/x-csh',
+    '.css'    : 'text/css',
+    '.dll'    : 'application/octet-stream',
+    '.doc'    : 'application/msword',
+    '.dot'    : 'application/msword',
+    '.dvi'    : 'application/x-dvi',
+    '.eml'    : 'message/rfc822',
+    '.eps'    : 'application/postscript',
+    '.etx'    : 'text/x-setext',
+    '.exe'    : 'application/octet-stream',
+    '.gif'    : 'image/gif',
+    '.gtar'   : 'application/x-gtar',
+    '.h'      : 'text/plain',
+    '.hdf'    : 'application/x-hdf',
+    '.htm'    : 'text/html',
+    '.html'   : 'text/html',
+    '.ief'    : 'image/ief',
+    '.jpe'    : 'image/jpeg',
+    '.jpeg'   : 'image/jpeg',
+    '.jpg'    : 'image/jpeg',
+    '.js'     : 'application/x-javascript',
+    '.ksh'    : 'text/plain',
+    '.latex'  : 'application/x-latex',
+    '.m1v'    : 'video/mpeg',
+    '.man'    : 'application/x-troff-man',
+    '.me'     : 'application/x-troff-me',
+    '.mht'    : 'message/rfc822',
+    '.mhtml'  : 'message/rfc822',
+    '.mif'    : 'application/x-mif',
+    '.mov'    : 'video/quicktime',
+    '.movie'  : 'video/x-sgi-movie',
+    '.mp2'    : 'audio/mpeg',
+    '.mp3'    : 'audio/mpeg',
+    '.mpa'    : 'video/mpeg',
+    '.mpe'    : 'video/mpeg',
+    '.mpeg'   : 'video/mpeg',
+    '.mpg'    : 'video/mpeg',
+    '.ms'     : 'application/x-troff-ms',
+    '.nc'     : 'application/x-netcdf',
+    '.nws'    : 'message/rfc822',
+    '.o'      : 'application/octet-stream',
+    '.obj'    : 'application/octet-stream',
+    '.oda'    : 'application/oda',
+    '.p12'    : 'application/x-pkcs12',
+    '.p7c'    : 'application/pkcs7-mime',
+    '.pbm'    : 'image/x-portable-bitmap',
+    '.pdf'    : 'application/pdf',
+    '.pfx'    : 'application/x-pkcs12',
+    '.pgm'    : 'image/x-portable-graymap',
+    '.pl'     : 'text/plain',
+    '.png'    : 'image/png',
+    '.pnm'    : 'image/x-portable-anymap',
+    '.pot'    : 'application/vnd.ms-powerpoint',
+    '.ppa'    : 'application/vnd.ms-powerpoint',
+    '.ppm'    : 'image/x-portable-pixmap',
+    '.pps'    : 'application/vnd.ms-powerpoint',
+    '.ppt'    : 'application/vnd.ms-powerpoint',
+    '.ps'     : 'application/postscript',
+    '.pwz'    : 'application/vnd.ms-powerpoint',
+    '.py'     : 'text/x-python',
+    '.pyc'    : 'application/x-python-code',
+    '.pyo'    : 'application/x-python-code',
+    '.qt'     : 'video/quicktime',
+    '.ra'     : 'audio/x-pn-realaudio',
+    '.ram'    : 'application/x-pn-realaudio',
+    '.ras'    : 'image/x-cmu-raster',
+    '.rdf'    : 'application/xml',
+    '.rgb'    : 'image/x-rgb',
+    '.roff'   : 'application/x-troff',
+    '.rtx'    : 'text/richtext',
+    '.sgm'    : 'text/x-sgml',
+    '.sgml'   : 'text/x-sgml',
+    '.sh'     : 'application/x-sh',
+    '.shar'   : 'application/x-shar',
+    '.snd'    : 'audio/basic',
+    '.so'     : 'application/octet-stream',
+    '.src'    : 'application/x-wais-source',
+    '.sv4cpio': 'application/x-sv4cpio',
+    '.sv4crc' : 'application/x-sv4crc',
+    '.t'      : 'application/x-troff',
+    '.tar'    : 'application/x-tar',
+    '.tcl'    : 'application/x-tcl',
+    '.tex'    : 'application/x-tex',
+    '.texi'   : 'application/x-texinfo',
+    '.texinfo': 'application/x-texinfo',
+    '.tif'    : 'image/tiff',
+    '.tiff'   : 'image/tiff',
+    '.tr'     : 'application/x-troff',
+    '.tsv'    : 'text/tab-separated-values',
+    '.txt'    : 'text/plain',
+    '.ustar'  : 'application/x-ustar',
+    '.vcf'    : 'text/x-vcard',
+    '.wav'    : 'audio/x-wav',
+    '.wiz'    : 'application/msword',
+    '.xbm'    : 'image/x-xbitmap',
+    '.xlb'    : 'application/vnd.ms-excel',
+    # Duplicates :(
+    '.xls'    : 'application/excel',
+    '.xls'    : 'application/vnd.ms-excel',
+    '.xml'    : 'text/xml',
+    '.xpm'    : 'image/x-xpixmap',
+    '.xsl'    : 'application/xml',
+    '.xwd'    : 'image/x-xwindowdump',
+    '.zip'    : 'application/zip',
+    }
+
+# These are non-standard types, commonly found in the wild.  They will only
+# match if strict=0 flag is given to the API methods.
+
+# Please sort these too
common_types = {
    # 'image/jpg' is widespread but wrong; the registered type
    # 'image/jpeg' for '.jpg' lives in types_map above.
    '.jpg' : 'image/jpg',
    '.mid' : 'audio/midi',
    '.midi': 'audio/midi',
    '.pct' : 'image/pict',
    '.pic' : 'image/pict',
    '.pict': 'image/pict',
    '.rtf' : 'application/rtf',
    '.xul' : 'text/xul'
    }
+
+
if __name__ == '__main__':
    # Command-line driver: guess the type (or, with -e, the extension)
    # for each argument; -l consults the non-standard common_types too.
    import sys
    import getopt

    USAGE = """\
Usage: mimetypes.py [options] type

Options:
    --help / -h       -- print this message and exit
    --lenient / -l    -- additionally search of some common, but non-standard
                         types.
    --extension / -e  -- guess extension instead of type

More than one type argument may be given.
"""

    def usage(code, msg=''):
        # Print usage (and an optional error) and exit with the given code.
        print USAGE
        if msg: print msg
        sys.exit(code)

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hle',
                                   ['help', 'lenient', 'extension'])
    except getopt.error, msg:
        usage(1, msg)

    strict = 1
    extension = 0
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-l', '--lenient'):
            strict = 0
        elif opt in ('-e', '--extension'):
            extension = 1
    for gtype in args:
        if extension:
            guess = guess_extension(gtype, strict)
            if not guess: print "I don't know anything about type", gtype
            else: print guess
        else:
            guess, encoding = guess_type(gtype, strict)
            if not guess: print "I don't know anything about type", gtype
            else: print 'type:', guess, 'encoding:', encoding
diff --git a/lib-python/2.2/mimify.py b/lib-python/2.2/mimify.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/mimify.py
@@ -0,0 +1,464 @@
+#! /usr/bin/env python
+
+"""Mimification and unmimification of mail messages.
+
+Decode quoted-printable parts of a mail message or encode using
+quoted-printable.
+
+Usage:
+        mimify(input, output)
+        unmimify(input, output, decode_base64 = 0)
+to encode and decode respectively.  Input and output may be the name
+of a file or an open file object.  Only a readline() method is used
+on the input file, only a write() method is used on the output file.
+When using file names, the input and output file names may be the
+same.
+
+Interactive usage:
+        mimify.py -e [infile [outfile]]
+        mimify.py -d [infile [outfile]]
+to encode and decode respectively.  Infile defaults to standard
+input and outfile to standard output.
+"""
+
# Configure
MAXLEN = 200    # if lines longer than this, encode as quoted-printable
CHARSET = 'ISO-8859-1'  # default charset for non-US-ASCII mail
QUOTE = '> '            # string replies are quoted with
# End configure

import re

__all__ = ["mimify","unmimify","mime_encode_header","mime_decode_header"]

# Header-matching patterns (re.I makes them case-insensitive):
qp = re.compile('^content-transfer-encoding:\\s*quoted-printable', re.I)  # QP CTE header
base64_re = re.compile('^content-transfer-encoding:\\s*base64', re.I)     # base64 CTE header
mp = re.compile('^content-type:.*multipart/.*boundary="?([^;"\n]*)', re.I|re.S)  # multipart boundary
chrset = re.compile('^(content-type:.*charset=")(us-ascii|iso-8859-[0-9]+)(".*)', re.I|re.S)  # charset parameter
he = re.compile('^-*\n')  # end-of-header line: empty line or a line of dashes
mime_code = re.compile('=([0-9a-f][0-9a-f])', re.I)  # a single =XX quoted-printable escape
mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)  # RFC 2047 encoded-word
repl = re.compile('^subject:\\s+re: ', re.I)  # Subject: line of a reply message
+
class File:
    """A simple fake file object that knows about limited read-ahead and
    boundaries.  The only supported method is readline().

    When a MIME boundary line is read it is stashed in self.peek and
    readline() returns '' (simulated EOF) until the caller consumes it.
    """

    def __init__(self, file, boundary):
        self.file = file
        self.boundary = boundary
        self.peek = None

    def readline(self):
        # A pending boundary line acts like end-of-file.
        if self.peek is not None:
            return ''
        line = self.file.readline()
        if line and self.boundary and \
           line in (self.boundary + '\n', self.boundary + '--\n'):
            # Section divider or end marker: hold it back for the caller.
            self.peek = line
            return ''
        return line
+
class HeaderFile:
    """Wrap a file so readline() returns whole (possibly folded) headers.

    RFC 822 continuation lines (starting with space or tab) are joined
    onto the previous header line; one line of lookahead lives in
    self.peek.
    """

    def __init__(self, file):
        self.file = file
        self.peek = None

    def readline(self):
        # Prefer the buffered lookahead line, if there is one.
        if self.peek is None:
            line = self.file.readline()
        else:
            line, self.peek = self.peek, None
        if not line or he.match(line):
            # EOF or the end-of-header separator: pass through unchanged.
            return line
        # Absorb any folded continuation lines into this header.
        while 1:
            self.peek = self.file.readline()
            if not self.peek or self.peek[0] not in ' \t':
                return line
            line = line + self.peek
            self.peek = None
+
def mime_decode(line):
    """Decode a single line of quoted-printable text to 8bit."""
    pieces = []
    pos = 0
    while 1:
        res = mime_code.search(line, pos)
        if res is None:
            break
        # Copy the literal text, then the byte named by the =XX escape.
        pieces.append(line[pos:res.start(0)])
        pieces.append(chr(int(res.group(1), 16)))
        pos = res.end(0)
    pieces.append(line[pos:])
    return ''.join(pieces)
+
def mime_decode_header(line):
    """Decode a header line to 8bit."""
    pieces = []
    pos = 0
    while 1:
        res = mime_head.search(line, pos)
        if res is None:
            break
        word = res.group(1)
        # convert underscores to spaces (before =XX conversion!)
        word = word.replace('_', ' ')
        pieces.append(line[pos:res.start(0)])
        pieces.append(mime_decode(word))
        pos = res.end(0)
    pieces.append(line[pos:])
    return ''.join(pieces)
+
def unmimify_part(ifile, ofile, decode_base64 = 0):
    """Convert a quoted-printable part of a MIME mail message to 8bit.

    ifile is a File object whose boundary (if any) delimits this part;
    decoded output is written to ofile.  Recurses into nested multipart
    bodies via fresh File wrappers.
    """
    multipart = None        # becomes '--boundary' once a multipart header is seen
    quoted_printable = 0    # this part's body is quoted-printable
    is_base64 = 0           # this part's body is base64 (only if decode_base64)
    is_repl = 0             # message looks like a quoted reply
    # If the enclosing boundary itself carries the quote prefix, strip and
    # re-add that same prefix on every line of this part.
    if ifile.boundary and ifile.boundary[:2] == QUOTE:
        prefix = QUOTE
    else:
        prefix = ''

    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            return
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
        line = mime_decode_header(line)
        if qp.match(line):
            quoted_printable = 1
            continue        # skip this header
        if decode_base64 and base64_re.match(line):
            is_base64 = 1
            continue        # skip this header too; body will be decoded
        ofile.write(pref + line)
        if not prefix and repl.match(line):
            # we're dealing with a reply message
            is_repl = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            break
    if is_repl and (quoted_printable or multipart):
        is_repl = 0

    # read body
    while 1:
        line = ifile.readline()
        if not line:
            return
        line = re.sub(mime_head, '\\1', line)
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
##              if is_repl and len(line) >= 4 and line[:4] == QUOTE+'--' and line[-3:] != '--\n':
##                      multipart = line[:-1]
        while multipart:
            if line == multipart + '--\n':
                # closing boundary: copy it through and stop tracking parts
                ofile.write(pref + line)
                multipart = None
                line = None
                break
            if line == multipart + '\n':
                # inner boundary: recurse to decode the next sub-part
                ofile.write(pref + line)
                nifile = File(ifile, multipart)
                unmimify_part(nifile, ofile, decode_base64)
                line = nifile.peek
                if not line:
                    # premature end of file
                    break
                continue
            # not a boundary between parts
            break
        if line and quoted_printable:
            # rejoin soft-wrapped ('=' continued) lines before decoding
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        if line and is_base64 and not pref:
            import base64
            line = base64.decodestring(line)
        if line:
            ofile.write(pref + line)
+
def unmimify(infile, outfile, decode_base64 = 0):
    """Convert quoted-printable parts of a MIME mail message to 8bit.

    infile/outfile may be file names or open file objects.  When both
    name the same file, the original is first renamed to ",name" as a
    backup before the result is written in its place.
    """
    if type(infile) is type(''):
        src = open(infile)
        if type(outfile) is type('') and infile == outfile:
            # In-place conversion: keep the original as a ',name' backup.
            import os
            d, f = os.path.split(infile)
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        src = infile
    if type(outfile) is type(''):
        dst = open(outfile, 'w')
    else:
        dst = outfile
    unmimify_part(File(src, None), dst, decode_base64)
    dst.flush()
+
mime_char = re.compile('[=\177-\377]') # quote these chars in body
mime_header_char = re.compile('[=?\177-\377]') # quote these in header

def mime_encode(line, header):
    """Code a single line as quoted-printable.
    If header is set, quote some extra characters."""
    if header:
        reg = mime_header_char
    else:
        reg = mime_char
    pieces = []
    pos = 0
    if line[:5] == 'From ':
        # quote 'From ' at the start of a line for stupid mailers
        pieces.append('=%02X' % ord('F'))
        pos = 1
    while 1:
        res = reg.search(line, pos)
        if res is None:
            break
        pieces.append(line[pos:res.start(0)])
        pieces.append('=%02X' % ord(res.group(0)))
        pos = res.end(0)
    pieces.append(line[pos:])
    line = ''.join(pieces)

    # Fold lines of 75+ characters with soft ('=') line breaks, taking
    # care never to split in the middle of an =XX escape.
    folded = []
    while len(line) >= 75:
        cut = 73
        while line[cut] == '=' or line[cut-1] == '=':
            cut = cut - 1
        cut = cut + 1
        folded.append(line[:cut] + '=\n')
        line = line[cut:]
    folded.append(line)
    return ''.join(folded)
+
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)(?=[ \t)]|\n)')

def mime_encode_header(line):
    """Code a single header line as quoted-printable."""
    pieces = []
    pos = 0
    while 1:
        res = mime_header.search(line, pos)
        if res is None:
            break
        # Keep the leading delimiter, turn the 8bit word into an
        # RFC 2047 =?charset?Q?...?= encoded-word.
        pieces.append(line[pos:res.start(0)])
        pieces.append(res.group(1))
        pieces.append('=?%s?Q?%s?=' % (CHARSET, mime_encode(res.group(2), 1)))
        pos = res.end(0)
    pieces.append(line[pos:])
    return ''.join(pieces)
+
mv = re.compile('^mime-version:', re.I)                # existing MIME-Version header
cte = re.compile('^content-transfer-encoding:', re.I)  # existing CTE header
iso_char = re.compile('[\177-\377]')                   # any 8bit (non-ASCII) character

def mimify_part(ifile, ofile, is_mime):
    """Convert an 8bit part of a MIME mail message to quoted-printable.

    Reads one part (header + body) from ifile, decides whether it needs
    quoting, then writes the possibly re-encoded part to ofile.  is_mime
    says whether an enclosing MIME-Version header was already seen.
    """
    has_cte = is_qp = is_base64 = 0
    multipart = None
    must_quote_body = must_quote_header = has_iso_chars = 0

    header = []        # collected header lines (excluding the final separator)
    header_end = ''    # the line that terminated the header, if any
    message = []       # collected (decoded) body lines
    message_end = ''   # the boundary line that terminated the body, if any
    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            break
        if not must_quote_header and iso_char.search(line):
            must_quote_header = 1
        if mv.match(line):
            is_mime = 1
        if cte.match(line):
            has_cte = 1
            if qp.match(line):
                is_qp = 1
            elif base64_re.match(line):
                is_base64 = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            header_end = line
            break
        header.append(line)

    # read body
    while 1:
        line = ifile.readline()
        if not line:
            break
        if multipart:
            if line == multipart + '--\n':
                message_end = line
                break
            if line == multipart + '\n':
                message_end = line
                break
        if is_base64:
            # base64 bodies are copied through untouched
            message.append(line)
            continue
        if is_qp:
            # undo quoted-printable (incl. soft line breaks) so the body
            # can be re-encoded consistently below
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        message.append(line)
        if not has_iso_chars:
            if iso_char.search(line):
                has_iso_chars = must_quote_body = 1
        if not must_quote_body:
            if len(line) > MAXLEN:
                must_quote_body = 1

    # convert and output header and body
    for line in header:
        if must_quote_header:
            line = mime_encode_header(line)
        chrset_res = chrset.match(line)
        if chrset_res:
            if has_iso_chars:
                # change us-ascii into iso-8859-1
                if chrset_res.group(2).lower() == 'us-ascii':
                    line = '%s%s%s' % (chrset_res.group(1),
                                       CHARSET,
                                       chrset_res.group(3))
            else:
                # change iso-8859-* into us-ascii
                line = '%sus-ascii%s' % chrset_res.group(1, 3)
        if has_cte and cte.match(line):
            # rewrite the CTE header to match the encoding chosen below
            line = 'Content-Transfer-Encoding: '
            if is_base64:
                line = line + 'base64\n'
            elif must_quote_body:
                line = line + 'quoted-printable\n'
            else:
                line = line + '7bit\n'
        ofile.write(line)
    if (must_quote_header or must_quote_body) and not is_mime:
        # introduce MIME headers where the original message had none
        ofile.write('Mime-Version: 1.0\n')
        ofile.write('Content-Type: text/plain; ')
        if has_iso_chars:
            ofile.write('charset="%s"\n' % CHARSET)
        else:
            ofile.write('charset="us-ascii"\n')
    if must_quote_body and not has_cte:
        ofile.write('Content-Transfer-Encoding: quoted-printable\n')
    ofile.write(header_end)

    for line in message:
        if must_quote_body:
            line = mime_encode(line, 0)
        ofile.write(line)
    ofile.write(message_end)

    line = message_end
    while multipart:
        if line == multipart + '--\n':
            # read bit after the end of the last part
            while 1:
                line = ifile.readline()
                if not line:
                    return
                if must_quote_body:
                    line = mime_encode(line, 0)
                ofile.write(line)
        if line == multipart + '\n':
            # recurse to encode the next sub-part
            nifile = File(ifile, multipart)
            mimify_part(nifile, ofile, 1)
            line = nifile.peek
            if not line:
                # premature end of file
                break
            ofile.write(line)
            continue
        # unexpectedly no multipart separator--copy rest of file
        while 1:
            line = ifile.readline()
            if not line:
                return
            if must_quote_body:
                line = mime_encode(line, 0)
            ofile.write(line)
+
def mimify(infile, outfile):
    """Convert 8bit parts of a MIME mail message to quoted-printable.

    infile/outfile may be file names or open file objects.  When both
    name the same file, the original is first renamed to ",name" as a
    backup before the result is written in its place.
    """
    if type(infile) is type(''):
        src = open(infile)
        if type(outfile) is type('') and infile == outfile:
            # In-place conversion: keep the original as a ',name' backup.
            import os
            d, f = os.path.split(infile)
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        src = infile
    if type(outfile) is type(''):
        dst = open(outfile, 'w')
    else:
        dst = outfile
    mimify_part(File(src, None), dst, 0)
    dst.flush()
+
+import sys
+if __name__ == '__main__' or (len(sys.argv) > 0 and sys.argv[0] == 'mimify'):
+    import getopt
+    usage = 'Usage: mimify [-l len] -[ed] [infile [outfile]]'
+
+    decode_base64 = 0
+    opts, args = getopt.getopt(sys.argv[1:], 'l:edb')
+    if len(args) not in (0, 1, 2):
+        print usage
+        sys.exit(1)
+    if (('-e', '') in opts) == (('-d', '') in opts) or \
+       ((('-b', '') in opts) and (('-d', '') not in opts)):
+        print usage
+        sys.exit(1)
+    for o, a in opts:
+        if o == '-e':
+            encode = mimify
+        elif o == '-d':
+            encode = unmimify
+        elif o == '-l':
+            try:
+                MAXLEN = int(a)
+            except (ValueError, OverflowError):
+                print usage
+                sys.exit(1)
+        elif o == '-b':
+            decode_base64 = 1
+    if len(args) == 0:
+        encode_args = (sys.stdin, sys.stdout)
+    elif len(args) == 1:
+        encode_args = (args[0], sys.stdout)
+    else:
+        encode_args = (args[0], args[1])
+    if decode_base64:
+        encode_args = encode_args + (decode_base64,)
+    apply(encode, encode_args)
diff --git a/lib-python/2.2/multifile.py b/lib-python/2.2/multifile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/multifile.py
@@ -0,0 +1,160 @@
+"""A readline()-style interface to the parts of a multipart message.
+
+The MultiFile class makes each part of a multipart message "feel" like
+an ordinary file, as long as you use fp.readline().  Allows recursive
+use, for nested multipart messages.  Probably best used together
+with module mimetools.
+
+Suggested use:
+
+real_fp = open(...)
+fp = MultiFile(real_fp)
+
+"read some lines from fp"
+fp.push(separator)
+while 1:
+        "read lines from fp until it returns an empty string" (A)
+        if not fp.next(): break
+fp.pop()
+"read remaining lines from fp until it returns an empty string"
+
+The latter sequence may be used recursively at (A).
+It is also allowed to use multiple push()...pop() sequences.
+
+If seekable is given as 0, the class code will not do the bookkeeping
+it normally attempts in order to make seeks relative to the beginning of the
+current file part.  This may be useful when using MultiFile with a non-
+seekable stream object.
+"""
+
+__all__ = ["MultiFile","Error"]
+
class Error(Exception):
    """Raised for improper use of a MultiFile object."""
+
class MultiFile:
    """A readline()-style view of one part of a multipart stream.

    Boundary strings are pushed with push(); readline() then returns ''
    whenever it hits a section divider ('--sep') or end marker
    ('--sep--') for a pushed boundary, and next()/pop() move past them.
    """

    seekable = 0        # class default; set per-instance when fp can seek

    def __init__(self, fp, seekable=1):
        self.fp = fp
        self.stack = [] # Grows down
        self.level = 0      # depth of the boundary we stopped at (0: reading data)
        self.last = 0       # true when stopped at an end marker ('--sep--')
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()  # absolute offset of current part's start
            self.posstack = [] # Grows down

    def tell(self):
        """Return the read position relative to the current part's start."""
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - self.start

    def seek(self, pos, whence=0):
        """Seek within the current part only (0 <= pos <= current offset)."""
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        if not 0 <= pos <= here or \
                        self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0

    def readline(self):
        """Return the next line of this part, or '' at a boundary/EOF."""
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No?  OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i in range(len(self.stack)):
            sep = self.stack[i]
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            # matched an *outer* boundary: an inner end marker was missing
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''

    def readlines(self):
        """Return all remaining lines of this part as a list."""
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list

    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())

    def next(self):
        """Skip to the next section; return 0 at the end marker, else 1."""
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1

    def push(self, sep):
        """Begin parsing a (nested) part delimited by boundary *sep*."""
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.insert(0, sep)
        if self.seekable:
            self.posstack.insert(0, self.start)
            self.start = self.fp.tell()

    def pop(self):
        """Finish with the innermost pushed boundary."""
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            # remember the boundary position as an absolute offset so it
            # can be re-expressed relative to the enclosing part below
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        del self.stack[0]
        if self.seekable:
            self.start = self.posstack[0]
            del self.posstack[0]
            if self.level > 0:
                self.lastpos = abslastpos - self.start

    def is_data(self, line):
        # Any line not starting with '--' is part content, not a marker.
        return line[:2] != '--'

    def section_divider(self, str):
        return "--" + str

    def end_marker(self, str):
        return "--" + str + "--"
diff --git a/lib-python/2.2/mutex.py b/lib-python/2.2/mutex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/mutex.py
@@ -0,0 +1,51 @@
+"""Mutual exclusion -- for use with module sched
+
+A mutex has two pieces of state -- a 'locked' bit and a queue.
+When the mutex is not locked, the queue is empty.
+Otherwise, the queue contains 0 or more (function, argument) pairs
+representing functions (or methods) waiting to acquire the lock.
+When the mutex is unlocked while the queue is not empty,
+the first queue entry is removed and its function(argument) pair called,
+implying it now has the lock.
+
+Of course, no multi-threading is implied -- hence the funny interface
+for lock, where a function is called once the lock is aquired.
+"""
+
class mutex:
    """A queueing mutex: lock() runs its callback once the lock is held.

    No real threading is involved; waiters are (function, argument)
    pairs run one at a time by successive unlock() calls.
    """

    def __init__(self):
        """Create a new mutex -- initially unlocked."""
        self.locked = 0
        self.queue = []

    def test(self):
        """Test the locked bit of the mutex."""
        return self.locked

    def testandset(self):
        """Atomic test-and-set -- grab the lock if it is not set,
        return true if it succeeded."""
        if self.locked:
            return 0
        self.locked = 1
        return 1

    def lock(self, function, argument):
        """Lock a mutex, call the function with supplied argument
        when it is acquired.  If the mutex is already locked, place
        function and argument in the queue."""
        if not self.testandset():
            self.queue.append((function, argument))
        else:
            function(argument)

    def unlock(self):
        """Unlock a mutex.  If the queue is not empty, call the next
        function with its argument."""
        if not self.queue:
            self.locked = 0
        else:
            # Hand the lock straight to the next waiter.
            function, argument = self.queue.pop(0)
            function(argument)
diff --git a/lib-python/2.2/netrc.py b/lib-python/2.2/netrc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/netrc.py
@@ -0,0 +1,108 @@
+"""An object-oriented interface to .netrc files."""
+
+# Module and documentation by Eric S. Raymond, 21 Dec 1998
+
+import os, shlex
+
+__all__ = ["netrc", "NetrcParseError"]
+
+
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        # Include the file and line for easy error reporting.
        return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
+
+
class netrc:
    """Parsed representation of a .netrc file.

    self.hosts maps machine names (or 'default') to (login, account,
    password) tuples; self.macros maps macdef names to lists of raw
    lines.  Raises NetrcParseError on malformed input.
    """

    def __init__(self, file=None):
        # Default to $HOME/.netrc (raises KeyError if HOME is unset).
        if not file:
            file = os.path.join(os.environ['HOME'], ".netrc")
        fp = open(file)
        self.hosts = {}
        self.macros = {}
        lexer = shlex.shlex(fp)
        # .netrc tokens (hostnames, passwords) may contain punctuation.
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # a blank line (or EOF) terminates the macro
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)

            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt=='' or tt == 'machine' or
                    tt == 'default' or tt =='macdef'):
                    # Entry ends at EOF or the next top-level keyword;
                    # it is only valid if a password was given.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (login, account, password) tuple for given host.

        Falls back to the 'default' entry; returns None if neither the
        host nor a default entry exists.
        """
        # 'key in dict' instead of the deprecated dict.has_key(key):
        # supported since Python 2.2, required under Python 3.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # bug fix: the account clause used to be glued onto the
                # login line with no tab or trailing newline
                rep = rep + "\taccount " + repr(attrs[1]) + "\n"
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
+
# Smoke test: parse the user's own ~/.netrc and dump it in .netrc syntax.
if __name__ == '__main__':
    print netrc()
diff --git a/lib-python/2.2/nntplib.py b/lib-python/2.2/nntplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/nntplib.py
@@ -0,0 +1,575 @@
+"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
+
+Example:
+
+>>> from nntplib import NNTP
+>>> s = NNTP('news')
+>>> resp, count, first, last, name = s.group('comp.lang.python')
+>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
+Group comp.lang.python has 51 articles, range 5770 to 5821
+>>> resp, subs = s.xhdr('subject', first + '-' + last)
+>>> resp = s.quit()
+>>>
+
+Here 'resp' is the server response line.
+Error responses are turned into exceptions.
+
+To post an article from a file:
+>>> f = open(filename, 'r') # file containing article, including header
+>>> resp = s.post(f)
+>>>
+
+For descriptions of all methods, read the comments in the code below.
+Note that all arguments and return values representing article numbers
+are strings, not numbers, since they are rarely used for calculations.
+"""
+
+# RFC 977 by Brian Kantor and Phil Lapsley.
+# xover, xgtitle, xpath, date methods by Kevan Heydon
+
+
+# Imports
+import re
+import socket
+import types
+
+__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
+           "NNTPPermanentError","NNTPProtocolError","NNTPDataError",
+           "error_reply","error_temp","error_perm","error_proto",
+           "error_data",]
+
+# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions.

    The first positional argument, when present, is saved on the
    instance as `response' (the raw server response line)."""
    def __init__(self, *args):
        Exception.__init__(self, *args)
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
+
class NNTPReplyError(NNTPError):
    """Raised when the server sends an unexpected [123]xx reply."""
+
class NNTPTemporaryError(NNTPError):
    """Raised for 4xx (temporary failure) server responses."""
+
class NNTPPermanentError(NNTPError):
    """Raised for 5xx (permanent failure) server responses."""
+
class NNTPProtocolError(NNTPError):
    """Raised when a response does not begin with a digit [1-5]."""
+
class NNTPDataError(NNTPError):
    """Raised when the data in a server response is malformed."""
+
# for backwards compatibility: older code imported these lower-case
# names before the exception classes above existed.
error_reply = NNTPReplyError        # unexpected [123]xx reply
error_temp = NNTPTemporaryError     # 4xx errors
error_perm = NNTPPermanentError     # 5xx errors
error_proto = NNTPProtocolError     # response doesn't start with [1-5]
error_data = NNTPDataError          # error in response data
+
+
+
# Standard port used by NNTP servers
NNTP_PORT = 119


# Response numbers that are followed by additional text (e.g. article)
# 100 HELP, 215 LIST, 220/221/222 ARTICLE/HEAD/BODY, 224 XOVER,
# 230 NEWNEWS, 231 NEWGROUPS, 282 XGTITLE.
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']


# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
+
+
+
+# The class itself
class NNTP:
    """Client for speaking NNTP (RFC 977) to a news server over a socket.

    All article numbers in arguments and return values are strings, not
    integers, since they are rarely used for calculations.  Error
    responses from the server are raised as NNTP*Error exceptions."""

    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # Buffered file wrapper used for all reads; writes go directly
        # through the socket (see putline()).
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        self.welcome = self.getresp()

        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError, e:
                if user and e.response[:3] == '480':
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise
        if user:
            # RFC 2980 AUTHINFO exchange: 381 means a password is expected.
            resp = self.shortcmd('authinfo user '+user)
            if resp[:3] == '381':
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                            'authinfo pass '+password)
                    if resp[:3] != '281':
                        raise NNTPPermanentError(resp)
            if readermode_afterauth:
                try:
                    self.welcome = self.shortcmd('mode reader')
                except NNTPPermanentError:
                    # error 500, probably 'not implemented'
                    pass


    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed

    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""

        if self.debugging: print '*welcome*', `self.welcome`
        return self.welcome

    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""

        self.debugging = level
    debug = set_debuglevel  # alias kept for backward compatibility

    def putline(self, line):
        """Internal: send one line to the server, appending CRLF."""
        line = line + CRLF
        if self.debugging > 1: print '*put*', `line`
        self.sock.sendall(line)

    def putcmd(self, line):
        """Internal: send one command to the server (through putline())."""
        if self.debugging: print '*cmd*', `line`
        self.putline(line)

    def getline(self):
        """Internal: return one line from the server, stripping CRLF.
        Raise EOFError if the connection is closed."""
        line = self.file.readline()
        if self.debugging > 1:
            print '*get*', `line`
        if not line: raise EOFError
        # Accept any of CRLF, CR or LF as the terminator.
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line

    def getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getline()
        if self.debugging: print '*resp*', `resp`
        # The first digit of the status code determines the outcome.
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp

    def getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        If `file' is a string it is opened for writing; if it is a file
        object the text lines are written to it (and the returned list is
        empty).  Raise various errors if the response indicates an error."""

        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, types.StringType):
                openedFile = file = open(file, "w")

            resp = self.getresp()
            if resp[:3] not in LONGRESP:
                raise NNTPReplyError(resp)
            list = []
            while 1:
                line = self.getline()
                # A line holding a lone '.' terminates the text block.
                if line == '.':
                    break
                # Undo RFC 977 dot-stuffing ('..' at start -> '.').
                if line[:2] == '..':
                    line = line[1:]
                if file:
                    file.write(line + "\n")
                else:
                    list.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()

        return resp, list

    def shortcmd(self, line):
        """Internal: send a command and get the response."""
        self.putcmd(line)
        return self.getresp()

    def longcmd(self, line, file=None):
        """Internal: send a command and get the response plus following text."""
        self.putcmd(line)
        return self.getlongresp(file)

    def newgroups(self, date, time):
        """Process a NEWGROUPS command.  Arguments:
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of newsgroup names"""

        return self.longcmd('NEWGROUPS ' + date + ' ' + time)

    def newnews(self, group, date, time):
        """Process a NEWNEWS command.  Arguments:
        - group: group name or '*'
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of article ids"""

        cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
        return self.longcmd(cmd)

    def list(self):
        """Process a LIST command.  Return:
        - resp: server response if successful
        - list: list of (group, last, first, flag) (strings)"""

        resp, list = self.longcmd('LIST')
        for i in range(len(list)):
            # Parse lines into "group last first flag"
            list[i] = tuple(list[i].split())
        return resp, list

    def group(self, name):
        """Process a GROUP command.  Argument:
        - group: the group name
        Returns:
        - resp: server response if successful
        - count: number of articles (string)
        - first: first article number (string)
        - last: last article number (string)
        - name: the group name"""

        resp = self.shortcmd('GROUP ' + name)
        if resp[:3] != '211':
            raise NNTPReplyError(resp)
        # Response format: '211 count first last group'; any of the
        # trailing fields may be missing, in which case 0 is reported.
        words = resp.split()
        count = first = last = 0
        n = len(words)
        if n > 1:
            count = words[1]
            if n > 2:
                first = words[2]
                if n > 3:
                    last = words[3]
                    if n > 4:
                        name = words[4].lower()
        return resp, count, first, last, name

    def help(self):
        """Process a HELP command.  Returns:
        - resp: server response if successful
        - list: list of strings"""

        return self.longcmd('HELP')

    def statparse(self, resp):
        """Internal: parse the response of a STAT, NEXT or LAST command.
        Returns (resp, article number, message id); missing fields
        default to 0 and ''."""
        if resp[:2] != '22':
            raise NNTPReplyError(resp)
        words = resp.split()
        nr = 0
        id = ''
        n = len(words)
        if n > 1:
            nr = words[1]
            if n > 2:
                id = words[2]
        return resp, nr, id

    def statcmd(self, line):
        """Internal: process a STAT, NEXT or LAST command."""
        resp = self.shortcmd(line)
        return self.statparse(resp)

    def stat(self, id):
        """Process a STAT command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr:   the article number
        - id:   the article id"""

        return self.statcmd('STAT ' + id)

    def next(self):
        """Process a NEXT command.  No arguments.  Return as for STAT."""
        return self.statcmd('NEXT')

    def last(self):
        """Process a LAST command.  No arguments.  Return as for STAT."""
        return self.statcmd('LAST')

    def artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command."""
        resp, list = self.longcmd(line, file)
        resp, nr, id = self.statparse(resp)
        return resp, nr, id, list

    def head(self, id):
        """Process a HEAD command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's header"""

        return self.artcmd('HEAD ' + id)

    def body(self, id, file=None):
        """Process a BODY command.  Argument:
        - id: article number or message id
        - file: Filename string or file object to store the article in
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's body or an empty list
                if file was used"""

        return self.artcmd('BODY ' + id, file)

    def article(self, id):
        """Process an ARTICLE command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article"""

        return self.artcmd('ARTICLE ' + id)

    def slave(self):
        """Process a SLAVE command.  Returns:
        - resp: server response if successful"""

        return self.shortcmd('SLAVE')

    def xhdr(self, hdr, str):
        """Process an XHDR command (optional server extension).  Arguments:
        - hdr: the header type (e.g. 'subject')
        - str: an article nr, a message id, or a range nr1-nr2
        Returns:
        - resp: server response if successful
        - list: list of (nr, value) strings"""

        pat = re.compile('^([0-9]+) ?(.*)\n?')
        resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str)
        for i in range(len(lines)):
            line = lines[i]
            m = pat.match(line)
            if m:
                # Replace the raw line with its (number, value) pair.
                lines[i] = m.group(1, 2)
        return resp, lines

    def xover(self,start,end):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        Returns:
        - resp: server response if successful
        - list: list of (art-nr, subject, poster, date,
                         id, references, size, lines)"""

        resp, lines = self.longcmd('XOVER ' + start + '-' + end)
        xover_lines = []
        for line in lines:
            # Overview fields are tab-separated; fewer than 8 fields is
            # a malformed line.
            elem = line.split("\t")
            try:
                xover_lines.append((elem[0],
                                    elem[1],
                                    elem[2],
                                    elem[3],
                                    elem[4],
                                    elem[5].split(),
                                    elem[6],
                                    elem[7]))
            except IndexError:
                raise NNTPDataError(line)
        return resp,xover_lines

    def xgtitle(self, group):
        """Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (i.e. news.*)
        Returns:
        - resp: server response if successful
        - list: list of (name,title) strings"""

        line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
        resp, raw_lines = self.longcmd('XGTITLE ' + group)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines

    def xpath(self,id):
        """Process an XPATH command (optional server extension) Arguments:
        - id: Message id of article
        Returns:
        resp: server response if successful
        path: directory path to article"""

        resp = self.shortcmd("XPATH " + id)
        if resp[:3] != '223':
            raise NNTPReplyError(resp)
        try:
            [resp_num, path] = resp.split()
        except ValueError:
            raise NNTPReplyError(resp)
        else:
            return resp, path

    def date (self):
        """Process the DATE command. Arguments:
        None
        Returns:
        resp: server response if successful
        date: Date suitable for newnews/newgroups commands etc.
        time: Time suitable for newnews/newgroups commands etc."""

        resp = self.shortcmd("DATE")
        if resp[:3] != '111':
            raise NNTPReplyError(resp)
        # Response format: '111 YYYYMMDDhhmmss'; split out the date and
        # time parts in the form the NEWNEWS/NEWGROUPS commands expect.
        elem = resp.split()
        if len(elem) != 2:
            raise NNTPDataError(resp)
        date = elem[1][2:8]
        time = elem[1][-6:]
        if len(date) != 6 or len(time) != 6:
            raise NNTPDataError(resp)
        return resp, date, time


    def post(self, f):
        """Process a POST command.  Arguments:
        - f: file containing the article
        Returns:
        - resp: server response if successful"""

        resp = self.shortcmd('POST')
        # Raises error_??? if posting is not allowed
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # RFC 977 dot-stuffing: escape leading '.' so it is not
            # mistaken for the end-of-article marker.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()

    def ihave(self, id, f):
        """Process an IHAVE command.  Arguments:
        - id: message-id of the article
        - f:  file containing the article
        Returns:
        - resp: server response if successful
        Note that if the server refuses the article an exception is raised."""

        resp = self.shortcmd('IHAVE ' + id)
        # Raises error_??? if the server already has it
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuffing, as in post().
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()

    def quit(self):
        """Process a QUIT command and close the socket.  Returns:
        - resp: server response if successful"""

        resp = self.shortcmd('QUIT')
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
+
+
+def _test():
+    """Minimal test function."""
+    s = NNTP('news', readermode='reader')
+    resp, count, first, last, name = s.group('comp.lang.python')
+    print resp
+    print 'Group', name, 'has', count, 'articles, range', first, 'to', last
+    resp, subs = s.xhdr('subject', first + '-' + last)
+    print resp
+    for item in subs:
+        print "%7s %s" % item
+    resp = s.quit()
+    print resp
+
+
+# Run the test when run as a script
# Run the test when run as a script
# (requires an NNTP server reachable under the hostname 'news').
if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/ntpath.py b/lib-python/2.2/ntpath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/ntpath.py
@@ -0,0 +1,482 @@
+# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
+"""Common pathname manipulations, WindowsNT/95 version.
+
+Instead of importing this module directly, import os and refer to this
+module as os.path.
+"""
+
+import os
+import stat
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+           "basename","dirname","commonprefix","getsize","getmtime",
+           "getatime","islink","exists","isdir","isfile","ismount",
+           "walk","expanduser","expandvars","normpath","abspath","splitunc",
+           "realpath"]
+
+# Normalize the case of a pathname and map slashes to backslashes.
+# Other normalizations (such as optimizing '../' away) are not done
+# (this is done by normpath).
+
def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes."""
    # Lowercasing never touches the separators, so the order of the two
    # transformations is irrelevant.
    return s.lower().replace("/", "\\")
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on the Mac or MS-DOS.
+# For DOS it is absolute if it starts with a slash or backslash (current
+# volume), or if a pathname after the volume letter and colon / UNC resource
+# starts with a slash or backslash.
+
def isabs(s):
    """Test whether a path is absolute (true when the part after any
    drive letter starts with a slash or backslash)."""
    tail = splitdrive(s)[1]
    return tail != '' and tail[:1] in '/\\'
+
+
+# Join two (or more) paths.
+
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed"""
    path = a
    for b in p:
        # First decide whether b simply replaces everything joined so
        # far, which happens when path is empty or b is absolute --
        # except that an absolute b without a drive letter keeps the
        # drive of path:
        #     1. join('c:', '/a') == 'c:/a'
        #     2. join('c:/', '/a') == 'c:/a'
        # But
        #     3. join('c:/a', '/b') == '/b'
        #     4. join('c:', 'd:/') = 'd:/'
        #     5. join('c:/', 'd:/') = 'd:/'
        if path == "":
            path = b
            continue
        if isabs(b):
            if path[1:2] != ":" or b[1:2] == ":":
                # path has no drive letter, or cases 4 and 5 above.
                path = b
                continue
            if len(path) > 3 or (len(path) == 3 and
                                 path[-1] not in "/\\"):
                # case 3: path has a drive letter plus a real component.
                path = b
                continue
        # Otherwise append b, making sure exactly one separator
        # ends up between the two parts.
        assert len(path) > 0
        if path[-1] in "/\\":
            if b and b[0] in "/\\":
                path = path + b[1:]
            else:
                path = path + b
        elif path[-1] == ":":
            path = path + b
        elif b:
            if b[0] in "/\\":
                path = path + b
            else:
                path = path + "\\" + b
        else:
            # path is not empty and does not end with a backslash, but
            # b is empty; since, e.g., split('a/') produces ('a', ''),
            # it's best if join() adds a backslash in this case.
            path = path + '\\'

    return path
+
+
+# Split a path in a drive specification (a drive letter followed by a
+# colon) and the path specification.
+# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)";  either part may be empty"""
    # A drive is exactly one character followed by ':'.
    if p[1:2] != ':':
        return '', p
    return p[:2], p[2:]
+
+
+# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.

    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes).  unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    if p[1:2] == ':':
        # A drive letter rules out a UNC prefix.
        return '', p
    if p[:2] not in ('//', '\\\\'):
        return '', p
    # Looks like a UNC path: \\machine\mountpoint\directories...
    # The machine+mountpoint pair plays the role of the drive letter.
    normp = normcase(p)
    hostsep = normp.find('\\', 2)
    if hostsep == -1:
        ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
        return ("", p)
    mountsep = normp.find('\\', hostsep + 1)
    if mountsep == -1:
        mountsep = len(p)
    return p[:mountsep], p[mountsep:]
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest).  After the trailing '/' is stripped, the invariant
+# join(head, tail) == p holds.
+# The resulting head won't end in '/' unless it is the root.
+
def split(p):
    """Split a pathname.

    Return tuple (head, tail) where tail is everything after the final slash.
    Either part may be empty."""

    d, p = splitdrive(p)
    # Find the index just past the last separator.
    i = len(p)
    while i > 0 and p[i-1] not in '/\\':
        i = i - 1
    head, tail = p[:i], p[i:]  # now tail has no slashes
    # Strip trailing slashes from head, unless head consists entirely
    # of slashes (the root), in which case keep it unchanged.
    stripped = head
    while stripped and stripped[-1] in '/\\':
        stripped = stripped[:-1]
    if stripped:
        head = stripped
    return d + head, tail
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
def splitext(p):
    """Split the extension from a pathname.

    Extension is everything from the last dot to the end.
    Return (root, ext), either part may be empty."""
    root, ext = '', ''
    # Scan left to right, keeping the most recent dot-suffix in `ext'.
    for ch in p:
        if ch in '/\\':
            # Separator: dots in earlier components don't count, so
            # fold any pending extension back into the root.
            root = root + ext + ch
            ext = ''
        elif ch == '.':
            if ext:
                # A later dot restarts the extension.
                root = root + ext
                ext = ch
            else:
                ext = ch
        elif ext:
            ext = ext + ch
        else:
            root = root + ch
    return root, ext
+
+
+# Return the tail (basename) part of a path.
+
def basename(p):
    """Returns the final component of a pathname"""
    head, tail = split(p)
    return tail
+
+
+# Return the head (dirname) part of a path.
+
def dirname(p):
    """Returns the directory component of a pathname"""
    head, tail = split(p)
    return head
+
+
+# Return the longest prefix of all list elements.
+
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    # NOTE: this is a character-wise prefix, not a component-wise one;
    # e.g. ['c:/ab', 'c:/ad'] yields 'c:/a'.
    if not m:
        return ''
    prefix = m[0]
    for item in m:
        i = 0
        while i < len(prefix):
            if prefix[:i+1] != item[:i+1]:
                prefix = prefix[:i]
                if i == 0:
                    return ''
                break
            i = i + 1
    return prefix
+
+
+# Get size, mtime, atime of files.
+
def getsize(filename):
    """Return the size of a file, reported by os.stat()"""
    return os.stat(filename)[stat.ST_SIZE]
+
def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()"""
    return os.stat(filename)[stat.ST_MTIME]
+
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()"""
    return os.stat(filename)[stat.ST_ATIME]
+
+
+# Is a path a symbolic link?
+# This will always return false on systems where posix.lstat doesn't exist.
+
def islink(path):
    """Test for symbolic link.  On WindowsNT/95 always returns false"""
    # This platform (as of this vintage) has no symbolic links and no
    # posix.lstat, so every path is reported as a non-link.
    return 0
+
+
+# Does a path exist?
+# This is false for dangling symbolic links.
+
def exists(path):
    """Test whether a path exists"""
    # Existence is probed by stat(); any os.error (missing file,
    # permission problem on a parent, ...) counts as "does not exist".
    try:
        os.stat(path)
    except os.error:
        return 0
    return 1
+
+
+# Is a path a dos directory?
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path.
+
def isdir(path):
    """Test whether a path is a directory"""
    try:
        mode = os.stat(path)[stat.ST_MODE]
    except os.error:
        # Unstat-able paths are treated as non-directories.
        return 0
    return stat.S_ISDIR(mode)
+
+
+# Is a path a regular file?
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path.
+
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        mode = os.stat(path)[stat.ST_MODE]
    except os.error:
        # Unstat-able paths are treated as non-files.
        return 0
    return stat.S_ISREG(mode)
+
+
+# Is a path a mount point?  Either a root (with or without drive letter)
+# or an UNC path with at most a / or \ after the mount point.
+
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)"""
    unc, rest = splitunc(path)
    if unc:
        # A bare UNC mount, optionally followed by a single separator.
        return rest in ("", "/", "\\")
    tail = splitdrive(path)[1]
    return len(tail) == 1 and tail[0] in '/\\'
+
+
+# Directory tree walk.
+# For each directory under top (including top itself, but excluding
+# '.' and '..'), func(arg, dirname, filenames) is called, where
+# dirname is the name of the directory and filenames is the list
+# files files (and subdirectories etc.) in the directory.
+# The func may modify the filenames list, to implement a filter,
+# or to impose a different order of visiting.
+
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""

    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip it.
        return
    func(arg, top, names)
    for name in names:
        if name == '.' or name == '..':
            continue
        fullname = join(top, name)
        if isdir(fullname):
            walk(fullname, func, arg)
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # Find the end of the (possibly empty) user name.
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    if i != 1:
        # '~user' forms cannot be resolved on Windows; leave untouched.
        return path
    if os.environ.has_key('HOME'):
        userhome = os.environ['HOME']
    elif os.environ.has_key('HOMEPATH'):
        # Fall back to HOMEDRIVE + HOMEPATH, tolerating a missing drive.
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    else:
        return path
    return userhome + path[i:]
+
+
+# Expand paths containing shell variable substitutions.
+# The following rules apply:
+#       - no expansion within single quotes
+#       - no escape character, except for '$$' which is translated into '$'
+#       - ${varname} is accepted.
+#       - varnames can be made out of letters, digits and the character '_'
+# XXX With COMMAND.COM you can use any characters in a variable name,
+# XXX except '^|<>='.
+
def expandvars(path):
    """Expand shell variables of form $var and ${var}.

    Unknown variables are left unchanged."""
    # NOTE: the loop below treats `path', `index' and `pathlen' as a
    # mutable cursor -- `path' itself is chopped down whenever a quoted
    # section or ${...} construct is consumed, so the exact statement
    # order matters.
    if '$' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                # Copy everything through the closing quote verbatim.
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # Unterminated quote: keep the rest as-is and stop.
                res = res + path
                index = pathlen - 1
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                # '$$' is an escaped literal '$'.
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    # An unknown ${var} is dropped from the result.
                    if os.environ.has_key(var):
                        res = res + os.environ[var]
                except ValueError:
                    # Unterminated '${': keep the rest as-is and stop.
                    res = res + path
                    index = pathlen - 1
            else:
                # Bare $var: the name is the longest run of varchars.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if os.environ.has_key(var):
                    res = res + os.environ[var]
                if c != '':
                    res = res + c
        else:
            res = res + c
        index = index + 1
    return res
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
+# Previously, this function also truncated pathnames to 8+3 format,
+# but as this module is called "ntpath", that's obviously wrong!
+
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # Move leading backslashes onto the prefix so an absolute path
    # keeps its root after the components are collapsed below.
    while path[:1] == "\\":
        prefix = prefix + "\\"
        path = path[1:]
    parts = path.split("\\")
    i = 0
    while i < len(parts):
        part = parts[i]
        if part == '' or part == '.':
            # Empty components (from doubled slashes) and '.' vanish.
            del parts[i]
        elif part != '..':
            i = i + 1
        elif i > 0 and parts[i-1] != '..':
            # '..' cancels the preceding real component.
            del parts[i-1:i+1]
            i = i - 1
        elif i == 0 and prefix.endswith("\\"):
            # '..' at the root of an absolute path is meaningless.
            del parts[i]
        else:
            # Leading '..' in a relative path must be preserved.
            i = i + 1
    # If the path is now empty, substitute '.'
    if not prefix and not parts:
        parts.append('.')
    return prefix + "\\".join(parts)
+
+
+# Return an absolute path.
def abspath(path):
    """Return the absolute version of a path"""
    try:
        from nt import _getfullpathname
    except ImportError: # Not running on Windows - mock up something sensible.
        # Rebind the module-level name so the failed import is only
        # attempted once; later calls go straight to _abspath.
        global abspath
        def _abspath(path):
            # Pure-Python fallback: anchor at the cwd, then normalize.
            if not isabs(path):
                path = join(os.getcwd(), path)
            return normpath(path)
        abspath = _abspath
        return _abspath(path)

    if path: # Empty path must return current working directory.
        try:
            path = _getfullpathname(path)
        except WindowsError:
            pass # Bad path - return unchanged.
    else:
        path = os.getcwd()
    return normpath(path)
+
# realpath is a no-op on systems without islink support: with no
# symbolic links to resolve, an absolute path is already canonical.
realpath = abspath
diff --git a/lib-python/2.2/nturl2path.py b/lib-python/2.2/nturl2path.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/nturl2path.py
@@ -0,0 +1,66 @@
+"""Convert a NT pathname to a file URL and vice versa."""
+
+def url2pathname(url):
+    r"""Convert a URL to a DOS path.
+
+            ///C|/foo/bar/spam.foo
+
+                    becomes
+
+            C:\foo\bar\spam.foo
+    """
+    import string, urllib
+    if not '|' in url:
+        # No drive specifier, just convert slashes
+        if url[:4] == '////':
+            # path is something like ////host/path/on/remote/host
+            # convert this to \\host\path\on\remote\host
+            # (notice halving of slashes at the start of the path)
+            url = url[2:]
+        components = url.split('/')
+        # make sure not to convert quoted slashes :-)
+        return urllib.unquote('\\'.join(components))
+    comp = url.split('|')
+    if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
+        error = 'Bad URL: ' + url
+        raise IOError, error
+    # Drive letter is the character just before the '|'.
+    drive = comp[0][-1].upper()
+    components = comp[1].split('/')
+    path = drive + ':'
+    for  comp in components:
+        if comp:
+            # Unquote each component separately so quoted slashes survive.
+            path = path + '\\' + urllib.unquote(comp)
+    return path
+
+def pathname2url(p):
+    r"""Convert a DOS path name to a file url.
+
+            C:\foo\bar\spam.foo
+
+                    becomes
+
+            ///C|/foo/bar/spam.foo
+    """
+
+    import urllib
+    if not ':' in p:
+        # No drive specifier, just convert slashes and quote the name
+        if p[:2] == '\\\\':
+            # path is something like \\host\path\on\remote\host
+            # convert this to ////host/path/on/remote/host
+            # (notice doubling of slashes at the start of the path)
+            p = '\\\\' + p
+        components = p.split('\\')
+        return urllib.quote('/'.join(components))
+    comp = p.split(':')
+    if len(comp) != 2 or len(comp[0]) > 1:
+        # Only a single-letter drive followed by exactly one ':' is valid.
+        error = 'Bad path: ' + p
+        raise IOError, error
+
+    drive = urllib.quote(comp[0].upper())
+    components = comp[1].split('\\')
+    path = '///' + drive + '|'
+    for comp in components:
+        if comp:
+            # Quote each component separately; empty components vanish.
+            path = path + '/' + urllib.quote(comp)
+    return path
diff --git a/lib-python/2.2/os.py b/lib-python/2.2/os.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/os.py
@@ -0,0 +1,613 @@
+r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on.
+
+This exports:
+  - all functions from posix, nt, dos, os2, mac, or ce, e.g. unlink, stat, etc.
+  - os.path is one of the modules posixpath, ntpath, macpath, or dospath
+  - os.name is 'posix', 'nt', 'dos', 'os2', 'mac', 'ce' or 'riscos'
+  - os.curdir is a string representing the current directory ('.' or ':')
+  - os.pardir is a string representing the parent directory ('..' or '::')
+  - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
+  - os.extsep is the extension separator ('.' or '/')
+  - os.altsep is the alternate pathname separator (None or '/')
+  - os.pathsep is the component separator used in $PATH etc
+  - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
+  - os.defpath is the default search path for executables
+
+Programs that import and use 'os' stand a better chance of being
+portable between different platforms.  Of course, they must then
+only use functions that are defined by all platforms (e.g., unlink
+and opendir), and leave all pathname manipulation to os.path
+(e.g., split and join).
+"""
+
+#'
+
+import sys
+
+_names = sys.builtin_module_names
+
+altsep = None
+
+__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
+           "defpath", "name"]
+
+def _get_exports_list(module):
+    # Names a module exports: its __all__ if it defines one, otherwise
+    # every attribute not starting with an underscore.
+    try:
+        return list(module.__all__)
+    except AttributeError:
+        return [n for n in dir(module) if n[0] != '_']
+
+# Platform selection: each branch imports the matching builtin OS module
+# wholesale, sets the path-syntax constants, and binds 'path' to the
+# corresponding path-manipulation module.
+if 'posix' in _names:
+    name = 'posix'
+    linesep = '\n'
+    curdir = '.'; pardir = '..'; sep = '/'; pathsep = ':'
+    defpath = ':/bin:/usr/bin'
+    from posix import *
+    try:
+        from posix import _exit
+    except ImportError:
+        pass
+    import posixpath
+    path = posixpath
+    del posixpath
+
+    import posix
+    __all__.extend(_get_exports_list(posix))
+    del posix
+
+elif 'nt' in _names:
+    name = 'nt'
+    linesep = '\r\n'
+    curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
+    defpath = '.;C:\\bin'
+    altsep = '/'
+    from nt import *
+    # exec is used so a missing name is just skipped, like the
+    # try/except ImportError in the other branches.
+    for i in ['_exit']:
+        try:
+            exec "from nt import " + i
+        except ImportError:
+            pass
+    import ntpath
+    path = ntpath
+    del ntpath
+
+    import nt
+    __all__.extend(_get_exports_list(nt))
+    del nt
+
+elif 'dos' in _names:
+    name = 'dos'
+    linesep = '\r\n'
+    curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
+    defpath = '.;C:\\bin'
+    from dos import *
+    try:
+        from dos import _exit
+    except ImportError:
+        pass
+    import dospath
+    path = dospath
+    del dospath
+
+    import dos
+    __all__.extend(_get_exports_list(dos))
+    del dos
+
+elif 'os2' in _names:
+    name = 'os2'
+    linesep = '\r\n'
+    curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
+    defpath = '.;C:\\bin'
+    from os2 import *
+    try:
+        from os2 import _exit
+    except ImportError:
+        pass
+    # OS/2 shares Windows path syntax, so reuse ntpath.
+    import ntpath
+    path = ntpath
+    del ntpath
+
+    import os2
+    __all__.extend(_get_exports_list(os2))
+    del os2
+
+elif 'mac' in _names:
+    name = 'mac'
+    linesep = '\r'
+    curdir = ':'; pardir = '::'; sep = ':'; pathsep = '\n'
+    defpath = ':'
+    from mac import *
+    try:
+        from mac import _exit
+    except ImportError:
+        pass
+    import macpath
+    path = macpath
+    del macpath
+
+    import mac
+    __all__.extend(_get_exports_list(mac))
+    del mac
+
+elif 'ce' in _names:
+    name = 'ce'
+    linesep = '\r\n'
+    curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
+    defpath = '\\Windows'
+    from ce import *
+    for i in ['_exit']:
+        try:
+            exec "from ce import " + i
+        except ImportError:
+            pass
+    # We can use the standard Windows path.
+    import ntpath
+    path = ntpath
+    del ntpath
+
+    import ce
+    __all__.extend(_get_exports_list(ce))
+    del ce
+
+elif 'riscos' in _names:
+    name = 'riscos'
+    linesep = '\n'
+    curdir = '@'; pardir = '^'; sep = '.'; pathsep = ','
+    defpath = '<Run$Dir>'
+    from riscos import *
+    try:
+        from riscos import _exit
+    except ImportError:
+        pass
+    import riscospath
+    path = riscospath
+    del riscospath
+
+    import riscos
+    __all__.extend(_get_exports_list(riscos))
+    del riscos
+
+else:
+    raise ImportError, 'no os specific module found'
+
+
+# RISC OS uses '.' as the path separator, so the extension separator
+# must be something else ('/'); everywhere else it is '.'.
+if sep=='.':
+    extsep = '/'
+else:
+    extsep = '.'
+
+__all__.append("path")
+
+del _names
+
+# Register the chosen path module so 'import os.path' works.
+sys.modules['os.path'] = path
+
+#'
+
+# Super directory utilities.
+# (Inspired by Eric Raymond; the doc strings are mostly his)
+
+def makedirs(name, mode=0777):
+    """makedirs(path [, mode=0777]) -> None
+
+    Super-mkdir; create a leaf directory and all intermediate ones.
+    Works like mkdir, except that any intermediate path segment (not
+    just the rightmost) will be created if it does not exist.  This is
+    recursive.
+
+    """
+    head, tail = path.split(name)
+    # A trailing separator leaves tail empty; split again to get the
+    # real leaf component.
+    if not tail:
+        head, tail = path.split(head)
+    # Recursively create the missing ancestors, then the leaf itself.
+    if head and tail and not path.exists(head):
+        makedirs(head, mode)
+    mkdir(name, mode)
+
+def removedirs(name):
+    """removedirs(path) -> None
+
+    Super-rmdir; remove a leaf directory and empty all intermediate
+    ones.  Works like rmdir except that, if the leaf directory is
+    successfully removed, directories corresponding to rightmost path
+    segments will be pruned away until either the whole path is
+    consumed or an error occurs.  Errors during this latter phase are
+    ignored -- they generally mean that a directory was not empty.
+
+    """
+    # The leaf must be removable; this raises if it is not.
+    rmdir(name)
+    head, tail = path.split(name)
+    if not tail:
+        head, tail = path.split(head)
+    # Best-effort removal of each ancestor; stop at the first failure
+    # (typically a non-empty directory).
+    while head and tail:
+        try:
+            rmdir(head)
+        except error:
+            break
+        head, tail = path.split(head)
+
+def renames(old, new):
+    """renames(old, new) -> None
+
+    Super-rename; create directories as necessary and delete any left
+    empty.  Works like rename, except creation of any intermediate
+    directories needed to make the new pathname good is attempted
+    first.  After the rename, directories corresponding to rightmost
+    path segments of the old name will be pruned away until either the
+    whole path is consumed or a nonempty directory is found.
+
+    Note: this function can fail with the new directory structure made
+    if you lack permissions needed to unlink the leaf directory or
+    file.
+
+    """
+    head, tail = path.split(new)
+    if head and tail and not path.exists(head):
+        makedirs(head)
+    rename(old, new)
+    head, tail = path.split(old)
+    # Prune now-empty ancestors of the old location, best-effort.
+    if head and tail:
+        try:
+            removedirs(head)
+        except error:
+            pass
+
+__all__.extend(["makedirs", "removedirs", "renames"])
+
+# Make sure os.environ exists, at least; some platform modules above do
+# not provide one, so fall back to an empty mapping.
+try:
+    environ
+except NameError:
+    environ = {}
+
+def execl(file, *args):
+    """execl(file, *args)
+
+    Execute the executable file with argument list args, replacing the
+    current process. """
+    execv(file, args)
+
+def execle(file, *args):
+    """execle(file, *args, env)
+
+    Execute the executable file with argument list args and
+    environment env, replacing the current process. """
+    # The environment is passed as the last positional argument.
+    env = args[-1]
+    execve(file, args[:-1], env)
+
+def execlp(file, *args):
+    """execlp(file, *args)
+
+    Execute the executable file (which is searched for along $PATH)
+    with argument list args, replacing the current process. """
+    execvp(file, args)
+
+def execlpe(file, *args):
+    """execlpe(file, *args, env)
+
+    Execute the executable file (which is searched for along $PATH)
+    with argument list args and environment env, replacing the current
+    process. """
+    # The environment is passed as the last positional argument.
+    env = args[-1]
+    execvpe(file, args[:-1], env)
+
+def execvp(file, args):
+    """execvp(file, args)
+
+    Execute the executable file (which is searched for along $PATH)
+    with argument list args, replacing the current process.
+    args may be a list or tuple of strings. """
+    _execvpe(file, args)
+
+def execvpe(file, args, env):
+    """execvpe(file, args, env)
+
+    Execute the executable file (which is searched for along $PATH)
+    with argument list args and environment env , replacing the
+    current process.
+    args may be a list or tuple of strings. """
+    _execvpe(file, args, env)
+
+__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
+
+def _execvpe(file, args, env=None):
+    # Shared implementation of execvp/execvpe: search $PATH for 'file'
+    # and exec the first candidate that works.
+    from errno import ENOENT, ENOTDIR
+
+    # With an explicit env use execve; otherwise use execv and search
+    # the inherited environment for PATH.
+    if env is not None:
+        func = execve
+        argrest = (args, env)
+    else:
+        func = execv
+        argrest = (args,)
+        env = environ
+
+    # A path with a directory component is executed directly, no search.
+    head, tail = path.split(file)
+    if head:
+        apply(func, (file,) + argrest)
+        return
+    if env.has_key('PATH'):
+        envpath = env['PATH']
+    else:
+        envpath = defpath
+    PATH = envpath.split(pathsep)
+    saved_exc = None
+    saved_tb = None
+    for dir in PATH:
+        fullname = path.join(dir, file)
+        try:
+            apply(func, (fullname,) + argrest)
+        except error, e:
+            tb = sys.exc_info()[2]
+            # Remember the first "interesting" failure (anything other
+            # than file-not-found) to re-raise after the search.
+            if (e.errno != ENOENT and e.errno != ENOTDIR
+                and saved_exc is None):
+                saved_exc = e
+                saved_tb = tb
+    if saved_exc:
+        raise error, saved_exc, saved_tb
+    # Nothing interesting was saved; re-raise the last error seen.
+    raise error, e, tb
+
+# Change environ to automatically call putenv() if it exists
+try:
+    # This will fail if there's no putenv
+    putenv
+except NameError:
+    pass
+else:
+    import UserDict
+
+    # Fake unsetenv() for Windows
+    # not sure about os2 and dos here but
+    # I'm guessing they are the same.
+
+    if name in ('os2', 'nt', 'dos'):
+        def unsetenv(key):
+            putenv(key, "")
+
+    if name == "riscos":
+        # On RISC OS, all env access goes through getenv and putenv
+        from riscosenviron import _Environ
+    elif name in ('os2', 'nt', 'dos'):  # Where Env Var Names Must Be UPPERCASE
+        # But we store them as upper case
+        class _Environ(UserDict.IterableUserDict):
+            def __init__(self, environ):
+                UserDict.UserDict.__init__(self)
+                data = self.data
+                for k, v in environ.items():
+                    data[k.upper()] = v
+            def __setitem__(self, key, item):
+                putenv(key, item)
+                self.data[key.upper()] = item
+            def __getitem__(self, key):
+                return self.data[key.upper()]
+            # Pick a __delitem__ at class-creation time depending on
+            # whether an unsetenv() is available.
+            try:
+                unsetenv
+            except NameError:
+                def __delitem__(self, key):
+                    del self.data[key.upper()]
+            else:
+                def __delitem__(self, key):
+                    unsetenv(key)
+                    del self.data[key.upper()]
+            def has_key(self, key):
+                return self.data.has_key(key.upper())
+            def get(self, key, failobj=None):
+                return self.data.get(key.upper(), failobj)
+            def update(self, dict):
+                for k, v in dict.items():
+                    self[k] = v
+
+    else:  # Where Env Var Names Can Be Mixed Case
+        class _Environ(UserDict.IterableUserDict):
+            def __init__(self, environ):
+                UserDict.UserDict.__init__(self)
+                self.data = environ
+            def __setitem__(self, key, item):
+                putenv(key, item)
+                self.data[key] = item
+            def update(self, dict):
+                for k, v in dict.items():
+                    self[k] = v
+            # Only provide __delitem__ when unsetenv() exists.
+            try:
+                unsetenv
+            except NameError:
+                pass
+            else:
+                def __delitem__(self, key):
+                    unsetenv(key)
+                    del self.data[key]
+
+
+    # Replace the plain mapping with the putenv-aware wrapper.
+    environ = _Environ(environ)
+
+    def getenv(key, default=None):
+        """Get an environment variable, return None if it doesn't exist.
+        The optional second argument can specify an alternate default."""
+        return environ.get(key, default)
+    __all__.append("getenv")
+
+def _exists(name):
+    # True (1) if 'name' is bound at this module's scope; used to probe
+    # which platform primitives the builtin OS module supplied.
+    try:
+        eval(name)
+        return 1
+    except NameError:
+        return 0
+
+# Supply spawn*() (probably only for Unix)
+if _exists("fork") and not _exists("spawnv") and _exists("execv"):
+
+    P_WAIT = 0
+    P_NOWAIT = P_NOWAITO = 1
+
+    # XXX Should we support P_DETACH?  I suppose it could fork()**2
+    # and close the std I/O streams.  Also, P_OVERLAY is the same
+    # as execv*()?
+
+    def _spawnvef(mode, file, args, env, func):
+        # Internal helper; func is the exec*() function to use
+        pid = fork()
+        if not pid:
+            # Child
+            try:
+                if env is None:
+                    func(file, args)
+                else:
+                    func(file, args, env)
+            except:
+                # Bare except on purpose: a failed exec must not let the
+                # child fall back into the parent's code; exit with 127.
+                _exit(127)
+        else:
+            # Parent
+            if mode == P_NOWAIT:
+                return pid # Caller is responsible for waiting!
+            # P_WAIT: reap the child and translate its status.
+            while 1:
+                wpid, sts = waitpid(pid, 0)
+                if WIFSTOPPED(sts):
+                    continue
+                elif WIFSIGNALED(sts):
+                    return -WTERMSIG(sts)
+                elif WIFEXITED(sts):
+                    return WEXITSTATUS(sts)
+                else:
+                    raise error, "Not stopped, signaled or exited???"
+
+    def spawnv(mode, file, args):
+        """spawnv(mode, file, args) -> integer
+
+Execute file with arguments from args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return _spawnvef(mode, file, args, None, execv)
+
+    def spawnve(mode, file, args, env):
+        """spawnve(mode, file, args, env) -> integer
+
+Execute file with arguments from args in a subprocess with the
+specified environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return _spawnvef(mode, file, args, env, execve)
+
+    # Note: spawnvp[e] isn't currently supported on Windows
+
+    def spawnvp(mode, file, args):
+        """spawnvp(mode, file, args) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return _spawnvef(mode, file, args, None, execvp)
+
+    def spawnvpe(mode, file, args, env):
+        """spawnvpe(mode, file, args, env) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return _spawnvef(mode, file, args, env, execvpe)
+
+if _exists("spawnv"):
+    # These aren't supplied by the basic Windows code
+    # but can be easily implemented in Python
+
+    def spawnl(mode, file, *args):
+        """spawnl(mode, file, *args) -> integer
+
+Execute file with arguments from args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return spawnv(mode, file, args)
+
+    def spawnle(mode, file, *args):
+        """spawnle(mode, file, *args, env) -> integer
+
+Execute file with arguments from args in a subprocess with the
+supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        # The environment is passed as the last positional argument.
+        env = args[-1]
+        return spawnve(mode, file, args[:-1], env)
+
+if _exists("spawnvp"):
+    # At the moment, Windows doesn't implement spawnvp[e],
+    # so it won't have spawnlp[e] either.
+    def spawnlp(mode, file, *args):
+        """spawnlp(mode, file, *args) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        return spawnvp(mode, file, args)
+
+    def spawnlpe(mode, file, *args):
+        """spawnlpe(mode, file, *args, env) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+        # The environment is passed as the last positional argument.
+        env = args[-1]
+        return spawnvpe(mode, file, args[:-1], env)
+
+
+    __all__.extend(["spawnlp","spawnlpe","spawnv", "spawnve","spawnvp",
+                    "spawnvpe","spawnl","spawnle",])
+
+
+# Supply popen2 etc. (for Unix)
+if _exists("fork"):
+    if not _exists("popen2"):
+        def popen2(cmd, mode="t", bufsize=-1):
+            # Delegates to the popen2 module; note the return order is
+            # swapped to (child_stdin, child_stdout).
+            import popen2
+            stdout, stdin = popen2.popen2(cmd, bufsize)
+            return stdin, stdout
+        __all__.append("popen2")
+
+    if not _exists("popen3"):
+        def popen3(cmd, mode="t", bufsize=-1):
+            # Returns (child_stdin, child_stdout, child_stderr).
+            import popen2
+            stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
+            return stdin, stdout, stderr
+        __all__.append("popen3")
+
+    if not _exists("popen4"):
+        def popen4(cmd, mode="t", bufsize=-1):
+            # stderr is merged into stdout; returns (stdin, stdout).
+            import popen2
+            stdout, stdin = popen2.popen4(cmd, bufsize)
+            return stdin, stdout
+        __all__.append("popen4")
+
+import copy_reg as _copy_reg
+
+# Register pickle support for the stat_result/statvfs_result types,
+# which cannot be pickled directly.
+
+def _make_stat_result(tup, dict):
+    # Reconstructor used when unpickling a stat_result.
+    return stat_result(tup, dict)
+
+def _pickle_stat_result(sr):
+    (type, args) = sr.__reduce__()
+    return (_make_stat_result, args)
+
+try:
+    _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
+except NameError: # stat_result may not exist
+    pass
+
+def _make_statvfs_result(tup, dict):
+    # Reconstructor used when unpickling a statvfs_result.
+    return statvfs_result(tup, dict)
+
+def _pickle_statvfs_result(sr):
+    (type, args) = sr.__reduce__()
+    return (_make_statvfs_result, args)
+
+try:
+    _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
+                     _make_statvfs_result)
+except NameError: # statvfs_result may not exist
+    pass
diff --git a/lib-python/2.2/pdb.doc b/lib-python/2.2/pdb.doc
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pdb.doc
@@ -0,0 +1,192 @@
+The Python Debugger Pdb
+=======================
+
+To use the debugger in its simplest form:
+
+        >>> import pdb
+        >>> pdb.run('<a statement>')
+
+The debugger's prompt is '(Pdb) '.  This will stop in the first
+function call in <a statement>.
+
+Alternatively, if a statement terminated with an unhandled exception,
+you can use pdb's post-mortem facility to inspect the contents of the
+traceback:
+
+        >>> <a statement>
+        <exception traceback>
+        >>> import pdb
+        >>> pdb.pm()
+
+The commands recognized by the debugger are listed in the next
+section.  Most can be abbreviated as indicated; e.g., h(elp) means
+that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
+nor as 'H' or 'Help' or 'HELP').  Optional arguments are enclosed in
+square brackets.
+
+A blank line repeats the previous command literally, except for
+'list', where it lists the next 11 lines.
+
+Commands that the debugger doesn't recognize are assumed to be Python
+statements and are executed in the context of the program being
+debugged.  Python statements can also be prefixed with an exclamation
+point ('!').  This is a powerful way to inspect the program being
+debugged; it is even possible to change variables.  When an exception
+occurs in such a statement, the exception name is printed but the
+debugger's state is not changed.
+
+The debugger supports aliases, which can save typing.  And aliases can
+have parameters (see the alias help entry) which allows one a certain
+level of adaptability to the context under examination.
+
+Multiple commands may be entered on a single line, separated by the
+pair ';;'.  No intelligence is applied to separating the commands; the
+input is split at the first ';;', even if it is in the middle of a
+quoted string.
+
+If a file ".pdbrc" exists in your home directory or in the current
+directory, it is read in and executed as if it had been typed at the
+debugger prompt.  This is particularly useful for aliases.  If both
+files exist, the one in the home directory is read first and aliases
+defined there can be overridden by the local file.
+
+Aside from aliases, the debugger is not directly programmable; but it
+is implemented as a class from which you can derive your own debugger
+class, which you can make as fancy as you like.
+
+
+Debugger commands
+=================
+
+h(elp)
+        Without argument, print the list of available commands.  With
+        a command name as argument, print help about that command
+        (this is currently not implemented).
+
+w(here)
+        Print a stack trace, with the most recent frame at the bottom.
+        An arrow indicates the "current frame", which determines the
+        context of most commands.
+
+d(own)
+        Move the current frame one level down in the stack trace
+        (to an older frame).
+
+u(p)
+        Move the current frame one level up in the stack trace
+        (to a newer frame).
+
+b(reak) [ ([filename:]lineno | function) [, condition] ]
+        With a filename:line number argument, set a break there.  If
+        filename is omitted, use the current file.  With a function
+        name, set a break at the first executable line of that
+        function.  Without argument, list all breaks.  Each breakpoint
+        is assigned a number to which all the other breakpoint
+        commands refer.
+
+        The condition argument, if present, is a string which must
+        evaluate to true in order for the breakpoint to be honored.
+
+tbreak [ ([filename:]lineno | function) [, condition] ]
+        Temporary breakpoint, which is removed automatically when it
+        is first hit.  The arguments are the same as break.
+
+cl(ear) [bpnumber [bpnumber ...] ]
+        With a space separated list of breakpoint numbers, clear those
+        breakpoints.  Without argument, clear all breaks (but first
+        ask confirmation).
+
+disable bpnumber [bpnumber ...]
+        Disables the breakpoints given as a space separated list of
+        breakpoint numbers.  Disabling a breakpoint means it cannot
+        cause the program to stop execution, but unlike clearing a
+        breakpoint, it remains in the list of breakpoints and can be
+        (re-)enabled.
+
+enable bpnumber [bpnumber ...]
+        Enables the breakpoints specified.
+
+ignore bpnumber count
+        Sets the ignore count for the given breakpoint number.  If
+        count is omitted, the ignore count is set to 0.  A breakpoint
+        becomes active when the ignore count is zero.  When non-zero,
+        the count is decremented each time the breakpoint is reached
+        and the breakpoint is not disabled and any associated
+        condition evaluates to true.
+
+condition bpnumber condition
+        condition is an expression which must evaluate to true before
+        the breakpoint is honored.  If condition is absent, any
+        existing condition is removed; i.e., the breakpoint is made
+        unconditional.
+
+s(tep)
+        Execute the current line, stop at the first possible occasion
+        (either in a function that is called or in the current function).
+
+n(ext)
+        Continue execution until the next line in the current function
+        is reached or it returns.
+
+r(eturn)
+        Continue execution until the current function returns.
+
+c(ont(inue))
+        Continue execution, only stop when a breakpoint is encountered.
+
+l(ist) [first [,last]]
+        List source code for the current file.
+        Without arguments, list 11 lines around the current line
+        or continue the previous listing.
+        With one argument, list 11 lines starting at that line.
+        With two arguments, list the given range;
+        if the second argument is less than the first, it is a count.
+
+a(rgs)
+        Print the argument list of the current function.
+
+p expression
+        Print the value of the expression.
+
+(!) statement
+        Execute the (one-line) statement in the context of the current
+        stack frame.  The exclamation point can be omitted unless the
+        first word of the statement resembles a debugger command.  To
+        assign to a global variable you must always prefix the command
+        with a 'global' command, e.g.:
+        (Pdb) global list_options; list_options = ['-l']
+        (Pdb)
+
+
+whatis arg
+         Prints the type of the argument.
+
+alias [name [command]]
+        Creates an alias called 'name' that executes 'command'.  The
+        command must *not* be enclosed in quotes.  Replaceable
+        parameters can be indicated by %1, %2, and so on, while %* is
+        replaced by all the parameters.  If no command is given, the
+        current alias for name is shown. If no name is given, all
+        aliases are listed.
+
+        Aliases may be nested and can contain anything that can be
+        legally typed at the pdb prompt.  Note!  You *can* override
+        internal pdb commands with aliases!  Those internal commands
+        are then hidden until the alias is removed.  Aliasing is
+        recursively applied to the first word of the command line; all
+        other words in the line are left alone.
+
+        As an example, here are two useful aliases (especially when
+        placed in the .pdbrc file):
+
+        #Print instance variables (usage "pi classInst")
+        alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
+        #Print instance variables in self
+        alias ps pi self
+                
+unalias name
+        Deletes the specified alias.
+
+q(uit)
+        Quit from the debugger.
+        The program being executed is aborted.
diff --git a/lib-python/2.2/pdb.py b/lib-python/2.2/pdb.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/pdb.py
@@ -0,0 +1,979 @@
+#! /usr/bin/env python
+
+"""A Python debugger."""
+
+# (See pdb.doc for documentation.)
+
+import sys
+import linecache
+import cmd
+import bdb
+from repr import Repr
+import os
+import re
+
+# Create a custom safe Repr instance and increase its maxstring.
+# The default of 30 truncates error messages too easily.
+_repr = Repr()
+_repr.maxstring = 200
+_saferepr = _repr.repr
+
+__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
+           "post_mortem", "help"]
+
+def find_function(funcname, filename):
+    cre = re.compile(r'def\s+%s\s*[(]' % funcname)
+    try:
+        fp = open(filename)
+    except IOError:
+        return None
+    # consumer of this info expects the first line to be 1
+    lineno = 1
+    answer = None
+    while 1:
+        line = fp.readline()
+        if line == '':
+            break
+        if cre.match(line):
+            answer = funcname, filename, lineno
+            break
+        lineno = lineno + 1
+    fp.close()
+    return answer
+
+
+# Interaction prompt line will separate file and call info from code
+# text using value of line_prefix string.  A newline and arrow may
+# be to your liking.  You can set it once pdb is imported using the
+# command "pdb.line_prefix = '\n% '".
+# line_prefix = ': '    # Use this to get the old situation back
+line_prefix = '\n-> '   # Probably a better default
+
+class Pdb(bdb.Bdb, cmd.Cmd):
+
+    def __init__(self):
+        bdb.Bdb.__init__(self)
+        cmd.Cmd.__init__(self)
+        self.prompt = '(Pdb) '
+        self.aliases = {}
+        # Try to load readline if it exists
+        try:
+            import readline
+        except ImportError:
+            pass
+
+        # Read $HOME/.pdbrc and ./.pdbrc
+        self.rcLines = []
+        if os.environ.has_key('HOME'):
+            envHome = os.environ['HOME']
+            try:
+                rcFile = open(os.path.join(envHome, ".pdbrc"))
+            except IOError:
+                pass
+            else:
+                for line in rcFile.readlines():
+                    self.rcLines.append(line)
+                rcFile.close()
+        try:
+            rcFile = open(".pdbrc")
+        except IOError:
+            pass
+        else:
+            for line in rcFile.readlines():
+                self.rcLines.append(line)
+            rcFile.close()
+
+    def reset(self):
+        bdb.Bdb.reset(self)
+        self.forget()
+
+    def forget(self):
+        self.lineno = None
+        self.stack = []
+        self.curindex = 0
+        self.curframe = None
+
+    def setup(self, f, t):
+        self.forget()
+        self.stack, self.curindex = self.get_stack(f, t)
+        self.curframe = self.stack[self.curindex][0]
+        self.execRcLines()
+
+    # Can be executed earlier than 'setup' if desired
+    def execRcLines(self):
+        if self.rcLines:
+            # Make local copy because of recursion
+            rcLines = self.rcLines
+            # executed only once
+            self.rcLines = []
+            for line in rcLines:
+                line = line[:-1]
+                if len (line) > 0 and line[0] != '#':
+                    self.onecmd (line)
+
+    # Override Bdb methods (except user_call, for now)
+
+    def user_line(self, frame):
+        """This function is called when we stop or break at this line."""
+        self.interaction(frame, None)
+
+    def user_return(self, frame, return_value):
+        """This function is called when a return trap is set here."""
+        frame.f_locals['__return__'] = return_value
+        print '--Return--'
+        self.interaction(frame, None)
+
+    def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
+        """This function is called if an exception occurs,
+        but only if we are to stop at or just below this level."""
+        frame.f_locals['__exception__'] = exc_type, exc_value
+        if type(exc_type) == type(''):
+            exc_type_name = exc_type
+        else: exc_type_name = exc_type.__name__
+        print exc_type_name + ':', _saferepr(exc_value)
+        self.interaction(frame, exc_traceback)
+
+    # General interaction function
+
+    def interaction(self, frame, traceback):
+        self.setup(frame, traceback)
+        self.print_stack_entry(self.stack[self.curindex])
+        self.cmdloop()
+        self.forget()
+
+    def default(self, line):
+        if line[:1] == '!': line = line[1:]
+        locals = self.curframe.f_locals
+        globals = self.curframe.f_globals
+        try:
+            code = compile(line + '\n', '<stdin>', 'single')
+            exec code in globals, locals
+        except:
+            t, v = sys.exc_info()[:2]
+            if type(t) == type(''):
+                exc_type_name = t
+            else: exc_type_name = t.__name__
+            print '***', exc_type_name + ':', v
+
+    def precmd(self, line):
+        """Handle alias expansion and ';;' separator."""
+        if not line.strip():
+            return line
+        args = line.split()
+        while self.aliases.has_key(args[0]):
+            line = self.aliases[args[0]]
+            ii = 1
+            for tmpArg in args[1:]:
+                line = line.replace("%" + str(ii),
+                                      tmpArg)
+                ii = ii + 1
+            line = line.replace("%*", ' '.join(args[1:]))
+            args = line.split()
+        # split into ';;' separated commands
+        # unless it's an alias command
+        if args[0] != 'alias':
+            marker = line.find(';;')
+            if marker >= 0:
+                # queue up everything after marker
+                next = line[marker+2:].lstrip()
+                self.cmdqueue.append(next)
+                line = line[:marker].rstrip()
+        return line
+
+    # Command definitions, called by cmdloop()
+    # The argument is the remaining string on the command line
+    # Return true to exit from the command loop
+
+    do_h = cmd.Cmd.do_help
+
+    def do_break(self, arg, temporary = 0):
+        # break [ ([filename:]lineno | function) [, "condition"] ]
+        if not arg:
+            if self.breaks:  # There's at least one
+                print "Num Type         Disp Enb   Where"
+                for bp in bdb.Breakpoint.bpbynumber:
+                    if bp:
+                        bp.bpprint()
+            return
+        # parse arguments; comma has lowest precedence
+        # and cannot occur in filename
+        filename = None
+        lineno = None
+        cond = None
+        comma = arg.find(',')
+        if comma > 0:
+            # parse stuff after comma: "condition"
+            cond = arg[comma+1:].lstrip()
+            arg = arg[:comma].rstrip()
+        # parse stuff before comma: [filename:]lineno | function
+        colon = arg.rfind(':')
+        if colon >= 0:
+            filename = arg[:colon].rstrip()
+            f = self.lookupmodule(filename)
+            if not f:
+                print '*** ', `filename`,
+                print 'not found from sys.path'
+                return
+            else:
+                filename = f
+            arg = arg[colon+1:].lstrip()
+            try:
+                lineno = int(arg)
+            except ValueError, msg:
+                print '*** Bad lineno:', arg
+                return
+        else:
+            # no colon; can be lineno or function
+            try:
+                lineno = int(arg)
+            except ValueError:
+                try:
+                    func = eval(arg,
+                                self.curframe.f_globals,
+                                self.curframe.f_locals)
+                except:
+                    func = arg
+                try:
+                    if hasattr(func, 'im_func'):
+                        func = func.im_func
+                    code = func.func_code
+                    lineno = code.co_firstlineno
+                    filename = code.co_filename
+                except:
+                    # last thing to try
+                    (ok, filename, ln) = self.lineinfo(arg)
+                    if not ok:
+                        print '*** The specified object',
+                        print `arg`,
+                        print 'is not a function'
+                        print ('or was not found '
+                               'along sys.path.')
+                        return
+                    lineno = int(ln)
+        if not filename:
+            filename = self.defaultFile()
+        # Check for reasonable breakpoint
+        line = self.checkline(filename, lineno)
+        if line:
+            # now set the break point
+            err = self.set_break(filename, line, temporary, cond)
+            if err: print '***', err
+            else:
+                bp = self.get_breaks(filename, line)[-1]
+                print "Breakpoint %d at %s:%d" % (bp.number,
+                                                  bp.file,
+                                                  bp.line)
+
+    # To be overridden in derived debuggers
+    def defaultFile(self):
+        """Produce a reasonable default."""
+        filename = self.curframe.f_code.co_filename
+        if filename == '<string>' and mainpyfile:
+            filename = mainpyfile
+        return filename
+
+    do_b = do_break
+
+    def do_tbreak(self, arg):
+        self.do_break(arg, 1)
+
+    def lineinfo(self, identifier):
+        failed = (None, None, None)
+        # Input is identifier, may be in single quotes
+        idstring = identifier.split("'")
+        if len(idstring) == 1:
+            # not in single quotes
+            id = idstring[0].strip()
+        elif len(idstring) == 3:
+            # quoted
+            id = idstring[1].strip()
+        else:
+            return failed
+        if id == '': return failed
+        parts = id.split('.')
+        # Protection for derived debuggers
+        if parts[0] == 'self':
+            del parts[0]
+            if len(parts) == 0:
+                return failed
+        # Best first guess at file to look at
+        fname = self.defaultFile()
+        if len(parts) == 1:
+            item = parts[0]
+        else:
+            # More than one part.
+            # First is module, second is method/class
+            f = self.lookupmodule(parts[0])
+            if f:
+                fname = f
+            item = parts[1]
+        answer = find_function(item, fname)
+        return answer or failed
+
+    def checkline(self, filename, lineno):
+        """Return line number of first line at or after input
+        argument such that if the input points to a 'def', the
+        returned line number is the first
+        non-blank/non-comment line to follow.  If the input
+        points to a blank or comment line, return 0.  At end
+        of file, also return 0."""
+
+        line = linecache.getline(filename, lineno)
+        if not line:
+            print 'End of file'
+            return 0
+        line = line.strip()
+        # Don't allow setting breakpoint at a blank line
+        if ( not line or (line[0] == '#') or
+             (line[:3] == '"""') or line[:3] == "'''" ):
+            print '*** Blank or comment'
+            return 0
+        # When a file is read in and a breakpoint is at
+        # the 'def' statement, the system stops there at
+        # code parse time.  We don't want that, so all breakpoints
+        # set at 'def' statements are moved one line onward
+        if line[:3] == 'def':
+            instr = ''
+            brackets = 0
+            while 1:
+                skipone = 0
+                for c in line:
+                    if instr:
+                        if skipone:
+                            skipone = 0
+                        elif c == '\\':
+                            skipone = 1
+                        elif c == instr:
+                            instr = ''
+                    elif c == '#':
+                        break
+                    elif c in ('"',"'"):
+                        instr = c
+                    elif c in ('(','{','['):
+                        brackets = brackets + 1
+                    elif c in (')','}',']'):
+                        brackets = brackets - 1
+                lineno = lineno+1
+                line = linecache.getline(filename, lineno)
+                if not line:
+                    print 'end of file'
+                    return 0
+                line = line.strip()
+                if not line: continue   # Blank line
+                if brackets <= 0 and line[0] not in ('#','"',"'"):
+                    break
+        return lineno
+
+    def do_enable(self, arg):
+        args = arg.split()
+        for i in args:
+            try:
+                i = int(i)
+            except ValueError:
+                print 'Breakpoint index %r is not a number' % i
+                continue
+
+            if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
+                print 'No breakpoint numbered', i
+                continue
+
+            bp = bdb.Breakpoint.bpbynumber[i]
+            if bp:
+                bp.enable()
+
+    def do_disable(self, arg):
+        args = arg.split()
+        for i in args:
+            try:
+                i = int(i)
+            except ValueError:
+                print 'Breakpoint index %r is not a number' % i
+                continue
+            
+            if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
+                print 'No breakpoint numbered', i
+                continue
+
+            bp = bdb.Breakpoint.bpbynumber[i]
+            if bp:
+                bp.disable()
+
+    def do_condition(self, arg):
+        # arg is breakpoint number and condition
+        args = arg.split(' ', 1)
+        bpnum = int(args[0].strip())
+        try:
+            cond = args[1]
+        except:
+            cond = None
+        bp = bdb.Breakpoint.bpbynumber[bpnum]
+        if bp:
+            bp.cond = cond
+            if not cond:
+                print 'Breakpoint', bpnum,
+                print 'is now unconditional.'
+
+    def do_ignore(self,arg):
+        """arg is bp number followed by ignore count."""
+        args = arg.split()
+        bpnum = int(args[0].strip())
+        try:
+            count = int(args[1].strip())
+        except:
+            count = 0
+        bp = bdb.Breakpoint.bpbynumber[bpnum]
+        if bp:
+            bp.ignore = count
+            if (count > 0):
+                reply = 'Will ignore next '
+                if (count > 1):
+                    reply = reply + '%d crossings' % count
+                else:
+                    reply = reply + '1 crossing'
+                print reply + ' of breakpoint %d.' % bpnum
+            else:
+                print 'Will stop next time breakpoint',
+                print bpnum, 'is reached.'
+
+    def do_clear(self, arg):
+        """Three possibilities, tried in this order:
+        clear -> clear all breaks, ask for confirmation
+        clear file:lineno -> clear all breaks at file:lineno
+        clear bpno bpno ... -> clear breakpoints by number"""
+        if not arg:
+            try:
+                reply = raw_input('Clear all breaks? ')
+            except EOFError:
+                reply = 'no'
+            reply = reply.strip().lower()
+            if reply in ('y', 'yes'):
+                self.clear_all_breaks()
+            return
+        if ':' in arg:
+            # Make sure it works for "clear C:\foo\bar.py:12"
+            i = arg.rfind(':')
+            filename = arg[:i]
+            arg = arg[i+1:]
+            try:
+                lineno = int(arg)
+            except:
+                err = "Invalid line number (%s)" % arg
+            else:
+                err = self.clear_break(filename, lineno)
+            if err: print '***', err
+            return
+        numberlist = arg.split()
+        for i in numberlist:
+            err = self.clear_bpbynumber(i)
+            if err:
+                print '***', err
+            else:
+                print 'Deleted breakpoint %s ' % (i,)
+    do_cl = do_clear # 'c' is already an abbreviation for 'continue'
+
+    def do_where(self, arg):
+        self.print_stack_trace()
+    do_w = do_where
+    do_bt = do_where
+
+    def do_up(self, arg):
+        if self.curindex == 0:
+            print '*** Oldest frame'
+        else:
+            self.curindex = self.curindex - 1
+            self.curframe = self.stack[self.curindex][0]
+            self.print_stack_entry(self.stack[self.curindex])
+            self.lineno = None
+    do_u = do_up
+
+    def do_down(self, arg):
+        if self.curindex + 1 == len(self.stack):
+            print '*** Newest frame'
+        else:
+            self.curindex = self.curindex + 1
+            self.curframe = self.stack[self.curindex][0]
+            self.print_stack_entry(self.stack[self.curindex])
+            self.lineno = None
+    do_d = do_down
+
+    def do_step(self, arg):
+        self.set_step()
+        return 1
+    do_s = do_step
+
+    def do_next(self, arg):
+        self.set_next(self.curframe)
+        return 1
+    do_n = do_next
+
+    def do_return(self, arg):
+        self.set_return(self.curframe)
+        return 1
+    do_r = do_return
+
+    def do_continue(self, arg):
+        self.set_continue()
+        return 1
+    do_c = do_cont = do_continue
+
+    def do_quit(self, arg):
+        self.set_quit()
+        return 1
+    do_q = do_quit
+    do_exit = do_quit
+
+    def do_EOF(self, arg):
+        print
+        self.set_quit()
+        return 1
+
+    def do_args(self, arg):
+        f = self.curframe
+        co = f.f_code
+        dict = f.f_locals
+        n = co.co_argcount
+        if co.co_flags & 4: n = n+1
+        if co.co_flags & 8: n = n+1
+        for i in range(n):
+            name = co.co_varnames[i]
+            print name, '=',
+            if dict.has_key(name): print dict[name]
+            else: print "*** undefined ***"
+    do_a = do_args
+
+    def do_retval(self, arg):
+        if self.curframe.f_locals.has_key('__return__'):
+            print self.curframe.f_locals['__return__']
+        else:
+            print '*** Not yet returned!'
+    do_rv = do_retval
+
+    def do_p(self, arg):
+        try:
+            value = eval(arg, self.curframe.f_globals,
+                            self.curframe.f_locals)
+        except:
+            t, v = sys.exc_info()[:2]
+            if type(t) == type(''):
+                exc_type_name = t
+            else: exc_type_name = t.__name__
+            print '***', exc_type_name + ':', `v`
+            return
+
+        print `value`
+
+    def do_list(self, arg):
+        self.lastcmd = 'list'
+        last = None
+        if arg:
+            try:
+                x = eval(arg, {}, {})
+                if type(x) == type(()):
+                    first, last = x
+                    first = int(first)
+                    last = int(last)
+                    if last < first:
+                        # Assume it's a count
+                        last = first + last
+                else:
+                    first = max(1, int(x) - 5)
+            except:
+                print '*** Error in argument:', `arg`
+                return
+        elif self.lineno is None:
+            first = max(1, self.curframe.f_lineno - 5)
+        else:
+            first = self.lineno + 1
+        if last is None:
+            last = first + 10
+        filename = self.curframe.f_code.co_filename
+        breaklist = self.get_file_breaks(filename)
+        try:
+            for lineno in range(first, last+1):
+                line = linecache.getline(filename, lineno)
+                if not line:
+                    print '[EOF]'
+                    break
+                else:
+                    s = `lineno`.rjust(3)
+                    if len(s) < 4: s = s + ' '
+                    if lineno in breaklist: s = s + 'B'
+                    else: s = s + ' '
+                    if lineno == self.curframe.f_lineno:
+                        s = s + '->'
+                    print s + '\t' + line,
+                    self.lineno = lineno
+        except KeyboardInterrupt:
+            pass
+    do_l = do_list
+
+    def do_whatis(self, arg):
+        try:
+            value = eval(arg, self.curframe.f_globals,
+                            self.curframe.f_locals)
+        except:
+            t, v = sys.exc_info()[:2]
+            if type(t) == type(''):
+                exc_type_name = t
+            else: exc_type_name = t.__name__
+            print '***', exc_type_name + ':', `v`
+            return
+        code = None
+        # Is it a function?
+        try: code = value.func_code
+        except: pass
+        if code:
+            print 'Function', code.co_name
+            return
+        # Is it an instance method?
+        try: code = value.im_func.func_code
+        except: pass
+        if code:
+            print 'Method', code.co_name
+            return
+        # None of the above...
+        print type(value)
+
+    def do_alias(self, arg):
+        args = arg.split()
+        if len(args) == 0:
+            keys = self.aliases.keys()
+            keys.sort()
+            for alias in keys:
+                print "%s = %s" % (alias, self.aliases[alias])
+            return
+        if self.aliases.has_key(args[0]) and len (args) == 1:
+            print "%s = %s" % (args[0], self.aliases[args[0]])
+        else:
+            self.aliases[args[0]] = ' '.join(args[1:])
+
+    def do_unalias(self, arg):
+        args = arg.split()
+        if len(args) == 0: return
+        if self.aliases.has_key(args[0]):
+            del self.aliases[args[0]]
+
+    # Print a traceback starting at the top stack frame.
+    # The most recently entered frame is printed last;
+    # this is different from dbx and gdb, but consistent with
+    # the Python interpreter's stack trace.
+    # It is also consistent with the up/down commands (which are
+    # compatible with dbx and gdb: up moves towards 'main()'
+    # and down moves towards the most recent stack frame).
+
+    def print_stack_trace(self):
+        try:
+            for frame_lineno in self.stack:
+                self.print_stack_entry(frame_lineno)
+        except KeyboardInterrupt:
+            pass
+
+    def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
+        frame, lineno = frame_lineno
+        if frame is self.curframe:
+            print '>',
+        else:
+            print ' ',
+        print self.format_stack_entry(frame_lineno, prompt_prefix)
+
+
+    # Help methods (derived from pdb.doc)
+
+    def help_help(self):
+        self.help_h()
+
+    def help_h(self):
+        print """h(elp)
+Without argument, print the list of available commands.
+With a command name as argument, print help about that command
+"help pdb" pipes the full documentation file to the $PAGER
+"help exec" gives help on the ! command"""
+
+    def help_where(self):
+        self.help_w()
+
+    def help_w(self):
+        print """w(here)
+Print a stack trace, with the most recent frame at the bottom.
+An arrow indicates the "current frame", which determines the
+context of most commands.  'bt' is an alias for this command."""
+
+    help_bt = help_w
+
+    def help_down(self):
+        self.help_d()
+
+    def help_d(self):
+        print """d(own)
+Move the current frame one level down in the stack trace
+(to an older frame)."""
+
+    def help_up(self):
+        self.help_u()
+
+    def help_u(self):
+        print """u(p)
+Move the current frame one level up in the stack trace
+(to a newer frame)."""
+
+    def help_break(self):
+        self.help_b()
+
+    def help_b(self):
+        print """b(reak) ([file:]lineno | function) [, condition]
+With a line number argument, set a break there in the current
+file.  With a function name, set a break at first executable line
+of that function.  Without argument, list all breaks.  If a second
+argument is present, it is a string specifying an expression
+which must evaluate to true before the breakpoint is honored.
+
+The line number may be prefixed with a filename and a colon,
+to specify a breakpoint in another file (probably one that
+hasn't been loaded yet).  The file is searched for on sys.path;
+the .py suffix may be omitted."""
+
+    def help_clear(self):
+        self.help_cl()
+
+    def help_cl(self):
+        print "cl(ear) filename:lineno"
+        print """cl(ear) [bpnumber [bpnumber...]]
+With a space separated list of breakpoint numbers, clear
+those breakpoints.  Without argument, clear all breaks (but
+first ask confirmation).  With a filename:lineno argument,
+clear all breaks at that line in that file.
+
+Note that the argument is different from previous versions of
+the debugger (in python distributions 1.5.1 and before) where
+a linenumber was used instead of either filename:lineno or
+breakpoint numbers."""
+
+    def help_tbreak(self):
+        print """tbreak  same arguments as break, but breakpoint is
+removed when first hit."""
+
+    def help_enable(self):
+        print """enable bpnumber [bpnumber ...]
+Enables the breakpoints given as a space separated list of
+bp numbers."""
+
+    def help_disable(self):
+        print """disable bpnumber [bpnumber ...]
+Disables the breakpoints given as a space separated list of
+bp numbers."""
+
+    def help_ignore(self):
+        print """ignore bpnumber count
+Sets the ignore count for the given breakpoint number.  A breakpoint
+becomes active when the ignore count is zero.  When non-zero, the
+count is decremented each time the breakpoint is reached and the
+breakpoint is not disabled and any associated condition evaluates
+to true."""
+
+    def help_condition(self):
+        print """condition bpnumber str_condition
+str_condition is a string specifying an expression which
+must evaluate to true before the breakpoint is honored.
+If str_condition is absent, any existing condition is removed;
+i.e., the breakpoint is made unconditional."""
+
+    def help_step(self):
+        self.help_s()
+
+    def help_s(self):
+        print """s(tep)
+Execute the current line, stop at the first possible occasion
+(either in a function that is called or in the current function)."""
+
+    def help_next(self):
+        self.help_n()
+
+    def help_n(self):
+        print """n(ext)
+Continue execution until the next line in the current function
+is reached or it returns."""
+
+    def help_return(self):
+        self.help_r()
+
+    def help_r(self):
+        print """r(eturn)
+Continue execution until the current function returns."""
+
+    def help_continue(self):
+        self.help_c()
+
+    def help_cont(self):
+        self.help_c()
+
+    def help_c(self):
+        print """c(ont(inue))
+Continue execution, only stop when a breakpoint is encountered."""
+
+    def help_list(self):
+        self.help_l()
+
+    def help_l(self):
+        print """l(ist) [first [,last]]
+List source code for the current file.
+Without arguments, list 11 lines around the current line
+or continue the previous listing.
+With one argument, list 11 lines starting at that line.
+With two arguments, list the given range;
+if the second argument is less than the first, it is a count."""
+
+    def help_args(self):
+        self.help_a()
+
+    def help_a(self):
+        print """a(rgs)
+Print the arguments of the current function."""
+
+    def help_p(self):
+        print """p expression
+Print the value of the expression."""
+
+    def help_exec(self):
+        print """(!) statement
+Execute the (one-line) statement in the context of
+the current stack frame.
+The exclamation point can be omitted unless the first word
+of the statement resembles a debugger command.
+To assign to a global variable you must always prefix the
+command with a 'global' command, e.g.:
+(Pdb) global list_options; list_options = ['-l']
+(Pdb)"""
+
+    def help_quit(self):
+        self.help_q()
+
+    def help_q(self):
+        print """q(uit) or exit - Quit from the debugger.
+The program being executed is aborted."""
+
+    help_exit = help_q
+
+    def help_whatis(self):
+        print """whatis arg
+Prints the type of the argument."""
+
+    def help_EOF(self):
+        print """EOF
+Handles the receipt of EOF as a command."""
+
+    def help_alias(self):
+        print """alias [name [command [parameter parameter ...] ]]
+Creates an alias called 'name' the executes 'command'.  The command
+must *not* be enclosed in quotes.  Replaceable parameters are
+indicated by %1, %2, and so on, while %* is replaced by all the
+parameters.  If no command is given, the current alias for name
+is shown. If no name is given, all aliases are listed.
+
+Aliases may be nested and can contain anything that can be
+legally typed at the pdb prompt.  Note!  You *can* override
+internal pdb commands with aliases!  Those internal commands
+are then hidden until the alias is removed.  Aliasing is recursively
+applied to the first word of the command line; all other words
+in the line are left alone.
+
+Some useful aliases (especially when placed in the .pdbrc file) are:
+
+#Print instance variables (usage "pi classInst")
+alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
+
+#Print instance variables in self
+alias ps pi self
+"""
+
+    def help_unalias(self):
+        print """unalias name
+Deletes the specified alias."""
+
+    def help_pdb(self):
+        help()
+
+    def lookupmodule(self, filename):
+        """Helper function for break/clear parsing -- may be overridden."""
+        root, ext = os.path.splitext(filename)
+        if ext == '':
+            filename = filename + '.py'
+        if os.path.isabs(filename):
+            return filename
+        for dirname in sys.path:
+            while os.path.islink(dirname):
+                dirname = os.readlink(dirname)
+            fullname = os.path.join(dirname, filename)
+            if os.path.exists(fullname):
+                return fullname
+        return None
+
+# Simplified interface
+
+def run(statement, globals=None, locals=None):
+    Pdb().run(statement, globals, locals)
+
+def runeval(expression, globals=None, locals=None):
+    return Pdb().runeval(expression, globals, locals)
+
+def runctx(statement, globals, locals):
+    # B/W compatibility
+    run(statement, globals, locals)
+
+def runcall(*args):
+    return apply(Pdb().runcall, args)
+
+def set_trace():
+    Pdb().set_trace()
+
+# Post-Mortem interface
+
+def post_mortem(t):
+    p = Pdb()
+    p.reset()
+    while t.tb_next is not None:
+        t = t.tb_next
+    p.interaction(t.tb_frame, t)
+
+def pm():
+    post_mortem(sys.last_traceback)
+
+
+# Main program for testing
+
+TESTCMD = 'import x; x.main()'
+
+def test():
+    run(TESTCMD)
+
+# print help
+def help():
+    for dirname in sys.path:
+        fullname = os.path.join(dirname, 'pdb.doc')
+        if os.path.exists(fullname):
+            sts = os.system('${PAGER-more} '+fullname)
+            if sts: print '*** Pager exit status:', sts
+            break
+    else:
+        print 'Sorry, can\'t find the help file "pdb.doc"',
+        print 'along the Python search path'
+
+mainmodule = ''
+mainpyfile = ''
+
+# When invoked as main program, invoke the debugger on a script
+if __name__=='__main__':
+    if not sys.argv[1:]:
+        print "usage: pdb.py scriptfile [arg] ..."
+        sys.exit(2)
+
+    mainpyfile = filename = sys.argv[1]     # Get script filename
+    if not os.path.exists(filename):
+        print 'Error:', `filename`, 'does not exist'
+        sys.exit(1)
+    mainmodule = os.path.basename(filename)
+    del sys.argv[0]         # Hide "pdb.py" from argument list
+
+    # Insert script directory in front of module search path
+    sys.path.insert(0, os.path.dirname(filename))
+
+    run('execfile(' + `filename` + ')')
diff --git a/lib-python/2.2/pickle.py b/lib-python/2.2/pickle.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pickle.py
@@ -0,0 +1,986 @@
+"""Create portable serialized representations of Python objects.
+
+See module cPickle for a (much) faster implementation.
+See module copy_reg for a mechanism for registering custom picklers.
+
+Classes:
+
+    Pickler
+    Unpickler
+
+Functions:
+
+    dump(object, file)
+    dumps(object) -> string
+    load(file) -> object
+    loads(string) -> object
+
+Misc variables:
+
+    __version__
+    format_version
+    compatible_formats
+
+"""
+
+__version__ = "$Revision$"       # Code version
+
+from types import *
+from copy_reg import dispatch_table, safe_constructors
+import marshal
+import sys
+import struct
+import re
+
+__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
+           "Unpickler", "dump", "dumps", "load", "loads"]
+
+format_version = "1.3"                     # File format version we write
+compatible_formats = ["1.0", "1.1", "1.2"] # Old format versions we can read
+
+# Shorthands for marshal's binary int packing, used by the binary format.
+mdumps = marshal.dumps
+mloads = marshal.loads
+
+# Exception hierarchy: PicklingError on save, UnpicklingError on load.
+class PickleError(Exception): pass
+class PicklingError(PickleError): pass
+class UnpicklingError(PickleError): pass
+
+# Internal control-flow exception: raised by the STOP opcode to carry
+# the finished object out of Unpickler.load's dispatch loop.
+class _Stop(Exception):
+    def __init__(self, value):
+        self.value = value
+
+# Jython exposes its string-keyed dict type; absent on CPython.
+try:
+    from org.python.core import PyStringMap
+except ImportError:
+    PyStringMap = None
+
+# Builds without Unicode support lack UnicodeType in the types module.
+try:
+    UnicodeType
+except NameError:
+    UnicodeType = None
+
+
+# Pickle stream opcodes.  Each is a single character written to the
+# stream; the Unpickler dispatches on them one byte at a time.
+MARK            = '('
+STOP            = '.'
+POP             = '0'
+POP_MARK        = '1'
+DUP             = '2'
+FLOAT           = 'F'
+INT             = 'I'
+BININT          = 'J'
+BININT1         = 'K'
+LONG            = 'L'
+BININT2         = 'M'
+NONE            = 'N'
+PERSID          = 'P'
+BINPERSID       = 'Q'
+REDUCE          = 'R'
+STRING          = 'S'
+BINSTRING       = 'T'
+SHORT_BINSTRING = 'U'
+UNICODE         = 'V'
+BINUNICODE      = 'X'
+APPEND          = 'a'
+BUILD           = 'b'
+GLOBAL          = 'c'
+DICT            = 'd'
+EMPTY_DICT      = '}'
+APPENDS         = 'e'
+GET             = 'g'
+BINGET          = 'h'
+INST            = 'i'
+LONG_BINGET     = 'j'
+LIST            = 'l'
+EMPTY_LIST      = ']'
+OBJ             = 'o'
+PUT             = 'p'
+BINPUT          = 'q'
+LONG_BINPUT     = 'r'
+SETITEM         = 's'
+TUPLE           = 't'
+EMPTY_TUPLE     = ')'
+SETITEMS        = 'u'
+BINFLOAT        = 'G'
+
+# Re-export all the uppercase opcode names defined above.
+__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
+
+class Pickler:
+    """Serialize Python objects to a file-like object.
+
+    Pass bin=1 to use the more compact binary format; the default is
+    the printable-text format.  The memo maps id(obj) to
+    (memo index, obj) so shared and recursive objects are pickled once
+    and referenced thereafter via GET opcodes.
+    """
+
+    def __init__(self, file, bin = 0):
+        # file need only supply a write(string) method.
+        self.write = file.write
+        self.memo = {}
+        self.bin = bin
+
+    def dump(self, object):
+        # Write object's pickled representation, terminated by STOP.
+        self.save(object)
+        self.write(STOP)
+
+    def put(self, i):
+        # Return an opcode string that stores the top of stack in memo
+        # slot i: BINPUT/LONG_BINPUT in binary mode, PUT in text mode.
+        if self.bin:
+            s = mdumps(i)[1:]
+            if i < 256:
+                return BINPUT + s[0]
+
+            return LONG_BINPUT + s
+
+        return PUT + `i` + '\n'
+
+    def get(self, i):
+        # Return an opcode string that pushes memo slot i onto the stack.
+        if self.bin:
+            s = mdumps(i)[1:]
+
+            if i < 256:
+                return BINGET + s[0]
+
+            return LONG_BINGET + s
+
+        return GET + `i` + '\n'
+
+    def save(self, object, pers_save = 0):
+        # Central dispatch: pickle object by checking, in order,
+        # persistent id, empty-tuple special case, the memo, the
+        # per-type dispatch table, and finally copy_reg/__reduce__.
+        memo = self.memo
+
+        if not pers_save:
+            pid = self.persistent_id(object)
+            if pid is not None:
+                self.save_pers(pid)
+                return
+
+        d = id(object)
+
+        t = type(object)
+
+        # Empty tuples get a dedicated opcode in binary mode and are
+        # never memoized.
+        if (t is TupleType) and (len(object) == 0):
+            if self.bin:
+                self.save_empty_tuple(object)
+            else:
+                self.save_tuple(object)
+            return
+
+        # Already pickled: emit a GET reference instead of re-pickling.
+        if memo.has_key(d):
+            self.write(self.get(memo[d][0]))
+            return
+
+        try:
+            f = self.dispatch[t]
+        except KeyError:
+            # No built-in handler for this type: try the extended hooks.
+            pid = self.inst_persistent_id(object)
+            if pid is not None:
+                self.save_pers(pid)
+                return
+
+            try:
+                issc = issubclass(t, TypeType)
+            except TypeError: # t is not a class
+                issc = 0
+            if issc:
+                self.save_global(object)
+                return
+
+            # Fall back to copy_reg's dispatch table, then __reduce__.
+            try:
+                reduce = dispatch_table[t]
+            except KeyError:
+                try:
+                    reduce = object.__reduce__
+                except AttributeError:
+                    raise PicklingError, \
+                        "can't pickle %s object: %s" % (`t.__name__`,
+                                                         `object`)
+                else:
+                    tup = reduce()
+            else:
+                tup = reduce(object)
+
+            # A string result names a global; otherwise it must be a
+            # (callable, args) or (callable, args, state) tuple.
+            if type(tup) is StringType:
+                self.save_global(object, tup)
+                return
+
+            if type(tup) is not TupleType:
+                raise PicklingError, "Value returned by %s must be a " \
+                                     "tuple" % reduce
+
+            l = len(tup)
+
+            if (l != 2) and (l != 3):
+                raise PicklingError, "tuple returned by %s must contain " \
+                                     "only two or three elements" % reduce
+
+            callable = tup[0]
+            arg_tup  = tup[1]
+
+            if l > 2:
+                state = tup[2]
+            else:
+                state = None
+
+            if type(arg_tup) is not TupleType and arg_tup is not None:
+                raise PicklingError, "Second element of tuple returned " \
+                                     "by %s must be a tuple" % reduce
+
+            self.save_reduce(callable, arg_tup, state)
+            memo_len = len(memo)
+            self.write(self.put(memo_len))
+            memo[d] = (memo_len, object)
+            return
+
+        f(self, object)
+
+    def persistent_id(self, object):
+        # Hook for subclasses: return a persistent id, or None.
+        return None
+
+    def inst_persistent_id(self, object):
+        # Hook for subclasses: persistent id for otherwise-unpicklable
+        # objects; consulted only after the dispatch table misses.
+        return None
+
+    def save_pers(self, pid):
+        # Emit a persistent reference: PERSID line (text) or the pickled
+        # pid followed by BINPERSID (binary).
+        if not self.bin:
+            self.write(PERSID + str(pid) + '\n')
+        else:
+            self.save(pid, 1)
+            self.write(BINPERSID)
+
+    def save_reduce(self, callable, arg_tup, state = None):
+        # Emit callable(*arg_tup) via REDUCE, plus optional BUILD state.
+        write = self.write
+        save = self.save
+
+        save(callable)
+        save(arg_tup)
+        write(REDUCE)
+
+        if state is not None:
+            save(state)
+            write(BUILD)
+
+    # Per-type save method table, filled in below as methods are defined.
+    dispatch = {}
+
+    def save_none(self, object):
+        self.write(NONE)
+    dispatch[NoneType] = save_none
+
+    def save_int(self, object):
+        if self.bin:
+            # If the int is small enough to fit in a signed 4-byte 2's-comp
+            # format, we can store it more efficiently than the general
+            # case.
+            high_bits = object >> 31  # note that Python shift sign-extends
+            if  high_bits == 0 or high_bits == -1:
+                # All high bits are copies of bit 2**31, so the value
+                # fits in a 4-byte signed int.
+                i = mdumps(object)[1:]
+                assert len(i) == 4
+                if i[-2:] == '\000\000':    # fits in 2-byte unsigned int
+                    if i[-3] == '\000':     # fits in 1-byte unsigned int
+                        self.write(BININT1 + i[0])
+                    else:
+                        self.write(BININT2 + i[:2])
+                else:
+                    self.write(BININT + i)
+                return
+        # Text pickle, or int too big to fit in signed 4-byte format.
+        self.write(INT + `object` + '\n')
+    dispatch[IntType] = save_int
+
+    def save_long(self, object):
+        # Longs always use the text repr (with trailing L), even in binary.
+        self.write(LONG + `object` + '\n')
+    dispatch[LongType] = save_long
+
+    def save_float(self, object, pack=struct.pack):
+        # Binary: 8-byte big-endian IEEE double; text: repr.
+        if self.bin:
+            self.write(BINFLOAT + pack('>d', object))
+        else:
+            self.write(FLOAT + `object` + '\n')
+    dispatch[FloatType] = save_float
+
+    def save_string(self, object):
+        d = id(object)
+        memo = self.memo
+
+        if self.bin:
+            l = len(object)
+            s = mdumps(l)[1:]
+            if l < 256:
+                self.write(SHORT_BINSTRING + s[0] + object)
+            else:
+                self.write(BINSTRING + s + object)
+        else:
+            # Text mode relies on repr producing a quoted literal that
+            # the Unpickler can eval safely.
+            self.write(STRING + `object` + '\n')
+
+        memo_len = len(memo)
+        self.write(self.put(memo_len))
+        memo[d] = (memo_len, object)
+    dispatch[StringType] = save_string
+
+    def save_unicode(self, object):
+        d = id(object)
+        memo = self.memo
+
+        if self.bin:
+            encoding = object.encode('utf-8')
+            l = len(encoding)
+            s = mdumps(l)[1:]
+            self.write(BINUNICODE + s + encoding)
+        else:
+            # Escape backslash and newline so the one-line UNICODE
+            # opcode survives readline-based parsing.
+            object = object.replace("\\", "\\u005c")
+            object = object.replace("\n", "\\u000a")
+            self.write(UNICODE + object.encode('raw-unicode-escape') + '\n')
+
+        memo_len = len(memo)
+        self.write(self.put(memo_len))
+        memo[d] = (memo_len, object)
+    dispatch[UnicodeType] = save_unicode
+
+    if StringType == UnicodeType:
+        # This is true for Jython
+        # Jython has a single string type, so one save_string must
+        # decide at runtime between the string and unicode opcodes.
+        def save_string(self, object):
+            d = id(object)
+            memo = self.memo
+            unicode = object.isunicode()
+
+            if self.bin:
+                if unicode:
+                    object = object.encode("utf-8")
+                l = len(object)
+                s = mdumps(l)[1:]
+                if l < 256 and not unicode:
+                    self.write(SHORT_BINSTRING + s[0] + object)
+                else:
+                    if unicode:
+                        self.write(BINUNICODE + s + object)
+                    else:
+                        self.write(BINSTRING + s + object)
+            else:
+                if unicode:
+                    object = object.replace("\\", "\\u005c")
+                    object = object.replace("\n", "\\u000a")
+                    object = object.encode('raw-unicode-escape')
+                    self.write(UNICODE + object + '\n')
+                else:
+                    self.write(STRING + `object` + '\n')
+
+            memo_len = len(memo)
+            self.write(self.put(memo_len))
+            memo[d] = (memo_len, object)
+        dispatch[StringType] = save_string
+
+    def save_tuple(self, object):
+        # Elements are saved first; if saving them put this very tuple
+        # in the memo (a recursive tuple), pop what we pushed and emit
+        # a GET reference instead of a duplicate TUPLE.
+
+        write = self.write
+        save  = self.save
+        memo  = self.memo
+
+        d = id(object)
+
+        write(MARK)
+
+        for element in object:
+            save(element)
+
+        if len(object) and memo.has_key(d):
+            if self.bin:
+                write(POP_MARK + self.get(memo[d][0]))
+                return
+
+            write(POP * (len(object) + 1) + self.get(memo[d][0]))
+            return
+
+        memo_len = len(memo)
+        self.write(TUPLE + self.put(memo_len))
+        memo[d] = (memo_len, object)
+    dispatch[TupleType] = save_tuple
+
+    def save_empty_tuple(self, object):
+        # Binary-only shortcut; empty tuples are not memoized.
+        self.write(EMPTY_TUPLE)
+
+    def save_list(self, object):
+        # Memoize the (initially empty) list before saving elements so
+        # recursive lists resolve to the right object.
+        d = id(object)
+
+        write = self.write
+        save  = self.save
+        memo  = self.memo
+
+        if self.bin:
+            write(EMPTY_LIST)
+        else:
+            write(MARK + LIST)
+
+        memo_len = len(memo)
+        write(self.put(memo_len))
+        memo[d] = (memo_len, object)
+
+        # Batch elements with one APPENDS when binary and len > 1.
+        using_appends = (self.bin and (len(object) > 1))
+
+        if using_appends:
+            write(MARK)
+
+        for element in object:
+            save(element)
+
+            if not using_appends:
+                write(APPEND)
+
+        if using_appends:
+            write(APPENDS)
+    dispatch[ListType] = save_list
+
+    def save_dict(self, object):
+        # Same memo-first strategy as save_list; SETITEMS batches pairs.
+        d = id(object)
+
+        write = self.write
+        save  = self.save
+        memo  = self.memo
+
+        if self.bin:
+            write(EMPTY_DICT)
+        else:
+            write(MARK + DICT)
+
+        memo_len = len(memo)
+        self.write(self.put(memo_len))
+        memo[d] = (memo_len, object)
+
+        using_setitems = (self.bin and (len(object) > 1))
+
+        if using_setitems:
+            write(MARK)
+
+        items = object.items()
+        for key, value in items:
+            save(key)
+            save(value)
+
+            if not using_setitems:
+                write(SETITEM)
+
+        if using_setitems:
+            write(SETITEMS)
+
+    dispatch[DictionaryType] = save_dict
+    if not PyStringMap is None:
+        dispatch[PyStringMap] = save_dict
+
+    def save_inst(self, object):
+        # Classic-class instance: emit INST (text) or OBJ (binary) with
+        # __getinitargs__ args, then BUILD with the state from
+        # __getstate__ or __dict__.
+        d = id(object)
+        cls = object.__class__
+
+        memo  = self.memo
+        write = self.write
+        save  = self.save
+
+        if hasattr(object, '__getinitargs__'):
+            args = object.__getinitargs__()
+            len(args) # XXX Assert it's a sequence
+            _keep_alive(args, memo)
+        else:
+            args = ()
+
+        write(MARK)
+
+        if self.bin:
+            save(cls)
+
+        for arg in args:
+            save(arg)
+
+        memo_len = len(memo)
+        if self.bin:
+            write(OBJ + self.put(memo_len))
+        else:
+            write(INST + cls.__module__ + '\n' + cls.__name__ + '\n' +
+                self.put(memo_len))
+
+        memo[d] = (memo_len, object)
+
+        try:
+            getstate = object.__getstate__
+        except AttributeError:
+            stuff = object.__dict__
+        else:
+            stuff = getstate()
+            _keep_alive(stuff, memo)
+        save(stuff)
+        write(BUILD)
+    dispatch[InstanceType] = save_inst
+
+    def save_global(self, object, name = None):
+        # Pickle a class/function/type by module and name, verifying it
+        # can be re-imported and resolves back to the same object.
+        write = self.write
+        memo = self.memo
+
+        if name is None:
+            name = object.__name__
+
+        try:
+            module = object.__module__
+        except AttributeError:
+            module = whichmodule(object, name)
+
+        try:
+            __import__(module)
+            mod = sys.modules[module]
+            klass = getattr(mod, name)
+        except (ImportError, KeyError, AttributeError):
+            raise PicklingError(
+                "Can't pickle %r: it's not found as %s.%s" %
+                (object, module, name))
+        else:
+            if klass is not object:
+                raise PicklingError(
+                    "Can't pickle %r: it's not the same object as %s.%s" %
+                    (object, module, name))
+
+        memo_len = len(memo)
+        write(GLOBAL + module + '\n' + name + '\n' +
+            self.put(memo_len))
+        memo[id(object)] = (memo_len, object)
+    dispatch[ClassType] = save_global
+    dispatch[FunctionType] = save_global
+    dispatch[BuiltinFunctionType] = save_global
+    dispatch[TypeType] = save_global
+
+
+def _keep_alive(x, memo):
+    """Keeps a reference to the object x in the memo.
+
+    Because we remember objects by their id, we have
+    to assure that possibly temporary objects are kept
+    alive by referencing them.
+    We store a reference at the id of the memo, which should
+    normally not be used unless someone tries to deepcopy
+    the memo itself...
+    """
+    try:
+        memo[id(memo)].append(x)
+    except KeyError:
+        # aha, this is the first one :-)
+        memo[id(memo)]=[x]
+
+
+classmap = {} # called classmap for backwards compatibility
+
+def whichmodule(func, funcname):
+    """Figure out the module in which a function occurs.
+
+    Search sys.modules for the module.
+    Cache in classmap.
+    Return a module name.
+    If the function cannot be found, return __main__.
+    """
+    if classmap.has_key(func):
+        return classmap[func]
+
+    # Linear scan of all loaded modules for an attribute named funcname
+    # that is identical to func; first match wins.
+    for name, module in sys.modules.items():
+        if module is None:
+            continue # skip dummy package entries
+        if name != '__main__' and \
+            hasattr(module, funcname) and \
+            getattr(module, funcname) is func:
+            break
+    else:
+        name = '__main__'
+    classmap[func] = name
+    return name
+
+
+class Unpickler:
+    """Reconstruct objects from a pickle stream.
+
+    Reads opcodes one byte at a time and dispatches each to a handler
+    that manipulates self.stack; GET/PUT opcodes reference self.memo
+    (keyed by the repr of the memo index).
+    """
+
+    def __init__(self, file):
+        # file need only supply readline() and read(n).
+        self.readline = file.readline
+        self.read = file.read
+        self.memo = {}
+
+    def load(self):
+        # Main opcode loop; terminates when the STOP handler raises
+        # _Stop carrying the finished object.
+        self.mark = object() # any new unique object
+        self.stack = []
+        self.append = self.stack.append
+        read = self.read
+        dispatch = self.dispatch
+        try:
+            while 1:
+                key = read(1)
+                dispatch[key](self)
+        except _Stop, stopinst:
+            return stopinst.value
+
+    def marker(self):
+        # Index of the topmost MARK object on the stack.
+        stack = self.stack
+        mark = self.mark
+        k = len(stack)-1
+        while stack[k] is not mark: k = k-1
+        return k
+
+    # Opcode byte -> handler method table, filled in below.
+    dispatch = {}
+
+    def load_eof(self):
+        # read(1) returned '' -- the stream ended without a STOP opcode.
+        raise EOFError
+    dispatch[''] = load_eof
+
+    def load_persid(self):
+        pid = self.readline()[:-1]
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID] = load_persid
+
+    def load_binpersid(self):
+        stack = self.stack
+
+        pid = stack[-1]
+        del stack[-1]
+
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE] = load_none
+
+    def load_int(self):
+        data = self.readline()
+        try:
+            self.append(int(data))
+        except ValueError:
+            # Value overflows a plain int; promote to long.
+            self.append(long(data))
+    dispatch[INT] = load_int
+
+    def load_binint(self):
+        # 4-byte little-endian signed int, unpacked via marshal.
+        self.append(mloads('i' + self.read(4)))
+    dispatch[BININT] = load_binint
+
+    def load_binint1(self):
+        self.append(mloads('i' + self.read(1) + '\000\000\000'))
+    dispatch[BININT1] = load_binint1
+
+    def load_binint2(self):
+        self.append(mloads('i' + self.read(2) + '\000\000'))
+    dispatch[BININT2] = load_binint2
+
+    def load_long(self):
+        # base 0 lets long() accept the repr written by save_long.
+        self.append(long(self.readline()[:-1], 0))
+    dispatch[LONG] = load_long
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT] = load_float
+
+    def load_binfloat(self, unpack=struct.unpack):
+        # 8-byte big-endian IEEE double.
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT] = load_binfloat
+
+    def load_string(self):
+        # Text-mode string: a repr'd literal evaluated with empty
+        # builtins after _is_string_secure vets it.
+        rep = self.readline()[:-1]
+        if not self._is_string_secure(rep):
+            raise ValueError, "insecure string pickle"
+        self.append(eval(rep,
+                         {'__builtins__': {}})) # Let's be careful
+    dispatch[STRING] = load_string
+
+    def _is_string_secure(self, s):
+        """Return true if s contains a string that is safe to eval
+
+        The definition of secure string is based on the implementation
+        in cPickle.  s is secure as long as it only contains a quoted
+        string and optional trailing whitespace.
+        """
+        q = s[0]
+        if q not in ("'", '"'):
+            return 0
+        # find the closing quote
+        offset = 1
+        i = None
+        while 1:
+            try:
+                i = s.index(q, offset)
+            except ValueError:
+                # if there is an error the first time, there is no
+                # close quote
+                # NOTE(review): on a later iteration this falls through
+                # with a stale i and unchanged offset -- looks like it
+                # could loop; faithful to upstream CPython 2.2.
+                if offset == 1:
+                    return 0
+            if s[i-1] != '\\':
+                break
+            # check to see if this one is escaped
+            nslash = 0
+            j = i - 1
+            while j >= offset and s[j] == '\\':
+                j = j - 1
+                nslash = nslash + 1
+            if nslash % 2 == 0:
+                break
+            offset = i + 1
+        # Only whitespace/control chars may follow the closing quote.
+        for c in s[i+1:]:
+            if ord(c) > 32:
+                return 0
+        return 1
+
+    def load_binstring(self):
+        len = mloads('i' + self.read(4))
+        self.append(self.read(len))
+    dispatch[BINSTRING] = load_binstring
+
+    def load_unicode(self):
+        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
+    dispatch[UNICODE] = load_unicode
+
+    def load_binunicode(self):
+        len = mloads('i' + self.read(4))
+        self.append(unicode(self.read(len),'utf-8'))
+    dispatch[BINUNICODE] = load_binunicode
+
+    def load_short_binstring(self):
+        len = mloads('i' + self.read(1) + '\000\000\000')
+        self.append(self.read(len))
+    dispatch[SHORT_BINSTRING] = load_short_binstring
+
+    def load_tuple(self):
+        # Collapse everything above the topmost MARK into one tuple.
+        k = self.marker()
+        self.stack[k:] = [tuple(self.stack[k+1:])]
+    dispatch[TUPLE] = load_tuple
+
+    def load_empty_tuple(self):
+        self.stack.append(())
+    dispatch[EMPTY_TUPLE] = load_empty_tuple
+
+    def load_empty_list(self):
+        self.stack.append([])
+    dispatch[EMPTY_LIST] = load_empty_list
+
+    def load_empty_dictionary(self):
+        self.stack.append({})
+    dispatch[EMPTY_DICT] = load_empty_dictionary
+
+    def load_list(self):
+        k = self.marker()
+        self.stack[k:] = [self.stack[k+1:]]
+    dispatch[LIST] = load_list
+
+    def load_dict(self):
+        # Items above the MARK alternate key, value.
+        k = self.marker()
+        d = {}
+        items = self.stack[k+1:]
+        for i in range(0, len(items), 2):
+            key = items[i]
+            value = items[i+1]
+            d[key] = value
+        self.stack[k:] = [d]
+    dispatch[DICT] = load_dict
+
+    def load_inst(self):
+        # INST: module and class names follow on two lines; args sit
+        # above the MARK.  Classic classes with no args and no
+        # __getinitargs__ are instantiated without calling __init__.
+        k = self.marker()
+        args = tuple(self.stack[k+1:])
+        del self.stack[k:]
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        instantiated = 0
+        if (not args and type(klass) is ClassType and
+            not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+                instantiated = 1
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            try:
+                if not hasattr(klass, '__safe_for_unpickling__'):
+                    raise UnpicklingError('%s is not safe for unpickling' %
+                                          klass)
+                value = apply(klass, args)
+            except TypeError, err:
+                raise TypeError, "in constructor for %s: %s" % (
+                    klass.__name__, str(err)), sys.exc_info()[2]
+        self.append(value)
+    dispatch[INST] = load_inst
+
+    def load_obj(self):
+        # OBJ: like INST but the class object is already on the stack
+        # just above the MARK (binary mode).
+        stack = self.stack
+        k = self.marker()
+        klass = stack[k + 1]
+        del stack[k + 1]
+        args = tuple(stack[k + 1:])
+        del stack[k:]
+        instantiated = 0
+        if (not args and type(klass) is ClassType and
+            not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+                instantiated = 1
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            value = apply(klass, args)
+        self.append(value)
+    dispatch[OBJ] = load_obj
+
+    def load_global(self):
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL] = load_global
+
+    def find_class(self, module, name):
+        # Overridable hook: import module and fetch the named attribute.
+        __import__(module)
+        mod = sys.modules[module]
+        klass = getattr(mod, name)
+        return klass
+
+    def load_reduce(self):
+        # REDUCE: call callable(*arg_tup) from the top two stack items,
+        # but only if the callable is registered or marked safe.
+        stack = self.stack
+
+        callable = stack[-2]
+        arg_tup  = stack[-1]
+        del stack[-2:]
+
+        if type(callable) is not ClassType:
+            if not safe_constructors.has_key(callable):
+                try:
+                    safe = callable.__safe_for_unpickling__
+                except AttributeError:
+                    safe = None
+
+                if not safe:
+                    raise UnpicklingError, "%s is not safe for " \
+                                           "unpickling" % callable
+
+        if arg_tup is None:
+            # None args means "basic new" -- no constructor call.
+            value = callable.__basicnew__()
+        else:
+            value = apply(callable, arg_tup)
+        self.append(value)
+    dispatch[REDUCE] = load_reduce
+
+    def load_pop(self):
+        del self.stack[-1]
+    dispatch[POP] = load_pop
+
+    def load_pop_mark(self):
+        k = self.marker()
+        del self.stack[k:]
+    dispatch[POP_MARK] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP] = load_dup
+
+    def load_get(self):
+        # Text-mode memo keys are the decimal digits as written.
+        self.append(self.memo[self.readline()[:-1]])
+    dispatch[GET] = load_get
+
+    def load_binget(self):
+        # Binary memo keys are repr(index), matching load_binput.
+        i = mloads('i' + self.read(1) + '\000\000\000')
+        self.append(self.memo[`i`])
+    dispatch[BINGET] = load_binget
+
+    def load_long_binget(self):
+        i = mloads('i' + self.read(4))
+        self.append(self.memo[`i`])
+    dispatch[LONG_BINGET] = load_long_binget
+
+    def load_put(self):
+        self.memo[self.readline()[:-1]] = self.stack[-1]
+    dispatch[PUT] = load_put
+
+    def load_binput(self):
+        i = mloads('i' + self.read(1) + '\000\000\000')
+        self.memo[`i`] = self.stack[-1]
+    dispatch[BINPUT] = load_binput
+
+    def load_long_binput(self):
+        i = mloads('i' + self.read(4))
+        self.memo[`i`] = self.stack[-1]
+    dispatch[LONG_BINPUT] = load_long_binput
+
+    def load_append(self):
+        stack = self.stack
+        value = stack[-1]
+        del stack[-1]
+        list = stack[-1]
+        list.append(value)
+    dispatch[APPEND] = load_append
+
+    def load_appends(self):
+        # Batch form: append everything above the MARK to the list
+        # sitting just below it.
+        stack = self.stack
+        mark = self.marker()
+        list = stack[mark - 1]
+        for i in range(mark + 1, len(stack)):
+            list.append(stack[i])
+
+        del stack[mark:]
+    dispatch[APPENDS] = load_appends
+
+    def load_setitem(self):
+        stack = self.stack
+        value = stack[-1]
+        key = stack[-2]
+        del stack[-2:]
+        dict = stack[-1]
+        dict[key] = value
+    dispatch[SETITEM] = load_setitem
+
+    def load_setitems(self):
+        # Batch form: key/value pairs above the MARK go into the dict
+        # just below it.
+        stack = self.stack
+        mark = self.marker()
+        dict = stack[mark - 1]
+        for i in range(mark + 1, len(stack), 2):
+            dict[stack[i]] = stack[i + 1]
+
+        del stack[mark:]
+    dispatch[SETITEMS] = load_setitems
+
+    def load_build(self):
+        # BUILD: apply state to the instance below it, via __setstate__
+        # if defined, else by updating __dict__.
+        stack = self.stack
+        value = stack[-1]
+        del stack[-1]
+        inst = stack[-1]
+        try:
+            setstate = inst.__setstate__
+        except AttributeError:
+            try:
+                inst.__dict__.update(value)
+            except RuntimeError:
+                # XXX In restricted execution, the instance's __dict__ is not
+                # accessible.  Use the old way of unpickling the instance
+                # variables.  This is a semantic different when unpickling in
+                # restricted vs. unrestricted modes.
+                for k, v in value.items():
+                    setattr(inst, k, v)
+        else:
+            setstate(value)
+    dispatch[BUILD] = load_build
+
+    def load_mark(self):
+        self.append(self.mark)
+    dispatch[MARK] = load_mark
+
+    def load_stop(self):
+        # Terminate load() by raising _Stop with the finished object.
+        value = self.stack[-1]
+        del self.stack[-1]
+        raise _Stop(value)
+    dispatch[STOP] = load_stop
+
+# Helper class for load_inst/load_obj
+
+class _EmptyClass:
+    # Instantiated by load_inst/load_obj, then has __class__ reassigned,
+    # so the real class's __init__ is never called.
+    pass
+
+# Shorthands
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+def dump(object, file, bin = 0):
+    # Pickle object to an open file; bin=1 selects the binary format.
+    Pickler(file, bin).dump(object)
+
+def dumps(object, bin = 0):
+    # Pickle object and return the result as a string.
+    file = StringIO()
+    Pickler(file, bin).dump(object)
+    return file.getvalue()
+
+def load(file):
+    # Read one pickled object from an open file.
+    return Unpickler(file).load()
+
+def loads(str):
+    # Read one pickled object from a string.
+    file = StringIO(str)
+    return Unpickler(file).load()
diff --git a/lib-python/2.2/pipes.py b/lib-python/2.2/pipes.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pipes.py
@@ -0,0 +1,297 @@
+"""Conversion pipeline templates.
+
+The problem:
+------------
+
+Suppose you have some data that you want to convert to another format,
+such as from GIF image format to PPM image format.  Maybe the
+conversion involves several steps (e.g. piping it through compress or
+uuencode).  Some of the conversion steps may require that their input
+is a disk file, others may be able to read standard input; similar for
+their output.  The input to the entire conversion may also be read
+from a disk file or from an open file, and similar for its output.
+
+The module lets you construct a pipeline template by sticking one or
+more conversion steps together.  It will take care of creating and
+removing temporary files if they are necessary to hold intermediate
+data.  You can then use the template to do conversions from many
+different sources to many different destinations.  The temporary
+file names used are different each time the template is used.
+
+The templates are objects so you can create templates for many
+different conversion steps and store them in a dictionary, for
+instance.
+
+
+Directions:
+-----------
+
+To create a template:
+    t = Template()
+
+To add a conversion step to a template:
+   t.append(command, kind)
+where kind is a string of two characters: the first is '-' if the
+command reads its standard input or 'f' if it requires a file; the
+second likewise for the output. The command must be valid /bin/sh
+syntax.  If input or output files are required, they are passed as
+$IN and $OUT; otherwise, it must be possible to use the command in
+a pipeline.
+
+To add a conversion step at the beginning:
+   t.prepend(command, kind)
+
+To convert a file to another file using a template:
+  sts = t.copy(infile, outfile)
+If infile or outfile are the empty string, standard input is read or
+standard output is written, respectively.  The return value is the
+exit status of the conversion pipeline.
+
+To open a file for reading or writing through a conversion pipeline:
+   fp = t.open(file, mode)
+where mode is 'r' to read the file, or 'w' to write it -- just like
+for the built-in function open() or for os.popen().
+
+To create a new template object initialized to a given one:
+   t2 = t.clone()
+
+For an example, see the function test() at the end of the file.
+"""                                     # '
+
+
+import re
+
+import os
+import tempfile
+import string
+
+__all__ = ["Template"]
+
+# Conversion step kinds
+
+FILEIN_FILEOUT = 'ff'                   # Must read & write real files
+STDIN_FILEOUT  = '-f'                   # Must write a real file
+FILEIN_STDOUT  = 'f-'                   # Must read a real file
+STDIN_STDOUT   = '--'                   # Normal pipeline element
+SOURCE         = '.-'                   # Must be first, writes stdout
+SINK           = '-.'                   # Must be last, reads stdin
+
+stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
+             SOURCE, SINK]
+
+
+class Template:
+    """Class representing a pipeline template."""
+
+    def __init__(self):
+        """Template() returns a fresh pipeline template."""
+        self.debugging = 0
+        self.reset()
+
+    def __repr__(self):
+        """t.__repr__() implements `t`."""
+        return '<Template instance, steps=' + `self.steps` + '>'
+
+    def reset(self):
+        """t.reset() restores a pipeline template to its initial state."""
+        self.steps = []
+
+    def clone(self):
+        """t.clone() returns a new pipeline template with identical
+        initial state as the current one."""
+        t = Template()
+        t.steps = self.steps[:]
+        t.debugging = self.debugging
+        return t
+
+    def debug(self, flag):
+        """t.debug(flag) turns debugging on or off."""
+        self.debugging = flag
+
+    def append(self, cmd, kind):
+        """t.append(cmd, kind) adds a new step at the end."""
+        if type(cmd) is not type(''):
+            raise TypeError, \
+                  'Template.append: cmd must be a string'
+        if kind not in stepkinds:
+            raise ValueError, \
+                  'Template.append: bad kind ' + `kind`
+        if kind == SOURCE:
+            raise ValueError, \
+                  'Template.append: SOURCE can only be prepended'
+        if self.steps and self.steps[-1][1] == SINK:
+            raise ValueError, \
+                  'Template.append: already ends with SINK'
+        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
+            raise ValueError, \
+                  'Template.append: missing $IN in cmd'
+        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
+            raise ValueError, \
+                  'Template.append: missing $OUT in cmd'
+        self.steps.append((cmd, kind))
+
+    def prepend(self, cmd, kind):
+        """t.prepend(cmd, kind) adds a new step at the front."""
+        if type(cmd) is not type(''):
+            raise TypeError, \
+                  'Template.prepend: cmd must be a string'
+        if kind not in stepkinds:
+            raise ValueError, \
+                  'Template.prepend: bad kind ' + `kind`
+        if kind == SINK:
+            raise ValueError, \
+                  'Template.prepend: SINK can only be appended'
+        if self.steps and self.steps[0][1] == SOURCE:
+            raise ValueError, \
+                  'Template.prepend: already begins with SOURCE'
+        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
+            raise ValueError, \
+                  'Template.prepend: missing $IN in cmd'
+        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
+            raise ValueError, \
+                  'Template.prepend: missing $OUT in cmd'
+        self.steps.insert(0, (cmd, kind))
+
+    def open(self, file, rw):
+        """t.open(file, rw) returns a pipe or file object open for
+        reading or writing; the file is the other end of the pipeline."""
+        if rw == 'r':
+            return self.open_r(file)
+        if rw == 'w':
+            return self.open_w(file)
+        raise ValueError, \
+              'Template.open: rw must be \'r\' or \'w\', not ' + `rw`
+
+    def open_r(self, file):
+        """t.open_r(file) and t.open_w(file) implement
+        t.open(file, 'r') and t.open(file, 'w') respectively."""
+        if not self.steps:
+            return open(file, 'r')
+        if self.steps[-1][1] == SINK:
+            raise ValueError, \
+                  'Template.open_r: pipeline ends with SINK'
+        cmd = self.makepipeline(file, '')
+        return os.popen(cmd, 'r')
+
+    def open_w(self, file):
+        if not self.steps:
+            return open(file, 'w')
+        if self.steps[0][1] == SOURCE:
+            raise ValueError, \
+                  'Template.open_w: pipeline begins with SOURCE'
+        cmd = self.makepipeline('', file)
+        return os.popen(cmd, 'w')
+
+    def copy(self, infile, outfile):
+        return os.system(self.makepipeline(infile, outfile))
+
+    def makepipeline(self, infile, outfile):
+        cmd = makepipeline(infile, self.steps, outfile)
+        if self.debugging:
+            print cmd
+            cmd = 'set -x; ' + cmd
+        return cmd
+
+
+def makepipeline(infile, steps, outfile):
+    # Build a list with for each command:
+    # [input filename or '', command string, kind, output filename or '']
+
+    list = []
+    for cmd, kind in steps:
+        list.append(['', cmd, kind, ''])
+    #
+    # Make sure there is at least one step
+    #
+    if not list:
+        list.append(['', 'cat', '--', ''])
+    #
+    # Take care of the input and output ends
+    #
+    [cmd, kind] = list[0][1:3]
+    if kind[0] == 'f' and not infile:
+        list.insert(0, ['', 'cat', '--', ''])
+    list[0][0] = infile
+    #
+    [cmd, kind] = list[-1][1:3]
+    if kind[1] == 'f' and not outfile:
+        list.append(['', 'cat', '--', ''])
+    list[-1][-1] = outfile
+    #
+    # Invent temporary files to connect stages that need files
+    #
+    garbage = []
+    for i in range(1, len(list)):
+        lkind = list[i-1][2]
+        rkind = list[i][2]
+        if lkind[1] == 'f' or rkind[0] == 'f':
+            temp = tempfile.mktemp()
+            garbage.append(temp)
+            list[i-1][-1] = list[i][0] = temp
+    #
+    for item in list:
+        [inf, cmd, kind, outf] = item
+        if kind[1] == 'f':
+            cmd = 'OUT=' + quote(outf) + '; ' + cmd
+        if kind[0] == 'f':
+            cmd = 'IN=' + quote(inf) + '; ' + cmd
+        if kind[0] == '-' and inf:
+            cmd = cmd + ' <' + quote(inf)
+        if kind[1] == '-' and outf:
+            cmd = cmd + ' >' + quote(outf)
+        item[1] = cmd
+    #
+    cmdlist = list[0][1]
+    for item in list[1:]:
+        [cmd, kind] = item[1:3]
+        if item[0] == '':
+            if 'f' in kind:
+                cmd = '{ ' + cmd + '; }'
+            cmdlist = cmdlist + ' |\n' + cmd
+        else:
+            cmdlist = cmdlist + '\n' + cmd
+    #
+    if garbage:
+        rmcmd = 'rm -f'
+        for file in garbage:
+            rmcmd = rmcmd + ' ' + quote(file)
+        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
+        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
+    #
+    return cmdlist
+
+
+# Reliably quote a string as a single argument for /bin/sh
+
+_safechars = string.ascii_letters + string.digits + '!@%_-+=:,./' # Safe unquoted
+_funnychars = '"`$\\'                           # Unsafe inside "double quotes"
+
+def quote(file):
+    for c in file:
+        if c not in _safechars:
+            break
+    else:
+        return file
+    if '\'' not in file:
+        return '\'' + file + '\''
+    res = ''
+    for c in file:
+        if c in _funnychars:
+            c = '\\' + c
+        res = res + c
+    return '"' + res + '"'
+
+
+# Small test program and example
+
+def test():
+    print 'Testing...'
+    t = Template()
+    t.append('togif $IN $OUT', 'ff')
+    t.append('giftoppm', '--')
+    t.append('ppmtogif >$OUT', '-f')
+    t.append('fromgif $IN $OUT', 'ff')
+    t.debug(1)
+    FILE = '/usr/local/images/rgb/rogues/guido.rgb'
+    t.copy(FILE, '@temp')
+    print 'Done.'
diff --git a/lib-python/2.2/plat-aix3/IN.py b/lib-python/2.2/plat-aix3/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-aix3/IN.py
@@ -0,0 +1,126 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from net/nh.h
+
+# Included from sys/machine.h
+LITTLE_ENDIAN = 1234
+BIG_ENDIAN = 4321
+PDP_ENDIAN = 3412
+BYTE_ORDER = BIG_ENDIAN
+DEFAULT_GPR = 0xDEADBEEF
+MSR_EE = 0x8000
+MSR_PR = 0x4000
+MSR_FP = 0x2000
+MSR_ME = 0x1000
+MSR_FE = 0x0800
+MSR_FE0 = 0x0800
+MSR_SE = 0x0400
+MSR_BE = 0x0200
+MSR_IE = 0x0100
+MSR_FE1 = 0x0100
+MSR_AL = 0x0080
+MSR_IP = 0x0040
+MSR_IR = 0x0020
+MSR_DR = 0x0010
+MSR_PM = 0x0004
+DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
+DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
+CR_LT = 0x80000000
+CR_GT = 0x40000000
+CR_EQ = 0x20000000
+CR_SO = 0x10000000
+CR_FX = 0x08000000
+CR_FEX = 0x04000000
+CR_VX = 0x02000000
+CR_OX = 0x01000000
+XER_SO = 0x80000000
+XER_OV = 0x40000000
+XER_CA = 0x20000000
+def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
+
+def XER_LENGTH(xer): return (xer & 0x0000007F)
+
+DSISR_IO = 0x80000000
+DSISR_PFT = 0x40000000
+DSISR_LOCK = 0x20000000
+DSISR_FPIO = 0x10000000
+DSISR_PROT = 0x08000000
+DSISR_LOOP = 0x04000000
+DSISR_DRST = 0x04000000
+DSISR_ST = 0x02000000
+DSISR_SEGB = 0x01000000
+DSISR_DABR = 0x00400000
+DSISR_EAR = 0x00100000
+SRR_IS_PFT = 0x40000000
+SRR_IS_ISPEC = 0x20000000
+SRR_IS_IIO = 0x10000000
+SRR_IS_PROT = 0x08000000
+SRR_IS_LOOP = 0x04000000
+SRR_PR_FPEN = 0x00100000
+SRR_PR_INVAL = 0x00080000
+SRR_PR_PRIV = 0x00040000
+SRR_PR_TRAP = 0x00020000
+SRR_PR_IMPRE = 0x00010000
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_GGP = 3
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_TP = 29
+IPPROTO_LOCAL = 63
+IPPROTO_EON = 80
+IPPROTO_BIP = 0x53
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_TIMESERVER = 37
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_LOOPBACK = 0x7f000001
+INADDR_BROADCAST = 0xffffffff
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
diff --git a/lib-python/2.2/plat-aix3/regen b/lib-python/2.2/plat-aix3/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-aix3/regen
@@ -0,0 +1,8 @@
+#! /bin/sh
+case `uname -sv` in
+'AIX 3'*)  ;;
+*)      echo Probably not on an AIX 3 system 1>&2
+        exit 1;;
+esac
+set -v
+h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-aix4/IN.py b/lib-python/2.2/plat-aix4/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-aix4/IN.py
@@ -0,0 +1,165 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from net/nh.h
+
+# Included from sys/machine.h
+LITTLE_ENDIAN = 1234
+BIG_ENDIAN = 4321
+PDP_ENDIAN = 3412
+BYTE_ORDER = BIG_ENDIAN
+DEFAULT_GPR = 0xDEADBEEF
+MSR_EE = 0x8000
+MSR_PR = 0x4000
+MSR_FP = 0x2000
+MSR_ME = 0x1000
+MSR_FE = 0x0800
+MSR_FE0 = 0x0800
+MSR_SE = 0x0400
+MSR_BE = 0x0200
+MSR_IE = 0x0100
+MSR_FE1 = 0x0100
+MSR_AL = 0x0080
+MSR_IP = 0x0040
+MSR_IR = 0x0020
+MSR_DR = 0x0010
+MSR_PM = 0x0004
+DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
+DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
+CR_LT = 0x80000000
+CR_GT = 0x40000000
+CR_EQ = 0x20000000
+CR_SO = 0x10000000
+CR_FX = 0x08000000
+CR_FEX = 0x04000000
+CR_VX = 0x02000000
+CR_OX = 0x01000000
+XER_SO = 0x80000000
+XER_OV = 0x40000000
+XER_CA = 0x20000000
+def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
+
+def XER_LENGTH(xer): return (xer & 0x0000007F)
+
+DSISR_IO = 0x80000000
+DSISR_PFT = 0x40000000
+DSISR_LOCK = 0x20000000
+DSISR_FPIO = 0x10000000
+DSISR_PROT = 0x08000000
+DSISR_LOOP = 0x04000000
+DSISR_DRST = 0x04000000
+DSISR_ST = 0x02000000
+DSISR_SEGB = 0x01000000
+DSISR_DABR = 0x00400000
+DSISR_EAR = 0x00100000
+SRR_IS_PFT = 0x40000000
+SRR_IS_ISPEC = 0x20000000
+SRR_IS_IIO = 0x10000000
+SRR_IS_GUARD = 0x10000000
+SRR_IS_PROT = 0x08000000
+SRR_IS_LOOP = 0x04000000
+SRR_PR_FPEN = 0x00100000
+SRR_PR_INVAL = 0x00080000
+SRR_PR_PRIV = 0x00040000
+SRR_PR_TRAP = 0x00020000
+SRR_PR_IMPRE = 0x00010000
+def BUID_7F_SRVAL(raddr): return (0x87F00000 | (((uint)(raddr)) >> 28))
+
+BT_256M = 0x1FFC
+BT_128M = 0x0FFC
+BT_64M = 0x07FC
+BT_32M = 0x03FC
+BT_16M = 0x01FC
+BT_8M = 0x00FC
+BT_4M = 0x007C
+BT_2M = 0x003C
+BT_1M = 0x001C
+BT_512K = 0x000C
+BT_256K = 0x0004
+BT_128K = 0x0000
+BT_NOACCESS = 0x0
+BT_RDONLY = 0x1
+BT_WRITE = 0x2
+BT_VS = 0x2
+BT_VP = 0x1
+def BAT_ESEG(dbatu): return (((uint)(dbatu) >> 28))
+
+MIN_BAT_SIZE = 0x00020000
+MAX_BAT_SIZE = 0x10000000
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_TP = 29
+IPPROTO_LOCAL = 63
+IPPROTO_EON = 80
+IPPROTO_BIP = 0x53
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_TIMESERVER = 37
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_BROADCAST = 0xffffffff
+INADDR_LOOPBACK = 0x7f000001
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
diff --git a/lib-python/2.2/plat-aix4/regen b/lib-python/2.2/plat-aix4/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-aix4/regen
@@ -0,0 +1,8 @@
+#! /bin/sh
+case `uname -sv` in
+'AIX 4'*)  ;;
+*)      echo Probably not on an AIX 4 system 1>&2
+        exit 1;;
+esac
+set -v
+h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-beos5/IN.py b/lib-python/2.2/plat-beos5/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-beos5/IN.py
@@ -0,0 +1,327 @@
+# Generated by h2py from /boot/develop/headers/be/net/netinet/in.h
+
+# Included from socket.h
+
+# Included from BeBuild.h
+B_BEOS_VERSION_4 = 0x0400
+B_BEOS_VERSION_4_5 = 0x0450
+B_BEOS_VERSION_5 = 0x0500
+B_BEOS_VERSION = B_BEOS_VERSION_5
+B_BEOS_VERSION_MAUI = B_BEOS_VERSION_5
+_PR2_COMPATIBLE_ = 1
+_PR3_COMPATIBLE_ = 1
+_R4_COMPATIBLE_ = 1
+_R4_5_COMPATIBLE_ = 1
+_PR2_COMPATIBLE_ = 0
+_PR3_COMPATIBLE_ = 0
+_R4_COMPATIBLE_ = 1
+_R4_5_COMPATIBLE_ = 1
+def _UNUSED(x): return x
+
+
+# Included from sys/types.h
+
+# Included from time.h
+
+# Included from be_setup.h
+def __std(ref): return ref
+
+__be_os = 2
+__dest_os = __be_os
+__MSL__ = 0x4011
+__GLIBC__ = -2
+__GLIBC_MINOR__ = 1
+
+# Included from null.h
+NULL = (0)
+NULL = 0L
+
+# Included from size_t.h
+
+# Included from stddef.h
+
+# Included from wchar_t.h
+CLOCKS_PER_SEC = 1000
+CLK_TCK = CLOCKS_PER_SEC
+MAX_TIMESTR = 70
+
+# Included from sys/time.h
+
+# Included from ByteOrder.h
+
+# Included from endian.h
+__LITTLE_ENDIAN = 1234
+LITTLE_ENDIAN = __LITTLE_ENDIAN
+__BYTE_ORDER = __LITTLE_ENDIAN
+BYTE_ORDER = __BYTE_ORDER
+__BIG_ENDIAN = 0
+BIG_ENDIAN = 0
+__BIG_ENDIAN = 4321
+BIG_ENDIAN = __BIG_ENDIAN
+__BYTE_ORDER = __BIG_ENDIAN
+BYTE_ORDER = __BYTE_ORDER
+__LITTLE_ENDIAN = 0
+LITTLE_ENDIAN = 0
+__PDP_ENDIAN = 3412
+PDP_ENDIAN = __PDP_ENDIAN
+
+# Included from SupportDefs.h
+
+# Included from Errors.h
+
+# Included from limits.h
+
+# Included from float.h
+FLT_ROUNDS = 1
+FLT_RADIX = 2
+FLT_MANT_DIG = 24
+FLT_DIG = 6
+FLT_MIN_EXP = (-125)
+FLT_MIN_10_EXP = (-37)
+FLT_MAX_EXP = 128
+FLT_MAX_10_EXP = 38
+DBL_MANT_DIG = 53
+DBL_DIG = 15
+DBL_MIN_EXP = (-1021)
+DBL_MIN_10_EXP = (-308)
+DBL_MAX_EXP = 1024
+DBL_MAX_10_EXP = 308
+LDBL_MANT_DIG = DBL_MANT_DIG
+LDBL_DIG = DBL_DIG
+LDBL_MIN_EXP = DBL_MIN_EXP
+LDBL_MIN_10_EXP = DBL_MIN_10_EXP
+LDBL_MAX_EXP = DBL_MAX_EXP
+LDBL_MAX_10_EXP = DBL_MAX_10_EXP
+CHAR_BIT = (8)
+SCHAR_MIN = (-127-1)
+SCHAR_MAX = (127)
+CHAR_MIN = SCHAR_MIN
+CHAR_MAX = SCHAR_MAX
+MB_LEN_MAX = (1)
+SHRT_MIN = (-32767-1)
+SHRT_MAX = (32767)
+LONG_MIN = (-2147483647L-1)
+LONG_MAX = (2147483647L)
+INT_MIN = LONG_MIN
+INT_MAX = LONG_MAX
+ARG_MAX = (32768)
+ATEXIT_MAX = (32)
+CHILD_MAX = (1024)
+IOV_MAX = (256)
+FILESIZEBITS = (64)
+LINK_MAX = (1)
+LOGIN_NAME_MAX = (32)
+MAX_CANON = (255)
+MAX_INPUT = (255)
+NAME_MAX = (256)
+NGROUPS_MAX = (32)
+OPEN_MAX = (128)
+PATH_MAX = (1024)
+PIPE_MAX = (512)
+SSIZE_MAX = (2147483647L)
+TTY_NAME_MAX = (256)
+TZNAME_MAX = (32)
+SYMLINKS_MAX = (16)
+_POSIX_ARG_MAX = (32768)
+_POSIX_CHILD_MAX = (1024)
+_POSIX_LINK_MAX = (1)
+_POSIX_LOGIN_NAME_MAX = (9)
+_POSIX_MAX_CANON = (255)
+_POSIX_MAX_INPUT = (255)
+_POSIX_NAME_MAX = (255)
+_POSIX_NGROUPS_MAX = (0)
+_POSIX_OPEN_MAX = (128)
+_POSIX_PATH_MAX = (1024)
+_POSIX_PIPE_BUF = (512)
+_POSIX_SSIZE_MAX = (2147483647L)
+_POSIX_STREAM_MAX = (8)
+_POSIX_TTY_NAME_MAX = (256)
+_POSIX_TZNAME_MAX = (3)
+B_GENERAL_ERROR_BASE = LONG_MIN
+B_OS_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x1000
+B_APP_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x2000
+B_INTERFACE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x3000
+B_MEDIA_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x4000
+B_TRANSLATION_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x4800
+B_MIDI_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x5000
+B_STORAGE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x6000
+B_POSIX_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x7000
+B_MAIL_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x8000
+B_PRINT_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x9000
+B_DEVICE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0xa000
+B_ERRORS_END = (B_GENERAL_ERROR_BASE + 0xffff)
+E2BIG = (B_POSIX_ERROR_BASE + 1)
+ECHILD = (B_POSIX_ERROR_BASE + 2)
+EDEADLK = (B_POSIX_ERROR_BASE + 3)
+EFBIG = (B_POSIX_ERROR_BASE + 4)
+EMLINK = (B_POSIX_ERROR_BASE + 5)
+ENFILE = (B_POSIX_ERROR_BASE + 6)
+ENODEV = (B_POSIX_ERROR_BASE + 7)
+ENOLCK = (B_POSIX_ERROR_BASE + 8)
+ENOSYS = (B_POSIX_ERROR_BASE + 9)
+ENOTTY = (B_POSIX_ERROR_BASE + 10)
+ENXIO = (B_POSIX_ERROR_BASE + 11)
+ESPIPE = (B_POSIX_ERROR_BASE + 12)
+ESRCH = (B_POSIX_ERROR_BASE + 13)
+EFPOS = (B_POSIX_ERROR_BASE + 14)
+ESIGPARM = (B_POSIX_ERROR_BASE + 15)
+EDOM = (B_POSIX_ERROR_BASE + 16)
+ERANGE = (B_POSIX_ERROR_BASE + 17)
+EPROTOTYPE = (B_POSIX_ERROR_BASE + 18)
+EPROTONOSUPPORT = (B_POSIX_ERROR_BASE + 19)
+EPFNOSUPPORT = (B_POSIX_ERROR_BASE + 20)
+EAFNOSUPPORT = (B_POSIX_ERROR_BASE + 21)
+EADDRINUSE = (B_POSIX_ERROR_BASE + 22)
+EADDRNOTAVAIL = (B_POSIX_ERROR_BASE + 23)
+ENETDOWN = (B_POSIX_ERROR_BASE + 24)
+ENETUNREACH = (B_POSIX_ERROR_BASE + 25)
+ENETRESET = (B_POSIX_ERROR_BASE + 26)
+ECONNABORTED = (B_POSIX_ERROR_BASE + 27)
+ECONNRESET = (B_POSIX_ERROR_BASE + 28)
+EISCONN = (B_POSIX_ERROR_BASE + 29)
+ENOTCONN = (B_POSIX_ERROR_BASE + 30)
+ESHUTDOWN = (B_POSIX_ERROR_BASE + 31)
+ECONNREFUSED = (B_POSIX_ERROR_BASE + 32)
+EHOSTUNREACH = (B_POSIX_ERROR_BASE + 33)
+ENOPROTOOPT = (B_POSIX_ERROR_BASE + 34)
+ENOBUFS = (B_POSIX_ERROR_BASE + 35)
+EINPROGRESS = (B_POSIX_ERROR_BASE + 36)
+EALREADY = (B_POSIX_ERROR_BASE + 37)
+EILSEQ = (B_POSIX_ERROR_BASE + 38)
+ENOMSG = (B_POSIX_ERROR_BASE + 39)
+ESTALE = (B_POSIX_ERROR_BASE + 40)
+EOVERFLOW = (B_POSIX_ERROR_BASE + 41)
+EMSGSIZE = (B_POSIX_ERROR_BASE + 42)
+EOPNOTSUPP = (B_POSIX_ERROR_BASE + 43)
+ENOTSOCK = (B_POSIX_ERROR_BASE + 44)
+false = 0
+true = 1
+NULL = (0)
+FALSE = 0
+TRUE = 1
+
+# Included from TypeConstants.h
+B_HOST_IS_LENDIAN = 1
+B_HOST_IS_BENDIAN = 0
+def B_HOST_TO_LENDIAN_DOUBLE(arg): return (double)(arg)
+
+def B_HOST_TO_LENDIAN_FLOAT(arg): return (float)(arg)
+
+def B_HOST_TO_LENDIAN_INT64(arg): return (uint64)(arg)
+
+def B_HOST_TO_LENDIAN_INT32(arg): return (uint32)(arg)
+
+def B_HOST_TO_LENDIAN_INT16(arg): return (uint16)(arg)
+
+def B_HOST_TO_BENDIAN_DOUBLE(arg): return __swap_double(arg)
+
+def B_HOST_TO_BENDIAN_FLOAT(arg): return __swap_float(arg)
+
+def B_HOST_TO_BENDIAN_INT64(arg): return __swap_int64(arg)
+
+def B_HOST_TO_BENDIAN_INT32(arg): return __swap_int32(arg)
+
+def B_HOST_TO_BENDIAN_INT16(arg): return __swap_int16(arg)
+
+def B_LENDIAN_TO_HOST_DOUBLE(arg): return (double)(arg)
+
+def B_LENDIAN_TO_HOST_FLOAT(arg): return (float)(arg)
+
+def B_LENDIAN_TO_HOST_INT64(arg): return (uint64)(arg)
+
+def B_LENDIAN_TO_HOST_INT32(arg): return (uint32)(arg)
+
+def B_LENDIAN_TO_HOST_INT16(arg): return (uint16)(arg)
+
+def B_BENDIAN_TO_HOST_DOUBLE(arg): return __swap_double(arg)
+
+def B_BENDIAN_TO_HOST_FLOAT(arg): return __swap_float(arg)
+
+def B_BENDIAN_TO_HOST_INT64(arg): return __swap_int64(arg)
+
+def B_BENDIAN_TO_HOST_INT32(arg): return __swap_int32(arg)
+
+def B_BENDIAN_TO_HOST_INT16(arg): return __swap_int16(arg)
+
+B_HOST_IS_LENDIAN = 0
+B_HOST_IS_BENDIAN = 1
+def B_HOST_TO_LENDIAN_DOUBLE(arg): return __swap_double(arg)
+
+def B_HOST_TO_LENDIAN_FLOAT(arg): return __swap_float(arg)
+
+def B_HOST_TO_LENDIAN_INT64(arg): return __swap_int64(arg)
+
+def B_HOST_TO_LENDIAN_INT32(arg): return __swap_int32(arg)
+
+def B_HOST_TO_LENDIAN_INT16(arg): return __swap_int16(arg)
+
+def B_HOST_TO_BENDIAN_DOUBLE(arg): return (double)(arg)
+
+def B_HOST_TO_BENDIAN_FLOAT(arg): return (float)(arg)
+
+def B_HOST_TO_BENDIAN_INT64(arg): return (uint64)(arg)
+
+def B_HOST_TO_BENDIAN_INT32(arg): return (uint32)(arg)
+
+def B_HOST_TO_BENDIAN_INT16(arg): return (uint16)(arg)
+
+def B_LENDIAN_TO_HOST_DOUBLE(arg): return __swap_double(arg)
+
+def B_LENDIAN_TO_HOST_FLOAT(arg): return __swap_float(arg)
+
+def B_LENDIAN_TO_HOST_INT64(arg): return __swap_int64(arg)
+
+def B_LENDIAN_TO_HOST_INT32(arg): return __swap_int32(arg)
+
+def B_LENDIAN_TO_HOST_INT16(arg): return __swap_int16(arg)
+
+def B_BENDIAN_TO_HOST_DOUBLE(arg): return (double)(arg)
+
+def B_BENDIAN_TO_HOST_FLOAT(arg): return (float)(arg)
+
+def B_BENDIAN_TO_HOST_INT64(arg): return (uint64)(arg)
+
+def B_BENDIAN_TO_HOST_INT32(arg): return (uint32)(arg)
+
+def B_BENDIAN_TO_HOST_INT16(arg): return (uint16)(arg)
+
+def B_SWAP_DOUBLE(arg): return __swap_double(arg)
+
+def B_SWAP_FLOAT(arg): return __swap_float(arg)
+
+def B_SWAP_INT64(arg): return __swap_int64(arg)
+
+def B_SWAP_INT32(arg): return __swap_int32(arg)
+
+def B_SWAP_INT16(arg): return __swap_int16(arg)
+
+def htonl(x): return B_HOST_TO_BENDIAN_INT32(x)
+
+def ntohl(x): return B_BENDIAN_TO_HOST_INT32(x)
+
+def htons(x): return B_HOST_TO_BENDIAN_INT16(x)
+
+def ntohs(x): return B_BENDIAN_TO_HOST_INT16(x)
+
+AF_INET = 1
+INADDR_ANY = 0x00000000
+INADDR_BROADCAST = 0xffffffff
+INADDR_LOOPBACK = 0x7f000001
+SOL_SOCKET = 1
+SO_DEBUG = 1
+SO_REUSEADDR = 2
+SO_NONBLOCK = 3
+SO_REUSEPORT = 4
+MSG_OOB = 0x1
+SOCK_DGRAM = 1
+SOCK_STREAM = 2
+IPPROTO_UDP = 1
+IPPROTO_TCP = 2
+IPPROTO_ICMP = 3
+B_UDP_MAX_SIZE = (65536 - 1024)
+FD_SETSIZE = 256
+FDSETSIZE = FD_SETSIZE
+NFDBITS = 32
+def _FDMSKNO(fd): return ((fd) / NFDBITS)
+
+def _FDBITNO(fd): return ((fd) % NFDBITS)
\ No newline at end of file
diff --git a/lib-python/2.2/plat-beos5/regen b/lib-python/2.2/plat-beos5/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-beos5/regen
@@ -0,0 +1,7 @@
+#! /bin/sh
+
+H2PY=../../Tools/scripts/h2py.py
+HEADERS=/boot/develop/headers
+
+set -v
+python $H2PY -i '(u_long)' $HEADERS/be/net/netinet/in.h
diff --git a/lib-python/2.2/plat-darwin/IN.py b/lib-python/2.2/plat-darwin/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-darwin/IN.py
@@ -0,0 +1,357 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from sys/appleapiopts.h
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPV4 = 4
+IPPROTO_IPIP = IPPROTO_IPV4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_IPV6 = 41
+IPPROTO_SDRP = 42
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_IPCOMP = 108
+IPPROTO_PIM = 103
+IPPROTO_PGM = 113
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPROTO_DONE = 257
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 49152
+IPPORT_HILASTAUTO = 65535
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_NONE = 0xffffffff
+def IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)
+
+IN_LOOPBACKNET = 127
+INET_ADDRSTRLEN = 16
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_IPSEC_POLICY = 21
+IP_FAITH = 22
+IP_STRIPHDR = 23
+IP_FW_ADD = 40
+IP_FW_DEL = 41
+IP_FW_FLUSH = 42
+IP_FW_ZERO = 43
+IP_FW_GET = 44
+IP_FW_RESETLOG = 45
+IP_OLD_FW_ADD = 50
+IP_OLD_FW_DEL = 51
+IP_OLD_FW_FLUSH = 52
+IP_OLD_FW_ZERO = 53
+IP_OLD_FW_GET = 54
+IP_NAT__XXX = 55
+IP_OLD_FW_RESETLOG = 56
+IP_DUMMYNET_CONFIGURE = 60
+IP_DUMMYNET_DEL = 61
+IP_DUMMYNET_FLUSH = 62
+IP_DUMMYNET_GET = 64
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_AH + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_STATS = 12
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_FASTFORWARDING = 14
+IPCTL_KEEPFAITH = 15
+IPCTL_GIF_TTL = 16
+IPCTL_MAXID = 17
+
+# Included from netinet6/in6.h
+__KAME_VERSION = "20010528/apple-darwin"
+IPV6PORT_RESERVED = 1024
+IPV6PORT_ANONMIN = 49152
+IPV6PORT_ANONMAX = 65535
+IPV6PORT_RESERVEDMIN = 600
+IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
+INET6_ADDRSTRLEN = 46
+IPV6_ADDR_INT32_ONE = 1
+IPV6_ADDR_INT32_TWO = 2
+IPV6_ADDR_INT32_MNL = 0xff010000
+IPV6_ADDR_INT32_MLL = 0xff020000
+IPV6_ADDR_INT32_SMP = 0x0000ffff
+IPV6_ADDR_INT16_ULL = 0xfe80
+IPV6_ADDR_INT16_USL = 0xfec0
+IPV6_ADDR_INT16_MLL = 0xff02
+IPV6_ADDR_INT32_ONE = 0x01000000
+IPV6_ADDR_INT32_TWO = 0x02000000
+IPV6_ADDR_INT32_MNL = 0x000001ff
+IPV6_ADDR_INT32_MLL = 0x000002ff
+IPV6_ADDR_INT32_SMP = 0xffff0000
+IPV6_ADDR_INT16_ULL = 0x80fe
+IPV6_ADDR_INT16_USL = 0xc0fe
+IPV6_ADDR_INT16_MLL = 0x02ff
+def IN6_IS_ADDR_UNSPECIFIED(a): return \
+
+def IN6_IS_ADDR_LOOPBACK(a): return \
+
+def IN6_IS_ADDR_V4COMPAT(a): return \
+
+def IN6_IS_ADDR_V4MAPPED(a): return \
+
+IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+def IN6_IS_ADDR_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_SCOPE_LINKLOCAL(a): return \
+
+def IFA6_IS_DEPRECATED(a): return \
+
+def IFA6_IS_INVALID(a): return \
+
+IPV6_OPTIONS = 1
+IPV6_RECVOPTS = 5
+IPV6_RECVRETOPTS = 6
+IPV6_RECVDSTADDR = 7
+IPV6_RETOPTS = 8
+IPV6_SOCKOPT_RESERVED1 = 3
+IPV6_UNICAST_HOPS = 4
+IPV6_MULTICAST_IF = 9
+IPV6_MULTICAST_HOPS = 10
+IPV6_MULTICAST_LOOP = 11
+IPV6_JOIN_GROUP = 12
+IPV6_LEAVE_GROUP = 13
+IPV6_PORTRANGE = 14
+ICMP6_FILTER = 18
+IPV6_PKTINFO = 19
+IPV6_HOPLIMIT = 20
+IPV6_NEXTHOP = 21
+IPV6_HOPOPTS = 22
+IPV6_DSTOPTS = 23
+IPV6_RTHDR = 24
+IPV6_PKTOPTIONS = 25
+IPV6_CHECKSUM = 26
+IPV6_V6ONLY = 27
+IPV6_BINDV6ONLY = IPV6_V6ONLY
+IPV6_IPSEC_POLICY = 28
+IPV6_FAITH = 29
+IPV6_FW_ADD = 30
+IPV6_FW_DEL = 31
+IPV6_FW_FLUSH = 32
+IPV6_FW_ZERO = 33
+IPV6_FW_GET = 34
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
+IPV6_DEFAULT_MULTICAST_HOPS = 1
+IPV6_DEFAULT_MULTICAST_LOOP = 1
+IPV6_PORTRANGE_DEFAULT = 0
+IPV6_PORTRANGE_HIGH = 1
+IPV6_PORTRANGE_LOW = 2
+IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
+IPV6CTL_FORWARDING = 1
+IPV6CTL_SENDREDIRECTS = 2
+IPV6CTL_DEFHLIM = 3
+IPV6CTL_DEFMTU = 4
+IPV6CTL_FORWSRCRT = 5
+IPV6CTL_STATS = 6
+IPV6CTL_MRTSTATS = 7
+IPV6CTL_MRTPROTO = 8
+IPV6CTL_MAXFRAGPACKETS = 9
+IPV6CTL_SOURCECHECK = 10
+IPV6CTL_SOURCECHECK_LOGINT = 11
+IPV6CTL_ACCEPT_RTADV = 12
+IPV6CTL_KEEPFAITH = 13
+IPV6CTL_LOG_INTERVAL = 14
+IPV6CTL_HDRNESTLIMIT = 15
+IPV6CTL_DAD_COUNT = 16
+IPV6CTL_AUTO_FLOWLABEL = 17
+IPV6CTL_DEFMCASTHLIM = 18
+IPV6CTL_GIF_HLIM = 19
+IPV6CTL_KAME_VERSION = 20
+IPV6CTL_USE_DEPRECATED = 21
+IPV6CTL_RR_PRUNE = 22
+IPV6CTL_MAPPED_ADDR = 23
+IPV6CTL_V6ONLY = 24
+IPV6CTL_RTEXPIRE = 25
+IPV6CTL_RTMINEXPIRE = 26
+IPV6CTL_RTMAXCACHE = 27
+IPV6CTL_USETEMPADDR = 32
+IPV6CTL_TEMPPLTIME = 33
+IPV6CTL_TEMPVLTIME = 34
+IPV6CTL_AUTO_LINKLOCAL = 35
+IPV6CTL_RIP6STATS = 36
+IPV6CTL_MAXID = 37
diff --git a/lib-python/2.2/plat-darwin/regen b/lib-python/2.2/plat-darwin/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-darwin/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-freebsd2/IN.py b/lib-python/2.2/plat-freebsd2/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd2/IN.py
@@ -0,0 +1,187 @@
+# Generated by h2py from /usr/include/netinet/in.h
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPIP = 4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_SIP = 41
+IPPROTO_SDRP = 42
+IPPROTO_SIPSR = 43
+IPPROTO_SIPFRAG = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 40000
+IPPORT_HILASTAUTO = 44999
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_BROADCAST = 0xffffffff
+INADDR_NONE = 0xffffffff
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_ALLRTRS_GROUP = 0xe0000002
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_FW_ADD = 50
+IP_FW_DEL = 51
+IP_FW_FLUSH = 52
+IP_FW_ZERO = 53
+IP_FW_GET = 54
+IP_NAT = 55
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_IDP + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_MAXID = 13
+IP_NAT_IN = 0x00000001
+IP_NAT_OUT = 0x00000002
diff --git a/lib-python/2.2/plat-freebsd2/regen b/lib-python/2.2/plat-freebsd2/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd2/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+h2py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-freebsd3/IN.py b/lib-python/2.2/plat-freebsd3/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd3/IN.py
@@ -0,0 +1,189 @@
+# Generated by h2py from /usr/include/netinet/in.h
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPIP = 4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_SIP = 41
+IPPROTO_SDRP = 42
+IPPROTO_SIPSR = 43
+IPPROTO_SIPFRAG = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 49152
+IPPORT_HILASTAUTO = 65535
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_LOOPBACK = 0x7f000001
+INADDR_BROADCAST = 0xffffffff
+INADDR_NONE = 0xffffffff
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_ALLRTRS_GROUP = 0xe0000002
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_FW_ADD = 50
+IP_FW_DEL = 51
+IP_FW_FLUSH = 52
+IP_FW_ZERO = 53
+IP_FW_GET = 54
+IP_NAT = 55
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_IDP + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_STATS = 12
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_MAXID = 14
+IP_NAT_IN = 0x00000001
+IP_NAT_OUT = 0x00000002
diff --git a/lib-python/2.2/plat-freebsd3/regen b/lib-python/2.2/plat-freebsd3/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd3/regen
@@ -0,0 +1,4 @@
+#! /bin/sh
+set -v
+h2py -i '(u_long)' /usr/include/netinet/in.h
+
diff --git a/lib-python/2.2/plat-freebsd4/IN.py b/lib-python/2.2/plat-freebsd4/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd4/IN.py
@@ -0,0 +1,355 @@
+# Generated by h2py from /usr/include/netinet/in.h
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPV4 = 4
+IPPROTO_IPIP = IPPROTO_IPV4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_IPV6 = 41
+IPPROTO_SDRP = 42
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_IPCOMP = 108
+IPPROTO_PIM = 103
+IPPROTO_PGM = 113
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPROTO_DONE = 257
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 49152
+IPPORT_HILASTAUTO = 65535
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+INET_ADDRSTRLEN = 16
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_IPSEC_POLICY = 21
+IP_FAITH = 22
+IP_FW_ADD = 50
+IP_FW_DEL = 51
+IP_FW_FLUSH = 52
+IP_FW_ZERO = 53
+IP_FW_GET = 54
+IP_FW_RESETLOG = 55
+IP_DUMMYNET_CONFIGURE = 60
+IP_DUMMYNET_DEL = 61
+IP_DUMMYNET_FLUSH = 62
+IP_DUMMYNET_GET = 64
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_AH + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_STATS = 12
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_FASTFORWARDING = 14
+IPCTL_KEEPFAITH = 15
+IPCTL_GIF_TTL = 16
+IPCTL_MAXID = 17
+
+# Included from netinet6/in6.h
+
+# Included from sys/queue.h
+def SLIST_HEAD_INITIALIZER(head): return \
+
+def SLIST_ENTRY(type): return \
+
+def STAILQ_HEAD_INITIALIZER(head): return \
+
+def STAILQ_ENTRY(type): return \
+
+def LIST_HEAD_INITIALIZER(head): return \
+
+def LIST_ENTRY(type): return \
+
+def TAILQ_HEAD_INITIALIZER(head): return \
+
+def TAILQ_ENTRY(type): return \
+
+def CIRCLEQ_ENTRY(type): return \
+
+__KAME_VERSION = "20000701/FreeBSD-current"
+IPV6PORT_RESERVED = 1024
+IPV6PORT_ANONMIN = 49152
+IPV6PORT_ANONMAX = 65535
+IPV6PORT_RESERVEDMIN = 600
+IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
+INET6_ADDRSTRLEN = 46
+IPV6_ADDR_INT32_ONE = 1
+IPV6_ADDR_INT32_TWO = 2
+IPV6_ADDR_INT32_MNL = 0xff010000
+IPV6_ADDR_INT32_MLL = 0xff020000
+IPV6_ADDR_INT32_SMP = 0x0000ffff
+IPV6_ADDR_INT16_ULL = 0xfe80
+IPV6_ADDR_INT16_USL = 0xfec0
+IPV6_ADDR_INT16_MLL = 0xff02
+IPV6_ADDR_INT32_ONE = 0x01000000
+IPV6_ADDR_INT32_TWO = 0x02000000
+IPV6_ADDR_INT32_MNL = 0x000001ff
+IPV6_ADDR_INT32_MLL = 0x000002ff
+IPV6_ADDR_INT32_SMP = 0xffff0000
+IPV6_ADDR_INT16_ULL = 0x80fe
+IPV6_ADDR_INT16_USL = 0xc0fe
+IPV6_ADDR_INT16_MLL = 0x02ff
+def IN6_IS_ADDR_UNSPECIFIED(a): return \
+
+def IN6_IS_ADDR_LOOPBACK(a): return \
+
+def IN6_IS_ADDR_V4COMPAT(a): return \
+
+def IN6_IS_ADDR_V4MAPPED(a): return \
+
+IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+def IN6_IS_ADDR_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_SCOPE_LINKLOCAL(a): return \
+
+IPV6_OPTIONS = 1
+IPV6_RECVOPTS = 5
+IPV6_RECVRETOPTS = 6
+IPV6_RECVDSTADDR = 7
+IPV6_RETOPTS = 8
+IPV6_SOCKOPT_RESERVED1 = 3
+IPV6_UNICAST_HOPS = 4
+IPV6_MULTICAST_IF = 9
+IPV6_MULTICAST_HOPS = 10
+IPV6_MULTICAST_LOOP = 11
+IPV6_JOIN_GROUP = 12
+IPV6_LEAVE_GROUP = 13
+IPV6_PORTRANGE = 14
+ICMP6_FILTER = 18
+IPV6_PKTINFO = 19
+IPV6_HOPLIMIT = 20
+IPV6_NEXTHOP = 21
+IPV6_HOPOPTS = 22
+IPV6_DSTOPTS = 23
+IPV6_RTHDR = 24
+IPV6_PKTOPTIONS = 25
+IPV6_CHECKSUM = 26
+IPV6_BINDV6ONLY = 27
+IPV6_IPSEC_POLICY = 28
+IPV6_FAITH = 29
+IPV6_FW_ADD = 30
+IPV6_FW_DEL = 31
+IPV6_FW_FLUSH = 32
+IPV6_FW_ZERO = 33
+IPV6_FW_GET = 34
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
+IPV6_DEFAULT_MULTICAST_HOPS = 1
+IPV6_DEFAULT_MULTICAST_LOOP = 1
+IPV6_PORTRANGE_DEFAULT = 0
+IPV6_PORTRANGE_HIGH = 1
+IPV6_PORTRANGE_LOW = 2
+IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
+IPV6CTL_FORWARDING = 1
+IPV6CTL_SENDREDIRECTS = 2
+IPV6CTL_DEFHLIM = 3
+IPV6CTL_DEFMTU = 4
+IPV6CTL_FORWSRCRT = 5
+IPV6CTL_STATS = 6
+IPV6CTL_MRTSTATS = 7
+IPV6CTL_MRTPROTO = 8
+IPV6CTL_MAXFRAGPACKETS = 9
+IPV6CTL_SOURCECHECK = 10
+IPV6CTL_SOURCECHECK_LOGINT = 11
+IPV6CTL_ACCEPT_RTADV = 12
+IPV6CTL_KEEPFAITH = 13
+IPV6CTL_LOG_INTERVAL = 14
+IPV6CTL_HDRNESTLIMIT = 15
+IPV6CTL_DAD_COUNT = 16
+IPV6CTL_AUTO_FLOWLABEL = 17
+IPV6CTL_DEFMCASTHLIM = 18
+IPV6CTL_GIF_HLIM = 19
+IPV6CTL_KAME_VERSION = 20
+IPV6CTL_USE_DEPRECATED = 21
+IPV6CTL_RR_PRUNE = 22
+IPV6CTL_MAPPED_ADDR = 23
+IPV6CTL_BINDV6ONLY = 24
+IPV6CTL_RTEXPIRE = 25
+IPV6CTL_RTMINEXPIRE = 26
+IPV6CTL_RTMAXCACHE = 27
+IPV6CTL_MAXID = 28
diff --git a/lib-python/2.2/plat-freebsd4/regen b/lib-python/2.2/plat-freebsd4/regen
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd4/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-freebsd5/IN.py b/lib-python/2.2/plat-freebsd5/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd5/IN.py
@@ -0,0 +1,355 @@
+# Generated by h2py from /usr/include/netinet/in.h
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPV4 = 4
+IPPROTO_IPIP = IPPROTO_IPV4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_IPV6 = 41
+IPPROTO_SDRP = 42
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_IPCOMP = 108
+IPPROTO_PIM = 103
+IPPROTO_PGM = 113
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPROTO_DONE = 257
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 49152
+IPPORT_HILASTAUTO = 65535
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+INET_ADDRSTRLEN = 16
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_IPSEC_POLICY = 21
+IP_FAITH = 22
+IP_FW_ADD = 50
+IP_FW_DEL = 51
+IP_FW_FLUSH = 52
+IP_FW_ZERO = 53
+IP_FW_GET = 54
+IP_FW_RESETLOG = 55
+IP_DUMMYNET_CONFIGURE = 60
+IP_DUMMYNET_DEL = 61
+IP_DUMMYNET_FLUSH = 62
+IP_DUMMYNET_GET = 64
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_AH + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_STATS = 12
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_FASTFORWARDING = 14
+IPCTL_KEEPFAITH = 15
+IPCTL_GIF_TTL = 16
+IPCTL_MAXID = 17
+
+# Included from netinet6/in6.h
+
+# Included from sys/queue.h
+def SLIST_HEAD_INITIALIZER(head): return \
+
+def SLIST_ENTRY(type): return \
+
+def STAILQ_HEAD_INITIALIZER(head): return \
+
+def STAILQ_ENTRY(type): return \
+
+def LIST_HEAD_INITIALIZER(head): return \
+
+def LIST_ENTRY(type): return \
+
+def TAILQ_HEAD_INITIALIZER(head): return \
+
+def TAILQ_ENTRY(type): return \
+
+def CIRCLEQ_ENTRY(type): return \
+
+__KAME_VERSION = "20000701/FreeBSD-current"
+IPV6PORT_RESERVED = 1024
+IPV6PORT_ANONMIN = 49152
+IPV6PORT_ANONMAX = 65535
+IPV6PORT_RESERVEDMIN = 600
+IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
+INET6_ADDRSTRLEN = 46
+IPV6_ADDR_INT32_ONE = 1
+IPV6_ADDR_INT32_TWO = 2
+IPV6_ADDR_INT32_MNL = 0xff010000
+IPV6_ADDR_INT32_MLL = 0xff020000
+IPV6_ADDR_INT32_SMP = 0x0000ffff
+IPV6_ADDR_INT16_ULL = 0xfe80
+IPV6_ADDR_INT16_USL = 0xfec0
+IPV6_ADDR_INT16_MLL = 0xff02
+IPV6_ADDR_INT32_ONE = 0x01000000
+IPV6_ADDR_INT32_TWO = 0x02000000
+IPV6_ADDR_INT32_MNL = 0x000001ff
+IPV6_ADDR_INT32_MLL = 0x000002ff
+IPV6_ADDR_INT32_SMP = 0xffff0000
+IPV6_ADDR_INT16_ULL = 0x80fe
+IPV6_ADDR_INT16_USL = 0xc0fe
+IPV6_ADDR_INT16_MLL = 0x02ff
+def IN6_IS_ADDR_UNSPECIFIED(a): return \
+
+def IN6_IS_ADDR_LOOPBACK(a): return \
+
+def IN6_IS_ADDR_V4COMPAT(a): return \
+
+def IN6_IS_ADDR_V4MAPPED(a): return \
+
+IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+def IN6_IS_ADDR_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+def IN6_IS_SCOPE_LINKLOCAL(a): return \
+
+IPV6_OPTIONS = 1
+IPV6_RECVOPTS = 5
+IPV6_RECVRETOPTS = 6
+IPV6_RECVDSTADDR = 7
+IPV6_RETOPTS = 8
+IPV6_SOCKOPT_RESERVED1 = 3
+IPV6_UNICAST_HOPS = 4
+IPV6_MULTICAST_IF = 9
+IPV6_MULTICAST_HOPS = 10
+IPV6_MULTICAST_LOOP = 11
+IPV6_JOIN_GROUP = 12
+IPV6_LEAVE_GROUP = 13
+IPV6_PORTRANGE = 14
+ICMP6_FILTER = 18
+IPV6_PKTINFO = 19
+IPV6_HOPLIMIT = 20
+IPV6_NEXTHOP = 21
+IPV6_HOPOPTS = 22
+IPV6_DSTOPTS = 23
+IPV6_RTHDR = 24
+IPV6_PKTOPTIONS = 25
+IPV6_CHECKSUM = 26
+IPV6_BINDV6ONLY = 27
+IPV6_IPSEC_POLICY = 28
+IPV6_FAITH = 29
+IPV6_FW_ADD = 30
+IPV6_FW_DEL = 31
+IPV6_FW_FLUSH = 32
+IPV6_FW_ZERO = 33
+IPV6_FW_GET = 34
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
+IPV6_DEFAULT_MULTICAST_HOPS = 1
+IPV6_DEFAULT_MULTICAST_LOOP = 1
+IPV6_PORTRANGE_DEFAULT = 0
+IPV6_PORTRANGE_HIGH = 1
+IPV6_PORTRANGE_LOW = 2
+IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
+IPV6CTL_FORWARDING = 1
+IPV6CTL_SENDREDIRECTS = 2
+IPV6CTL_DEFHLIM = 3
+IPV6CTL_DEFMTU = 4
+IPV6CTL_FORWSRCRT = 5
+IPV6CTL_STATS = 6
+IPV6CTL_MRTSTATS = 7
+IPV6CTL_MRTPROTO = 8
+IPV6CTL_MAXFRAGPACKETS = 9
+IPV6CTL_SOURCECHECK = 10
+IPV6CTL_SOURCECHECK_LOGINT = 11
+IPV6CTL_ACCEPT_RTADV = 12
+IPV6CTL_KEEPFAITH = 13
+IPV6CTL_LOG_INTERVAL = 14
+IPV6CTL_HDRNESTLIMIT = 15
+IPV6CTL_DAD_COUNT = 16
+IPV6CTL_AUTO_FLOWLABEL = 17
+IPV6CTL_DEFMCASTHLIM = 18
+IPV6CTL_GIF_HLIM = 19
+IPV6CTL_KAME_VERSION = 20
+IPV6CTL_USE_DEPRECATED = 21
+IPV6CTL_RR_PRUNE = 22
+IPV6CTL_MAPPED_ADDR = 23
+IPV6CTL_BINDV6ONLY = 24
+IPV6CTL_RTEXPIRE = 25
+IPV6CTL_RTMINEXPIRE = 26
+IPV6CTL_RTMAXCACHE = 27
+IPV6CTL_MAXID = 28
diff --git a/lib-python/2.2/plat-freebsd5/regen b/lib-python/2.2/plat-freebsd5/regen
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-freebsd5/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-generic/regen b/lib-python/2.2/plat-generic/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-generic/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-irix5/AL.py b/lib-python/2.2/plat-irix5/AL.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/AL.py
@@ -0,0 +1,61 @@
+RATE_48000 	= 48000
+RATE_44100	= 44100
+RATE_32000	= 32000
+RATE_22050	= 22050
+RATE_16000	= 16000
+RATE_11025	= 11025
+RATE_8000	= 8000
+
+SAMPFMT_TWOSCOMP= 1
+SAMPFMT_FLOAT	= 32
+SAMPFMT_DOUBLE	= 64
+
+SAMPLE_8	= 1
+SAMPLE_16	= 2
+	# SAMPLE_24 is the low 24 bits of a long, sign extended to 32 bits
+SAMPLE_24	= 4
+
+MONO		= 1
+STEREO		= 2
+QUADRO		= 4			# 4CHANNEL is not a legal Python name
+
+INPUT_LINE	= 0
+INPUT_MIC	= 1
+INPUT_DIGITAL	= 2
+
+MONITOR_OFF	= 0
+MONITOR_ON	= 1
+
+ERROR_NUMBER		= 0
+ERROR_TYPE		= 1
+ERROR_LOCATION_LSP 	= 2
+ERROR_LOCATION_MSP	= 3
+ERROR_LENGTH		= 4
+
+ERROR_INPUT_UNDERFLOW	= 0
+ERROR_OUTPUT_OVERFLOW	= 1
+
+# These seem to be not supported anymore:
+##HOLD, RELEASE			= 0, 1
+##ATTAIL, ATHEAD, ATMARK, ATTIME	= 0, 1, 2, 3
+
+DEFAULT_DEVICE	= 1
+
+INPUT_SOURCE		= 0
+LEFT_INPUT_ATTEN	= 1
+RIGHT_INPUT_ATTEN	= 2
+INPUT_RATE		= 3
+OUTPUT_RATE		= 4
+LEFT_SPEAKER_GAIN	= 5
+RIGHT_SPEAKER_GAIN	= 6
+INPUT_COUNT		= 7
+OUTPUT_COUNT		= 8
+UNUSED_COUNT		= 9
+SYNC_INPUT_TO_AES	= 10
+SYNC_OUTPUT_TO_AES	= 11
+MONITOR_CTL		= 12
+LEFT_MONITOR_ATTEN	= 13
+RIGHT_MONITOR_ATTEN	= 14
+
+ENUM_VALUE	= 0	# only certain values are valid
+RANGE_VALUE	= 1	# any value in range is valid
diff --git a/lib-python/2.2/plat-irix5/CD.py b/lib-python/2.2/plat-irix5/CD.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/CD.py
@@ -0,0 +1,34 @@
+ERROR		= 0
+NODISC		= 1
+READY		= 2
+PLAYING		= 3
+PAUSED		= 4
+STILL		= 5
+
+AUDIO		= 0
+PNUM		= 1
+INDEX		= 2
+PTIME		= 3
+ATIME		= 4
+CATALOG		= 5
+IDENT		= 6
+CONTROL		= 7
+
+CDDA_DATASIZE	= 2352
+
+##CDDA_SUBCODESIZE	= (sizeof(struct subcodeQ))
+##CDDA_BLOCKSIZE	= (sizeof(struct cdframe))
+##CDDA_NUMSAMPLES	= (CDDA_DATASIZE/2)
+##
+##CDQ_PREEMP_MASK	= 0xd
+##CDQ_COPY_MASK	= 0xb
+##CDQ_DDATA_MASK	= 0xd
+##CDQ_BROADCAST_MASK	= 0x8
+##CDQ_PREEMPHASIS	= 0x1
+##CDQ_COPY_PERMITTED	= 0x2		
+##CDQ_DIGITAL_DATA	= 0x4
+##CDQ_BROADCAST_USE	= 0x8
+##
+##CDQ_MODE1	= 0x1
+##CDQ_MODE2	= 0x2
+##CDQ_MODE3	= 0x3
diff --git a/lib-python/2.2/plat-irix5/CL.py b/lib-python/2.2/plat-irix5/CL.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/CL.py
@@ -0,0 +1,24 @@
+# Backward compatible module CL.
+# All relevant symbols are now defined in the module cl.
+try:
+	from cl import *
+except ImportError:
+	from CL_old import *
+else:
+	del CompressImage
+	del DecompressImage
+	del GetAlgorithmName
+	del OpenCompressor
+	del OpenDecompressor
+	del QueryAlgorithms
+	del QueryMaxHeaderSize
+	del QueryScheme
+	del QuerySchemeFromName
+	del SetDefault
+	del SetMax
+	del SetMin
+	try:
+		del cvt_type
+	except NameError:
+		pass
+	del error
diff --git a/lib-python/2.2/plat-irix5/CL_old.py b/lib-python/2.2/plat-irix5/CL_old.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/CL_old.py
@@ -0,0 +1,236 @@
+#
+# cl.h - Compression Library typedefs and prototypes
+#
+#   01/07/92	Cleanup by Brian Knittel
+#   02/18/92	Original Version by Brian Knittel
+#
+
+#
+# originalFormat parameter values
+#
+MAX_NUMBER_OF_ORIGINAL_FORMATS = 32
+
+# Audio
+MONO = 0
+STEREO_INTERLEAVED = 1
+
+# Video 
+# YUV is defined to be the same thing as YCrCb (luma and two chroma components).
+# 422 is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2 
+#	horizontally, packed as U Y1 V Y2 (byte order).
+# 422HC is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2 
+#	vertically in addition to horizontally, and is packed the same as 
+#	422 except that U & V are not valid on the second line.
+#
+RGB = 0
+RGBX = 1
+RGBA = 2
+RGB332 = 3
+
+GRAYSCALE = 4
+Y = 4
+YUV = 5	
+YCbCr = 5	
+YUV422 = 6				# 4:2:2 sampling
+YCbCr422 = 6				# 4:2:2 sampling
+YUV422HC = 7				# 4:1:1 sampling
+YCbCr422HC = 7				# 4:1:1 sampling
+YUV422DC = 7				# 4:1:1 sampling
+YCbCr422DC = 7				# 4:1:1 sampling
+
+BEST_FIT = -1	
+
+def BytesPerSample(s):
+	if s in (MONO, YUV):
+		return 2
+	elif s == STEREO_INTERLEAVED:
+		return 4
+	else:
+		return 0
+
+def BytesPerPixel(f):
+	if f in (RGB, YUV):
+		return 3
+	elif f in (RGBX, RGBA):
+		return 4
+	elif f in (RGB332, GRAYSCALE):
+		return 1
+	else:
+		return 2
+
+def AudioFormatName(f):
+	if f == MONO:
+		return 'MONO'
+	elif f == STEREO_INTERLEAVED:
+		return 'STEREO_INTERLEAVED'
+	else:
+		return 'Not a valid format'
+
+def VideoFormatName(f):
+	if f == RGB:
+		return 'RGB'
+	elif f == RGBX:
+		return 'RGBX'
+	elif f == RGBA:
+		return 'RGBA'
+	elif f == RGB332:
+		return 'RGB332'
+	elif f == GRAYSCALE:
+		return 'GRAYSCALE'
+	elif f == YUV:
+		return 'YUV'
+	elif f == YUV422:
+		return 'YUV422'
+	elif f == YUV422DC:
+		return 'YUV422DC'
+	else:
+		return 'Not a valid format'
+
+MAX_NUMBER_OF_AUDIO_ALGORITHMS = 32
+MAX_NUMBER_OF_VIDEO_ALGORITHMS = 32
+
+#
+# Algorithm types
+#
+AUDIO = 0
+VIDEO = 1
+
+def AlgorithmNumber(scheme):
+	return scheme & 0x7fff
+def AlgorithmType(scheme):
+	return (scheme >> 15) & 1
+def Algorithm(type, n):
+	return n | ((type & 1) << 15)
+
+#
+# "compressionScheme" argument values
+#
+UNKNOWN_SCHEME = -1
+
+UNCOMPRESSED_AUDIO = Algorithm(AUDIO, 0)
+G711_ULAW = Algorithm(AUDIO, 1)
+ULAW = Algorithm(AUDIO, 1)
+G711_ALAW = Algorithm(AUDIO, 2)
+ALAW = Algorithm(AUDIO, 2)
+AWARE_MPEG_AUDIO = Algorithm(AUDIO, 3)
+AWARE_MULTIRATE = Algorithm(AUDIO, 4)
+    
+UNCOMPRESSED = Algorithm(VIDEO, 0)
+UNCOMPRESSED_VIDEO = Algorithm(VIDEO, 0)
+RLE = Algorithm(VIDEO, 1)
+JPEG = Algorithm(VIDEO, 2)
+MPEG_VIDEO = Algorithm(VIDEO, 3)
+MVC1 = Algorithm(VIDEO, 4)
+RTR = Algorithm(VIDEO, 5)
+RTR1 = Algorithm(VIDEO, 5)
+
+#
+# Parameters
+#
+MAX_NUMBER_OF_PARAMS = 256
+# Default Parameters
+IMAGE_WIDTH = 0
+IMAGE_HEIGHT = 1 
+ORIGINAL_FORMAT = 2
+INTERNAL_FORMAT = 3
+COMPONENTS = 4
+BITS_PER_COMPONENT = 5
+FRAME_RATE = 6
+COMPRESSION_RATIO = 7
+EXACT_COMPRESSION_RATIO = 8
+FRAME_BUFFER_SIZE = 9 
+COMPRESSED_BUFFER_SIZE = 10
+BLOCK_SIZE = 11
+PREROLL = 12
+FRAME_TYPE = 13
+ALGORITHM_ID = 14
+ALGORITHM_VERSION = 15
+ORIENTATION = 16
+NUMBER_OF_FRAMES = 17
+SPEED = 18
+LAST_FRAME_INDEX = 19
+NUMBER_OF_PARAMS = 20
+
+# JPEG Specific Parameters
+QUALITY_FACTOR = NUMBER_OF_PARAMS + 0
+
+# MPEG Specific Parameters
+END_OF_SEQUENCE = NUMBER_OF_PARAMS + 0
+
+# RTR Specific Parameters
+QUALITY_LEVEL = NUMBER_OF_PARAMS + 0
+ZOOM_X = NUMBER_OF_PARAMS + 1
+ZOOM_Y = NUMBER_OF_PARAMS + 2
+
+#
+# Parameter value types
+#
+ENUM_VALUE = 0				# only certain constant values are valid
+RANGE_VALUE = 1				# any value in a given range is valid
+FLOATING_ENUM_VALUE = 2			# only certain constant floating point values are valid
+FLOATING_RANGE_VALUE = 3		# any value in a given floating point range is valid
+
+#
+# Algorithm Functionality
+#
+DECOMPRESSOR = 1
+COMPRESSOR = 2
+CODEC = 3
+
+#
+# Buffer types
+#
+NONE = 0
+FRAME = 1
+DATA = 2
+
+#
+# Frame types
+#
+NONE = 0
+KEYFRAME = 1
+INTRA = 1
+PREDICTED = 2
+BIDIRECTIONAL = 3
+
+#
+# Orientations
+#
+TOP_DOWN = 0
+BOTTOM_UP = 1
+
+#
+# SGI Proprietary Algorithm Header Start Code
+#
+HEADER_START_CODE = 0xc1C0DEC
+
+#
+# error codes
+#
+
+BAD_NO_BUFFERSPACE =  -2		# no space for internal buffers
+BAD_PVBUFFER =  -3			# param/val buffer doesn't make sense
+BAD_BUFFERLENGTH_NEG =  -4		# negative buffer length
+BAD_BUFFERLENGTH_ODD =  -5		# odd length parameter/value buffer
+BAD_PARAM =  -6				# invalid parameter
+BAD_COMPRESSION_SCHEME =  -7		# compression scheme parameter invalid
+BAD_COMPRESSOR_HANDLE =  -8		# compression handle parameter invalid
+BAD_COMPRESSOR_HANDLE_POINTER = -9	# compression handle pointer invalid
+BAD_BUFFER_HANDLE = -10			# buffer handle invalid
+BAD_BUFFER_QUERY_SIZE = -11		# buffer query size too large
+JPEG_ERROR = -12			# error from libjpeg
+BAD_FRAME_SIZE = -13			# frame size invalid
+PARAM_OUT_OF_RANGE = -14		# parameter out of range
+ADDED_ALGORITHM_ERROR = -15		# added algorithm had a unique error
+BAD_ALGORITHM_TYPE = -16		# bad algorithm type
+BAD_ALGORITHM_NAME = -17		# bad algorithm name
+BAD_BUFFERING = -18			# bad buffering calls
+BUFFER_NOT_CREATED = -19		# buffer not created
+BAD_BUFFER_EXISTS = -20			# buffer already created
+BAD_INTERNAL_FORMAT = -21		# invalid internal format
+BAD_BUFFER_POINTER = -22		# invalid buffer pointer
+FRAME_BUFFER_SIZE_ZERO = -23		# frame buffer has zero size
+BAD_STREAM_HEADER = -24			# invalid stream header
+
+BAD_LICENSE = -25			# netls license not valid
+AWARE_ERROR = -26			# error from libawcmp
diff --git a/lib-python/2.2/plat-irix5/DEVICE.py b/lib-python/2.2/plat-irix5/DEVICE.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/DEVICE.py
@@ -0,0 +1,400 @@
+NULLDEV = 0
+BUTOFFSET = 1
+VALOFFSET = 256
+PSEUDOFFSET = 512
+BUT2OFFSET = 3840
+TIMOFFSET = 515
+XKBDOFFSET = 143
+BUTCOUNT = 255
+VALCOUNT = 256
+TIMCOUNT = 4
+XKBDCOUNT = 28
+USERBUTOFFSET = 4096
+USERVALOFFSET = 12288
+USERPSEUDOFFSET = 16384
+BUT0 = 1
+BUT1 = 2
+BUT2 = 3
+BUT3 = 4
+BUT4 = 5
+BUT5 = 6
+BUT6 = 7
+BUT7 = 8
+BUT8 = 9
+BUT9 = 10
+BUT10 = 11
+BUT11 = 12
+BUT12 = 13
+BUT13 = 14
+BUT14 = 15
+BUT15 = 16
+BUT16 = 17
+BUT17 = 18
+BUT18 = 19
+BUT19 = 20
+BUT20 = 21
+BUT21 = 22
+BUT22 = 23
+BUT23 = 24
+BUT24 = 25
+BUT25 = 26
+BUT26 = 27
+BUT27 = 28
+BUT28 = 29
+BUT29 = 30
+BUT30 = 31
+BUT31 = 32
+BUT32 = 33
+BUT33 = 34
+BUT34 = 35
+BUT35 = 36
+BUT36 = 37
+BUT37 = 38
+BUT38 = 39
+BUT39 = 40
+BUT40 = 41
+BUT41 = 42
+BUT42 = 43
+BUT43 = 44
+BUT44 = 45
+BUT45 = 46
+BUT46 = 47
+BUT47 = 48
+BUT48 = 49
+BUT49 = 50
+BUT50 = 51
+BUT51 = 52
+BUT52 = 53
+BUT53 = 54
+BUT54 = 55
+BUT55 = 56
+BUT56 = 57
+BUT57 = 58
+BUT58 = 59
+BUT59 = 60
+BUT60 = 61
+BUT61 = 62
+BUT62 = 63
+BUT63 = 64
+BUT64 = 65
+BUT65 = 66
+BUT66 = 67
+BUT67 = 68
+BUT68 = 69
+BUT69 = 70
+BUT70 = 71
+BUT71 = 72
+BUT72 = 73
+BUT73 = 74
+BUT74 = 75
+BUT75 = 76
+BUT76 = 77
+BUT77 = 78
+BUT78 = 79
+BUT79 = 80
+BUT80 = 81
+BUT81 = 82
+BUT82 = 83
+MAXKBDBUT = 83
+BUT100 = 101
+BUT101 = 102
+BUT102 = 103
+BUT103 = 104
+BUT104 = 105
+BUT105 = 106
+BUT106 = 107
+BUT107 = 108
+BUT108 = 109
+BUT109 = 110
+BUT110 = 111
+BUT111 = 112
+BUT112 = 113
+BUT113 = 114
+BUT114 = 115
+BUT115 = 116
+BUT116 = 117
+BUT117 = 118
+BUT118 = 119
+BUT119 = 120
+BUT120 = 121
+BUT121 = 122
+BUT122 = 123
+BUT123 = 124
+BUT124 = 125
+BUT125 = 126
+BUT126 = 127
+BUT127 = 128
+BUT128 = 129
+BUT129 = 130
+BUT130 = 131
+BUT131 = 132
+BUT132 = 133
+BUT133 = 134
+BUT134 = 135
+BUT135 = 136
+BUT136 = 137
+BUT137 = 138
+BUT138 = 139
+BUT139 = 140
+BUT140 = 141
+BUT141 = 142
+BUT142 = 143
+BUT143 = 144
+BUT144 = 145
+BUT145 = 146
+BUT146 = 147
+BUT147 = 148
+BUT148 = 149
+BUT149 = 150
+BUT150 = 151
+BUT151 = 152
+BUT152 = 153
+BUT153 = 154
+BUT154 = 155
+BUT155 = 156
+BUT156 = 157
+BUT157 = 158
+BUT158 = 159
+BUT159 = 160
+BUT160 = 161
+BUT161 = 162
+BUT162 = 163
+BUT163 = 164
+BUT164 = 165
+BUT165 = 166
+BUT166 = 167
+BUT167 = 168
+BUT168 = 169
+BUT181 = 182
+BUT182 = 183
+BUT183 = 184
+BUT184 = 185
+BUT185 = 186
+BUT186 = 187
+BUT187 = 188
+BUT188 = 189
+BUT189 = 190
+MOUSE1 = 101
+MOUSE2 = 102
+MOUSE3 = 103
+LEFTMOUSE = 103
+MIDDLEMOUSE = 102
+RIGHTMOUSE = 101
+LPENBUT = 104
+BPAD0 = 105
+BPAD1 = 106
+BPAD2 = 107
+BPAD3 = 108
+LPENVALID = 109
+SWBASE = 111
+SW0 = 111
+SW1 = 112
+SW2 = 113
+SW3 = 114
+SW4 = 115
+SW5 = 116
+SW6 = 117
+SW7 = 118
+SW8 = 119
+SW9 = 120
+SW10 = 121
+SW11 = 122
+SW12 = 123
+SW13 = 124
+SW14 = 125
+SW15 = 126
+SW16 = 127
+SW17 = 128
+SW18 = 129
+SW19 = 130
+SW20 = 131
+SW21 = 132
+SW22 = 133
+SW23 = 134
+SW24 = 135
+SW25 = 136
+SW26 = 137
+SW27 = 138
+SW28 = 139
+SW29 = 140
+SW30 = 141
+SW31 = 142
+SBBASE = 182
+SBPICK = 182
+SBBUT1 = 183
+SBBUT2 = 184
+SBBUT3 = 185
+SBBUT4 = 186
+SBBUT5 = 187
+SBBUT6 = 188
+SBBUT7 = 189
+SBBUT8 = 190
+AKEY = 11
+BKEY = 36
+CKEY = 28
+DKEY = 18
+EKEY = 17
+FKEY = 19
+GKEY = 26
+HKEY = 27
+IKEY = 40
+JKEY = 34
+KKEY = 35
+LKEY = 42
+MKEY = 44
+NKEY = 37
+OKEY = 41
+PKEY = 48
+QKEY = 10
+RKEY = 24
+SKEY = 12
+TKEY = 25
+UKEY = 33
+VKEY = 29
+WKEY = 16
+XKEY = 21
+YKEY = 32
+ZKEY = 20
+ZEROKEY = 46
+ONEKEY = 8
+TWOKEY = 14
+THREEKEY = 15
+FOURKEY = 22
+FIVEKEY = 23
+SIXKEY = 30
+SEVENKEY = 31
+EIGHTKEY = 38
+NINEKEY = 39
+BREAKKEY = 1
+SETUPKEY = 2
+CTRLKEY = 3
+LEFTCTRLKEY = CTRLKEY
+CAPSLOCKKEY = 4
+RIGHTSHIFTKEY = 5
+LEFTSHIFTKEY = 6
+NOSCRLKEY = 13
+ESCKEY = 7
+TABKEY = 9
+RETKEY = 51
+SPACEKEY = 83
+LINEFEEDKEY = 60
+BACKSPACEKEY = 61
+DELKEY = 62
+SEMICOLONKEY = 43
+PERIODKEY = 52
+COMMAKEY = 45
+QUOTEKEY = 50
+ACCENTGRAVEKEY = 55
+MINUSKEY = 47
+VIRGULEKEY = 53
+BACKSLASHKEY = 57
+EQUALKEY = 54
+LEFTBRACKETKEY = 49
+RIGHTBRACKETKEY = 56
+LEFTARROWKEY = 73
+DOWNARROWKEY = 74
+RIGHTARROWKEY = 80
+UPARROWKEY = 81
+PAD0 = 59
+PAD1 = 58
+PAD2 = 64
+PAD3 = 65
+PAD4 = 63
+PAD5 = 69
+PAD6 = 70
+PAD7 = 67
+PAD8 = 68
+PAD9 = 75
+PADPF1 = 72
+PADPF2 = 71
+PADPF3 = 79
+PADPF4 = 78
+PADPERIOD = 66
+PADMINUS = 76
+PADCOMMA = 77
+PADENTER = 82
+LEFTALTKEY = 143
+RIGHTALTKEY = 144
+RIGHTCTRLKEY = 145
+F1KEY = 146
+F2KEY = 147
+F3KEY = 148
+F4KEY = 149
+F5KEY = 150
+F6KEY = 151
+F7KEY = 152
+F8KEY = 153
+F9KEY = 154
+F10KEY = 155
+F11KEY = 156
+F12KEY = 157
+PRINTSCREENKEY = 158
+SCROLLLOCKKEY = 159
+PAUSEKEY = 160
+INSERTKEY = 161
+HOMEKEY = 162
+PAGEUPKEY = 163
+ENDKEY = 164
+PAGEDOWNKEY = 165
+NUMLOCKKEY = 166
+PADVIRGULEKEY = 167
+PADASTERKEY = 168
+PADPLUSKEY = 169
+SGIRESERVED = 256
+DIAL0 = 257
+DIAL1 = 258
+DIAL2 = 259
+DIAL3 = 260
+DIAL4 = 261
+DIAL5 = 262
+DIAL6 = 263
+DIAL7 = 264
+DIAL8 = 265
+MOUSEX = 266
+MOUSEY = 267
+LPENX = 268
+LPENY = 269
+BPADX = 270
+BPADY = 271
+CURSORX = 272
+CURSORY = 273
+GHOSTX = 274
+GHOSTY = 275
+SBTX = 276
+SBTY = 277
+SBTZ = 278
+SBRX = 279
+SBRY = 280
+SBRZ = 281
+SBPERIOD = 282
+TIMER0 = 515
+TIMER1 = 516
+TIMER2 = 517
+TIMER3 = 518
+KEYBD = 513
+RAWKEYBD = 514
+VALMARK = 523
+REDRAW = 528
+INPUTCHANGE = 534
+QFULL = 535
+QREADERROR = 538
+WINFREEZE = 539
+WINTHAW = 540
+REDRAWICONIC = 541
+WINQUIT = 542
+DEPTHCHANGE = 543
+WINSHUT = 546
+DRAWOVERLAY = 547
+VIDEO = 548
+MENUBUTTON = RIGHTMOUSE
+WINCLOSE = 537
+KEYBDFNAMES = 544
+KEYBDFSTRINGS = 545
+MAXSGIDEVICE = 20000
+GERROR = 524
+WMSEND = 529
+WMREPLY = 530
+WMGFCLOSE = 531
+WMTXCLOSE = 532
+MODECHANGE = 533
+PIECECHANGE = 536
diff --git a/lib-python/2.2/plat-irix5/ERRNO.py b/lib-python/2.2/plat-irix5/ERRNO.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/ERRNO.py
@@ -0,0 +1,147 @@
+# Generated by h2py from /usr/include/errno.h
+
+# Included from sys/errno.h
+__KBASE = 1000
+__IRIXBASE = 1000
+EPERM = 1
+ENOENT = 2
+ESRCH = 3
+EINTR = 4
+EIO = 5
+ENXIO = 6
+E2BIG = 7
+ENOEXEC = 8
+EBADF = 9
+ECHILD = 10
+EAGAIN = 11
+ENOMEM = 12
+EACCES = 13
+EFAULT = 14
+ENOTBLK = 15
+EBUSY = 16
+EEXIST = 17
+EXDEV = 18
+ENODEV = 19
+ENOTDIR = 20
+EISDIR = 21
+EINVAL = 22
+ENFILE = 23
+EMFILE = 24
+ENOTTY = 25
+ETXTBSY = 26
+EFBIG = 27
+ENOSPC = 28
+ESPIPE = 29
+EROFS = 30
+EMLINK = 31
+EPIPE = 32
+EDOM = 33
+ERANGE = 34
+ENOMSG = 35
+EIDRM = 36
+ECHRNG = 37
+EL2NSYNC = 38
+EL3HLT = 39
+EL3RST = 40
+ELNRNG = 41
+EUNATCH = 42
+ENOCSI = 43
+EL2HLT = 44
+EDEADLK = 45
+ENOLCK = 46
+EBADE = 50
+EBADR = 51
+EXFULL = 52
+ENOANO = 53
+EBADRQC = 54
+EBADSLT = 55
+EDEADLOCK = 56
+EBFONT = 57
+ENOSTR = 60
+ENODATA = 61
+ETIME = 62
+ENOSR = 63
+ENONET = 64
+ENOPKG = 65
+EREMOTE = 66
+ENOLINK = 67
+EADV = 68
+ESRMNT = 69
+ECOMM = 70
+EPROTO = 71
+EMULTIHOP = 74
+EBADMSG = 77
+ENAMETOOLONG = 78
+EOVERFLOW = 79
+ENOTUNIQ = 80
+EBADFD = 81
+EREMCHG = 82
+ELIBACC = 83
+ELIBBAD = 84
+ELIBSCN = 85
+ELIBMAX = 86
+ELIBEXEC = 87
+EILSEQ = 88
+ENOSYS = 89
+ELOOP = 90
+ERESTART = 91
+ESTRPIPE = 92
+ENOTEMPTY = 93
+EUSERS = 94
+ENOTSOCK = 95
+EDESTADDRREQ = 96
+EMSGSIZE = 97
+EPROTOTYPE = 98
+ENOPROTOOPT = 99
+EPROTONOSUPPORT = 120
+ESOCKTNOSUPPORT = 121
+EOPNOTSUPP = 122
+EPFNOSUPPORT = 123
+EAFNOSUPPORT = 124
+EADDRINUSE = 125
+EADDRNOTAVAIL = 126
+ENETDOWN = 127
+ENETUNREACH = 128
+ENETRESET = 129
+ECONNABORTED = 130
+ECONNRESET = 131
+ENOBUFS = 132
+EISCONN = 133
+ENOTCONN = 134
+ESHUTDOWN = 143
+ETOOMANYREFS = 144
+ETIMEDOUT = 145
+ECONNREFUSED = 146
+EHOSTDOWN = 147
+EHOSTUNREACH = 148
+EWOULDBLOCK = __KBASE+101
+EWOULDBLOCK = EAGAIN
+EALREADY = 149
+EINPROGRESS = 150
+ESTALE = 151
+EIORESID = 500
+EUCLEAN = 135
+ENOTNAM = 137
+ENAVAIL = 138
+EISNAM = 139
+EREMOTEIO = 140
+EINIT = 141
+EREMDEV = 142
+ECANCELED = 158
+ECANCELED = 1000
+EDQUOT = 1133
+ENFSREMOTE = 1135
+ETCP_EBASE = 100
+ETCP_ELIMIT = 129
+ENAMI_EBASE = 129
+ENAMI_ELIMIT = 131
+ENFS_EBASE = 131
+ENFS_ELIMIT = 135
+ELASTERRNO = 135
+TCP_EBASE = ETCP_EBASE
+TCP_ELIMIT = ETCP_ELIMIT
+NAMI_EBASE = ENAMI_EBASE
+NAMI_ELIMIT = ENAMI_ELIMIT
+NFS_EBASE = ENFS_EBASE
+NFS_ELIMIT = ENFS_ELIMIT
+LASTERRNO = ELASTERRNO
diff --git a/lib-python/2.2/plat-irix5/FILE.py b/lib-python/2.2/plat-irix5/FILE.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/FILE.py
@@ -0,0 +1,239 @@
+# Generated by h2py from /usr/include/sys/file.h
+
+# Included from sys/types.h
+
+# Included from sgidefs.h
+_MIPS_ISA_MIPS1 = 1
+_MIPS_ISA_MIPS2 = 2
+_MIPS_ISA_MIPS3 = 3
+_MIPS_ISA_MIPS4 = 4
+_MIPS_SIM_ABI32 = 1
+_MIPS_SIM_NABI32 = 2
+_MIPS_SIM_ABI64 = 3
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/bsd_types.h
+
+# Included from sys/mkdev.h
+ONBITSMAJOR = 7
+ONBITSMINOR = 8
+OMAXMAJ = 0x7f
+OMAXMIN = 0xff
+NBITSMAJOR = 14
+NBITSMINOR = 18
+MAXMAJ = 0x1ff
+MAXMIN = 0x3ffff
+OLDDEV = 0
+NEWDEV = 1
+MKDEV_VER = NEWDEV
+def major(dev): return __major(MKDEV_VER, dev)
+
+def minor(dev): return __minor(MKDEV_VER, dev)
+
+
+# Included from sys/select.h
+FD_SETSIZE = 1024
+NBBY = 8
+
+# Included from sys/sema.h
+HP_NOPOLICY = 0
+HP_ADDOFF = 1
+HP_MULOFF = 2
+SEMA_NAMSZ = 8
+SEMA_NOHIST = 0x1
+SEMA_LIFO = 0x2
+SEMA_MUTEX = 0x4
+SEMA_METER = 0x8
+SEMAOP_PSEMA = 1
+SEMAOP_VSEMA = 2
+SEMAOP_CPSEMA = 3
+SEMAOP_CVSEMA = 4
+SEMAOP_WSEMA = 5
+SEMAOP_UNSEMA = 6
+SEMAOP_INIT = 7
+SEMAOP_FREE = 8
+SSOP_PHIT = 1
+SSOP_PSLP = 2
+SSOP_PWAKE = 6
+SSOP_PRESIG = 7
+SSOP_POSTSIG = 8
+SSOP_VNOWAKE = 3
+SSOP_VWAKE = 4
+SSOP_CPHIT = 1
+SSOP_CPMISS = 5
+SSOP_CVNOWAKE = 3
+SSOP_CVWAKE = 4
+SSOP_WMISS = 5
+SSOP_WWAKE = 4
+SSOP_RMV = 9
+TZERO = 10
+SEMA_NOP = 0
+SEMA_WAKE = 1
+SEMA_VSEMA = 2
+SEMA_SPINOP = 3
+MR_ACCESS = 0x1
+MR_UPDATE = 0x2
+def cv_signal(cv): return cvsema(cv);
+
+def cv_destroy(cv): return freesema(cv)
+
+def mutex_enter(m): return psema(m, PZERO | PNOSTOP)
+
+def mutex_exit(m): return vsema(m)
+
+def mutex_destroy(m): return freesema(m)
+
+def MUTEX_HELD(m): return (ownsema(m))
+
+def MUTEX_HELD(m): return (1)
+
+RW_READER = MR_ACCESS
+RW_WRITER = MR_UPDATE
+def rw_exit(r): return mrunlock(r)
+
+def rw_tryupgrade(r): return cmrpromote(r)
+
+def rw_downgrade(r): return mrdemote(r)
+
+def rw_destroy(r): return mrfree(r)
+
+def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
+
+def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
+
+
+# Included from sys/splock.h
+SPLOCKNAMSIZ = 8
+SPLOCK_NONE = 0
+SPLOCK_SOFT = 1
+SPLOCK_HARD = 2
+OWNER_NONE = -1
+MAP_LOCKID = 0
+SPLOCK_MAX = (96*1024)
+SPLOCK_MAX = 32768
+MIN_POOL_SIZE = 256
+MAX_POOL_SIZE = 16384
+DEF_SEMA_POOL = 8192
+DEF_VNODE_POOL = 1024
+DEF_FILE_POOL = 1024
+def ownlock(x): return 1
+
+def splock(x): return 1
+
+def io_splock(x): return 1
+
+def apvsema(x): return vsema(x)
+
+def apcpsema(x): return cpsema(x)
+
+def apcvsema(x): return cvsema(x)
+
+def mp_mrunlock(a): return mrunlock(a)
+
+def apvsema(x): return 0
+
+def apcpsema(x): return 1
+
+def apcvsema(x): return 0
+
+def mp_mrunlock(a): return 0
+
+
+# Included from sys/fcntl.h
+FNDELAY = 0x04
+FAPPEND = 0x08
+FSYNC = 0x10
+FNONBLOCK = 0x80
+FASYNC = 0x1000
+FNONBLK = FNONBLOCK
+FDIRECT = 0x8000
+FCREAT = 0x0100
+FTRUNC = 0x0200
+FEXCL = 0x0400
+FNOCTTY = 0x0800
+O_RDONLY = 0
+O_WRONLY = 1
+O_RDWR = 2
+O_NDELAY = 0x04
+O_APPEND = 0x08
+O_SYNC = 0x10
+O_NONBLOCK = 0x80
+O_DIRECT = 0x8000
+O_CREAT = 0x100
+O_TRUNC = 0x200
+O_EXCL = 0x400
+O_NOCTTY = 0x800
+F_DUPFD = 0
+F_GETFD = 1
+F_SETFD = 2
+F_GETFL = 3
+F_SETFL = 4
+F_GETLK = 14
+F_SETLK = 6
+F_SETLKW = 7
+F_CHKFL = 8
+F_ALLOCSP = 10
+F_FREESP = 11
+F_SETBSDLK = 12
+F_SETBSDLKW = 13
+F_DIOINFO = 30
+F_FSGETXATTR = 31
+F_FSSETXATTR = 32
+F_GETLK64 = 33
+F_SETLK64 = 34
+F_SETLKW64 = 35
+F_ALLOCSP64 = 36
+F_FREESP64 = 37
+F_GETBMAP = 38
+F_FSSETDM = 39
+F_RSETLK = 20
+F_RGETLK = 21
+F_RSETLKW = 22
+F_GETOWN = 23
+F_SETOWN = 24
+F_O_GETLK = 5
+F_O_GETOWN = 10
+F_O_SETOWN = 11
+F_RDLCK = 01
+F_WRLCK = 02
+F_UNLCK = 03
+O_ACCMODE = 3
+FD_CLOEXEC = 1
+FD_NODUP_FORK = 4
+FMASK = 0x90FF
+FOPEN = 0xFFFFFFFF
+FREAD = 0x01
+FWRITE = 0x02
+FNDELAY = 0x04
+FAPPEND = 0x08
+FSYNC = 0x10
+FNONBLOCK = 0x80
+FASYNC = 0x1000
+FNONBLK = FNONBLOCK
+FDIRECT = 0x8000
+FCREAT = 0x0100
+FTRUNC = 0x0200
+FEXCL = 0x0400
+FNOCTTY = 0x0800
+IRIX4_FASYNC = 0x40
+FMARK = 0x4000
+FDEFER = 0x2000
+FINPROGRESS = 0x0400
+FINVIS = 0x0100
+FNMFS = 0x2000
+FCLOSEXEC = 001
+FDSHD = 0x0001
+FDNOMARK = 0x0002
+FDIGNPROGRESS = 0x0004
+LOCK_SH = 1
+LOCK_EX = 2
+LOCK_NB = 4
+LOCK_UN = 8
+F_OK = 0
+X_OK = 1
+W_OK = 2
+R_OK = 4
+L_SET = 0
+L_INCR = 1
+L_XTND = 2
diff --git a/lib-python/2.2/plat-irix5/FL.py b/lib-python/2.2/plat-irix5/FL.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/FL.py
@@ -0,0 +1,289 @@
+# Constants used by the FORMS library (module fl).
+# This corresponds to "forms.h".
+# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
+# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
+
+_v20 = 1
+_v21 = 1
+##import fl
+##try:
+##	_v20 = (fl.get_rgbmode is not None)
+##except:
+##	_v20 = 0
+##del fl
+
+NULL = 0
+FALSE = 0
+TRUE = 1
+
+EVENT = -1
+
+LABEL_SIZE = 64
+if _v20:
+	SHORTCUT_SIZE = 32
+PLACE_FREE = 0
+PLACE_SIZE = 1
+PLACE_ASPECT = 2
+PLACE_MOUSE = 3
+PLACE_CENTER = 4
+PLACE_POSITION = 5
+FL_PLACE_FULLSCREEN = 6
+FIND_INPUT = 0
+FIND_AUTOMATIC = 1
+FIND_MOUSE = 2
+BEGIN_GROUP = 10000
+END_GROUP = 20000
+ALIGN_TOP = 0
+ALIGN_BOTTOM = 1
+ALIGN_LEFT = 2
+ALIGN_RIGHT = 3
+ALIGN_CENTER = 4
+NO_BOX = 0
+UP_BOX = 1
+DOWN_BOX = 2
+FLAT_BOX = 3
+BORDER_BOX = 4
+SHADOW_BOX = 5
+FRAME_BOX = 6
+ROUNDED_BOX = 7
+RFLAT_BOX = 8
+RSHADOW_BOX = 9
+TOP_BOUND_COL = 51
+LEFT_BOUND_COL = 55
+BOT_BOUND_COL = 40
+RIGHT_BOUND_COL = 35
+COL1 = 47
+MCOL = 49
+LCOL = 0
+BOUND_WIDTH = 3.0
+DRAW = 0
+PUSH = 1
+RELEASE = 2
+ENTER = 3
+LEAVE = 4
+MOUSE = 5
+FOCUS = 6
+UNFOCUS = 7
+KEYBOARD = 8
+STEP = 9
+MOVE = 10
+FONT_NAME = 'Helvetica'
+FONT_BOLDNAME = 'Helvetica-Bold'
+FONT_ITALICNAME = 'Helvetica-Oblique'
+FONT_FIXEDNAME = 'Courier'
+FONT_ICONNAME = 'Icon'
+SMALL_FONT = 8.0
+NORMAL_FONT = 11.0
+LARGE_FONT = 20.0
+NORMAL_STYLE = 0
+BOLD_STYLE = 1
+ITALIC_STYLE = 2
+FIXED_STYLE = 3
+ENGRAVED_STYLE = 4
+ICON_STYLE = 5
+BITMAP = 3
+NORMAL_BITMAP = 0
+BITMAP_BOXTYPE = NO_BOX
+BITMAP_COL1 = 0
+BITMAP_COL2 = COL1
+BITMAP_LCOL = LCOL
+BITMAP_ALIGN = ALIGN_BOTTOM
+BITMAP_MAXSIZE = 128*128
+BITMAP_BW = BOUND_WIDTH
+BOX = 1
+BOX_BOXTYPE = UP_BOX
+BOX_COL1 = COL1
+BOX_LCOL = LCOL
+BOX_ALIGN = ALIGN_CENTER
+BOX_BW = BOUND_WIDTH
+BROWSER = 71
+NORMAL_BROWSER = 0
+SELECT_BROWSER = 1
+HOLD_BROWSER = 2
+MULTI_BROWSER = 3
+BROWSER_BOXTYPE = DOWN_BOX
+BROWSER_COL1 = COL1
+BROWSER_COL2 = 3
+BROWSER_LCOL = LCOL
+BROWSER_ALIGN = ALIGN_BOTTOM
+BROWSER_SLCOL = COL1
+BROWSER_BW = BOUND_WIDTH
+BROWSER_LINELENGTH = 128
+BROWSER_MAXLINE = 512
+BUTTON = 11
+NORMAL_BUTTON = 0
+PUSH_BUTTON = 1
+RADIO_BUTTON = 2
+HIDDEN_BUTTON = 3
+TOUCH_BUTTON = 4
+INOUT_BUTTON = 5
+RETURN_BUTTON = 6
+if _v20:
+	HIDDEN_RET_BUTTON = 7
+BUTTON_BOXTYPE = UP_BOX
+BUTTON_COL1 = COL1
+BUTTON_COL2 = COL1
+BUTTON_LCOL = LCOL
+BUTTON_ALIGN = ALIGN_CENTER
+BUTTON_MCOL1 = MCOL
+BUTTON_MCOL2 = MCOL
+BUTTON_BW = BOUND_WIDTH
+if _v20:
+	CHART = 4
+	BAR_CHART = 0
+	HORBAR_CHART = 1
+	LINE_CHART = 2
+	FILLED_CHART = 3
+	SPIKE_CHART = 4
+	PIE_CHART = 5
+	SPECIALPIE_CHART = 6
+	CHART_BOXTYPE = BORDER_BOX
+	CHART_COL1 = COL1
+	CHART_LCOL = LCOL
+	CHART_ALIGN = ALIGN_BOTTOM
+	CHART_BW = BOUND_WIDTH
+	CHART_MAX = 128
+CHOICE = 42
+NORMAL_CHOICE = 0
+CHOICE_BOXTYPE = DOWN_BOX
+CHOICE_COL1 = COL1
+CHOICE_COL2 = LCOL
+CHOICE_LCOL = LCOL
+CHOICE_ALIGN = ALIGN_LEFT
+CHOICE_BW = BOUND_WIDTH
+CHOICE_MCOL = MCOL
+CHOICE_MAXITEMS = 128
+CHOICE_MAXSTR = 64
+CLOCK = 61
+SQUARE_CLOCK = 0
+ROUND_CLOCK = 1
+CLOCK_BOXTYPE = UP_BOX
+CLOCK_COL1 = 37
+CLOCK_COL2 = 42
+CLOCK_LCOL = LCOL
+CLOCK_ALIGN = ALIGN_BOTTOM
+CLOCK_TOPCOL = COL1
+CLOCK_BW = BOUND_WIDTH
+COUNTER = 25
+NORMAL_COUNTER = 0
+SIMPLE_COUNTER = 1
+COUNTER_BOXTYPE = UP_BOX
+COUNTER_COL1 = COL1
+COUNTER_COL2 = 4
+COUNTER_LCOL = LCOL
+COUNTER_ALIGN = ALIGN_BOTTOM
+if _v20:
+	COUNTER_BW = BOUND_WIDTH
+else:
+	DEFAULT = 51
+	RETURN_DEFAULT = 0
+	ALWAYS_DEFAULT = 1
+DIAL = 22
+NORMAL_DIAL = 0
+LINE_DIAL = 1
+DIAL_BOXTYPE = NO_BOX
+DIAL_COL1 = COL1
+DIAL_COL2 = 37
+DIAL_LCOL = LCOL
+DIAL_ALIGN = ALIGN_BOTTOM
+DIAL_TOPCOL = COL1
+DIAL_BW = BOUND_WIDTH
+FREE = 101
+NORMAL_FREE = 1
+SLEEPING_FREE = 2
+INPUT_FREE = 3
+CONTINUOUS_FREE = 4
+ALL_FREE = 5
+INPUT = 31
+NORMAL_INPUT = 0
+if _v20:
+	FLOAT_INPUT = 1
+	INT_INPUT = 2
+	HIDDEN_INPUT = 3
+	if _v21:
+		MULTILINE_INPUT = 4
+		SECRET_INPUT = 5
+else:
+	ALWAYS_INPUT = 1
+INPUT_BOXTYPE = DOWN_BOX
+INPUT_COL1 = 13
+INPUT_COL2 = 5
+INPUT_LCOL = LCOL
+INPUT_ALIGN = ALIGN_LEFT
+INPUT_TCOL = LCOL
+INPUT_CCOL = 4
+INPUT_BW = BOUND_WIDTH
+INPUT_MAX = 128
+LIGHTBUTTON = 12
+LIGHTBUTTON_BOXTYPE = UP_BOX
+LIGHTBUTTON_COL1 = 39
+LIGHTBUTTON_COL2 = 3
+LIGHTBUTTON_LCOL = LCOL
+LIGHTBUTTON_ALIGN = ALIGN_CENTER
+LIGHTBUTTON_TOPCOL = COL1
+LIGHTBUTTON_MCOL = MCOL
+LIGHTBUTTON_BW1 = BOUND_WIDTH
+LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
+LIGHTBUTTON_MINSIZE = 12.0
+MENU = 41
+TOUCH_MENU = 0
+PUSH_MENU = 1
+MENU_BOXTYPE = BORDER_BOX
+MENU_COL1 = 55
+MENU_COL2 = 37
+MENU_LCOL = LCOL
+MENU_ALIGN = ALIGN_CENTER
+MENU_BW = BOUND_WIDTH
+MENU_MAX = 300
+POSITIONER = 23
+NORMAL_POSITIONER = 0
+POSITIONER_BOXTYPE = DOWN_BOX
+POSITIONER_COL1 = COL1
+POSITIONER_COL2 = 1
+POSITIONER_LCOL = LCOL
+POSITIONER_ALIGN = ALIGN_BOTTOM
+POSITIONER_BW = BOUND_WIDTH
+ROUNDBUTTON = 13
+ROUNDBUTTON_BOXTYPE = NO_BOX
+ROUNDBUTTON_COL1 = 7
+ROUNDBUTTON_COL2 = 3
+ROUNDBUTTON_LCOL = LCOL
+ROUNDBUTTON_ALIGN = ALIGN_CENTER
+ROUNDBUTTON_TOPCOL = COL1
+ROUNDBUTTON_MCOL = MCOL
+ROUNDBUTTON_BW = BOUND_WIDTH
+SLIDER = 21
+VALSLIDER = 24
+VERT_SLIDER = 0
+HOR_SLIDER = 1
+VERT_FILL_SLIDER = 2
+HOR_FILL_SLIDER = 3
+VERT_NICE_SLIDER = 4
+HOR_NICE_SLIDER = 5
+SLIDER_BOXTYPE = DOWN_BOX
+SLIDER_COL1 = COL1
+SLIDER_COL2 = COL1
+SLIDER_LCOL = LCOL
+SLIDER_ALIGN = ALIGN_BOTTOM
+SLIDER_BW1 = BOUND_WIDTH
+SLIDER_BW2 = BOUND_WIDTH*0.75
+SLIDER_FINE = 0.05
+SLIDER_WIDTH = 0.08
+TEXT = 2
+NORMAL_TEXT = 0
+TEXT_BOXTYPE = NO_BOX
+TEXT_COL1 = COL1
+TEXT_LCOL = LCOL
+TEXT_ALIGN = ALIGN_LEFT
+TEXT_BW = BOUND_WIDTH
+TIMER = 62
+NORMAL_TIMER = 0
+VALUE_TIMER = 1
+HIDDEN_TIMER = 2
+TIMER_BOXTYPE = DOWN_BOX
+TIMER_COL1 = COL1
+TIMER_COL2 = 1
+TIMER_LCOL = LCOL
+TIMER_ALIGN = ALIGN_CENTER
+TIMER_BW = BOUND_WIDTH
+TIMER_BLINKRATE = 0.2
diff --git a/lib-python/2.2/plat-irix5/GET.py b/lib-python/2.2/plat-irix5/GET.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/GET.py
@@ -0,0 +1,59 @@
+# Symbols from <gl/get.h>
+
+BCKBUFFER = 0x1
+FRNTBUFFER = 0x2
+DRAWZBUFFER = 0x4
+DMRGB = 0
+DMSINGLE = 1
+DMDOUBLE = 2
+DMRGBDOUBLE = 5
+HZ30 = 0
+HZ60 = 1
+NTSC = 2
+HDTV = 3
+VGA = 4
+IRIS3K = 5
+PR60 = 6
+PAL = 9
+HZ30_SG = 11
+A343 = 14
+STR_RECT = 15
+VOF0 = 16
+VOF1 = 17
+VOF2 = 18
+VOF3 = 19
+SGI0 = 20
+SGI1 = 21
+SGI2 = 22
+HZ72 = 23
+GL_VIDEO_REG = 0x00800000
+GLV_GENLOCK = 0x00000001
+GLV_UNBLANK = 0x00000002
+GLV_SRED = 0x00000004
+GLV_SGREEN = 0x00000008
+GLV_SBLUE = 0x00000010
+GLV_SALPHA = 0x00000020
+GLV_TTLGENLOCK = 0x00000080
+GLV_TTLSYNC = GLV_TTLGENLOCK
+GLV_GREENGENLOCK = 0x0000100
+LEFTPLANE = 0x0001
+RIGHTPLANE = 0x0002
+BOTTOMPLANE = 0x0004
+TOPPLANE = 0x0008
+NEARPLANE = 0x0010
+FARPLANE = 0x0020
+## GETDEF = __GL_GET_H__
+NOBUFFER = 0x0
+BOTHBUFFERS = 0x3
+DMINTENSITYSINGLE = 3
+DMINTENSITYDOUBLE = 4
+MONSPECIAL = 0x20
+HZ50 = 3
+MONA = 5
+MONB = 6
+MONC = 7
+MOND = 8
+MON_ALL = 12
+MON_GEN_ALL = 13
+CMAPMULTI = 0
+CMAPONE = 1
diff --git a/lib-python/2.2/plat-irix5/GL.py b/lib-python/2.2/plat-irix5/GL.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/GL.py
@@ -0,0 +1,393 @@
+NULL = 0
+FALSE = 0
+TRUE = 1
+ATTRIBSTACKDEPTH = 10
+VPSTACKDEPTH = 8
+MATRIXSTACKDEPTH = 32
+NAMESTACKDEPTH = 1025
+STARTTAG = -2
+ENDTAG = -3
+BLACK = 0
+RED = 1
+GREEN = 2
+YELLOW = 3
+BLUE = 4
+MAGENTA = 5
+CYAN = 6
+WHITE = 7
+PUP_CLEAR = 0
+PUP_COLOR = 1
+PUP_BLACK = 2
+PUP_WHITE = 3
+NORMALDRAW = 0x010
+PUPDRAW = 0x020
+OVERDRAW = 0x040
+UNDERDRAW = 0x080
+CURSORDRAW = 0x100
+DUALDRAW = 0x200
+PATTERN_16 = 16
+PATTERN_32 = 32
+PATTERN_64 = 64
+PATTERN_16_SIZE = 16
+PATTERN_32_SIZE = 64
+PATTERN_64_SIZE = 256
+SRC_AUTO = 0
+SRC_FRONT = 1
+SRC_BACK = 2
+SRC_ZBUFFER = 3
+SRC_PUP = 4
+SRC_OVER = 5
+SRC_UNDER = 6
+SRC_FRAMEGRABBER = 7
+BF_ZERO = 0
+BF_ONE = 1
+BF_DC = 2
+BF_SC = 2
+BF_MDC = 3
+BF_MSC = 3
+BF_SA = 4
+BF_MSA = 5
+BF_DA = 6
+BF_MDA = 7
+BF_MIN_SA_MDA = 8
+AF_NEVER = 0
+AF_LESS = 1
+AF_EQUAL = 2
+AF_LEQUAL = 3
+AF_GREATER = 4
+AF_NOTEQUAL = 5
+AF_GEQUAL = 6
+AF_ALWAYS = 7
+ZF_NEVER = 0
+ZF_LESS = 1
+ZF_EQUAL = 2
+ZF_LEQUAL = 3
+ZF_GREATER = 4
+ZF_NOTEQUAL = 5
+ZF_GEQUAL = 6
+ZF_ALWAYS = 7
+ZSRC_DEPTH = 0
+ZSRC_COLOR = 1
+SMP_OFF = 0x0
+SMP_ON = 0x1
+SMP_SMOOTHER = 0x2
+SML_OFF = 0x0
+SML_ON = 0x1
+SML_SMOOTHER = 0x2
+SML_END_CORRECT = 0x4
+PYSM_OFF = 0
+PYSM_ON = 1
+PYSM_SHRINK = 2
+DT_OFF = 0
+DT_ON = 1
+PUP_NONE = 0
+PUP_GREY = 0x1
+PUP_BOX = 0x2
+PUP_CHECK = 0x4
+GLC_OLDPOLYGON = 0
+GLC_ZRANGEMAP = 1
+GLC_MQUEUERATE = 2
+GLC_SOFTATTACH = 3
+GLC_MANAGEBG = 4
+GLC_SLOWMAPCOLORS = 5
+GLC_INPUTCHANGEBUG = 6
+GLC_NOBORDERBUG = 7
+GLC_SET_VSYNC = 8
+GLC_GET_VSYNC = 9
+GLC_VSYNC_SLEEP = 10
+GLC_COMPATRATE = 15
+C16X1 = 0
+C16X2 = 1
+C32X1 = 2
+C32X2 = 3
+CCROSS = 4
+FLAT = 0
+GOURAUD = 1
+LO_ZERO = 0x0
+LO_AND = 0x1
+LO_ANDR = 0x2
+LO_SRC = 0x3
+LO_ANDI = 0x4
+LO_DST = 0x5
+LO_XOR = 0x6
+LO_OR = 0x7
+LO_NOR = 0x8
+LO_XNOR = 0x9
+LO_NDST = 0xa
+LO_ORR = 0xb
+LO_NSRC = 0xc
+LO_ORI = 0xd
+LO_NAND = 0xe
+LO_ONE = 0xf
+INFOCUSSCRN = -2
+ST_KEEP = 0
+ST_ZERO = 1
+ST_REPLACE = 2
+ST_INCR = 3
+ST_DECR = 4
+ST_INVERT = 5
+SF_NEVER = 0
+SF_LESS = 1
+SF_EQUAL = 2
+SF_LEQUAL = 3
+SF_GREATER = 4
+SF_NOTEQUAL = 5
+SF_GEQUAL = 6
+SF_ALWAYS = 7
+SS_OFF = 0
+SS_DEPTH = 1
+PYM_FILL = 1
+PYM_POINT = 2
+PYM_LINE = 3
+PYM_HOLLOW = 4
+PYM_LINE_FAST = 5
+FG_OFF = 0
+FG_ON = 1
+FG_DEFINE = 2
+FG_VTX_EXP = 2
+FG_VTX_LIN = 3
+FG_PIX_EXP = 4
+FG_PIX_LIN = 5
+FG_VTX_EXP2 = 6
+FG_PIX_EXP2 = 7
+PM_SHIFT = 0
+PM_EXPAND = 1
+PM_C0 = 2
+PM_C1 = 3
+PM_ADD24 = 4
+PM_SIZE = 5
+PM_OFFSET = 6
+PM_STRIDE = 7
+PM_TTOB = 8
+PM_RTOL = 9
+PM_ZDATA = 10
+PM_WARP = 11
+PM_RDX = 12
+PM_RDY = 13
+PM_CDX = 14
+PM_CDY = 15
+PM_XSTART = 16
+PM_YSTART = 17
+PM_VO1 = 1000
+NAUTO = 0
+NNORMALIZE = 1
+AC_CLEAR = 0
+AC_ACCUMULATE = 1
+AC_CLEAR_ACCUMULATE = 2
+AC_RETURN = 3
+AC_MULT = 4
+AC_ADD = 5
+CP_OFF = 0
+CP_ON = 1
+CP_DEFINE = 2
+SB_RESET = 0
+SB_TRACK = 1
+SB_HOLD = 2
+RD_FREEZE = 0x00000001
+RD_ALPHAONE = 0x00000002
+RD_IGNORE_UNDERLAY = 0x00000004
+RD_IGNORE_OVERLAY = 0x00000008
+RD_IGNORE_PUP = 0x00000010
+RD_OFFSCREEN = 0x00000020
+GD_XPMAX = 0
+GD_YPMAX = 1
+GD_XMMAX = 2
+GD_YMMAX = 3
+GD_ZMIN = 4
+GD_ZMAX = 5
+GD_BITS_NORM_SNG_RED = 6
+GD_BITS_NORM_SNG_GREEN = 7
+GD_BITS_NORM_SNG_BLUE = 8
+GD_BITS_NORM_DBL_RED = 9
+GD_BITS_NORM_DBL_GREEN = 10
+GD_BITS_NORM_DBL_BLUE = 11
+GD_BITS_NORM_SNG_CMODE = 12
+GD_BITS_NORM_DBL_CMODE = 13
+GD_BITS_NORM_SNG_MMAP = 14
+GD_BITS_NORM_DBL_MMAP = 15
+GD_BITS_NORM_ZBUFFER = 16
+GD_BITS_OVER_SNG_CMODE = 17
+GD_BITS_UNDR_SNG_CMODE = 18
+GD_BITS_PUP_SNG_CMODE = 19
+GD_BITS_NORM_SNG_ALPHA = 21
+GD_BITS_NORM_DBL_ALPHA = 22
+GD_BITS_CURSOR = 23
+GD_OVERUNDER_SHARED = 24
+GD_BLEND = 25
+GD_CIFRACT = 26
+GD_CROSSHAIR_CINDEX = 27
+GD_DITHER = 28
+GD_LINESMOOTH_CMODE = 30
+GD_LINESMOOTH_RGB = 31
+GD_LOGICOP = 33
+GD_NSCRNS = 35
+GD_NURBS_ORDER = 36
+GD_NBLINKS = 37
+GD_NVERTEX_POLY = 39
+GD_PATSIZE_64 = 40
+GD_PNTSMOOTH_CMODE = 41
+GD_PNTSMOOTH_RGB = 42
+GD_PUP_TO_OVERUNDER = 43
+GD_READSOURCE = 44
+GD_READSOURCE_ZBUFFER = 48
+GD_STEREO = 50
+GD_SUBPIXEL_LINE = 51
+GD_SUBPIXEL_PNT = 52
+GD_SUBPIXEL_POLY = 53
+GD_TRIMCURVE_ORDER = 54
+GD_WSYS = 55
+GD_ZDRAW_GEOM = 57
+GD_ZDRAW_PIXELS = 58
+GD_SCRNTYPE = 61
+GD_TEXTPORT = 62
+GD_NMMAPS = 63
+GD_FRAMEGRABBER = 64
+GD_TIMERHZ = 66
+GD_DBBOX = 67
+GD_AFUNCTION = 68
+GD_ALPHA_OVERUNDER = 69
+GD_BITS_ACBUF = 70
+GD_BITS_ACBUF_HW = 71
+GD_BITS_STENCIL = 72
+GD_CLIPPLANES = 73
+GD_FOGVERTEX = 74
+GD_LIGHTING_TWOSIDE = 76
+GD_POLYMODE = 77
+GD_POLYSMOOTH = 78
+GD_SCRBOX = 79
+GD_TEXTURE = 80
+GD_FOGPIXEL = 81
+GD_TEXTURE_PERSP = 82
+GD_MUXPIPES = 83
+GD_NOLIMIT = -2
+GD_WSYS_NONE = 0
+GD_WSYS_4S = 1
+GD_SCRNTYPE_WM = 0
+GD_SCRNTYPE_NOWM = 1
+N_PIXEL_TOLERANCE = 1
+N_CULLING = 2
+N_DISPLAY = 3
+N_ERRORCHECKING = 4
+N_SUBDIVISIONS = 5
+N_S_STEPS = 6
+N_T_STEPS = 7
+N_TILES = 8
+N_TMP1 = 9
+N_TMP2 = 10
+N_TMP3 = 11
+N_TMP4 = 12
+N_TMP5 = 13
+N_TMP6 = 14
+N_FILL = 1.0
+N_OUTLINE_POLY = 2.0
+N_OUTLINE_PATCH = 5.0
+N_ISOLINE_S = 12.0
+N_ST = 0x8
+N_STW = 0xd
+N_XYZ = 0x4c
+N_XYZW = 0x51
+N_TEX = 0x88
+N_TEXW = 0x8d
+N_RGBA = 0xd0
+N_RGBAW = 0xd5
+N_P2D = 0x8
+N_P2DR = 0xd
+N_V3D = 0x4c
+N_V3DR = 0x51
+N_T2D = 0x88
+N_T2DR = 0x8d
+N_C4D = 0xd0
+N_C4DR = 0xd5
+LMNULL = 0.0
+MSINGLE = 0
+MPROJECTION = 1
+MVIEWING = 2
+MTEXTURE = 3
+MAXLIGHTS = 8
+MAXRESTRICTIONS = 4
+DEFMATERIAL = 0
+EMISSION = 1
+AMBIENT = 2
+DIFFUSE = 3
+SPECULAR = 4
+SHININESS = 5
+COLORINDEXES = 6
+ALPHA = 7
+DEFLIGHT = 100
+LCOLOR = 101
+POSITION = 102
+SPOTDIRECTION = 103
+SPOTLIGHT = 104
+DEFLMODEL = 200
+LOCALVIEWER = 201
+ATTENUATION = 202
+ATTENUATION2 = 203
+TWOSIDE = 204
+MATERIAL = 1000
+BACKMATERIAL = 1001
+LIGHT0 = 1100
+LIGHT1 = 1101
+LIGHT2 = 1102
+LIGHT3 = 1103
+LIGHT4 = 1104
+LIGHT5 = 1105
+LIGHT6 = 1106
+LIGHT7 = 1107
+LMODEL = 1200
+LMC_COLOR = 0
+LMC_EMISSION = 1
+LMC_AMBIENT = 2
+LMC_DIFFUSE = 3
+LMC_SPECULAR = 4
+LMC_AD = 5
+LMC_NULL = 6
+TX_MINFILTER = 0x100
+TX_MAGFILTER = 0x200
+TX_WRAP = 0x300
+TX_WRAP_S = 0x310
+TX_WRAP_T = 0x320
+TX_TILE = 0x400
+TX_BORDER = 0x500
+TX_NULL = 0x000
+TX_POINT = 0x110
+TX_BILINEAR = 0x220
+TX_MIPMAP = 0x120
+TX_MIPMAP_POINT = 0x121
+TX_MIPMAP_LINEAR = 0x122
+TX_MIPMAP_BILINEAR = 0x123
+TX_MIPMAP_TRILINEAR = 0x124
+TX_REPEAT = 0x301
+TX_CLAMP = 0x302
+TX_SELECT = 0x303
+TX_TEXTURE_0 = 0
+TV_MODULATE = 0x101
+TV_BLEND = 0x102
+TV_DECAL = 0x103
+TV_COLOR = 0x200
+TV_NULL = 0x000
+TV_ENV0 = 0
+TX_S = 0
+TX_T = 1
+TG_OFF = 0
+TG_ON = 1
+TG_CONTOUR = 2
+TG_LINEAR = 3
+TG_SPHEREMAP = 4
+TG_REFRACTMAP = 5
+DGLSINK = 0
+DGLLOCAL = 1
+DGLTSOCKET = 2
+DGL4DDN = 3
+PUP_CURSOR = PUP_COLOR
+FATAL = 1
+WARNING = 2
+ASK_CONT = 3
+ASK_RESTART = 4
+XMAXSCREEN = 1279
+YMAXSCREEN = 1023
+XMAXMEDIUM = 1023
+YMAXMEDIUM = 767
+XMAX170 = 645
+YMAX170 = 484
+XMAXPAL = 779
+YMAXPAL = 574
diff --git a/lib-python/2.2/plat-irix5/GLWS.py b/lib-python/2.2/plat-irix5/GLWS.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/GLWS.py
@@ -0,0 +1,12 @@
+NOERROR = 0
+NOCONTEXT = -1
+NODISPLAY = -2
+NOWINDOW = -3
+NOGRAPHICS = -4
+NOTTOP = -5
+NOVISUAL = -6
+BUFSIZE = -7
+BADWINDOW = -8
+ALREADYBOUND = -100
+BINDFAILED = -101
+SETFAILED = -102
diff --git a/lib-python/2.2/plat-irix5/IN.py b/lib-python/2.2/plat-irix5/IN.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/IN.py
@@ -0,0 +1,141 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from sys/endian.h
+LITTLE_ENDIAN = 1234
+BIG_ENDIAN = 4321
+PDP_ENDIAN = 3412
+BYTE_ORDER = BIG_ENDIAN
+BYTE_ORDER = LITTLE_ENDIAN
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+def htonl(x): return ntohl(x)
+
+def htons(x): return ntohs(x)
+
+
+# Included from sys/bsd_types.h
+
+# Included from sys/mkdev.h
+ONBITSMAJOR = 7
+ONBITSMINOR = 8
+OMAXMAJ = 0x7f
+OMAXMIN = 0xff
+NBITSMAJOR = 14
+NBITSMINOR = 18
+MAXMAJ = 0x1ff
+MAXMIN = 0x3ffff
+OLDDEV = 0
+NEWDEV = 1
+MKDEV_VER = NEWDEV
+def major(dev): return __major(MKDEV_VER, dev)
+
+def minor(dev): return __minor(MKDEV_VER, dev)
+
+
+# Included from sys/select.h
+FD_SETSIZE = 1024
+NBBY = 8
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_ENCAP = 4
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_TP = 29
+IPPROTO_XTP = 36
+IPPROTO_HELLO = 63
+IPPROTO_ND = 77
+IPPROTO_EON = 80
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IPPORT_MAXPORT = 65535
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_BROADCAST = 0xffffffff
+INADDR_LOOPBACK = 0x7F000001
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_MULTICAST_IF = 2
+IP_MULTICAST_TTL = 3
+IP_MULTICAST_LOOP = 4
+IP_ADD_MEMBERSHIP = 5
+IP_DROP_MEMBERSHIP = 6
+IP_HDRINCL = 7
+IP_TOS = 8
+IP_TTL = 9
+IP_RECVOPTS = 10
+IP_RECVRETOPTS = 11
+IP_RECVDSTADDR = 12
+IP_RETOPTS = 13
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 20
+IP_MULTICAST_TTL = 21
+IP_MULTICAST_LOOP = 22
+IP_ADD_MEMBERSHIP = 23
+IP_DROP_MEMBERSHIP = 24
+IRIX4_IP_OPTIONS = 1
+IRIX4_IP_MULTICAST_IF = 2
+IRIX4_IP_MULTICAST_TTL = 3
+IRIX4_IP_MULTICAST_LOOP = 4
+IRIX4_IP_ADD_MEMBERSHIP = 5
+IRIX4_IP_DROP_MEMBERSHIP = 6
+IRIX4_IP_HDRINCL = 7
+IRIX4_IP_TOS = 8
+IRIX4_IP_TTL = 9
+IRIX4_IP_RECVOPTS = 10
+IRIX4_IP_RECVRETOPTS = 11
+IRIX4_IP_RECVDSTADDR = 12
+IRIX4_IP_RETOPTS = 13
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
diff --git a/lib-python/2.2/plat-irix5/IOCTL.py b/lib-python/2.2/plat-irix5/IOCTL.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/IOCTL.py
@@ -0,0 +1,233 @@
+# These lines were mostly generated by h2py.py (see demo/scripts)
+# from <sys/ioctl.h>, <sys/termio.h> and <termios.h> on Irix 4.0.2
+# with some manual changes to cope with imperfections in h2py.py.
+# The applicability on other systems is not clear; especially non-SYSV
+# systems may have a totally different set of ioctls.
+
+IOCTYPE = 0xff00
+LIOC = (ord('l')<<8)
+LIOCGETP = (LIOC|1)
+LIOCSETP = (LIOC|2)
+LIOCGETS = (LIOC|5)
+LIOCSETS = (LIOC|6)
+DIOC = (ord('d')<<8)
+DIOCGETC = (DIOC|1)
+DIOCGETB = (DIOC|2)
+DIOCSETE = (DIOC|3)
+IOCPARM_MASK = 0x7f
+IOC_VOID = 0x20000000
+IOC_OUT = 0x40000000
+IOC_IN = 0x80000000
+IOC_INOUT = (IOC_IN|IOC_OUT)
+int = 'i'
+short = 'h'
+long = 'l'
+def sizeof(t): import struct; return struct.calcsize(t)
+def _IO(x,y): return (IOC_VOID|((x)<<8)|y)
+def _IOR(x,y,t): return (IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+def _IOW(x,y,t): return (IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+# this should be _IORW, but stdio got there first
+def _IOWR(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+FIONREAD = _IOR(ord('f'), 127, int)
+FIONBIO = _IOW(ord('f'), 126, int)
+FIOASYNC = _IOW(ord('f'), 125, int)
+FIOSETOWN = _IOW(ord('f'), 124, int)
+FIOGETOWN = _IOR(ord('f'), 123, int)
+NCC = 8
+NCC_PAD = 7
+NCC_EXT = 16
+NCCS = (NCC+NCC_PAD+NCC_EXT)
+VINTR = 0
+VQUIT = 1
+VERASE = 2
+VKILL = 3
+VEOF = 4
+VEOL = 5
+VEOL2 = 6
+VMIN = VEOF
+VTIME = VEOL
+VSWTCH = 7
+VLNEXT = (NCC+NCC_PAD+0)
+VWERASE = (NCC+NCC_PAD+1)
+VRPRNT = (NCC+NCC_PAD+2)
+VFLUSHO = (NCC+NCC_PAD+3)
+VSTOP = (NCC+NCC_PAD+4)
+VSTART = (NCC+NCC_PAD+5)
+CNUL = '\0'
+CDEL = '\377'
+CESC = '\\'
+CINTR = '\177'
+CQUIT = '\34'
+CBRK = '\377'
+def CTRL(c): return ord(c) & 0x0f
+CERASE = CTRL('H')
+CKILL = CTRL('U')
+CEOF = CTRL('d')
+CEOT = CEOF
+CSTART = CTRL('q')
+CSTOP = CTRL('s')
+CSWTCH = CTRL('z')
+CSUSP = CSWTCH
+CNSWTCH = 0
+CLNEXT = CTRL('v')
+CWERASE = CTRL('w')
+CFLUSHO = CTRL('o')
+CFLUSH = CFLUSHO
+CRPRNT = CTRL('r')
+CDSUSP = CTRL('y')
+IGNBRK = 0000001
+BRKINT = 0000002
+IGNPAR = 0000004
+PARMRK = 0000010
+INPCK = 0000020
+ISTRIP = 0000040
+INLCR = 0000100
+IGNCR = 0000200
+ICRNL = 0000400
+IUCLC = 0001000
+IXON = 0002000
+IXANY = 0004000
+IXOFF = 0010000
+IBLKMD = 0020000
+OPOST = 0000001
+OLCUC = 0000002
+ONLCR = 0000004
+OCRNL = 0000010
+ONOCR = 0000020
+ONLRET = 0000040
+OFILL = 0000100
+OFDEL = 0000200
+NLDLY = 0000400
+NL0 = 0
+NL1 = 0000400
+CRDLY = 0003000
+CR0 = 0
+CR1 = 0001000
+CR2 = 0002000
+CR3 = 0003000
+TABDLY = 0014000
+TAB0 = 0
+TAB1 = 0004000
+TAB2 = 0010000
+TAB3 = 0014000
+BSDLY = 0020000
+BS0 = 0
+BS1 = 0020000
+VTDLY = 0040000
+VT0 = 0
+VT1 = 0040000
+FFDLY = 0100000
+FF0 = 0
+FF1 = 0100000
+CBAUD = 0000017
+B0 = 0
+B50 = 0000001
+B75 = 0000002
+B110 = 0000003
+B134 = 0000004
+B150 = 0000005
+B200 = 0000006
+B300 = 0000007
+B600 = 0000010
+B1200 = 0000011
+B1800 = 0000012
+B2400 = 0000013
+B4800 = 0000014
+B9600 = 0000015
+B19200 = 0000016
+EXTA = 0000016
+B38400 = 0000017
+EXTB = 0000017
+CSIZE = 0000060
+CS5 = 0
+CS6 = 0000020
+CS7 = 0000040
+CS8 = 0000060
+CSTOPB = 0000100
+CREAD = 0000200
+PARENB = 0000400
+PARODD = 0001000
+HUPCL = 0002000
+CLOCAL = 0004000
+LOBLK = 0040000
+ISIG = 0000001
+ICANON = 0000002
+XCASE = 0000004
+ECHO = 0000010
+ECHOE = 0000020
+ECHOK = 0000040
+ECHONL = 0000100
+NOFLSH = 0000200
+IIEXTEN = 0000400
+ITOSTOP = 0001000
+SSPEED = B9600
+IOCTYPE = 0xff00
+TIOC = (ord('T')<<8)
+oTCGETA = (TIOC|1)
+oTCSETA = (TIOC|2)
+oTCSETAW = (TIOC|3)
+oTCSETAF = (TIOC|4)
+TCSBRK = (TIOC|5)
+TCXONC = (TIOC|6)
+TCFLSH = (TIOC|7)
+TCGETA = (TIOC|8)
+TCSETA = (TIOC|9)
+TCSETAW = (TIOC|10)
+TCSETAF = (TIOC|11)
+TIOCFLUSH = (TIOC|12)
+TCDSET = (TIOC|32)
+TCBLKMD = (TIOC|33)
+TIOCPKT = (TIOC|112)
+TIOCPKT_DATA = 0x00
+TIOCPKT_FLUSHREAD = 0x01
+TIOCPKT_FLUSHWRITE = 0x02
+TIOCPKT_NOSTOP = 0x10
+TIOCPKT_DOSTOP = 0x20
+TIOCNOTTY = (TIOC|113)
+TIOCSTI = (TIOC|114)
+TIOCSPGRP = _IOW(ord('t'), 118, int)
+TIOCGPGRP = _IOR(ord('t'), 119, int)
+TIOCCONS = _IOW(ord('t'), 120, int)
+struct_winsize = 'hhhh'
+TIOCGWINSZ = _IOR(ord('t'), 104, struct_winsize)
+TIOCSWINSZ = _IOW(ord('t'), 103, struct_winsize)
+TFIOC = (ord('F')<<8)
+oFIONREAD = (TFIOC|127)
+LDIOC = (ord('D')<<8)
+LDOPEN = (LDIOC|0)
+LDCLOSE = (LDIOC|1)
+LDCHG = (LDIOC|2)
+LDGETT = (LDIOC|8)
+LDSETT = (LDIOC|9)
+TERM_NONE = 0
+TERM_TEC = 1
+TERM_V61 = 2
+TERM_V10 = 3
+TERM_TEX = 4
+TERM_D40 = 5
+TERM_H45 = 6
+TERM_D42 = 7
+TM_NONE = 0000
+TM_SNL = 0001
+TM_ANL = 0002
+TM_LCF = 0004
+TM_CECHO = 0010
+TM_CINVIS = 0020
+TM_SET = 0200
+LDISC0 = 0
+LDISC1 = 1
+NTTYDISC = LDISC1
+VSUSP = VSWTCH
+TCSANOW = 0
+TCSADRAIN = 1
+TCSAFLUSH = 2
+TCIFLUSH = 0
+TCOFLUSH = 1
+TCIOFLUSH = 2
+TCOOFF = 0
+TCOON = 1
+TCIOFF = 2
+TCION = 3
+TO_STOP = LOBLK
+IEXTEN = IIEXTEN
+TOSTOP = ITOSTOP
diff --git a/lib-python/2.2/plat-irix5/SV.py b/lib-python/2.2/plat-irix5/SV.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/SV.py
@@ -0,0 +1,120 @@
+NTSC_XMAX = 640
+NTSC_YMAX = 480
+PAL_XMAX = 768
+PAL_YMAX = 576
+BLANKING_BUFFER_SIZE = 2
+
+MAX_SOURCES = 2
+
+# mode parameter for Bind calls
+IN_OFF = 0				# No Video
+IN_OVER = 1				# Video over graphics
+IN_UNDER = 2				# Video under graphics
+IN_REPLACE = 3				# Video replaces entire win
+
+# mode parameters for LoadMap calls.  Specifies buffer, always 256 entries
+INPUT_COLORMAP = 0			# tuples of 8-bit RGB
+CHROMA_KEY_MAP = 1			# tuples of 8-bit RGB
+COLOR_SPACE_MAP = 2			# tuples of 8-bit RGB
+GAMMA_MAP = 3				# tuples of 24-bit red values
+
+# mode parameters for UseExclusive calls
+INPUT = 0
+OUTPUT = 1
+IN_OUT = 2
+
+# Format constants for the capture routines
+RGB8_FRAMES = 0				# noninterleaved 8 bit 3:2:3 RBG fields
+RGB32_FRAMES = 1			# 32-bit 8:8:8 RGB frames
+YUV411_FRAMES = 2			# interleaved, 8:2:2 YUV format
+YUV411_FRAMES_AND_BLANKING_BUFFER = 3
+
+#
+# sv.SetParam is passed variable length argument lists,
+# consisting of <name, value> pairs.   The following
+# constants identify argument names.
+#
+_NAME_BASE = 1000
+SOURCE = (_NAME_BASE + 0)
+SOURCE1 = 0
+SOURCE2 = 1
+SOURCE3 = 2
+COLOR = (_NAME_BASE + 1)
+DEFAULT_COLOR = 0
+USER_COLOR = 1
+MONO = 2
+OUTPUTMODE = (_NAME_BASE + 2)
+LIVE_OUTPUT = 0
+STILL24_OUT = 1
+FREEZE = (_NAME_BASE + 3)
+DITHER = (_NAME_BASE + 4)
+OUTPUT_FILTER = (_NAME_BASE + 5)
+HUE = (_NAME_BASE + 6)
+GENLOCK = (_NAME_BASE + 7)
+GENLOCK_OFF = 0
+GENLOCK_ON = 1
+GENLOCK_HOUSE = 2
+BROADCAST = (_NAME_BASE + 8)
+NTSC = 0
+PAL = 1
+VIDEO_MODE = (_NAME_BASE + 9)
+COMP = 0
+SVIDEO = 1
+INPUT_BYPASS = (_NAME_BASE + 10)
+FIELDDROP = (_NAME_BASE + 11)
+SLAVE = (_NAME_BASE + 12)
+APERTURE_FACTOR = (_NAME_BASE + 13)
+AFACTOR_0 = 0
+AFACTOR_QTR = 1
+AFACTOR_HLF = 2
+AFACTOR_ONE = 3
+CORING = (_NAME_BASE + 14)
+COR_OFF = 0
+COR_1LSB = 1
+COR_2LSB = 2
+COR_3LSB = 3
+APERTURE_BANDPASS = (_NAME_BASE + 15)
+ABAND_F0 = 0
+ABAND_F1 = 1
+ABAND_F2 = 2
+ABAND_F3 = 3
+PREFILTER = (_NAME_BASE + 16)
+CHROMA_TRAP = (_NAME_BASE + 17)
+CK_THRESHOLD = (_NAME_BASE + 18)
+PAL_SENSITIVITY = (_NAME_BASE + 19)
+GAIN_CONTROL = (_NAME_BASE + 20)
+GAIN_SLOW = 0
+GAIN_MEDIUM = 1
+GAIN_FAST = 2
+GAIN_FROZEN = 3
+AUTO_CKILL = (_NAME_BASE + 21)
+VTR_MODE = (_NAME_BASE + 22)
+VTR_INPUT = 0
+CAMERA_INPUT = 1
+LUMA_DELAY = (_NAME_BASE + 23)
+VNOISE = (_NAME_BASE + 24)
+VNOISE_NORMAL = 0
+VNOISE_SEARCH = 1
+VNOISE_AUTO = 2
+VNOISE_BYPASS = 3
+CHCV_PAL = (_NAME_BASE + 25)
+CHCV_NTSC = (_NAME_BASE + 26)
+CCIR_LEVELS = (_NAME_BASE + 27)
+STD_CHROMA = (_NAME_BASE + 28)
+DENC_VTBYPASS = (_NAME_BASE + 29)
+FAST_TIMECONSTANT = (_NAME_BASE + 30)
+GENLOCK_DELAY = (_NAME_BASE + 31)
+PHASE_SYNC = (_NAME_BASE + 32)
+VIDEO_OUTPUT = (_NAME_BASE + 33)
+CHROMA_PHASEOUT = (_NAME_BASE + 34)
+CHROMA_CENTER = (_NAME_BASE + 35)
+YUV_TO_RGB_INVERT = (_NAME_BASE + 36)
+SOURCE1_BROADCAST = (_NAME_BASE + 37)
+SOURCE1_MODE = (_NAME_BASE + 38)
+SOURCE2_BROADCAST = (_NAME_BASE + 39)
+SOURCE2_MODE = (_NAME_BASE + 40)
+SOURCE3_BROADCAST = (_NAME_BASE + 41)
+SOURCE3_MODE = (_NAME_BASE + 42)
+SIGNAL_STD = (_NAME_BASE + 43)
+NOSIGNAL = 2
+SIGNAL_COLOR = (_NAME_BASE + 44)
diff --git a/lib-python/2.2/plat-irix5/WAIT.py b/lib-python/2.2/plat-irix5/WAIT.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/WAIT.py
@@ -0,0 +1,14 @@
+# Generated by h2py from /usr/include/sys/wait.h
+_WSTOPPED = 0177
+WNOHANG = 0100
+WEXITED = 0001
+WTRAPPED = 0002
+WSTOPPED = 0004
+WCONTINUED = 0010
+WNOWAIT = 0200
+WOPTMASK = (WEXITED|WTRAPPED|WSTOPPED|WCONTINUED|WNOHANG|WNOWAIT)
+WSTOPFLG = 0177
+WCONTFLG = 0177777
+WCOREFLAG = 0200
+WSIGMASK = 0177
+WUNTRACED = 0004
diff --git a/lib-python/2.2/plat-irix5/cddb.py b/lib-python/2.2/plat-irix5/cddb.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/cddb.py
@@ -0,0 +1,206 @@
+# This file implements a class which forms an interface to the .cddb
+# directory that is maintained by SGI's cdman program.
+#
+# Usage is as follows:
+#
+# import readcd
+# r = readcd.Readcd()
+# c = Cddb(r.gettrackinfo())
+#
+# Now you can use c.artist, c.title and c.track[trackno] (where trackno
+# starts at 1).  When the CD is not recognized, all values will be the empty
+# string.
+# It is also possible to set the above mentioned variables to new values.
+# You can then use c.write() to write out the changed values to the
+# .cdplayerrc file.
+
+import string, posix, os
+
+_cddbrc = '.cddb'
+_DB_ID_NTRACKS = 5
+_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@_=+abcdefghijklmnopqrstuvwxyz'
+def _dbid(v):
+	if v >= len(_dbid_map):
+		return string.zfill(v, 2)
+	else:
+		return _dbid_map[v]
+
+def tochash(toc):
+	if type(toc) == type(''):
+		tracklist = []
+		for i in range(2, len(toc), 4):
+			tracklist.append((None,
+				  (int(toc[i:i+2]),
+				   int(toc[i+2:i+4]))))
+	else:
+		tracklist = toc
+	ntracks = len(tracklist)
+	hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
+	if ntracks <= _DB_ID_NTRACKS:
+		nidtracks = ntracks
+	else:
+		nidtracks = _DB_ID_NTRACKS - 1
+		min = 0
+		sec = 0
+		for track in tracklist:
+			start, length = track
+			min = min + length[0]
+			sec = sec + length[1]
+		min = min + sec / 60
+		sec = sec % 60
+		hash = hash + _dbid(min) + _dbid(sec)
+	for i in range(nidtracks):
+		start, length = tracklist[i]
+		hash = hash + _dbid(length[0]) + _dbid(length[1])
+	return hash
+	
+class Cddb:
+	def __init__(self, tracklist):
+		if os.environ.has_key('CDDB_PATH'):
+			path = os.environ['CDDB_PATH']
+			cddb_path = path.split(',')
+		else:
+			home = os.environ['HOME']
+			cddb_path = [home + '/' + _cddbrc]
+
+		self._get_id(tracklist)
+
+		for dir in cddb_path:
+			file = dir + '/' + self.id + '.rdb'
+			try:
+				f = open(file, 'r')
+				self.file = file
+				break
+			except IOError:
+				pass
+		ntracks = int(self.id[:2], 16)
+		self.artist = ''
+		self.title = ''
+		self.track = [None] + [''] * ntracks
+		self.trackartist = [None] + [''] * ntracks
+		self.notes = []
+		if not hasattr(self, 'file'):
+			return
+		import re
+		reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
+		while 1:
+			line = f.readline()
+			if not line:
+				break
+			match = reg.match(line)
+			if not match:
+				print 'syntax error in ' + file
+				continue
+			name1, name2, value = match.group(1, 2, 3)
+			if name1 == 'album':
+				if name2 == 'artist':
+					self.artist = value
+				elif name2 == 'title':
+					self.title = value
+				elif name2 == 'toc':
+					if not self.toc:
+						self.toc = value
+					if self.toc != value:
+						print 'toc\'s don\'t match'
+				elif name2 == 'notes':
+					self.notes.append(value)
+			elif name1[:5] == 'track':
+				try:
+					trackno = int(name1[5:])
+				except ValueError:
+					print 'syntax error in ' + file
+					continue
+				if trackno > ntracks:
+					print 'track number ' + `trackno` + \
+						  ' in file ' + file + \
+						  ' out of range'
+					continue
+				if name2 == 'title':
+					self.track[trackno] = value
+				elif name2 == 'artist':
+					self.trackartist[trackno] = value
+		f.close()
+		for i in range(2, len(self.track)):
+			track = self.track[i]
+			# if track title starts with `,', use initial part
+			# of previous track's title
+			if track and track[0] == ',':
+				try:
+					off = self.track[i - 1].index(',')
+				except ValueError:
+					pass
+				else:
+					self.track[i] = self.track[i-1][:off] \
+							+ track
+
+	def _get_id(self, tracklist):
+		# fill in self.id and self.toc.
+		# if the argument is a string ending in .rdb, the part
+		# upto the suffix is taken as the id.
+		if type(tracklist) == type(''):
+			if tracklist[-4:] == '.rdb':
+				self.id = tracklist[:-4]
+				self.toc = ''
+				return
+			t = []
+			for i in range(2, len(tracklist), 4):
+				t.append((None, \
+					  (int(tracklist[i:i+2]), \
+					   int(tracklist[i+2:i+4]))))
+			tracklist = t
+		ntracks = len(tracklist)
+		self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
+		if ntracks <= _DB_ID_NTRACKS:
+			nidtracks = ntracks
+		else:
+			nidtracks = _DB_ID_NTRACKS - 1
+			min = 0
+			sec = 0
+			for track in tracklist:
+				start, length = track
+				min = min + length[0]
+				sec = sec + length[1]
+			min = min + sec / 60
+			sec = sec % 60
+			self.id = self.id + _dbid(min) + _dbid(sec)
+		for i in range(nidtracks):
+			start, length = tracklist[i]
+			self.id = self.id + _dbid(length[0]) + _dbid(length[1])
+		self.toc = string.zfill(ntracks, 2)
+		for track in tracklist:
+			start, length = track
+			self.toc = self.toc + string.zfill(length[0], 2) + \
+				  string.zfill(length[1], 2)
+
+	def write(self):
+		import posixpath
+		if os.environ.has_key('CDDB_WRITE_DIR'):
+			dir = os.environ['CDDB_WRITE_DIR']
+		else:
+			dir = os.environ['HOME'] + '/' + _cddbrc
+		file = dir + '/' + self.id + '.rdb'
+		if posixpath.exists(file):
+			# make backup copy
+			posix.rename(file, file + '~')
+		f = open(file, 'w')
+		f.write('album.title:\t' + self.title + '\n')
+		f.write('album.artist:\t' + self.artist + '\n')
+		f.write('album.toc:\t' + self.toc + '\n')
+		for note in self.notes:
+			f.write('album.notes:\t' + note + '\n')
+		prevpref = None
+		for i in range(1, len(self.track)):
+			if self.trackartist[i]:
+				f.write('track'+`i`+'.artist:\t'+self.trackartist[i]+'\n')
+			track = self.track[i]
+			try:
+				off = track.index(',')
+			except ValueError:
+				prevpref = None
+			else:
+				if prevpref and track[:off] == prevpref:
+					track = track[off:]
+				else:
+					prevpref = track[:off]
+			f.write('track' + `i` + '.title:\t' + track + '\n')
+		f.close()
diff --git a/lib-python/2.2/plat-irix5/cdplayer.py b/lib-python/2.2/plat-irix5/cdplayer.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/cdplayer.py
@@ -0,0 +1,89 @@
+# This file implements a class which forms an interface to the .cdplayerrc
+# file that is maintained by SGI's cdplayer program.
+#
+# Usage is as follows:
+#
+# import readcd
+# r = readcd.Readcd()
+# c = Cdplayer(r.gettrackinfo())
+#
+# Now you can use c.artist, c.title and c.track[trackno] (where trackno
+# starts at 1).  When the CD is not recognized, all values will be the empty
+# string.
+# It is also possible to set the above mentioned variables to new values.
+# You can then use c.write() to write out the changed values to the
+# .cdplayerrc file.
+
+cdplayerrc = '.cdplayerrc'
+
+class Cdplayer:
+	def __init__(self, tracklist):
+		import string
+		self.artist = ''
+		self.title = ''
+		if type(tracklist) == type(''):
+			t = []
+			for i in range(2, len(tracklist), 4):
+				t.append((None, \
+					  (string.atoi(tracklist[i:i+2]), \
+					   string.atoi(tracklist[i+2:i+4]))))
+			tracklist = t
+		self.track = [None] + [''] * len(tracklist)
+		self.id = 'd' + string.zfill(len(tracklist), 2)
+		for track in tracklist:
+			start, length = track
+			self.id = self.id + string.zfill(length[0], 2) + \
+				  string.zfill(length[1], 2)
+		try:
+			import posix
+			f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
+		except IOError:
+			return
+		import re
+		reg = re.compile(r'^([^:]*):\t(.*)')
+		s = self.id + '.'
+		l = len(s)
+		while 1:
+			line = f.readline()
+			if line == '':
+				break
+			if line[:l] == s:
+				line = line[l:]
+				match = reg.match(line)
+				if not match:
+					print 'syntax error in ~/' + cdplayerrc
+					continue
+				name, value = match.group(1, 2)
+				if name == 'title':
+					self.title = value
+				elif name == 'artist':
+					self.artist = value
+				elif name[:5] == 'track':
+					trackno = string.atoi(name[6:])
+					self.track[trackno] = value
+		f.close()
+
+	def write(self):
+		import posix
+		filename = posix.environ['HOME'] + '/' + cdplayerrc
+		try:
+			old = open(filename, 'r')
+		except IOError:
+			old = open('/dev/null', 'r')
+		new = open(filename + '.new', 'w')
+		s = self.id + '.'
+		l = len(s)
+		while 1:
+			line = old.readline()
+			if line == '':
+				break
+			if line[:l] != s:
+				new.write(line)
+		new.write(self.id + '.title:\t' + self.title + '\n')
+		new.write(self.id + '.artist:\t' + self.artist + '\n')
+		for i in range(1, len(self.track)):
+			new.write(self.id + '.track.' + `i` + ':\t' + \
+				  self.track[i] + '\n')
+		old.close()
+		new.close()
+		posix.rename(filename + '.new', filename)
diff --git a/lib-python/2.2/plat-irix5/flp.doc b/lib-python/2.2/plat-irix5/flp.doc
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/flp.doc
@@ -0,0 +1,117 @@
+.SH
+Module flp
+.LP
+The flp module loads fl-forms from fd files, as generated
+by fdesign. The module is designed to be flexible enough to allow
+almost anything to be done with the loaded form.
+.LP
+Loadform defines 
+two types of functions: functions to parse fd files and functions to
+create the forms from the templates returned by the parse functions.
+There are fairly low-level create functions that create single objects,
+and convenience routines that create complete forms, including callbacks,
+etc.
+.LP
+The exception flp.error is raised whenever an error occurs while parsing a forms
+definition file or creating a form.
+.SH 2
+Parsing functions
+.LP
+There are two parsing functions, parse_form() and parse_forms(). They
+take the following form:
+.LP
+.ft C
+ftuple = parse_form(filename, formname)
+.br
+ftdict = parse_forms(filename)
+.IP
+Parse_form parses a single form, and returns a tuple (ftmp, otmplist).
+Ftmp is a template for a form, otmplist is a list of templates for
+objects. See below for a description of these templates.
+.IP
+Parse_forms parses all forms in an fd file. It returns a dictionary of
+(ftmp, otmplist) tuples, indexed by formname.
+.IP
+Filename is the name of the forms definition file to inspect. The functions
+appends '.fd' if needed, and use 'sys.path' to locate the file.
+.IP
+formname is the name of the form to load. This argument is mandatory,
+even if the file only contains one form.
+.LP
+The form template and object template are structures that contain all
+the information read from the fd file, in 'natural' form. A form
+template record contains the following fields:
+.IP
+.nf
+"Name", the name of the form;
+"Width", the width of the form;
+"Height", the height of the form; and
+"Numberofobjects", the number of objects in the form.
+.LP
+An object template contains the following fields:
+.IP
+.nf
+"Class", the class of object (eg. FL.BUTTON);
+"Type", the sub-class (eg. FL.NORMALBUTTON);
+"Box", a list with four members: [x, y, width, height];
+"Boxtype", the type of box (eg. FL.DOWNBOX);
+"Colors", a list with the two object colors;
+"Alignment", the label alignment (eg. FL.ALIGNLEFT); 
+"Style", the label style (eg. FL.BOLDSTYLE);
+"Lcol", the label color;
+"Label", a string containing the label;
+"Name", a string containing the name of the object;
+"Callback", a string containing the callback routine name; and
+"Argument", a string containing the callback routine extra argument.
+.SH
+Low-level create routines.
+.LP
+The three low-level creation routines are called as follows:
+.LP
+.ft C
+form = create_form(form_template)
+.IP
+Create an fl form from a form template. Returns the form created.
+.LP
+.ft C
+obj = create_object(form, obj_template)
+.IP
+Create an object in an fl form. Return the new object.
+An error is raised if the object has a callback routine.
+.SH
+High-level create routines.
+.LP
+The 'standard' way to handle forms in python is to define a class
+that contains the form and all the objects (insofar as they are named),
+and that defines all the callback functions, and use an instance of
+this class to handle the form interaction.
+Flp contains three routines that simplify handling this paradigm:
+.LP
+.ft C
+create_full_form(instance, ftuple)
+.IP
+This routine takes an instance of your form-handling class and an
+ftuple (as returned by the parsing routines) as parameters. It inserts
+the form into the instance, defines all object names and arranges that
+the callback methods are called. All the names inserted into the
+instance are the same as the names used for the objects, etc. in the
+fd file.
+.LP
+.ft C
+merge_full_form(instance, form, ftuple)
+.IP
+This function does the same as create_full_form, only it does not create
+the form itself nor the 'background box' that fdesign automatically
+adds to each form. This is useful if your class inherits a superclass
+that already defines a skeleton form (with 'OK' and 'Cancel' buttons,
+for instance), and you want to merge the new form into that existing
+form. The 'form' parameter is the form to which the new objects are
+added.
+.LP
+If you use the paradigm sketched here but need slightly more control
+over object creation there is a routine that creates a single object
+and inserts its name (and arranges for the callback routine to be
+called):
+.LP
+.ft C
+create_object_instance(instance, form, obj_template)
diff --git a/lib-python/2.2/plat-irix5/flp.py b/lib-python/2.2/plat-irix5/flp.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/flp.py
@@ -0,0 +1,451 @@
+#
+# flp - Module to load fl forms from fd files
+#
+# Jack Jansen, December 1991
+#
+import string
+import os
+import sys
+import FL
+
+SPLITLINE = '--------------------'
+FORMLINE = '=============== FORM ==============='
+ENDLINE = '=============================='
+
+class error(Exception):
+    pass
+
+##################################################################
+#    Part 1 - The parsing routines                               #
+##################################################################
+
+#
+# Externally visible function. Load form.
+#
+def parse_form(filename, formname):
+    forms = checkcache(filename)
+    if forms is None:
+        forms = parse_forms(filename)
+    if forms.has_key(formname):
+        return forms[formname]
+    else:
+        raise error, 'No such form in fd file'
+
+#
+# Externally visible function. Load all forms.
+#
+def parse_forms(filename):
+    forms = checkcache(filename)
+    if forms is not None: return forms
+    fp = _open_formfile(filename)
+    nforms = _parse_fd_header(fp)
+    forms = {}
+    for i in range(nforms):
+        form = _parse_fd_form(fp, None)
+        forms[form[0].Name] = form
+    writecache(filename, forms)
+    return forms
+
+#
+# Internal: see if a cached version of the file exists
+#
+MAGIC = '.fdc'
+_internal_cache = {}                    # Used by frozen scripts only
+def checkcache(filename):
+    if _internal_cache.has_key(filename):
+        altforms = _internal_cache[filename]
+        return _unpack_cache(altforms)
+    import marshal
+    fp, filename = _open_formfile2(filename)
+    fp.close()
+    cachename = filename + 'c'
+    try:
+        fp = open(cachename, 'r')
+    except IOError:
+        #print 'flp: no cache file', cachename
+        return None
+    try:
+        if fp.read(4) != MAGIC:
+            print 'flp: bad magic word in cache file', cachename
+            return None
+        cache_mtime = rdlong(fp)
+        file_mtime = getmtime(filename)
+        if cache_mtime != file_mtime:
+            #print 'flp: outdated cache file', cachename
+            return None
+        #print 'flp: valid cache file', cachename
+        altforms = marshal.load(fp)
+        return _unpack_cache(altforms)
+    finally:
+        fp.close()
+
+def _unpack_cache(altforms):
+        forms = {}
+        for name in altforms.keys():
+            altobj, altlist = altforms[name]
+            obj = _newobj()
+            obj.make(altobj)
+            list = []
+            for altobj in altlist:
+                nobj = _newobj()
+                nobj.make(altobj)
+                list.append(nobj)
+            forms[name] = obj, list
+        return forms
+
+def rdlong(fp):
+    s = fp.read(4)
+    if len(s) != 4: return None
+    a, b, c, d = s[0], s[1], s[2], s[3]
+    return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
+
+def wrlong(fp, x):
+    a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
+    fp.write(chr(a) + chr(b) + chr(c) + chr(d))
+
+def getmtime(filename):
+    import os
+    from stat import ST_MTIME
+    try:
+        return os.stat(filename)[ST_MTIME]
+    except os.error:
+        return None
+
+#
+# Internal: write cached version of the form (parsing is too slow!)
+#
+def writecache(filename, forms):
+    import marshal
+    fp, filename = _open_formfile2(filename)
+    fp.close()
+    cachename = filename + 'c'
+    try:
+        fp = open(cachename, 'w')
+    except IOError:
+        print 'flp: can\'t create cache file', cachename
+        return # Never mind
+    fp.write('\0\0\0\0') # Seek back and write MAGIC when done
+    wrlong(fp, getmtime(filename))
+    altforms = _pack_cache(forms)
+    marshal.dump(altforms, fp)
+    fp.seek(0)
+    fp.write(MAGIC)
+    fp.close()
+    #print 'flp: wrote cache file', cachename
+
+#
+# External: print some statements that set up the internal cache.
+# This is for use with the "freeze" script.  You should call
+# flp.freeze(filename) for all forms used by the script, and collect
+# the output on a file in a module file named "frozenforms.py".  Then
+# in the main program of the script import frozenforms.
+# (Don't forget to take this out when using the unfrozen version of
+# the script!)
+#
+def freeze(filename):
+    forms = parse_forms(filename)
+    altforms = _pack_cache(forms)
+    print 'import flp'
+    print 'flp._internal_cache[', `filename`, '] =', altforms
+
+#
+# Internal: create the data structure to be placed in the cache
+#
+def _pack_cache(forms):
+    altforms = {}
+    for name in forms.keys():
+        obj, list = forms[name]
+        altobj = obj.__dict__
+        altlist = []
+        for obj in list: altlist.append(obj.__dict__)
+        altforms[name] = altobj, altlist
+    return altforms
+
+#
+# Internal: Locate form file (using PYTHONPATH) and open file
+#
+def _open_formfile(filename):
+    return _open_formfile2(filename)[0]
+
+def _open_formfile2(filename):
+    if filename[-3:] != '.fd':
+        filename = filename + '.fd'
+    if filename[0] == '/':
+        try:
+            fp = open(filename,'r')
+        except IOError:
+            fp = None
+    else:
+        for pc in sys.path:
+            pn = os.path.join(pc, filename)
+            try:
+                fp = open(pn, 'r')
+                filename = pn
+                break
+            except IOError:
+                fp = None
+    if fp is None:
+        raise error, 'Cannot find forms file ' + filename
+    return fp, filename
+
+#
+# Internal: parse the fd file header, return number of forms
+#
+def _parse_fd_header(file):
+    # First read the magic header line
+    datum = _parse_1_line(file)
+    if datum != ('Magic', 12321):
+        raise error, 'Not a forms definition file'
+    # Now skip until we know number of forms
+    while 1:
+        datum = _parse_1_line(file)
+        if type(datum) == type(()) and datum[0] == 'Numberofforms':
+            break
+    return datum[1]
+#
+# Internal: parse fd form, or skip if name doesn't match.
+# the special value None means 'always parse it'.
+#
+def _parse_fd_form(file, name):
+    """Parse one form from the file.
+
+    Returns (form, [objects]) when the form's name matches (name=None means
+    'always parse'); otherwise skips the form's objects and returns None.
+    """
+    datum = _parse_1_line(file)
+    if datum != FORMLINE:
+        raise error, 'Missing === FORM === line'
+    form = _parse_object(file)
+    if form.Name == name or name is None:
+        objs = []
+        for j in range(form.Numberofobjects):
+            obj = _parse_object(file)
+            objs.append(obj)
+        return (form, objs)
+    else:
+        for j in range(form.Numberofobjects):
+            _skip_object(file)
+    return None
+
+#
+# Internal class: a convenient place to store object info fields
+#
+class _newobj:
+    """Attribute bag: fields parsed from the .fd file become attributes."""
+    def add(self, name, value):
+        # Store one parsed field as an attribute.
+        self.__dict__[name] = value
+    def make(self, dict):
+        # Bulk-load attributes from a plain dictionary (used when unpacking a cache).
+        for name in dict.keys():
+            self.add(name, dict[name])
+
+#
+# Internal parsing routines.
+#
+def _parse_string(str):
+    """Decode backslash escapes in a field value; return it unchanged on failure."""
+    if '\\' in str:
+        s = '\'' + str + '\''
+        try:
+            # eval of the quoted literal interprets the escape sequences.
+            return eval(s)
+        except:
+            pass
+    return str
+
+def _parse_num(str):
+    """Parse a numeric field value via eval."""
+    return eval(str)
+
+def _parse_numlist(str):
+    """Parse a whitespace-separated list of numbers into a Python list."""
+    slist = string.split(str)
+    nlist = []
+    for i in slist:
+        nlist.append(_parse_num(i))
+    return nlist
+
+# This dictionary maps item names to parsing routines.
+# If no routine is given '_parse_num' is default.
+# NOTE(review): 'Name' appears twice below; the duplicate dictionary key is
+# harmless (the later entry wins) but redundant.
+_parse_func = { \
+        'Name':         _parse_string, \
+        'Box':          _parse_numlist, \
+        'Colors':       _parse_numlist, \
+        'Label':        _parse_string, \
+        'Name':         _parse_string, \
+        'Callback':     _parse_string, \
+        'Argument':     _parse_string }
+
+# This function parses a line, and returns either
+# a string or a tuple (name,value)
+
+import re
+# Matches "name: value"; group 1 is the attribute name, group 2 the value.
+prog = re.compile('^([^:]*): *(.*)')
+
+def _parse_line(line):
+    """Parse one line; return (Name, value) for "name: value" lines, else the line itself."""
+    match = prog.match(line)
+    if not match:
+        return line
+    name, value = match.group(1, 2)
+    if name[0] == 'N':
+            # Normalize multi-word names starting with 'N' (e.g. "Number of
+            # objects") by joining the words and lowercasing first.
+            name = string.join(string.split(name),'')
+            name = string.lower(name)
+    name = string.capitalize(name)
+    try:
+        pf = _parse_func[name]
+    except KeyError:
+        pf = _parse_num
+    value = pf(value)
+    return (name, value)
+
+def _readline(file):
+    """Read one line, stripping the trailing newline; raise EOFError at end of file."""
+    line = file.readline()
+    if not line:
+        raise EOFError
+    return line[:-1]
+        
+def _parse_1_line(file):
+    """Return the parsed form of the next non-blank line."""
+    line = _readline(file)
+    while line == '':
+        line = _readline(file)
+    return _parse_line(line)
+
+def _skip_object(file):
+    """Skip lines up to the next separator; push back a FORM line for the caller."""
+    line = ''
+    while not line in (SPLITLINE, FORMLINE, ENDLINE):
+        pos = file.tell()
+        line = _readline(file)
+    if line == FORMLINE:
+        file.seek(pos)
+
+def _parse_object(file):
+    """Parse one object's fields into a _newobj, stopping at a separator line
+    (a FORM line is pushed back so the caller can re-read it)."""
+    obj = _newobj()
+    while 1:
+        pos = file.tell()
+        datum = _parse_1_line(file)
+        if datum in (SPLITLINE, FORMLINE, ENDLINE):
+            if datum == FORMLINE:
+                file.seek(pos)
+            return obj
+        if type(datum) is not type(()) or len(datum) != 2:
+            raise error, 'Parse error, illegal line in object: '+datum
+        obj.add(datum[0], datum[1])
+
+#################################################################
+#   Part 2 - High-level object/form creation routines            #
+#################################################################
+
+#
+# External - Create a form an link to an instance variable.
+#
+def create_full_form(inst, (fdata, odatalist)):
+    """Create the form plus all its objects and bind them as inst attributes."""
+    form = create_form(fdata)
+    # Dynamically bind the form under its .fd name on the instance.
+    exec 'inst.'+fdata.Name+' = form\n'
+    for odata in odatalist:
+        create_object_instance(inst, form, odata)
+
+#
+# External - Merge a form into an existing form in an instance
+# variable.
+#
+def merge_full_form(inst, form, (fdata, odatalist)):
+    """Add the objects of a parsed form to an existing form; the leading
+    FL.BOX object (the background box) is skipped."""
+    exec 'inst.'+fdata.Name+' = form\n'
+    if odatalist[0].Class != FL.BOX:
+        raise error, 'merge_full_form() expects FL.BOX as first obj'
+    for odata in odatalist[1:]:
+        create_object_instance(inst, form, odata)
+
+
+#################################################################
+#   Part 3 - Low-level object/form creation routines            #
+#################################################################
+
+#
+# External Create_form - Create form from parameters
+#
+def create_form(fdata):
+    """Create an empty FORMS form of the size given in the form data."""
+    import fl
+    return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
+
+#
+# External create_object - Create an object. Make sure there are
+# no callbacks. Returns the object created.
+#
+def create_object(form, odata):
+    """Create a free-standing object; callbacks are not allowed here."""
+    obj = _create_object(form, odata)
+    if odata.Callback:
+        raise error, 'Creating free object with callback'
+    return obj
+#
+# External create_object_instance - Create object in an instance.
+#
+def create_object_instance(inst, form, odata):
+    """Create an object, wiring its callback method and name to inst attributes."""
+    obj = _create_object(form, odata)
+    if odata.Callback:
+        # Callback is the name of a method on the instance.
+        cbfunc = eval('inst.'+odata.Callback)
+        obj.set_call_back(cbfunc, odata.Argument)
+    if odata.Name:
+        exec 'inst.' + odata.Name + ' = obj\n'
+#
+# Internal _create_object: Create the object and fill options
+#
+def _create_object(form, odata):
+    """Create one object on the form and copy its display options over."""
+    crfunc = _select_crfunc(form, odata.Class)
+    obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
+            odata.Box[3], odata.Label)
+    # Group markers carry no display attributes.
+    if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
+        obj.boxtype = odata.Boxtype
+        obj.col1 = odata.Colors[0]
+        obj.col2 = odata.Colors[1]
+        obj.align = odata.Alignment
+        obj.lstyle = odata.Style
+        obj.lsize = odata.Size
+        obj.lcol = odata.Lcol
+    return obj
+#
+# Internal crfunc: helper function that returns correct create function
+#
+def _select_crfunc(fm, cl):
+    """Map an FL object class constant to the form's add_* creation method."""
+    if cl == FL.BEGIN_GROUP: return fm.bgn_group
+    elif cl == FL.END_GROUP: return fm.end_group
+    elif cl == FL.BITMAP: return fm.add_bitmap
+    elif cl == FL.BOX: return fm.add_box
+    elif cl == FL.BROWSER: return fm.add_browser
+    elif cl == FL.BUTTON: return fm.add_button
+    elif cl == FL.CHART: return fm.add_chart
+    elif cl == FL.CHOICE: return fm.add_choice
+    elif cl == FL.CLOCK: return fm.add_clock
+    elif cl == FL.COUNTER: return fm.add_counter
+    elif cl == FL.DIAL: return fm.add_dial
+    elif cl == FL.FREE: return fm.add_free
+    elif cl == FL.INPUT: return fm.add_input
+    elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
+    elif cl == FL.MENU: return fm.add_menu
+    elif cl == FL.POSITIONER: return fm.add_positioner
+    elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
+    elif cl == FL.SLIDER: return fm.add_slider
+    elif cl == FL.VALSLIDER: return fm.add_valslider
+    elif cl == FL.TEXT: return fm.add_text
+    elif cl == FL.TIMER: return fm.add_timer
+    else:
+        raise error, 'Unknown object type: ' + `cl`
+
+
+def test():
+    """Command-line test: parse a .fd file (optionally one named form) and dump it."""
+    import time
+    t0 = time.time()
+    if len(sys.argv) == 2:
+        forms = parse_forms(sys.argv[1])
+        t1 = time.time()
+        # NOTE(review): time.time() differences are already seconds; the
+        # 0.001 factor looks wrong here (the branch below prints t1-t0
+        # directly) -- confirm intent before relying on this output.
+        print 'parse time:', 0.001*(t1-t0), 'sec.'
+        keys = forms.keys()
+        keys.sort()
+        for i in keys:
+            _printform(forms[i])
+    elif len(sys.argv) == 3:
+        form = parse_form(sys.argv[1], sys.argv[2])
+        t1 = time.time()
+        print 'parse time:', round(t1-t0, 3), 'sec.'
+        _printform(form)
+    else:
+        print 'Usage: test fdfile [form]'
+
+def _printform(form):
+    """Pretty-print a (form, objects) pair as produced by the parser."""
+    f = form[0]
+    objs = form[1]
+    print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
+    for i in objs:
+        print '  Obj ', i.Name, ' type ', i.Class, i.Type
+        print '    Box ', i.Box, ' btype ', i.Boxtype
+        print '    Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
+        print '    cols ', i.Colors
+        print '    cback ', i.Callback, i.Argument
diff --git a/lib-python/2.2/plat-irix5/jpeg.py b/lib-python/2.2/plat-irix5/jpeg.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/jpeg.py
@@ -0,0 +1,111 @@
+# Implement 'jpeg' interface using SGI's compression library
+
+# XXX Options 'smooth' and 'optimize' are ignored.
+
+# XXX It appears that compressing grayscale images doesn't work right;
+# XXX the resulting file causes weirdness.
+
+class error(Exception):
+	"""Exception raised for jpeg module errors."""
+	pass
+
+# Compression options; setoption() only accepts numeric values for these keys.
+options = {'quality': 75, 'optimize': 0, 'smooth': 0, 'forcegray': 0}
+
+# Cached (de)compressor handles, opened lazily on first use.
+comp = None
+decomp = None
+
+def compress(imgdata, width, height, bytesperpixel):
+	"""Compress raw image data (grayscale if 1 byte/pixel, RGBX if 4) to JPEG."""
+	global comp
+	import cl
+	# Lazily open (and cache) the compressor.
+	if comp is None: comp = cl.OpenCompressor(cl.JPEG)
+	if bytesperpixel == 1:
+		format = cl.GRAYSCALE
+	elif bytesperpixel == 4:
+		format = cl.RGBX
+	# NOTE(review): any other bytesperpixel value leaves 'format' unbound
+	# and raises NameError when building params below.
+	if options['forcegray']:
+		iformat = cl.GRAYSCALE
+	else:
+		iformat = cl.YUV
+	# XXX How to support 'optimize'?
+	params = [cl.IMAGE_WIDTH, width, cl.IMAGE_HEIGHT, height, \
+		  cl.ORIGINAL_FORMAT, format, \
+		  cl.ORIENTATION, cl.BOTTOM_UP, \
+		  cl.QUALITY_FACTOR, options['quality'], \
+		  cl.INTERNAL_FORMAT, iformat, \
+		 ]
+	comp.SetParams(params)
+	jpegdata = comp.Compress(1, imgdata)
+	return jpegdata
+
+def decompress(jpegdata):
+	"""Decompress JPEG data; return (imgdata, width, height, bytesperpixel)."""
+	global decomp
+	import cl
+	if decomp is None: decomp = cl.OpenDecompressor(cl.JPEG)
+	headersize = decomp.ReadHeader(jpegdata)
+	# Query image geometry and internal format from the JPEG header.
+	params = [cl.IMAGE_WIDTH, 0, cl.IMAGE_HEIGHT, 0, cl.INTERNAL_FORMAT, 0]
+	decomp.GetParams(params)
+	width, height, format = params[1], params[3], params[5]
+	if format == cl.GRAYSCALE or options['forcegray']:
+		format = cl.GRAYSCALE
+		bytesperpixel = 1
+	else:
+		format = cl.RGBX
+		bytesperpixel = 4
+	# XXX How to support 'smooth'?
+	params = [cl.ORIGINAL_FORMAT, format, \
+		  cl.ORIENTATION, cl.BOTTOM_UP, \
+		  cl.FRAME_BUFFER_SIZE, width*height*bytesperpixel]
+	decomp.SetParams(params)
+	imgdata = decomp.Decompress(1, jpegdata)
+	return imgdata, width, height, bytesperpixel
+
+def setoption(name, value):
+	"""Set a numeric option; 'forcegrey' is accepted as an alias for 'forcegray'."""
+	if type(value) is not type(0):
+		raise TypeError, 'jpeg.setoption: numeric options only'
+	if name == 'forcegrey':
+		name = 'forcegray'
+	if not options.has_key(name):
+		raise KeyError, 'jpeg.setoption: unknown option name'
+	options[name] = int(value)
+
+def test():
+	"""Display the JPEG files named on the command line (-g forces grayscale)."""
+	import sys
+	if sys.argv[1:2] == ['-g']:
+		del sys.argv[1]
+		setoption('forcegray', 1)
+	if not sys.argv[1:]:
+		sys.argv.append('/usr/local/images/data/jpg/asterix.jpg')
+	for file in sys.argv[1:]:
+		show(file)
+
+def show(file):
+	"""Decompress and display one JPEG file in a GL window, then round-trip
+	the image through compress() into /tmp/j.jpg."""
+	import gl, GL, DEVICE
+	jpegdata = open(file, 'r').read()
+	imgdata, width, height, bytesperpixel = decompress(jpegdata)
+	gl.foreground()
+	gl.prefsize(width, height)
+	win = gl.winopen(file)
+	if bytesperpixel == 1:
+		# Grayscale: color-index mode with an identity gray ramp.
+		gl.cmode()
+		gl.pixmode(GL.PM_SIZE, 8)
+		gl.gconfig()
+		for i in range(256):
+			gl.mapcolor(i, i, i, i)
+	else:
+		gl.RGBmode()
+		gl.pixmode(GL.PM_SIZE, 32)
+		gl.gconfig()
+	gl.qdevice(DEVICE.REDRAW)
+	gl.qdevice(DEVICE.ESCKEY)
+	gl.qdevice(DEVICE.WINQUIT)
+	gl.qdevice(DEVICE.WINSHUT)
+	gl.lrectwrite(0, 0, width-1, height-1, imgdata)
+	# Event loop: redraw on demand, quit on ESC or window close.
+	while 1:
+		dev, val = gl.qread()
+		if dev in (DEVICE.ESCKEY, DEVICE.WINSHUT, DEVICE.WINQUIT):
+			break
+		if dev == DEVICE.REDRAW:
+			gl.lrectwrite(0, 0, width-1, height-1, imgdata)
+	gl.winclose(win)
+	# Now test the compression and write the result to a fixed filename
+	newjpegdata = compress(imgdata, width, height, bytesperpixel)
+	open('/tmp/j.jpg', 'w').write(newjpegdata)
diff --git a/lib-python/2.2/plat-irix5/panel.py b/lib-python/2.2/plat-irix5/panel.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/panel.py
@@ -0,0 +1,281 @@
+# Module 'panel'
+#
+# Support for the Panel library.
+# Uses built-in module 'pnl'.
+# Applications should use 'panel.function' instead of 'pnl.function';
+# most 'pnl' functions are transparently exported by 'panel',
+# but dopanel() is overridden and you have to use this version
+# if you want to use callbacks.
+
+
+import pnl
+
+
+debug = 0
+
+
+# Test if an object is a list.
+#
+def is_list(x):
+	"""Return true iff x is a Python list."""
+	return type(x) == type([])
+
+
+# Reverse a list.
+#
+def reverse(list):
+	"""Return a reversed copy of the list (the argument is not modified)."""
+	res = []
+	for item in list:
+		res.insert(0, item)
+	return res
+
+
+# Get an attribute of a list, which may itself be another list.
+# Don't use 'prop' for name.
+#
+def getattrlist(list, name):
+	"""Return the tail of the first sublist whose head equals name, else []."""
+	for item in list:
+		if item and is_list(item) and item[0] == name:
+			return item[1:]
+	return []
+
+
+# Get a property of a list, which may itself be another list.
+#
+def getproplist(list, name):
+	"""Return the value part of the first ('prop', name, ...) sublist, else []."""
+	for item in list:
+		if item and is_list(item) and item[0] == 'prop':
+			if len(item) > 1 and item[1] == name:
+				return item[2:]
+	return []
+
+
+# Test if an actuator description contains the property 'end-of-group'
+#
+def is_endgroup(list):
+	"""True iff the actuator description carries the 'end-of-group' property."""
+	x = getproplist(list, 'end-of-group')
+	return (x and x[0] == '#t')
+
+
+# Neatly display an actuator definition given as S-expression
+# the prefix string is printed before each line.
+#
+def show_actuator(prefix, a):
+	"""Neatly print an actuator S-expression, one line per item, indenting
+	subactuator lists by four extra spaces."""
+	for item in a:
+		if not is_list(item):
+			print prefix, item
+		elif item and item[0] == 'al':
+			print prefix, 'Subactuator list:'
+			for a in item[1:]:
+				show_actuator(prefix + '    ', a)
+		elif len(item) == 2:
+			print prefix, item[0], '=>', item[1]
+		elif len(item) == 3 and item[0] == 'prop':
+			print prefix, 'Prop', item[1], '=>',
+			print item[2]
+		else:
+			print prefix, '?', item
+
+
+# Neatly display a panel.
+#
+def show_panel(prefix, p):
+	"""Neatly print a panel S-expression; actuator lists are shown via
+	show_actuator with extra indentation."""
+	for item in p:
+		if not is_list(item):
+			print prefix, item
+		elif item and item[0] == 'al':
+			print prefix, 'Actuator list:'
+			for a in item[1:]:
+				show_actuator(prefix + '    ', a)
+		elif len(item) == 2:
+			print prefix, item[0], '=>', item[1]
+		elif len(item) == 3 and item[0] == 'prop':
+			print prefix, 'Prop', item[1], '=>',
+			print item[2]
+		else:
+			print prefix, '?', item
+
+
+# Exception raised by build_actuator or build_panel.
+#
+panel_error = 'panel error'
+
+
+# Dummy callback used to initialize the callbacks.
+#
+def dummy_callback(arg):
+	"""Placeholder callback: does nothing."""
+	pass
+
+
+# Assign attributes to members of the target.
+# Attribute names in exclist are ignored.
+# The member name is the attribute name prefixed with the prefix.
+#
+def assign_members(target, attrlist, exclist, prefix):
+	"""Assign (name, value) pairs from attrlist onto target as prefix+name,
+	skipping names in exclist.  Values are decoded from their S-expression
+	string form: numbers via eval, quoted strings unquoted, anything else
+	reported and skipped."""
+	for item in attrlist:
+		if is_list(item) and len(item) == 2 and item[0] not in exclist:
+			name, value = item[0], item[1]
+			ok = 1
+			if value[0] in '-0123456789':
+				value = eval(value)
+			elif value[0] == '"':
+				value = value[1:-1]
+			elif value == 'move-then-resize':
+				# Strange default set by Panel Editor...
+				ok = 0
+			else:
+				print 'unknown value', value, 'for', name
+				ok = 0
+			if ok:
+				lhs = 'target.' + prefix + name
+				stmt = lhs + '=' + `value`
+				if debug: print 'exec', stmt
+				try:
+					exec stmt + '\n'
+				except KeyboardInterrupt: # Don't catch this!
+					raise KeyboardInterrupt
+				except:
+					print 'assign failed:', stmt
+
+
+# Build a real actuator from an actuator description.
+# Return a pair (actuator, name).
+#
+def build_actuator(descr):
+	"""Build a real actuator from its description; return (actuator, name)."""
+	namelist = getattrlist(descr, 'name')
+	if namelist:
+		# Assume it is a string
+		actuatorname = namelist[0][1:-1]
+	else:
+		actuatorname = ''
+	type = descr[0]
+	if type[:4] == 'pnl_': type = type[4:]
+	act = pnl.mkact(type)
+	act.downfunc = act.activefunc = act.upfunc = dummy_callback
+	#
+	assign_members(act, descr[1:], ['al', 'data', 'name'], '')
+	#
+	# Treat actuator-specific data
+	#
+	datalist = getattrlist(descr, 'data')
+	prefix = ''
+	if type[-4:] == 'puck':
+		prefix = 'puck_'
+	elif type == 'mouse':
+		prefix = 'mouse_'
+	assign_members(act, datalist, [], prefix)
+	#
+	return act, actuatorname
+
+
+# Build all sub-actuators and add them to the super-actuator.
+# The super-actuator must already have been added to the panel.
+# Sub-actuators with defined names are added as members to the panel
+# so they can be referenced as p.name.
+#
+# Note: I have no idea how panel.endgroup() works when applied
+# to a sub-actuator.
+#
+def build_subactuators(panel, super_act, al):
+	"""Build all sub-actuators of super_act; named ones become panel attributes."""
+	#
+	# This is nearly the same loop as below in build_panel(),
+	# except a call is made to addsubact() instead of addact().
+	#
+	for a in al:
+		act, name = build_actuator(a)
+		act.addsubact(super_act)
+		if name:
+			stmt = 'panel.' + name + ' = act'
+			if debug: print 'exec', stmt
+			exec stmt + '\n'
+		if is_endgroup(a):
+			panel.endgroup()
+		sub_al = getattrlist(a, 'al')
+		if sub_al:
+			build_subactuators(panel, act, sub_al)
+	#
+	# Fix the actuator to which whe just added subactuators.
+	# This can't hurt (I hope) and is needed for the scroll actuator.
+	#
+	super_act.fixact()
+
+
+# Build a real panel from a panel definition.
+# Return a panel object p, where for each named actuator a, p.name is a
+# reference to a.
+#
+def build_panel(descr):
+	"""Build a real panel from its S-expression description; named actuators
+	become attributes of the returned panel object."""
+	#
+	# Sanity check
+	#
+	if (not descr) or descr[0] != 'panel':
+		raise panel_error, 'panel description must start with "panel"'
+	#
+	if debug: show_panel('', descr)
+	#
+	# Create an empty panel
+	#
+	panel = pnl.mkpanel()
+	#
+	# Assign panel attributes
+	#
+	assign_members(panel, descr[1:], ['al'], '')
+	#
+	# Look for actuator list
+	#
+	al = getattrlist(descr, 'al')
+	#
+	# The order in which actuators are created is important
+	# because of the endgroup() operator.
+	# Unfortunately the Panel Editor outputs the actuator list
+	# in reverse order, so we reverse it here.
+	#
+	al = reverse(al)
+	#
+	for a in al:
+		act, name = build_actuator(a)
+		act.addact(panel)
+		if name:
+			stmt = 'panel.' + name + ' = act'
+			exec stmt + '\n'
+		if is_endgroup(a):
+			panel.endgroup()
+		sub_al = getattrlist(a, 'al')
+		if sub_al:
+			build_subactuators(panel, act, sub_al)
+	#
+	return panel
+
+
+# Wrapper around pnl.dopanel() which calls call-back functions.
+#
+def my_dopanel():
+	"""Wrapper around pnl.dopanel() that dispatches the down/active/up
+	callbacks of whichever actuators changed; return the first result."""
+	# Extract only the first 4 elements to allow for future expansion
+	a, down, active, up = pnl.dopanel()[:4]
+	if down:
+		down.downfunc(down)
+	if active:
+		active.activefunc(active)
+	if up:
+		up.upfunc(up)
+	return a
+
+
+# Create one or more panels from a description file (S-expressions)
+# generated by the Panel Editor.
+# 
+def defpanellist(file):
+	"""Build and return a list of panels from a Panel Editor S-expression file."""
+	import panelparser
+	descrlist = panelparser.parse_file(open(file, 'r'))
+	panellist = []
+	for descr in descrlist:
+		panellist.append(build_panel(descr))
+	return panellist
+
+
+# Import everything from built-in method pnl, so the user can always
+# use panel.foo() instead of pnl.foo().
+# This gives *no* performance penalty once this module is imported.
+#
+from pnl import *			# for export
+
+dopanel = my_dopanel			# override pnl.dopanel
diff --git a/lib-python/2.2/plat-irix5/panelparser.py b/lib-python/2.2/plat-irix5/panelparser.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/panelparser.py
@@ -0,0 +1,128 @@
+# Module 'parser'
+#
+# Parse S-expressions output by the Panel Editor
+# (which is written in Scheme so it can't help writing S-expressions).
+#
+# See notes at end of file.
+
+
+# Character classes used by the tokenizer.
+whitespace = ' \t\n'
+operators = '()\''
+# A separator ends an atom token.
+separators = operators + whitespace + ';' + '"'
+
+
+# Tokenize a string.
+# Return a list of tokens (strings).
+#
+def tokenize_string(s):
+	"""Split an S-expression source string into a list of string tokens."""
+	tokens = []
+	while s:
+		c = s[:1]
+		if c in whitespace:
+			s = s[1:]
+		elif c == ';':
+			# Comment: discard the rest of the line.
+			s = ''
+		elif c == '"':
+			# String literal: scan to the closing quote, honoring \ escapes.
+			n = len(s)
+			i = 1
+			while i < n:
+				c = s[i]
+				i = i+1
+				if c == '"': break
+				if c == '\\': i = i+1
+			tokens.append(s[:i])
+			s = s[i:]
+		elif c in operators:
+			tokens.append(c)
+			s = s[1:]
+		else:
+			# Atom: scan to the next separator.
+			n = len(s)
+			i = 1
+			while i < n:
+				if s[i] in separators: break
+				i = i+1
+			tokens.append(s[:i])
+			s = s[i:]
+	return tokens
+
+
+# Tokenize a whole file (given as file object, not as file name).
+# Return a list of tokens (strings).
+#
+def tokenize_file(fp):
+	"""Tokenize an entire file object; return the concatenated token list."""
+	tokens = []
+	while 1:
+		line = fp.readline()
+		if not line: break
+		tokens = tokens + tokenize_string(line)
+	return tokens
+
+
+# Exception raised by parse_exr.
+#
+syntax_error = 'syntax error'
+
+
+# Parse an S-expression.
+# Input is a list of tokens as returned by tokenize_*().
+# Return a pair (expr, tokens)
+# where expr is a list representing the s-expression,
+# and tokens contains the remaining tokens.
+# May raise syntax_error.
+#
+def parse_expr(tokens):
+	"""Parse one S-expression from the token list; return (expr, rest).
+	Raises syntax_error on malformed input."""
+	if (not tokens) or tokens[0] != '(':
+		raise syntax_error, 'expected "("'
+	tokens = tokens[1:]
+	expr = []
+	while 1:
+		if not tokens:
+			raise syntax_error, 'missing ")"'
+		if tokens[0] == ')':
+			return expr, tokens[1:]
+		elif tokens[0] == '(':
+			subexpr, tokens = parse_expr(tokens)
+			expr.append(subexpr)
+		else:
+			expr.append(tokens[0])
+			tokens = tokens[1:]
+
+
+# Parse a file (given as file object, not as file name).
+# Return a list of parsed S-expressions found at the top level.
+#
+def parse_file(fp):
+	"""Parse a whole file; return the list of top-level S-expressions."""
+	tokens = tokenize_file(fp)
+	exprlist = []
+	while tokens:
+		expr, tokens = parse_expr(tokens)
+		exprlist.append(expr)
+	return exprlist
+
+
+# EXAMPLE:
+#
+# The input
+#	'(hip (hop hur-ray))'
+#
+# passed to tokenize_string() returns the token list
+#	['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
+#
+# When this is passed to parse_expr() it returns the expression
+#	['hip', ['hop', 'hur-ray']]
+# plus an empty token list (because there are no tokens left).
+#
+# When a file containing the example is passed to parse_file() it returns
+# a list whose only element is the output of parse_expr() above:
+#	[['hip', ['hop', 'hur-ray']]]
+
+
+# TOKENIZING:
+#
+# Comments start with semicolon (;) and continue till the end of the line.
+#
+# Tokens are separated by whitespace, except the following characters
+# always form a separate token (outside strings):
+#	( ) '
+# Strings are enclosed in double quotes (") and backslash (\) is used
+# as escape character in strings.
diff --git a/lib-python/2.2/plat-irix5/readcd.doc b/lib-python/2.2/plat-irix5/readcd.doc
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/readcd.doc
@@ -0,0 +1,104 @@
+Interface to CD-ROM player.
+
+This module implements an interface to the built-in cd module.  The
+intention is to provide a more user-friendly interface than the
+built-in module.
+
+The module defines a class Readcd with several methods.  The
+initialization of the class will try to open the CD player.  This
+means that initialization will fail if the CD player is already in
+use.  A RuntimeError will be raised by the cd module in that case.
+
+The way to work with this module is as follows.  The user specifies
+the parts of the CD that are to be read and he specifies callback
+functions which are to be called by the system.  At some point he can
+tell the system to play.  The specified parts of the CD will then be
+read and the callbacks will be called.
+
+Initialization.
+===============
+
+r = readcd.Readcd([cd-player [, mode]])
+
+The optional arguments are the name of the CD device and the mode.
+When "mode" is not specified, it defaults to 'r' (which is the only
+possible value); when "cd-player" also isn't specified, it defaults
+to "None" which indicates the default CD player.
+
+Methods.
+========
+
+eject() -- Eject the CD from the player.
+
+reset() -- Reset the list of data stretches to be played.
+
+appendtrack(track) -- Append the specified track to the list of music
+stretches.
+
+appendstretch(first, last) -- Append the stretch from "first" to "last"
+to the list of music stretches.  Both "first" and "last" can be in one
+of four forms.  "None": for "first", the beginning of the CD, for
+"last" the end of the CD; a single integer: a track number--playing
+starts at the beginning of the track or ends at the end of the
+specified track; a three-tuple: the absolute time from the start of
+the CD in minutes, seconds, frames; a four-tuple: track number and
+relative time within the track in minutes, seconds, frames.
+
+settracks(tracklist) -- The argument is a list of integers.  The list
+of stretches is set to argument list.  The old list is discarded.
+
+setcallback(type, func, arg) -- Set a callback function for "type".
+The function will be called as func(arg, type, data) where "arg" is
+the third argument of setcallback, "type" is the type of callback,
+"data" is type-dependent data.  See the CDsetcallback(3) manual page
+for more information.  The possible "type" arguments are defined in
+the CD module.
+
+removecallback(type) -- Remove the callback for "type".
+
+gettrackinfo([tracklist]) -- Return a list of tuples.  Each tuple
+consists of start and length information of a track.  The start and
+length information consist of three-tuples with minutes, seconds and
+frames.  The optional tracklist argument gives a list of interesting
+track numbers.  If no tracklist is specified, information about all
+tracks is returned.
+
+getstatus() -- Return the status information of the CD.
+
+play() -- Play the preprogrammed stretches of music from the CD.  When
+nothing was programmed, the whole CD is played.
+
+Specifying stretches.
+=====================
+
+There are three methods available to specify a stretch of music to be
+played.  The easiest way is to use "settracks(tracklist)" with which
+a list of tracks can be specified.  "settracks(tracklist)" is
+equivalent to the sequence
+	reset()
+	for track in tracklist:
+		appendtrack(track)
+
+The next method is "appendtrack(track)" with which a whole track can be
+added to the list of music to be played.  "appendtrack(track)" is
+equivalent to "appendstretch(track, track)".
+
+The most complete method is "appendstretch(first, last)".  Using this
+method, it is possible to specify any stretch of music.
+
+When two consecutive tracks are played, it is possible to choose
+whether the pause that may be between the tracks is played as well or
+whether the pause should be skipped.  When the end of a stretch is
+specified using a track number and the next stretch starts at the
+beginning of the following track and that was also specified using the
+track number (that is, both were specified as integers, not as tuples),
+the pause is played.  When either value was specified using absolute
+time or track-relative time (that is, as three-tuple or as
+four-tuple), the pause will not be played.
+
+Errors.
+=======
+
+When an error occurs, an exception will be raised.  Depending on where
+the error occurs, the exception may either be "readcd.Error" or
+"RuntimeError".
diff --git a/lib-python/2.2/plat-irix5/readcd.py b/lib-python/2.2/plat-irix5/readcd.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/readcd.py
@@ -0,0 +1,244 @@
+# Class interface to the CD module.
+
+import cd, CD
+
+class Error(Exception):
+	"""Exception raised for errors detected by this module."""
+	pass
+class _Stop(Exception):
+	"""Internal control-flow exception: end of the current play stretch."""
+	pass
+
+def _doatime(self, cb_type, data):
+	"""Internal ATIME callback: stop at the end of the current stretch,
+	otherwise forward to the user's callback (self is the Readcd)."""
+	if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
+##		print 'done with list entry',`self.listindex`
+		raise _Stop
+	func, arg = self.callbacks[cb_type]
+	if func:
+		func(arg, cb_type, data)
+
+def _dopnum(self, cb_type, data):
+	"""Internal PNUM callback: stop past the last programmed track,
+	otherwise forward to the user's callback (self is the Readcd)."""
+	if data > self.end:
+##		print 'done with list entry',`self.listindex`
+		raise _Stop
+	func, arg = self.callbacks[cb_type]
+	if func:
+		func(arg, cb_type, data)
+
+class Readcd:
+	"""High-level interface to the cd module: program stretches of a CD
+	and play them through user callbacks (see readcd.doc)."""
+	def __init__(self, *arg):
+		"""Open the CD player; optional args are device and mode."""
+		if len(arg) == 0:
+			self.player = cd.open()
+		elif len(arg) == 1:
+			self.player = cd.open(arg[0])
+		elif len(arg) == 2:
+			self.player = cd.open(arg[0], arg[1])
+		else:
+			raise Error, 'bad __init__ call'
+		self.list = []
+		self.callbacks = [(None, None)] * 8
+		self.parser = cd.createparser()
+		self.playing = 0
+		self.end = 0
+		self.status = None
+		self.trackinfo = None
+
+	def eject(self):
+		"""Eject the CD and discard all cached state."""
+		self.player.eject()
+		self.list = []
+		self.end = 0
+		self.listindex = 0
+		self.status = None
+		self.trackinfo = None
+		if self.playing:
+##			print 'stop playing from eject'
+			raise _Stop
+
+	def pmsf2msf(self, track, min, sec, frame):
+		"""Convert a track-relative (track, min, sec, frame) position
+		to an absolute (min, sec, frame) position."""
+		if not self.status:
+			self.cachestatus()
+		if track < self.status[5] or track > self.status[6]:
+			raise Error, 'track number out of range'
+		if not self.trackinfo:
+			self.cacheinfo()
+		start, total = self.trackinfo[track]
+		start = ((start[0] * 60) + start[1]) * 75 + start[2]
+		total = ((total[0] * 60) + total[1]) * 75 + total[2]
+		block = ((min * 60) + sec) * 75 + frame
+		if block > total:
+			raise Error, 'out of range'
+		block = start + block
+		min, block = divmod(block, 75*60)
+		sec, frame = divmod(block, 75)
+		return min, sec, frame
+
+	def reset(self):
+		"""Clear the list of programmed stretches."""
+		self.list = []
+
+	def appendtrack(self, track):
+		"""Append one whole track to the play list."""
+		self.appendstretch(track, track)
+				
+	def appendstretch(self, start, end):
+		"""Append a stretch; start/end may be None, a track number, an
+		absolute (m, s, f) tuple or a track-relative (track, m, s, f)
+		tuple.  Contiguous track stretches are merged."""
+		if not self.status:
+			self.cachestatus()
+		if not start:
+			start = 1
+		if not end:
+			end = self.status[6]
+		if type(end) == type(0):
+			if end < self.status[5] or end > self.status[6]:
+				raise Error, 'range error'
+		else:
+			l = len(end)
+			if l == 4:
+				prog, min, sec, frame = end
+				if prog < self.status[5] or prog > self.status[6]:
+					raise Error, 'range error'
+				end = self.pmsf2msf(prog, min, sec, frame)
+			elif l != 3:
+				raise Error, 'syntax error'
+		if type(start) == type(0):
+			if start < self.status[5] or start > self.status[6]:
+				raise Error, 'range error'
+			if len(self.list) > 0:
+				s, e = self.list[-1]
+				if type(e) == type(0):
+					if start == e+1:
+						start = s
+						del self.list[-1]
+		else:
+			l = len(start)
+			if l == 4:
+				prog, min, sec, frame = start
+				if prog < self.status[5] or prog > self.status[6]:
+					raise Error, 'range error'
+				start = self.pmsf2msf(prog, min, sec, frame)
+			elif l != 3:
+				raise Error, 'syntax error'
+		self.list.append((start, end))
+
+	def settracks(self, list):
+		"""Replace the play list with the given whole tracks."""
+		self.list = []
+		for track in list:
+			self.appendtrack(track)
+
+	def setcallback(self, cb_type, func, arg):
+		"""Install func(arg, type, data) as the callback for cb_type."""
+		if cb_type < 0 or cb_type >= 8:
+			raise Error, 'type out of range'
+		self.callbacks[cb_type] = (func, arg)
+		if self.playing:
+			# While playing, PNUM or ATIME is reserved internally
+			# depending on how the current stretch's end is expressed.
+			start, end = self.list[self.listindex]
+			if type(end) == type(0):
+				if cb_type != CD.PNUM:
+					self.parser.setcallback(cb_type, func, arg)
+			else:
+				if cb_type != CD.ATIME:
+					self.parser.setcallback(cb_type, func, arg)
+
+	def removecallback(self, cb_type):
+		"""Remove the callback installed for cb_type."""
+		if cb_type < 0 or cb_type >= 8:
+			raise Error, 'type out of range'
+		self.callbacks[cb_type] = (None, None)
+		if self.playing:
+			start, end = self.list[self.listindex]
+			if type(end) == type(0):
+				if cb_type != CD.PNUM:
+					self.parser.removecallback(cb_type)
+			else:
+				if cb_type != CD.ATIME:
+					self.parser.removecallback(cb_type)
+
+	def gettrackinfo(self, *arg):
+		"""Return (start, length) info for the given tracks (default: all)."""
+		if not self.status:
+			self.cachestatus()
+		if not self.trackinfo:
+			self.cacheinfo()
+		if len(arg) == 0:
+			return self.trackinfo[self.status[5]:self.status[6]+1]
+		result = []
+		for i in arg:
+			if i < self.status[5] or i > self.status[6]:
+				raise Error, 'range error'
+			result.append(self.trackinfo[i])
+		return result
+
+	def cacheinfo(self):
+		"""Cache per-track info, indexed by track number."""
+		if not self.status:
+			self.cachestatus()
+		self.trackinfo = []
+		for i in range(self.status[5]):
+			self.trackinfo.append(None)
+		for i in range(self.status[5], self.status[6]+1):
+			self.trackinfo.append(self.player.gettrackinfo(i))
+
+	def cachestatus(self):
+		"""Cache the player status; raise Error if no disc is loaded."""
+		self.status = self.player.getstatus()
+		if self.status[0] == CD.NODISC:
+			self.status = None
+			raise Error, 'no disc in player'
+
+	def getstatus(self):
+		"""Return the player's current status (uncached)."""
+		return self.player.getstatus()
+
+	def play(self):
+		"""Play the programmed stretches (whole CD if none), reading
+		frames and dispatching callbacks until the list is exhausted."""
+		if not self.status:
+			self.cachestatus()
+		size = self.player.bestreadsize()
+		self.listindex = 0
+		self.playing = 0
+		for i in range(8):
+			func, arg = self.callbacks[i]
+			if func:
+				self.parser.setcallback(i, func, arg)
+			else:
+				self.parser.removecallback(i)
+		if len(self.list) == 0:
+			for i in range(self.status[5], self.status[6]+1):
+				self.appendtrack(i)
+		try:
+			while 1:
+				if not self.playing:
+					if self.listindex >= len(self.list):
+						return
+					start, end = self.list[self.listindex]
+					if type(start) == type(0):
+						dummy = self.player.seektrack(
+							start)
+					else:
+						min, sec, frame = start
+						dummy = self.player.seek(
+							min, sec, frame)
+					if type(end) == type(0):
+						self.parser.setcallback(
+							CD.PNUM, _dopnum, self)
+						self.end = end
+						func, arg = \
+						      self.callbacks[CD.ATIME]
+						if func:
+							self.parser.setcallback(CD.ATIME, func, arg)
+						else:
+							self.parser.removecallback(CD.ATIME)
+					else:
+						min, sec, frame = end
+						self.parser.setcallback(
+							CD.ATIME, _doatime,
+							self)
+						self.end = (min * 60 + sec) * \
+							   75 + frame
+						func, arg = \
+						      self.callbacks[CD.PNUM]
+						if func:
+							self.parser.setcallback(CD.PNUM, func, arg)
+						else:
+							self.parser.removecallback(CD.PNUM)
+					self.playing = 1
+				data = self.player.readda(size)
+				if data == '':
+					self.playing = 0
+					self.listindex = self.listindex + 1
+					continue
+				try:
+					self.parser.parseframe(data)
+				except _Stop:
+					self.playing = 0
+					self.listindex = self.listindex + 1
+		finally:
+			self.playing = 0
diff --git a/lib-python/2.2/plat-irix5/regen b/lib-python/2.2/plat-irix5/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/regen
@@ -0,0 +1,10 @@
+#! /bin/sh
+case `uname -sr` in
+'IRIX '[45].*)	;;
+*)	echo Probably not on an IRIX system 1>&2
+	exit 1;;
+esac
+set -v
+h2py /usr/include/sys/file.h
+h2py -i '(u_long)' /usr/include/netinet/in.h
+h2py /usr/include/errno.h
diff --git a/lib-python/2.2/plat-irix5/torgb.py b/lib-python/2.2/plat-irix5/torgb.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix5/torgb.py
@@ -0,0 +1,98 @@
+# Convert "arbitrary" image files to rgb files (SGI's image format).
+# Input may be compressed.
+# The uncompressed file type may be PBM, PGM, PPM, GIF, TIFF, or Sun raster.
+# An exception is raised if the file is not of a recognized type.
+# Returned filename is either the input filename or a temporary filename;
+# in the latter case the caller must ensure that it is removed.
+# Other temporary files used are removed by the function.
+
+import os
+import tempfile
+import pipes
+import imghdr
+
+table = {}
+
+t = pipes.Template()
+t.append('fromppm $IN $OUT', 'ff')
+table['ppm'] = t
+
+t = pipes.Template()
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['pnm'] = t
+table['pgm'] = t
+table['pbm'] = t
+
+t = pipes.Template()
+t.append('fromgif $IN $OUT', 'ff')
+table['gif'] = t
+
+t = pipes.Template()
+t.append('tifftopnm', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['tiff'] = t
+
+t = pipes.Template()
+t.append('rasttopnm', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['rast'] = t
+
+t = pipes.Template()
+t.append('djpeg', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['jpeg'] = t
+
+uncompress = pipes.Template()
+uncompress.append('uncompress', '--')
+
+
+class error(Exception):
+	pass
+
+def torgb(filename):
+	temps = []
+	ret = None
+	try:
+		ret = _torgb(filename, temps)
+	finally:
+		for temp in temps[:]:
+			if temp != ret:
+				try:
+					os.unlink(temp)
+				except os.error:
+					pass
+				temps.remove(temp)
+	return ret
+
+def _torgb(filename, temps):
+	if filename[-2:] == '.Z':
+		fname = tempfile.mktemp()
+		temps.append(fname)
+		sts = uncompress.copy(filename, fname)
+		if sts:
+			raise error, filename + ': uncompress failed'
+	else:
+		fname = filename
+	try:
+		ftype = imghdr.what(fname)
+	except IOError, msg:
+		if type(msg) == type(()) and len(msg) == 2 and \
+			type(msg[0]) == type(0) and type(msg[1]) == type(''):
+			msg = msg[1]
+		if type(msg) is not type(''):
+			msg = `msg`
+		raise error, filename + ': ' + msg
+	if ftype == 'rgb':
+		return fname
+	if ftype is None or not table.has_key(ftype):
+		raise error, \
+			filename + ': unsupported image file type ' + `ftype`
+	temp = tempfile.mktemp()
+	sts = table[ftype].copy(fname, temp)
+	if sts:
+		raise error, filename + ': conversion to rgb failed'
+	return temp
diff --git a/lib-python/2.2/plat-irix6/AL.py b/lib-python/2.2/plat-irix6/AL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/AL.py
@@ -0,0 +1,61 @@
+RATE_48000 	= 48000
+RATE_44100	= 44100
+RATE_32000	= 32000
+RATE_22050	= 22050
+RATE_16000	= 16000
+RATE_11025	= 11025
+RATE_8000	= 8000
+
+SAMPFMT_TWOSCOMP= 1
+SAMPFMT_FLOAT	= 32
+SAMPFMT_DOUBLE	= 64
+
+SAMPLE_8	= 1
+SAMPLE_16	= 2
+	# SAMPLE_24 is the low 24 bits of a long, sign extended to 32 bits
+SAMPLE_24	= 4
+
+MONO		= 1
+STEREO		= 2
+QUADRO		= 4			# 4CHANNEL is not a legal Python name
+
+INPUT_LINE	= 0
+INPUT_MIC	= 1
+INPUT_DIGITAL	= 2
+
+MONITOR_OFF	= 0
+MONITOR_ON	= 1
+
+ERROR_NUMBER		= 0
+ERROR_TYPE		= 1
+ERROR_LOCATION_LSP 	= 2
+ERROR_LOCATION_MSP	= 3
+ERROR_LENGTH		= 4
+
+ERROR_INPUT_UNDERFLOW	= 0
+ERROR_OUTPUT_OVERFLOW	= 1
+
+# These seem to be not supported anymore:
+##HOLD, RELEASE			= 0, 1
+##ATTAIL, ATHEAD, ATMARK, ATTIME	= 0, 1, 2, 3
+
+DEFAULT_DEVICE	= 1
+
+INPUT_SOURCE		= 0
+LEFT_INPUT_ATTEN	= 1
+RIGHT_INPUT_ATTEN	= 2
+INPUT_RATE		= 3
+OUTPUT_RATE		= 4
+LEFT_SPEAKER_GAIN	= 5
+RIGHT_SPEAKER_GAIN	= 6
+INPUT_COUNT		= 7
+OUTPUT_COUNT		= 8
+UNUSED_COUNT		= 9
+SYNC_INPUT_TO_AES	= 10
+SYNC_OUTPUT_TO_AES	= 11
+MONITOR_CTL		= 12
+LEFT_MONITOR_ATTEN	= 13
+RIGHT_MONITOR_ATTEN	= 14
+
+ENUM_VALUE	= 0	# only certain values are valid
+RANGE_VALUE	= 1	# any value in range is valid
diff --git a/lib-python/2.2/plat-irix6/CD.py b/lib-python/2.2/plat-irix6/CD.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/CD.py
@@ -0,0 +1,34 @@
+ERROR		= 0
+NODISC		= 1
+READY		= 2
+PLAYING		= 3
+PAUSED		= 4
+STILL		= 5
+
+AUDIO		= 0
+PNUM		= 1
+INDEX		= 2
+PTIME		= 3
+ATIME		= 4
+CATALOG		= 5
+IDENT		= 6
+CONTROL		= 7
+
+CDDA_DATASIZE	= 2352
+
+##CDDA_SUBCODESIZE	= (sizeof(struct subcodeQ))
+##CDDA_BLOCKSIZE	= (sizeof(struct cdframe))
+##CDDA_NUMSAMPLES	= (CDDA_DATASIZE/2)
+##
+##CDQ_PREEMP_MASK	= 0xd
+##CDQ_COPY_MASK	= 0xb
+##CDQ_DDATA_MASK	= 0xd
+##CDQ_BROADCAST_MASK	= 0x8
+##CDQ_PREEMPHASIS	= 0x1
+##CDQ_COPY_PERMITTED	= 0x2		
+##CDQ_DIGITAL_DATA	= 0x4
+##CDQ_BROADCAST_USE	= 0x8
+##
+##CDQ_MODE1	= 0x1
+##CDQ_MODE2	= 0x2
+##CDQ_MODE3	= 0x3
diff --git a/lib-python/2.2/plat-irix6/CL.py b/lib-python/2.2/plat-irix6/CL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/CL.py
@@ -0,0 +1,24 @@
+# Backward compatible module CL.
+# All relevant symbols are now defined in the module cl.
+try:
+	from cl import *
+except ImportError:
+	from CL_old import *
+else:
+	del CompressImage
+	del DecompressImage
+	del GetAlgorithmName
+	del OpenCompressor
+	del OpenDecompressor
+	del QueryAlgorithms
+	del QueryMaxHeaderSize
+	del QueryScheme
+	del QuerySchemeFromName
+	del SetDefault
+	del SetMax
+	del SetMin
+	try:
+		del cvt_type
+	except NameError:
+		pass
+	del error
diff --git a/lib-python/2.2/plat-irix6/DEVICE.py b/lib-python/2.2/plat-irix6/DEVICE.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/DEVICE.py
@@ -0,0 +1,400 @@
+NULLDEV = 0
+BUTOFFSET = 1
+VALOFFSET = 256
+PSEUDOFFSET = 512
+BUT2OFFSET = 3840
+TIMOFFSET = 515
+XKBDOFFSET = 143
+BUTCOUNT = 255
+VALCOUNT = 256
+TIMCOUNT = 4
+XKBDCOUNT = 28
+USERBUTOFFSET = 4096
+USERVALOFFSET = 12288
+USERPSEUDOFFSET = 16384
+BUT0 = 1
+BUT1 = 2
+BUT2 = 3
+BUT3 = 4
+BUT4 = 5
+BUT5 = 6
+BUT6 = 7
+BUT7 = 8
+BUT8 = 9
+BUT9 = 10
+BUT10 = 11
+BUT11 = 12
+BUT12 = 13
+BUT13 = 14
+BUT14 = 15
+BUT15 = 16
+BUT16 = 17
+BUT17 = 18
+BUT18 = 19
+BUT19 = 20
+BUT20 = 21
+BUT21 = 22
+BUT22 = 23
+BUT23 = 24
+BUT24 = 25
+BUT25 = 26
+BUT26 = 27
+BUT27 = 28
+BUT28 = 29
+BUT29 = 30
+BUT30 = 31
+BUT31 = 32
+BUT32 = 33
+BUT33 = 34
+BUT34 = 35
+BUT35 = 36
+BUT36 = 37
+BUT37 = 38
+BUT38 = 39
+BUT39 = 40
+BUT40 = 41
+BUT41 = 42
+BUT42 = 43
+BUT43 = 44
+BUT44 = 45
+BUT45 = 46
+BUT46 = 47
+BUT47 = 48
+BUT48 = 49
+BUT49 = 50
+BUT50 = 51
+BUT51 = 52
+BUT52 = 53
+BUT53 = 54
+BUT54 = 55
+BUT55 = 56
+BUT56 = 57
+BUT57 = 58
+BUT58 = 59
+BUT59 = 60
+BUT60 = 61
+BUT61 = 62
+BUT62 = 63
+BUT63 = 64
+BUT64 = 65
+BUT65 = 66
+BUT66 = 67
+BUT67 = 68
+BUT68 = 69
+BUT69 = 70
+BUT70 = 71
+BUT71 = 72
+BUT72 = 73
+BUT73 = 74
+BUT74 = 75
+BUT75 = 76
+BUT76 = 77
+BUT77 = 78
+BUT78 = 79
+BUT79 = 80
+BUT80 = 81
+BUT81 = 82
+BUT82 = 83
+MAXKBDBUT = 83
+BUT100 = 101
+BUT101 = 102
+BUT102 = 103
+BUT103 = 104
+BUT104 = 105
+BUT105 = 106
+BUT106 = 107
+BUT107 = 108
+BUT108 = 109
+BUT109 = 110
+BUT110 = 111
+BUT111 = 112
+BUT112 = 113
+BUT113 = 114
+BUT114 = 115
+BUT115 = 116
+BUT116 = 117
+BUT117 = 118
+BUT118 = 119
+BUT119 = 120
+BUT120 = 121
+BUT121 = 122
+BUT122 = 123
+BUT123 = 124
+BUT124 = 125
+BUT125 = 126
+BUT126 = 127
+BUT127 = 128
+BUT128 = 129
+BUT129 = 130
+BUT130 = 131
+BUT131 = 132
+BUT132 = 133
+BUT133 = 134
+BUT134 = 135
+BUT135 = 136
+BUT136 = 137
+BUT137 = 138
+BUT138 = 139
+BUT139 = 140
+BUT140 = 141
+BUT141 = 142
+BUT142 = 143
+BUT143 = 144
+BUT144 = 145
+BUT145 = 146
+BUT146 = 147
+BUT147 = 148
+BUT148 = 149
+BUT149 = 150
+BUT150 = 151
+BUT151 = 152
+BUT152 = 153
+BUT153 = 154
+BUT154 = 155
+BUT155 = 156
+BUT156 = 157
+BUT157 = 158
+BUT158 = 159
+BUT159 = 160
+BUT160 = 161
+BUT161 = 162
+BUT162 = 163
+BUT163 = 164
+BUT164 = 165
+BUT165 = 166
+BUT166 = 167
+BUT167 = 168
+BUT168 = 169
+BUT181 = 182
+BUT182 = 183
+BUT183 = 184
+BUT184 = 185
+BUT185 = 186
+BUT186 = 187
+BUT187 = 188
+BUT188 = 189
+BUT189 = 190
+MOUSE1 = 101
+MOUSE2 = 102
+MOUSE3 = 103
+LEFTMOUSE = 103
+MIDDLEMOUSE = 102
+RIGHTMOUSE = 101
+LPENBUT = 104
+BPAD0 = 105
+BPAD1 = 106
+BPAD2 = 107
+BPAD3 = 108
+LPENVALID = 109
+SWBASE = 111
+SW0 = 111
+SW1 = 112
+SW2 = 113
+SW3 = 114
+SW4 = 115
+SW5 = 116
+SW6 = 117
+SW7 = 118
+SW8 = 119
+SW9 = 120
+SW10 = 121
+SW11 = 122
+SW12 = 123
+SW13 = 124
+SW14 = 125
+SW15 = 126
+SW16 = 127
+SW17 = 128
+SW18 = 129
+SW19 = 130
+SW20 = 131
+SW21 = 132
+SW22 = 133
+SW23 = 134
+SW24 = 135
+SW25 = 136
+SW26 = 137
+SW27 = 138
+SW28 = 139
+SW29 = 140
+SW30 = 141
+SW31 = 142
+SBBASE = 182
+SBPICK = 182
+SBBUT1 = 183
+SBBUT2 = 184
+SBBUT3 = 185
+SBBUT4 = 186
+SBBUT5 = 187
+SBBUT6 = 188
+SBBUT7 = 189
+SBBUT8 = 190
+AKEY = 11
+BKEY = 36
+CKEY = 28
+DKEY = 18
+EKEY = 17
+FKEY = 19
+GKEY = 26
+HKEY = 27
+IKEY = 40
+JKEY = 34
+KKEY = 35
+LKEY = 42
+MKEY = 44
+NKEY = 37
+OKEY = 41
+PKEY = 48
+QKEY = 10
+RKEY = 24
+SKEY = 12
+TKEY = 25
+UKEY = 33
+VKEY = 29
+WKEY = 16
+XKEY = 21
+YKEY = 32
+ZKEY = 20
+ZEROKEY = 46
+ONEKEY = 8
+TWOKEY = 14
+THREEKEY = 15
+FOURKEY = 22
+FIVEKEY = 23
+SIXKEY = 30
+SEVENKEY = 31
+EIGHTKEY = 38
+NINEKEY = 39
+BREAKKEY = 1
+SETUPKEY = 2
+CTRLKEY = 3
+LEFTCTRLKEY = CTRLKEY
+CAPSLOCKKEY = 4
+RIGHTSHIFTKEY = 5
+LEFTSHIFTKEY = 6
+NOSCRLKEY = 13
+ESCKEY = 7
+TABKEY = 9
+RETKEY = 51
+SPACEKEY = 83
+LINEFEEDKEY = 60
+BACKSPACEKEY = 61
+DELKEY = 62
+SEMICOLONKEY = 43
+PERIODKEY = 52
+COMMAKEY = 45
+QUOTEKEY = 50
+ACCENTGRAVEKEY = 55
+MINUSKEY = 47
+VIRGULEKEY = 53
+BACKSLASHKEY = 57
+EQUALKEY = 54
+LEFTBRACKETKEY = 49
+RIGHTBRACKETKEY = 56
+LEFTARROWKEY = 73
+DOWNARROWKEY = 74
+RIGHTARROWKEY = 80
+UPARROWKEY = 81
+PAD0 = 59
+PAD1 = 58
+PAD2 = 64
+PAD3 = 65
+PAD4 = 63
+PAD5 = 69
+PAD6 = 70
+PAD7 = 67
+PAD8 = 68
+PAD9 = 75
+PADPF1 = 72
+PADPF2 = 71
+PADPF3 = 79
+PADPF4 = 78
+PADPERIOD = 66
+PADMINUS = 76
+PADCOMMA = 77
+PADENTER = 82
+LEFTALTKEY = 143
+RIGHTALTKEY = 144
+RIGHTCTRLKEY = 145
+F1KEY = 146
+F2KEY = 147
+F3KEY = 148
+F4KEY = 149
+F5KEY = 150
+F6KEY = 151
+F7KEY = 152
+F8KEY = 153
+F9KEY = 154
+F10KEY = 155
+F11KEY = 156
+F12KEY = 157
+PRINTSCREENKEY = 158
+SCROLLLOCKKEY = 159
+PAUSEKEY = 160
+INSERTKEY = 161
+HOMEKEY = 162
+PAGEUPKEY = 163
+ENDKEY = 164
+PAGEDOWNKEY = 165
+NUMLOCKKEY = 166
+PADVIRGULEKEY = 167
+PADASTERKEY = 168
+PADPLUSKEY = 169
+SGIRESERVED = 256
+DIAL0 = 257
+DIAL1 = 258
+DIAL2 = 259
+DIAL3 = 260
+DIAL4 = 261
+DIAL5 = 262
+DIAL6 = 263
+DIAL7 = 264
+DIAL8 = 265
+MOUSEX = 266
+MOUSEY = 267
+LPENX = 268
+LPENY = 269
+BPADX = 270
+BPADY = 271
+CURSORX = 272
+CURSORY = 273
+GHOSTX = 274
+GHOSTY = 275
+SBTX = 276
+SBTY = 277
+SBTZ = 278
+SBRX = 279
+SBRY = 280
+SBRZ = 281
+SBPERIOD = 282
+TIMER0 = 515
+TIMER1 = 516
+TIMER2 = 517
+TIMER3 = 518
+KEYBD = 513
+RAWKEYBD = 514
+VALMARK = 523
+REDRAW = 528
+INPUTCHANGE = 534
+QFULL = 535
+QREADERROR = 538
+WINFREEZE = 539
+WINTHAW = 540
+REDRAWICONIC = 541
+WINQUIT = 542
+DEPTHCHANGE = 543
+WINSHUT = 546
+DRAWOVERLAY = 547
+VIDEO = 548
+MENUBUTTON = RIGHTMOUSE
+WINCLOSE = 537
+KEYBDFNAMES = 544
+KEYBDFSTRINGS = 545
+MAXSGIDEVICE = 20000
+GERROR = 524
+WMSEND = 529
+WMREPLY = 530
+WMGFCLOSE = 531
+WMTXCLOSE = 532
+MODECHANGE = 533
+PIECECHANGE = 536
diff --git a/lib-python/2.2/plat-irix6/ERRNO.py b/lib-python/2.2/plat-irix6/ERRNO.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/ERRNO.py
@@ -0,0 +1,180 @@
+# Generated by h2py from /usr/include/errno.h
+
+# Included from sys/errno.h
+
+# Included from standards.h
+__KBASE = 1000
+__IRIXBASE = 1000
+__FTNBASE = 4000
+__FTNLAST = 5999
+EPERM = 1
+ENOENT = 2
+ESRCH = 3
+EINTR = 4
+EIO = 5
+ENXIO = 6
+E2BIG = 7
+ENOEXEC = 8
+EBADF = 9
+ECHILD = 10
+EAGAIN = 11
+ENOMEM = 12
+EACCES = 13
+EFAULT = 14
+ENOTBLK = 15
+EBUSY = 16
+EEXIST = 17
+EXDEV = 18
+ENODEV = 19
+ENOTDIR = 20
+EISDIR = 21
+EINVAL = 22
+ENFILE = 23
+EMFILE = 24
+ENOTTY = 25
+ETXTBSY = 26
+EFBIG = 27
+ENOSPC = 28
+ESPIPE = 29
+EROFS = 30
+EMLINK = 31
+EPIPE = 32
+EDOM = 33
+ERANGE = 34
+ENOMSG = 35
+EIDRM = 36
+ECHRNG = 37
+EL2NSYNC = 38
+EL3HLT = 39
+EL3RST = 40
+ELNRNG = 41
+EUNATCH = 42
+ENOCSI = 43
+EL2HLT = 44
+EDEADLK = 45
+ENOLCK = 46
+ECKPT = 47
+EBADE = 50
+EBADR = 51
+EXFULL = 52
+ENOANO = 53
+EBADRQC = 54
+EBADSLT = 55
+EDEADLOCK = 56
+EBFONT = 57
+ENOSTR = 60
+ENODATA = 61
+ETIME = 62
+ENOSR = 63
+ENONET = 64
+ENOPKG = 65
+EREMOTE = 66
+ENOLINK = 67
+EADV = 68
+ESRMNT = 69
+ECOMM = 70
+EPROTO = 71
+EMULTIHOP = 74
+EBADMSG = 77
+ENAMETOOLONG = 78
+EOVERFLOW = 79
+ENOTUNIQ = 80
+EBADFD = 81
+EREMCHG = 82
+ELIBACC = 83
+ELIBBAD = 84
+ELIBSCN = 85
+ELIBMAX = 86
+ELIBEXEC = 87
+EILSEQ = 88
+ENOSYS = 89
+ELOOP = 90
+ERESTART = 91
+ESTRPIPE = 92
+ENOTEMPTY = 93
+EUSERS = 94
+ENOTSOCK = 95
+EDESTADDRREQ = 96
+EMSGSIZE = 97
+EPROTOTYPE = 98
+ENOPROTOOPT = 99
+EPROTONOSUPPORT = 120
+ESOCKTNOSUPPORT = 121
+EOPNOTSUPP = 122
+EPFNOSUPPORT = 123
+EAFNOSUPPORT = 124
+EADDRINUSE = 125
+EADDRNOTAVAIL = 126
+ENETDOWN = 127
+ENETUNREACH = 128
+ENETRESET = 129
+ECONNABORTED = 130
+ECONNRESET = 131
+ENOBUFS = 132
+EISCONN = 133
+ENOTCONN = 134
+ESHUTDOWN = 143
+ETOOMANYREFS = 144
+ETIMEDOUT = 145
+ECONNREFUSED = 146
+EHOSTDOWN = 147
+EHOSTUNREACH = 148
+LASTERRNO = ENOTCONN
+EWOULDBLOCK = __KBASE+101
+EWOULDBLOCK = EAGAIN
+EALREADY = 149
+EINPROGRESS = 150
+ESTALE = 151
+EIORESID = 500
+EUCLEAN = 135
+ENOTNAM = 137
+ENAVAIL = 138
+EISNAM = 139
+EREMOTEIO = 140
+EINIT = 141
+EREMDEV = 142
+ECANCELED = 158
+ENOLIMFILE = 1001
+EPROCLIM = 1002
+EDISJOINT = 1003
+ENOLOGIN = 1004
+ELOGINLIM = 1005
+EGROUPLOOP = 1006
+ENOATTACH = 1007
+ENOTSUP = 1008
+ENOATTR = 1009
+EFSCORRUPTED = 1010
+EDIRCORRUPTED = 1010
+EWRONGFS = 1011
+EDQUOT = 1133
+ENFSREMOTE = 1135
+ECONTROLLER = 1300
+ENOTCONTROLLER = 1301
+EENQUEUED = 1302
+ENOTENQUEUED = 1303
+EJOINED = 1304
+ENOTJOINED = 1305
+ENOPROC = 1306
+EMUSTRUN = 1307
+ENOTSTOPPED = 1308
+ECLOCKCPU = 1309
+EINVALSTATE = 1310
+ENOEXIST = 1311
+EENDOFMINOR = 1312
+EBUFSIZE = 1313
+EEMPTY = 1314
+ENOINTRGROUP = 1315
+EINVALMODE = 1316
+ECANTEXTENT = 1317
+EINVALTIME = 1318
+EDESTROYED = 1319
+EBDHDL = 1400
+EDELAY = 1401
+ENOBWD = 1402
+EBADRSPEC = 1403
+EBADTSPEC = 1404
+EBADFILT = 1405
+EMIGRATED = 1500
+EMIGRATING = 1501
+ECELLDOWN = 1502
+EMEMRETRY = 1600
diff --git a/lib-python/2.2/plat-irix6/FILE.py b/lib-python/2.2/plat-irix6/FILE.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/FILE.py
@@ -0,0 +1,674 @@
+# Generated by h2py from /usr/include/sys/file.h
+
+# Included from standards.h
+
+# Included from sys/types.h
+
+# Included from sgidefs.h
+_MIPS_ISA_MIPS1 = 1
+_MIPS_ISA_MIPS2 = 2
+_MIPS_ISA_MIPS3 = 3
+_MIPS_ISA_MIPS4 = 4
+_MIPS_SIM_ABI32 = 1
+_MIPS_SIM_NABI32 = 2
+_MIPS_SIM_ABI64 = 3
+
+# Included from sys/pthread.h
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/bsd_types.h
+
+# Included from sys/mkdev.h
+ONBITSMAJOR = 7
+ONBITSMINOR = 8
+OMAXMAJ = 0x7f
+OMAXMIN = 0xff
+NBITSMAJOR = 14
+NBITSMINOR = 18
+MAXMAJ = 0x1ff
+MAXMIN = 0x3ffff
+OLDDEV = 0
+NEWDEV = 1
+MKDEV_VER = NEWDEV
+def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))
+
+def major(dev): return __major(MKDEV_VER, dev)
+
+def minor(dev): return __minor(MKDEV_VER, dev)
+
+
+# Included from sys/select.h
+FD_SETSIZE = 1024
+__NBBY = 8
+
+# Included from string.h
+NULL = 0L
+NBBY = 8
+
+# Included from sys/cpumask.h
+MAXCPU = 128
+def CPUMASK_INDEX(bit): return ((bit) >> 6)
+
+def CPUMASK_SHFT(bit): return ((bit) & 0x3f)
+
+def CPUMASK_IS_ZERO(p): return ((p) == 0)
+
+def CPUMASK_IS_NONZERO(p): return ((p) != 0)
+
+
+# Included from sys/nodemask.h
+def CNODEMASK_IS_ZERO(p): return ((p) == 0)
+
+def CNODEMASK_IS_NONZERO(p): return ((p) != 0)
+
+
+# Included from sys/sema.h
+
+# Included from sys/timespec.h
+
+# Included from sys/param.h
+
+# Included from sys/signal.h
+SIGHUP = 1
+SIGINT = 2
+SIGQUIT = 3
+SIGILL = 4
+SIGTRAP = 5
+SIGIOT = 6
+SIGABRT = 6
+SIGEMT = 7
+SIGFPE = 8
+SIGKILL = 9
+SIGBUS = 10
+SIGSEGV = 11
+SIGSYS = 12
+SIGPIPE = 13
+SIGALRM = 14
+SIGTERM = 15
+SIGUSR1 = 16
+SIGUSR2 = 17
+SIGCLD = 18
+SIGCHLD = 18
+SIGPWR = 19
+SIGWINCH = 20
+SIGURG = 21
+SIGPOLL = 22
+SIGIO = 22
+SIGSTOP = 23
+SIGTSTP = 24
+SIGCONT = 25
+SIGTTIN = 26
+SIGTTOU = 27
+SIGVTALRM = 28
+SIGPROF = 29
+SIGXCPU = 30
+SIGXFSZ = 31
+SIGK32 = 32
+SIGCKPT = 33
+SIGRESTART = 34
+SIGUME = 35
+SIGPTINTR = 47
+SIGPTRESCHED = 48
+SIGRTMIN = 49
+SIGRTMAX = 64
+__sigargs = int
+
+# Included from sys/sigevent.h
+SIGEV_NONE = 128
+SIGEV_SIGNAL = 129
+SIGEV_CALLBACK = 130
+SIGEV_THREAD = 131
+
+# Included from sys/siginfo.h
+SI_MAXSZ = 128
+SI_USER = 0
+SI_KILL = SI_USER
+SI_QUEUE = -1
+SI_ASYNCIO = -2
+SI_TIMER = -3
+SI_MESGQ = -4
+ILL_ILLOPC = 1
+ILL_ILLOPN = 2
+ILL_ILLADR = 3
+ILL_ILLTRP = 4
+ILL_PRVOPC = 5
+ILL_PRVREG = 6
+ILL_COPROC = 7
+ILL_BADSTK = 8
+NSIGILL = 8
+FPE_INTDIV = 1
+FPE_INTOVF = 2
+FPE_FLTDIV = 3
+FPE_FLTOVF = 4
+FPE_FLTUND = 5
+FPE_FLTRES = 6
+FPE_FLTINV = 7
+FPE_FLTSUB = 8
+NSIGFPE = 8
+SEGV_MAPERR = 1
+SEGV_ACCERR = 2
+NSIGSEGV = 2
+BUS_ADRALN = 1
+BUS_ADRERR = 2
+BUS_OBJERR = 3
+NSIGBUS = 3
+TRAP_BRKPT = 1
+TRAP_TRACE = 2
+NSIGTRAP = 2
+CLD_EXITED = 1
+CLD_KILLED = 2
+CLD_DUMPED = 3
+CLD_TRAPPED = 4
+CLD_STOPPED = 5
+CLD_CONTINUED = 6
+NSIGCLD = 6
+POLL_IN = 1
+POLL_OUT = 2
+POLL_MSG = 3
+POLL_ERR = 4
+POLL_PRI = 5
+POLL_HUP = 6
+NSIGPOLL = 6
+UME_ECCERR = 1
+NSIGUME = 1
+SIG_NOP = 0
+SIG_BLOCK = 1
+SIG_UNBLOCK = 2
+SIG_SETMASK = 3
+SIG_SETMASK32 = 256
+SA_ONSTACK = 0x00000001
+SA_RESETHAND = 0x00000002
+SA_RESTART = 0x00000004
+SA_SIGINFO = 0x00000008
+SA_NODEFER = 0x00000010
+SA_NOCLDWAIT = 0x00010000
+SA_NOCLDSTOP = 0x00020000
+_SA_BSDCALL = 0x10000000
+MINSIGSTKSZ = 512
+SIGSTKSZ = 8192
+SS_ONSTACK = 0x00000001
+SS_DISABLE = 0x00000002
+
+# Included from sys/ucontext.h
+NGREG = 36
+NGREG = 37
+GETCONTEXT = 0
+SETCONTEXT = 1
+UC_SIGMASK = 001
+UC_STACK = 002
+UC_CPU = 004
+UC_MAU = 010
+UC_MCONTEXT = (UC_CPU|UC_MAU)
+UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
+CTX_R0 = 0
+CTX_AT = 1
+CTX_V0 = 2
+CTX_V1 = 3
+CTX_A0 = 4
+CTX_A1 = 5
+CTX_A2 = 6
+CTX_A3 = 7
+CTX_T0 = 8
+CTX_T1 = 9
+CTX_T2 = 10
+CTX_T3 = 11
+CTX_T4 = 12
+CTX_T5 = 13
+CTX_T6 = 14
+CTX_T7 = 15
+CTX_A4 = 8
+CTX_A5 = 9
+CTX_A6 = 10
+CTX_A7 = 11
+CTX_T0 = 12
+CTX_T1 = 13
+CTX_T2 = 14
+CTX_T3 = 15
+CTX_S0 = 16
+CTX_S1 = 17
+CTX_S2 = 18
+CTX_S3 = 19
+CTX_S4 = 20
+CTX_S5 = 21
+CTX_S6 = 22
+CTX_S7 = 23
+CTX_T8 = 24
+CTX_T9 = 25
+CTX_K0 = 26
+CTX_K1 = 27
+CTX_GP = 28
+CTX_SP = 29
+CTX_S8 = 30
+CTX_RA = 31
+CTX_MDLO = 32
+CTX_MDHI = 33
+CTX_CAUSE = 34
+CTX_EPC = 35
+CTX_SR = 36
+CXT_R0 = CTX_R0
+CXT_AT = CTX_AT
+CXT_V0 = CTX_V0
+CXT_V1 = CTX_V1
+CXT_A0 = CTX_A0
+CXT_A1 = CTX_A1
+CXT_A2 = CTX_A2
+CXT_A3 = CTX_A3
+CXT_T0 = CTX_T0
+CXT_T1 = CTX_T1
+CXT_T2 = CTX_T2
+CXT_T3 = CTX_T3
+CXT_T4 = CTX_T4
+CXT_T5 = CTX_T5
+CXT_T6 = CTX_T6
+CXT_T7 = CTX_T7
+CXT_S0 = CTX_S0
+CXT_S1 = CTX_S1
+CXT_S2 = CTX_S2
+CXT_S3 = CTX_S3
+CXT_S4 = CTX_S4
+CXT_S5 = CTX_S5
+CXT_S6 = CTX_S6
+CXT_S7 = CTX_S7
+CXT_T8 = CTX_T8
+CXT_T9 = CTX_T9
+CXT_K0 = CTX_K0
+CXT_K1 = CTX_K1
+CXT_GP = CTX_GP
+CXT_SP = CTX_SP
+CXT_S8 = CTX_S8
+CXT_RA = CTX_RA
+CXT_MDLO = CTX_MDLO
+CXT_MDHI = CTX_MDHI
+CXT_CAUSE = CTX_CAUSE
+CXT_EPC = CTX_EPC
+CXT_SR = CTX_SR
+CTX_FV0 = 0
+CTX_FV1 = 2
+CTX_FA0 = 12
+CTX_FA1 = 13
+CTX_FA2 = 14
+CTX_FA3 = 15
+CTX_FA4 = 16
+CTX_FA5 = 17
+CTX_FA6 = 18
+CTX_FA7 = 19
+CTX_FT0 = 4
+CTX_FT1 = 5
+CTX_FT2 = 6
+CTX_FT3 = 7
+CTX_FT4 = 8
+CTX_FT5 = 9
+CTX_FT6 = 10
+CTX_FT7 = 11
+CTX_FT8 = 20
+CTX_FT9 = 21
+CTX_FT10 = 22
+CTX_FT11 = 23
+CTX_FT12 = 1
+CTX_FT13 = 3
+CTX_FS0 = 24
+CTX_FS1 = 25
+CTX_FS2 = 26
+CTX_FS3 = 27
+CTX_FS4 = 28
+CTX_FS5 = 29
+CTX_FS6 = 30
+CTX_FS7 = 31
+CTX_FT8 = 21
+CTX_FT9 = 23
+CTX_FT10 = 25
+CTX_FT11 = 27
+CTX_FT12 = 29
+CTX_FT13 = 31
+CTX_FT14 = 1
+CTX_FT15 = 3
+CTX_FS0 = 20
+CTX_FS1 = 22
+CTX_FS2 = 24
+CTX_FS3 = 26
+CTX_FS4 = 28
+CTX_FS5 = 30
+SV_ONSTACK = 0x0001
+SV_INTERRUPT = 0x0002
+NUMBSDSIGS = (32)
+def sigmask(sig): return (1L << ((sig)-1))
+
+def sigmask(sig): return (1L << ((sig)-1))
+
+SIG_ERR = (-1)
+SIG_IGN = (1)
+SIG_HOLD = (2)
+SIG_DFL = (0)
+NSIG = 65
+MAXSIG = (NSIG-1)
+NUMSIGS = (NSIG-1)
+BRK_USERBP = 0
+BRK_KERNELBP = 1
+BRK_ABORT = 2
+BRK_BD_TAKEN = 3
+BRK_BD_NOTTAKEN = 4
+BRK_SSTEPBP = 5
+BRK_OVERFLOW = 6
+BRK_DIVZERO = 7
+BRK_RANGE = 8
+BRK_PSEUDO_OP_BIT = 0x80
+BRK_PSEUDO_OP_MAX = 0x3
+BRK_CACHE_SYNC = 0x80
+BRK_MULOVF = 1023
+_POSIX_VERSION = 199506L
+_POSIX_VERSION = 199506
+_POSIX_VDISABLE = 0
+MAX_INPUT = 512
+MAX_CANON = 256
+UID_NOBODY = 60001
+GID_NOBODY = UID_NOBODY
+UID_NOACCESS = 60002
+MAXPID = 0x7ffffff0
+MAXUID = 0x7fffffff
+MAXLINK = 30000
+SSIZE = 1
+SINCR = 1
+KSTKSIZE = 1
+EXTKSTKSIZE = 1
+KSTKIDX = 0
+KSTEIDX = 1
+EXTKSTKSIZE = 0
+KSTKIDX = 0
+CANBSIZ = 256
+HZ = 100
+TICK = 10000000
+NOFILE = 20
+NGROUPS_UMIN = 0
+NGROUPS_UMAX = 32
+NGROUPS = 16
+PMASK = 0177
+PCATCH = 0400
+PLTWAIT = 01000
+PRECALC = 01000
+PSWP = 0
+PINOD = 10
+PSNDD = PINOD
+PRIBIO = 20
+PZERO = 25
+PMEM = 0
+NZERO = 20
+PPIPE = 26
+PVFS = 27
+PWAIT = 30
+PSLEP = 39
+PUSER = 60
+PBATCH_CRITICAL = -1
+PTIME_SHARE = -2
+PTIME_SHARE_OVER = -3
+PBATCH = -4
+PWEIGHTLESS = -5
+IO_NBPC = 4096
+IO_BPCSHIFT = 12
+MIN_NBPC = 4096
+MIN_BPCSHIFT = 12
+MIN_CPSSHIFT = 10
+BPCSHIFT = 12
+CPSSHIFT = 10
+BPCSHIFT = 14
+CPSSHIFT = 12
+CPSSHIFT = 11
+BPSSHIFT = (BPCSHIFT+CPSSHIFT)
+NULL = 0L
+CMASK = 022
+NODEV = (-1)
+NOPAGE = (-1)
+NBPSCTR = 512
+SCTRSHFT = 9
+def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK0)
+
+def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK)
+
+def USERMODE(psw): return (((psw) & SR_KSU_MSK) == SR_KSU_USR)
+
+MAXPATHLEN = 1024
+MAXSYMLINKS = 30
+MAXNAMELEN = 256
+PIPE_BUF = 10240
+PIPE_MAX = 10240
+NBBY = 8
+BBSHIFT = 9
+BBSIZE = (1<<BBSHIFT)
+BBMASK = (BBSIZE-1)
+def BBTOB(bbs): return ((bbs) << BBSHIFT)
+
+def OFFTOBB(bytes): return (((__uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)
+
+def OFFTOBBT(bytes): return ((off_t)(bytes) >> BBSHIFT)
+
+def BBTOOFF(bbs): return ((off_t)(bbs) << BBSHIFT)     
+
+SEEKLIMIT32 = 0x7fffffff
+MAXBSIZE = 8192
+DEV_BSIZE = BBSIZE
+DEV_BSHIFT = BBSHIFT
+def btodb(bytes): return   \
+
+def dbtob(db): return   \
+
+BLKDEV_IOSHIFT = BPCSHIFT
+BLKDEV_IOSIZE = (1<<BLKDEV_IOSHIFT)
+def BLKDEV_OFF(off): return ((off) & (BLKDEV_IOSIZE - 1))
+
+def BLKDEV_LBN(off): return ((off) >> BLKDEV_IOSHIFT)
+
+def BLKDEV_LTOP(bn): return ((bn) * BLKDEV_BB)
+
+MAXHOSTNAMELEN = 256
+def DELAY(n): return us_delay(n)
+
+def DELAYBUS(n): return us_delaybus(n)
+
+TIMEPOKE_NOW = -100L
+MUTEX_DEFAULT = 0x0
+METER_NAMSZ = 16
+METER_NO_SEQ = -1
+def mutex_spinlock(l): return splhi()
+
+def mutex_spintrylock(l): return splhi()
+
+def spinlock_initialized(l): return 1
+
+SV_FIFO = 0x0
+SV_LIFO = 0x2
+SV_PRIO = 0x4
+SV_KEYED = 0x6
+SV_DEFAULT = SV_FIFO
+SEMA_NOHIST = 0x0001
+SEMA_LOCK = 0x0004
+NSCHEDCLASS = (-(PWEIGHTLESS)+1)
+MR_ACCESS = 1
+MR_UPDATE = 2
+MRLOCK_BARRIER = 0x1
+MRLOCK_BEHAVIOR = 0x2
+MRLOCK_DBLTRIPPABLE = 0x4
+MRLOCK_ALLOW_EQUAL_PRI = 0x8
+MRLOCK_DEFAULT = MRLOCK_BARRIER
+def mraccess(mrp): return mraccessf(mrp, 0)	 
+
+def mrupdate(mrp): return mrupdatef(mrp, 0)	 
+
+def mp_mutex_unlock(m): return mutex_unlock(m)
+
+def mp_mutex_trylock(m): return mutex_trylock(m)
+
+def mp_mutex_spinlock(m): return mutex_spinlock(m)
+
+
+# Included from sys/mon.h
+MON_LOCKED = 0x01
+MON_WAITING = 0x02
+MON_TIMEOUT = 0x04
+MON_DOSRV = 0x08
+MON_RUN = 0x10
+MR_READER_BUCKETS = 13
+def initlock(l): return spinlock_init(l,0)
+
+def ownlock(x): return 1
+
+def mutex_enter(m): return mutex_lock(m, PZERO)
+
+def mutex_tryenter(m): return mutex_trylock(m)
+
+def mutex_exit(m): return mutex_unlock(m)
+
+def cv_signal(cv): return sv_signal(cv)
+
+def cv_broadcast(cv): return sv_broadcast(cv)
+
+def cv_destroy(cv): return sv_destroy(cv)
+
+RW_READER = MR_ACCESS
+RW_WRITER = MR_UPDATE
+def rw_exit(r): return mrunlock(r)
+
+def rw_tryupgrade(r): return mrtrypromote(r)
+
+def rw_downgrade(r): return mrdemote(r)
+
+def rw_destroy(r): return mrfree(r)
+
+def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
+
+def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
+
+MS_FREE = 0
+MS_UPD = 1
+MS_ACC = 2
+MS_WAITERS = 4
+
+# Included from sys/fcntl.h
+FNDELAY = 0x04
+FAPPEND = 0x08
+FSYNC = 0x10
+FDSYNC = 0x20
+FRSYNC = 0x40
+FNONBLOCK = 0x80
+FASYNC = 0x1000
+FLARGEFILE = 0x2000
+FNONBLK = FNONBLOCK
+FDIRECT = 0x8000
+FBULK = 0x10000
+FDIRENT64 = 0x8000
+FCREAT = 0x0100
+FTRUNC = 0x0200
+FEXCL = 0x0400
+FNOCTTY = 0x0800
+O_RDONLY = 0
+O_WRONLY = 1
+O_RDWR = 2
+O_NDELAY = 0x04
+O_APPEND = 0x08
+O_SYNC = 0x10
+O_DSYNC = 0x20
+O_RSYNC = 0x40
+O_NONBLOCK = 0x80
+O_LARGEFILE = 0x2000
+O_DIRECT = 0x8000
+O_BULK = 0x10000
+O_CREAT = 0x100
+O_TRUNC = 0x200
+O_EXCL = 0x400
+O_NOCTTY = 0x800
+F_DUPFD = 0
+F_GETFD = 1
+F_SETFD = 2
+F_GETFL = 3
+F_SETFL = 4
+F_SETLK = 6
+F_SETLKW = 7
+F_CHKFL = 8
+F_ALLOCSP = 10
+F_FREESP = 11
+F_SETBSDLK = 12
+F_SETBSDLKW = 13
+F_GETLK = 14
+F_CHKLK = 15
+F_CHKLKW = 16
+F_CLNLK = 17
+F_RSETLK = 20
+F_RGETLK = 21
+F_RSETLKW = 22
+F_GETOWN = 23
+F_SETOWN = 24
+F_DIOINFO = 30
+F_FSGETXATTR = 31
+F_FSSETXATTR = 32
+F_GETLK64 = 33
+F_SETLK64 = 34
+F_SETLKW64 = 35
+F_ALLOCSP64 = 36
+F_FREESP64 = 37
+F_GETBMAP = 38
+F_FSSETDM = 39
+F_RESVSP = 40
+F_UNRESVSP = 41
+F_RESVSP64 = 42
+F_UNRESVSP64 = 43
+F_GETBMAPA = 44
+F_FSGETXATTRA = 45
+F_SETBIOSIZE = 46
+F_GETBIOSIZE = 47
+F_GETOPS = 50
+F_DMAPI = 51
+F_FSYNC = 52
+F_FSYNC64 = 53
+F_GETBDSATTR = 54
+F_SETBDSATTR = 55
+F_GETBMAPX = 56
+F_SETPRIO = 57
+F_GETPRIO = 58
+F_RDLCK = 01
+F_WRLCK = 02
+F_UNLCK = 03
+O_ACCMODE = 3
+FD_CLOEXEC = 1
+FD_NODUP_FORK = 4
+BMV_IF_ATTRFORK = 0x1
+BMV_IF_NO_DMAPI_READ = 0x2
+BMV_IF_PREALLOC = 0x4
+BMV_IF_VALID = (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
+BMV_OF_PREALLOC = 0x1
+BMV_IF_EXTENDED = 0x40000000
+FMASK = 0x190FF
+FOPEN = 0xFFFFFFFF
+FREAD = 0x01
+FWRITE = 0x02
+FNDELAY = 0x04
+FAPPEND = 0x08
+FSYNC = 0x10
+FDSYNC = 0x20
+FRSYNC = 0x40
+FNONBLOCK = 0x80
+FASYNC = 0x1000
+FNONBLK = FNONBLOCK
+FLARGEFILE = 0x2000
+FDIRECT = 0x8000
+FBULK = 0x10000
+FCREAT = 0x0100
+FTRUNC = 0x0200
+FEXCL = 0x0400
+FNOCTTY = 0x0800
+FINVIS = 0x0100
+FSOCKET = 0x0200
+FINPROGRESS = 0x0400
+FPRIORITY = 0x0800
+FPRIO = 0x4000
+FDIRENT64 = 0x8000
+FCLOSEXEC = 0x01
+LOCK_SH = 1
+LOCK_EX = 2
+LOCK_NB = 4
+LOCK_UN = 8
+L_SET = 0
+L_INCR = 1
+L_XTND = 2
+F_OK = 0
+X_OK = 1
+W_OK = 2
+R_OK = 4
diff --git a/lib-python/2.2/plat-irix6/FL.py b/lib-python/2.2/plat-irix6/FL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/FL.py
@@ -0,0 +1,289 @@
+# Constants used by the FORMS library (module fl).
+# This corresponds to "forms.h".
+# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
+# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
+
+_v20 = 1
+_v21 = 1
+##import fl
+##try:
+##	_v20 = (fl.get_rgbmode is not None)
+##except:
+##	_v20 = 0
+##del fl
+
+NULL = 0
+FALSE = 0
+TRUE = 1
+
+EVENT = -1
+
+LABEL_SIZE = 64
+if _v20:
+	SHORTCUT_SIZE = 32
+PLACE_FREE = 0
+PLACE_SIZE = 1
+PLACE_ASPECT = 2
+PLACE_MOUSE = 3
+PLACE_CENTER = 4
+PLACE_POSITION = 5
+FL_PLACE_FULLSCREEN = 6
+FIND_INPUT = 0
+FIND_AUTOMATIC = 1
+FIND_MOUSE = 2
+BEGIN_GROUP = 10000
+END_GROUP = 20000
+ALIGN_TOP = 0
+ALIGN_BOTTOM = 1
+ALIGN_LEFT = 2
+ALIGN_RIGHT = 3
+ALIGN_CENTER = 4
+NO_BOX = 0
+UP_BOX = 1
+DOWN_BOX = 2
+FLAT_BOX = 3
+BORDER_BOX = 4
+SHADOW_BOX = 5
+FRAME_BOX = 6
+ROUNDED_BOX = 7
+RFLAT_BOX = 8
+RSHADOW_BOX = 9
+TOP_BOUND_COL = 51
+LEFT_BOUND_COL = 55
+BOT_BOUND_COL = 40
+RIGHT_BOUND_COL = 35
+COL1 = 47
+MCOL = 49
+LCOL = 0
+BOUND_WIDTH = 3.0
+DRAW = 0
+PUSH = 1
+RELEASE = 2
+ENTER = 3
+LEAVE = 4
+MOUSE = 5
+FOCUS = 6
+UNFOCUS = 7
+KEYBOARD = 8
+STEP = 9
+MOVE = 10
+FONT_NAME = 'Helvetica'
+FONT_BOLDNAME = 'Helvetica-Bold'
+FONT_ITALICNAME = 'Helvetica-Oblique'
+FONT_FIXEDNAME = 'Courier'
+FONT_ICONNAME = 'Icon'
+SMALL_FONT = 8.0
+NORMAL_FONT = 11.0
+LARGE_FONT = 20.0
+NORMAL_STYLE = 0
+BOLD_STYLE = 1
+ITALIC_STYLE = 2
+FIXED_STYLE = 3
+ENGRAVED_STYLE = 4
+ICON_STYLE = 5
+BITMAP = 3
+NORMAL_BITMAP = 0
+BITMAP_BOXTYPE = NO_BOX
+BITMAP_COL1 = 0
+BITMAP_COL2 = COL1
+BITMAP_LCOL = LCOL
+BITMAP_ALIGN = ALIGN_BOTTOM
+BITMAP_MAXSIZE = 128*128
+BITMAP_BW = BOUND_WIDTH
+BOX = 1
+BOX_BOXTYPE = UP_BOX
+BOX_COL1 = COL1
+BOX_LCOL = LCOL
+BOX_ALIGN = ALIGN_CENTER
+BOX_BW = BOUND_WIDTH
+BROWSER = 71
+NORMAL_BROWSER = 0
+SELECT_BROWSER = 1
+HOLD_BROWSER = 2
+MULTI_BROWSER = 3
+BROWSER_BOXTYPE = DOWN_BOX
+BROWSER_COL1 = COL1
+BROWSER_COL2 = 3
+BROWSER_LCOL = LCOL
+BROWSER_ALIGN = ALIGN_BOTTOM
+BROWSER_SLCOL = COL1
+BROWSER_BW = BOUND_WIDTH
+BROWSER_LINELENGTH = 128
+BROWSER_MAXLINE = 512
+BUTTON = 11
+NORMAL_BUTTON = 0
+PUSH_BUTTON = 1
+RADIO_BUTTON = 2
+HIDDEN_BUTTON = 3
+TOUCH_BUTTON = 4
+INOUT_BUTTON = 5
+RETURN_BUTTON = 6
+if _v20:
+	HIDDEN_RET_BUTTON = 7
+BUTTON_BOXTYPE = UP_BOX
+BUTTON_COL1 = COL1
+BUTTON_COL2 = COL1
+BUTTON_LCOL = LCOL
+BUTTON_ALIGN = ALIGN_CENTER
+BUTTON_MCOL1 = MCOL
+BUTTON_MCOL2 = MCOL
+BUTTON_BW = BOUND_WIDTH
+if _v20:
+	CHART = 4
+	BAR_CHART = 0
+	HORBAR_CHART = 1
+	LINE_CHART = 2
+	FILLED_CHART = 3
+	SPIKE_CHART = 4
+	PIE_CHART = 5
+	SPECIALPIE_CHART = 6
+	CHART_BOXTYPE = BORDER_BOX
+	CHART_COL1 = COL1
+	CHART_LCOL = LCOL
+	CHART_ALIGN = ALIGN_BOTTOM
+	CHART_BW = BOUND_WIDTH
+	CHART_MAX = 128
+CHOICE = 42
+NORMAL_CHOICE = 0
+CHOICE_BOXTYPE = DOWN_BOX
+CHOICE_COL1 = COL1
+CHOICE_COL2 = LCOL
+CHOICE_LCOL = LCOL
+CHOICE_ALIGN = ALIGN_LEFT
+CHOICE_BW = BOUND_WIDTH
+CHOICE_MCOL = MCOL
+CHOICE_MAXITEMS = 128
+CHOICE_MAXSTR = 64
+CLOCK = 61
+SQUARE_CLOCK = 0
+ROUND_CLOCK = 1
+CLOCK_BOXTYPE = UP_BOX
+CLOCK_COL1 = 37
+CLOCK_COL2 = 42
+CLOCK_LCOL = LCOL
+CLOCK_ALIGN = ALIGN_BOTTOM
+CLOCK_TOPCOL = COL1
+CLOCK_BW = BOUND_WIDTH
+COUNTER = 25
+NORMAL_COUNTER = 0
+SIMPLE_COUNTER = 1
+COUNTER_BOXTYPE = UP_BOX
+COUNTER_COL1 = COL1
+COUNTER_COL2 = 4
+COUNTER_LCOL = LCOL
+COUNTER_ALIGN = ALIGN_BOTTOM
+if _v20:
+	COUNTER_BW = BOUND_WIDTH
+else:
+	DEFAULT = 51
+	RETURN_DEFAULT = 0
+	ALWAYS_DEFAULT = 1
+DIAL = 22
+NORMAL_DIAL = 0
+LINE_DIAL = 1
+DIAL_BOXTYPE = NO_BOX
+DIAL_COL1 = COL1
+DIAL_COL2 = 37
+DIAL_LCOL = LCOL
+DIAL_ALIGN = ALIGN_BOTTOM
+DIAL_TOPCOL = COL1
+DIAL_BW = BOUND_WIDTH
+FREE = 101
+NORMAL_FREE = 1
+SLEEPING_FREE = 2
+INPUT_FREE = 3
+CONTINUOUS_FREE = 4
+ALL_FREE = 5
+INPUT = 31
+NORMAL_INPUT = 0
+if _v20:
+	FLOAT_INPUT = 1
+	INT_INPUT = 2
+	HIDDEN_INPUT = 3
+	if _v21:
+		MULTILINE_INPUT = 4
+		SECRET_INPUT = 5
+else:
+	ALWAYS_INPUT = 1
+INPUT_BOXTYPE = DOWN_BOX
+INPUT_COL1 = 13
+INPUT_COL2 = 5
+INPUT_LCOL = LCOL
+INPUT_ALIGN = ALIGN_LEFT
+INPUT_TCOL = LCOL
+INPUT_CCOL = 4
+INPUT_BW = BOUND_WIDTH
+INPUT_MAX = 128
+LIGHTBUTTON = 12
+LIGHTBUTTON_BOXTYPE = UP_BOX
+LIGHTBUTTON_COL1 = 39
+LIGHTBUTTON_COL2 = 3
+LIGHTBUTTON_LCOL = LCOL
+LIGHTBUTTON_ALIGN = ALIGN_CENTER
+LIGHTBUTTON_TOPCOL = COL1
+LIGHTBUTTON_MCOL = MCOL
+LIGHTBUTTON_BW1 = BOUND_WIDTH
+LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
+LIGHTBUTTON_MINSIZE = 12.0
+MENU = 41
+TOUCH_MENU = 0
+PUSH_MENU = 1
+MENU_BOXTYPE = BORDER_BOX
+MENU_COL1 = 55
+MENU_COL2 = 37
+MENU_LCOL = LCOL
+MENU_ALIGN = ALIGN_CENTER
+MENU_BW = BOUND_WIDTH
+MENU_MAX = 300
+POSITIONER = 23
+NORMAL_POSITIONER = 0
+POSITIONER_BOXTYPE = DOWN_BOX
+POSITIONER_COL1 = COL1
+POSITIONER_COL2 = 1
+POSITIONER_LCOL = LCOL
+POSITIONER_ALIGN = ALIGN_BOTTOM
+POSITIONER_BW = BOUND_WIDTH
+ROUNDBUTTON = 13
+ROUNDBUTTON_BOXTYPE = NO_BOX
+ROUNDBUTTON_COL1 = 7
+ROUNDBUTTON_COL2 = 3
+ROUNDBUTTON_LCOL = LCOL
+ROUNDBUTTON_ALIGN = ALIGN_CENTER
+ROUNDBUTTON_TOPCOL = COL1
+ROUNDBUTTON_MCOL = MCOL
+ROUNDBUTTON_BW = BOUND_WIDTH
+SLIDER = 21
+VALSLIDER = 24
+VERT_SLIDER = 0
+HOR_SLIDER = 1
+VERT_FILL_SLIDER = 2
+HOR_FILL_SLIDER = 3
+VERT_NICE_SLIDER = 4
+HOR_NICE_SLIDER = 5
+SLIDER_BOXTYPE = DOWN_BOX
+SLIDER_COL1 = COL1
+SLIDER_COL2 = COL1
+SLIDER_LCOL = LCOL
+SLIDER_ALIGN = ALIGN_BOTTOM
+SLIDER_BW1 = BOUND_WIDTH
+SLIDER_BW2 = BOUND_WIDTH*0.75
+SLIDER_FINE = 0.05
+SLIDER_WIDTH = 0.08
+TEXT = 2
+NORMAL_TEXT = 0
+TEXT_BOXTYPE = NO_BOX
+TEXT_COL1 = COL1
+TEXT_LCOL = LCOL
+TEXT_ALIGN = ALIGN_LEFT
+TEXT_BW = BOUND_WIDTH
+TIMER = 62
+NORMAL_TIMER = 0
+VALUE_TIMER = 1
+HIDDEN_TIMER = 2
+TIMER_BOXTYPE = DOWN_BOX
+TIMER_COL1 = COL1
+TIMER_COL2 = 1
+TIMER_LCOL = LCOL
+TIMER_ALIGN = ALIGN_CENTER
+TIMER_BW = BOUND_WIDTH
+TIMER_BLINKRATE = 0.2
diff --git a/lib-python/2.2/plat-irix6/GET.py b/lib-python/2.2/plat-irix6/GET.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/GET.py
@@ -0,0 +1,59 @@
+# Symbols from <gl/get.h>
+
+BCKBUFFER = 0x1
+FRNTBUFFER = 0x2
+DRAWZBUFFER = 0x4
+DMRGB = 0
+DMSINGLE = 1
+DMDOUBLE = 2
+DMRGBDOUBLE = 5
+HZ30 = 0
+HZ60 = 1
+NTSC = 2
+HDTV = 3
+VGA = 4
+IRIS3K = 5
+PR60 = 6
+PAL = 9
+HZ30_SG = 11
+A343 = 14
+STR_RECT = 15
+VOF0 = 16
+VOF1 = 17
+VOF2 = 18
+VOF3 = 19
+SGI0 = 20
+SGI1 = 21
+SGI2 = 22
+HZ72 = 23
+GL_VIDEO_REG = 0x00800000
+GLV_GENLOCK = 0x00000001
+GLV_UNBLANK = 0x00000002
+GLV_SRED = 0x00000004
+GLV_SGREEN = 0x00000008
+GLV_SBLUE = 0x00000010
+GLV_SALPHA = 0x00000020
+GLV_TTLGENLOCK = 0x00000080
+GLV_TTLSYNC = GLV_TTLGENLOCK
+GLV_GREENGENLOCK = 0x0000100
+LEFTPLANE = 0x0001
+RIGHTPLANE = 0x0002
+BOTTOMPLANE = 0x0004
+TOPPLANE = 0x0008
+NEARPLANE = 0x0010
+FARPLANE = 0x0020
+## GETDEF = __GL_GET_H__
+NOBUFFER = 0x0
+BOTHBUFFERS = 0x3
+DMINTENSITYSINGLE = 3
+DMINTENSITYDOUBLE = 4
+MONSPECIAL = 0x20
+HZ50 = 3
+MONA = 5
+MONB = 6
+MONC = 7
+MOND = 8
+MON_ALL = 12
+MON_GEN_ALL = 13
+CMAPMULTI = 0
+CMAPONE = 1
diff --git a/lib-python/2.2/plat-irix6/GL.py b/lib-python/2.2/plat-irix6/GL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/GL.py
@@ -0,0 +1,393 @@
+NULL = 0
+FALSE = 0
+TRUE = 1
+ATTRIBSTACKDEPTH = 10
+VPSTACKDEPTH = 8
+MATRIXSTACKDEPTH = 32
+NAMESTACKDEPTH = 1025
+STARTTAG = -2
+ENDTAG = -3
+BLACK = 0
+RED = 1
+GREEN = 2
+YELLOW = 3
+BLUE = 4
+MAGENTA = 5
+CYAN = 6
+WHITE = 7
+PUP_CLEAR = 0
+PUP_COLOR = 1
+PUP_BLACK = 2
+PUP_WHITE = 3
+NORMALDRAW = 0x010
+PUPDRAW = 0x020
+OVERDRAW = 0x040
+UNDERDRAW = 0x080
+CURSORDRAW = 0x100
+DUALDRAW = 0x200
+PATTERN_16 = 16
+PATTERN_32 = 32
+PATTERN_64 = 64
+PATTERN_16_SIZE = 16
+PATTERN_32_SIZE = 64
+PATTERN_64_SIZE = 256
+SRC_AUTO = 0
+SRC_FRONT = 1
+SRC_BACK = 2
+SRC_ZBUFFER = 3
+SRC_PUP = 4
+SRC_OVER = 5
+SRC_UNDER = 6
+SRC_FRAMEGRABBER = 7
+BF_ZERO = 0
+BF_ONE = 1
+BF_DC = 2
+BF_SC = 2
+BF_MDC = 3
+BF_MSC = 3
+BF_SA = 4
+BF_MSA = 5
+BF_DA = 6
+BF_MDA = 7
+BF_MIN_SA_MDA = 8
+AF_NEVER = 0
+AF_LESS = 1
+AF_EQUAL = 2
+AF_LEQUAL = 3
+AF_GREATER = 4
+AF_NOTEQUAL = 5
+AF_GEQUAL = 6
+AF_ALWAYS = 7
+ZF_NEVER = 0
+ZF_LESS = 1
+ZF_EQUAL = 2
+ZF_LEQUAL = 3
+ZF_GREATER = 4
+ZF_NOTEQUAL = 5
+ZF_GEQUAL = 6
+ZF_ALWAYS = 7
+ZSRC_DEPTH = 0
+ZSRC_COLOR = 1
+SMP_OFF = 0x0
+SMP_ON = 0x1
+SMP_SMOOTHER = 0x2
+SML_OFF = 0x0
+SML_ON = 0x1
+SML_SMOOTHER = 0x2
+SML_END_CORRECT = 0x4
+PYSM_OFF = 0
+PYSM_ON = 1
+PYSM_SHRINK = 2
+DT_OFF = 0
+DT_ON = 1
+PUP_NONE = 0
+PUP_GREY = 0x1
+PUP_BOX = 0x2
+PUP_CHECK = 0x4
+GLC_OLDPOLYGON = 0
+GLC_ZRANGEMAP = 1
+GLC_MQUEUERATE = 2
+GLC_SOFTATTACH = 3
+GLC_MANAGEBG = 4
+GLC_SLOWMAPCOLORS = 5
+GLC_INPUTCHANGEBUG = 6
+GLC_NOBORDERBUG = 7
+GLC_SET_VSYNC = 8
+GLC_GET_VSYNC = 9
+GLC_VSYNC_SLEEP = 10
+GLC_COMPATRATE = 15
+C16X1 = 0
+C16X2 = 1
+C32X1 = 2
+C32X2 = 3
+CCROSS = 4
+FLAT = 0
+GOURAUD = 1
+LO_ZERO = 0x0
+LO_AND = 0x1
+LO_ANDR = 0x2
+LO_SRC = 0x3
+LO_ANDI = 0x4
+LO_DST = 0x5
+LO_XOR = 0x6
+LO_OR = 0x7
+LO_NOR = 0x8
+LO_XNOR = 0x9
+LO_NDST = 0xa
+LO_ORR = 0xb
+LO_NSRC = 0xc
+LO_ORI = 0xd
+LO_NAND = 0xe
+LO_ONE = 0xf
+INFOCUSSCRN = -2
+ST_KEEP = 0
+ST_ZERO = 1
+ST_REPLACE = 2
+ST_INCR = 3
+ST_DECR = 4
+ST_INVERT = 5
+SF_NEVER = 0
+SF_LESS = 1
+SF_EQUAL = 2
+SF_LEQUAL = 3
+SF_GREATER = 4
+SF_NOTEQUAL = 5
+SF_GEQUAL = 6
+SF_ALWAYS = 7
+SS_OFF = 0
+SS_DEPTH = 1
+PYM_FILL = 1
+PYM_POINT = 2
+PYM_LINE = 3
+PYM_HOLLOW = 4
+PYM_LINE_FAST = 5
+FG_OFF = 0
+FG_ON = 1
+FG_DEFINE = 2
+FG_VTX_EXP = 2
+FG_VTX_LIN = 3
+FG_PIX_EXP = 4
+FG_PIX_LIN = 5
+FG_VTX_EXP2 = 6
+FG_PIX_EXP2 = 7
+PM_SHIFT = 0
+PM_EXPAND = 1
+PM_C0 = 2
+PM_C1 = 3
+PM_ADD24 = 4
+PM_SIZE = 5
+PM_OFFSET = 6
+PM_STRIDE = 7
+PM_TTOB = 8
+PM_RTOL = 9
+PM_ZDATA = 10
+PM_WARP = 11
+PM_RDX = 12
+PM_RDY = 13
+PM_CDX = 14
+PM_CDY = 15
+PM_XSTART = 16
+PM_YSTART = 17
+PM_VO1 = 1000
+NAUTO = 0
+NNORMALIZE = 1
+AC_CLEAR = 0
+AC_ACCUMULATE = 1
+AC_CLEAR_ACCUMULATE = 2
+AC_RETURN = 3
+AC_MULT = 4
+AC_ADD = 5
+CP_OFF = 0
+CP_ON = 1
+CP_DEFINE = 2
+SB_RESET = 0
+SB_TRACK = 1
+SB_HOLD = 2
+RD_FREEZE = 0x00000001
+RD_ALPHAONE = 0x00000002
+RD_IGNORE_UNDERLAY = 0x00000004
+RD_IGNORE_OVERLAY = 0x00000008
+RD_IGNORE_PUP = 0x00000010
+RD_OFFSCREEN = 0x00000020
+GD_XPMAX = 0
+GD_YPMAX = 1
+GD_XMMAX = 2
+GD_YMMAX = 3
+GD_ZMIN = 4
+GD_ZMAX = 5
+GD_BITS_NORM_SNG_RED = 6
+GD_BITS_NORM_SNG_GREEN = 7
+GD_BITS_NORM_SNG_BLUE = 8
+GD_BITS_NORM_DBL_RED = 9
+GD_BITS_NORM_DBL_GREEN = 10
+GD_BITS_NORM_DBL_BLUE = 11
+GD_BITS_NORM_SNG_CMODE = 12
+GD_BITS_NORM_DBL_CMODE = 13
+GD_BITS_NORM_SNG_MMAP = 14
+GD_BITS_NORM_DBL_MMAP = 15
+GD_BITS_NORM_ZBUFFER = 16
+GD_BITS_OVER_SNG_CMODE = 17
+GD_BITS_UNDR_SNG_CMODE = 18
+GD_BITS_PUP_SNG_CMODE = 19
+GD_BITS_NORM_SNG_ALPHA = 21
+GD_BITS_NORM_DBL_ALPHA = 22
+GD_BITS_CURSOR = 23
+GD_OVERUNDER_SHARED = 24
+GD_BLEND = 25
+GD_CIFRACT = 26
+GD_CROSSHAIR_CINDEX = 27
+GD_DITHER = 28
+GD_LINESMOOTH_CMODE = 30
+GD_LINESMOOTH_RGB = 31
+GD_LOGICOP = 33
+GD_NSCRNS = 35
+GD_NURBS_ORDER = 36
+GD_NBLINKS = 37
+GD_NVERTEX_POLY = 39
+GD_PATSIZE_64 = 40
+GD_PNTSMOOTH_CMODE = 41
+GD_PNTSMOOTH_RGB = 42
+GD_PUP_TO_OVERUNDER = 43
+GD_READSOURCE = 44
+GD_READSOURCE_ZBUFFER = 48
+GD_STEREO = 50
+GD_SUBPIXEL_LINE = 51
+GD_SUBPIXEL_PNT = 52
+GD_SUBPIXEL_POLY = 53
+GD_TRIMCURVE_ORDER = 54
+GD_WSYS = 55
+GD_ZDRAW_GEOM = 57
+GD_ZDRAW_PIXELS = 58
+GD_SCRNTYPE = 61
+GD_TEXTPORT = 62
+GD_NMMAPS = 63
+GD_FRAMEGRABBER = 64
+GD_TIMERHZ = 66
+GD_DBBOX = 67
+GD_AFUNCTION = 68
+GD_ALPHA_OVERUNDER = 69
+GD_BITS_ACBUF = 70
+GD_BITS_ACBUF_HW = 71
+GD_BITS_STENCIL = 72
+GD_CLIPPLANES = 73
+GD_FOGVERTEX = 74
+GD_LIGHTING_TWOSIDE = 76
+GD_POLYMODE = 77
+GD_POLYSMOOTH = 78
+GD_SCRBOX = 79
+GD_TEXTURE = 80
+GD_FOGPIXEL = 81
+GD_TEXTURE_PERSP = 82
+GD_MUXPIPES = 83
+GD_NOLIMIT = -2
+GD_WSYS_NONE = 0
+GD_WSYS_4S = 1
+GD_SCRNTYPE_WM = 0
+GD_SCRNTYPE_NOWM = 1
+N_PIXEL_TOLERANCE = 1
+N_CULLING = 2
+N_DISPLAY = 3
+N_ERRORCHECKING = 4
+N_SUBDIVISIONS = 5
+N_S_STEPS = 6
+N_T_STEPS = 7
+N_TILES = 8
+N_TMP1 = 9
+N_TMP2 = 10
+N_TMP3 = 11
+N_TMP4 = 12
+N_TMP5 = 13
+N_TMP6 = 14
+N_FILL = 1.0
+N_OUTLINE_POLY = 2.0
+N_OUTLINE_PATCH = 5.0
+N_ISOLINE_S = 12.0
+N_ST = 0x8
+N_STW = 0xd
+N_XYZ = 0x4c
+N_XYZW = 0x51
+N_TEX = 0x88
+N_TEXW = 0x8d
+N_RGBA = 0xd0
+N_RGBAW = 0xd5
+N_P2D = 0x8
+N_P2DR = 0xd
+N_V3D = 0x4c
+N_V3DR = 0x51
+N_T2D = 0x88
+N_T2DR = 0x8d
+N_C4D = 0xd0
+N_C4DR = 0xd5
+LMNULL = 0.0
+MSINGLE = 0
+MPROJECTION = 1
+MVIEWING = 2
+MTEXTURE = 3
+MAXLIGHTS = 8
+MAXRESTRICTIONS = 4
+DEFMATERIAL = 0
+EMISSION = 1
+AMBIENT = 2
+DIFFUSE = 3
+SPECULAR = 4
+SHININESS = 5
+COLORINDEXES = 6
+ALPHA = 7
+DEFLIGHT = 100
+LCOLOR = 101
+POSITION = 102
+SPOTDIRECTION = 103
+SPOTLIGHT = 104
+DEFLMODEL = 200
+LOCALVIEWER = 201
+ATTENUATION = 202
+ATTENUATION2 = 203
+TWOSIDE = 204
+MATERIAL = 1000
+BACKMATERIAL = 1001
+LIGHT0 = 1100
+LIGHT1 = 1101
+LIGHT2 = 1102
+LIGHT3 = 1103
+LIGHT4 = 1104
+LIGHT5 = 1105
+LIGHT6 = 1106
+LIGHT7 = 1107
+LMODEL = 1200
+LMC_COLOR = 0
+LMC_EMISSION = 1
+LMC_AMBIENT = 2
+LMC_DIFFUSE = 3
+LMC_SPECULAR = 4
+LMC_AD = 5
+LMC_NULL = 6
+TX_MINFILTER = 0x100
+TX_MAGFILTER = 0x200
+TX_WRAP = 0x300
+TX_WRAP_S = 0x310
+TX_WRAP_T = 0x320
+TX_TILE = 0x400
+TX_BORDER = 0x500
+TX_NULL = 0x000
+TX_POINT = 0x110
+TX_BILINEAR = 0x220
+TX_MIPMAP = 0x120
+TX_MIPMAP_POINT = 0x121
+TX_MIPMAP_LINEAR = 0x122
+TX_MIPMAP_BILINEAR = 0x123
+TX_MIPMAP_TRILINEAR = 0x124
+TX_REPEAT = 0x301
+TX_CLAMP = 0x302
+TX_SELECT = 0x303
+TX_TEXTURE_0 = 0
+TV_MODULATE = 0x101
+TV_BLEND = 0x102
+TV_DECAL = 0x103
+TV_COLOR = 0x200
+TV_NULL = 0x000
+TV_ENV0 = 0
+TX_S = 0
+TX_T = 1
+TG_OFF = 0
+TG_ON = 1
+TG_CONTOUR = 2
+TG_LINEAR = 3
+TG_SPHEREMAP = 4
+TG_REFRACTMAP = 5
+DGLSINK = 0
+DGLLOCAL = 1
+DGLTSOCKET = 2
+DGL4DDN = 3
+PUP_CURSOR = PUP_COLOR
+FATAL = 1
+WARNING = 2
+ASK_CONT = 3
+ASK_RESTART = 4
+XMAXSCREEN = 1279
+YMAXSCREEN = 1023
+XMAXMEDIUM = 1023
+YMAXMEDIUM = 767
+XMAX170 = 645
+YMAX170 = 484
+XMAXPAL = 779
+YMAXPAL = 574
diff --git a/lib-python/2.2/plat-irix6/GLWS.py b/lib-python/2.2/plat-irix6/GLWS.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/GLWS.py
@@ -0,0 +1,12 @@
+NOERROR = 0
+NOCONTEXT = -1
+NODISPLAY = -2
+NOWINDOW = -3
+NOGRAPHICS = -4
+NOTTOP = -5
+NOVISUAL = -6
+BUFSIZE = -7
+BADWINDOW = -8
+ALREADYBOUND = -100
+BINDFAILED = -101
+SETFAILED = -102
diff --git a/lib-python/2.2/plat-irix6/IN.py b/lib-python/2.2/plat-irix6/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/IN.py
@@ -0,0 +1,385 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from standards.h
+
+# Included from sgidefs.h
+_MIPS_ISA_MIPS1 = 1
+_MIPS_ISA_MIPS2 = 2
+_MIPS_ISA_MIPS3 = 3
+_MIPS_ISA_MIPS4 = 4
+_MIPS_SIM_ABI32 = 1
+_MIPS_SIM_NABI32 = 2
+_MIPS_SIM_ABI64 = 3
+
+# Included from sys/bsd_types.h
+
+# Included from sys/mkdev.h
+ONBITSMAJOR = 7
+ONBITSMINOR = 8
+OMAXMAJ = 0x7f
+OMAXMIN = 0xff
+NBITSMAJOR = 14
+NBITSMINOR = 18
+MAXMAJ = 0x1ff
+MAXMIN = 0x3ffff
+OLDDEV = 0
+NEWDEV = 1
+MKDEV_VER = NEWDEV
+def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))
+
+def major(dev): return __major(MKDEV_VER, dev)
+
+def minor(dev): return __minor(MKDEV_VER, dev)
+
+
+# Included from sys/select.h
+FD_SETSIZE = 1024
+__NBBY = 8
+
+# Included from string.h
+NULL = 0L
+NBBY = 8
+
+# Included from sys/endian.h
+LITTLE_ENDIAN = 1234
+BIG_ENDIAN = 4321
+PDP_ENDIAN = 3412
+_LITTLE_ENDIAN = 1234
+_BIG_ENDIAN = 4321
+_PDP_ENDIAN = 3412
+_BYTE_ORDER = _BIG_ENDIAN
+_BYTE_ORDER = _LITTLE_ENDIAN
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+def htonl(x): return ntohl(x)
+
+def htons(x): return ntohs(x)
+
+
+# Included from sys/types.h
+
+# Included from sys/pthread.h
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/cpumask.h
+MAXCPU = 128
+def CPUMASK_INDEX(bit): return ((bit) >> 6)
+
+def CPUMASK_SHFT(bit): return ((bit) & 0x3f)
+
+def CPUMASK_IS_ZERO(p): return ((p) == 0)
+
+def CPUMASK_IS_NONZERO(p): return ((p) != 0)
+
+
+# Included from sys/nodemask.h
+def CNODEMASK_IS_ZERO(p): return ((p) == 0)
+
+def CNODEMASK_IS_NONZERO(p): return ((p) != 0)
+
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPIP = 4
+IPPROTO_ENCAP = IPPROTO_IPIP
+IPPROTO_ST = 5
+IPPROTO_TCP = 6
+IPPROTO_UCL = 7
+IPPROTO_EGP = 8
+IPPROTO_IGP = 9
+IPPROTO_BBN_RCC_MON = 10
+IPPROTO_NVP_II = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_DCN_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK_1 = 23
+IPPROTO_TRUNK_2 = 24
+IPPROTO_LEAF_1 = 25
+IPPROTO_LEAF_2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_NETBLT = 30
+IPPROTO_MFE_NSP = 31
+IPPROTO_MERIT_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_IDPR_CMTP = 38
+IPPROTO_TPPP = 39
+IPPROTO_IL = 40
+IPPROTO_IPV6 = 41
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_RSVP = 46
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SAT_EXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_SAT_MON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BR_SAT_MON = 76
+IPPROTO_ND = 77
+IPPROTO_WB_MON = 78
+IPPROTO_WB_EXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SECURE_VMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_NSFNET_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPF = 89
+IPPROTO_SPRITE_RPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_SWIPE = 94
+IPPROTO_MICP = 95
+IPPROTO_AES_SP3_D = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAPHDR = 98
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPROTO_STP = 257
+IPPORT_RESERVED = 1024
+IPPORT_MAXPORT = 65535
+INET_ADDRSTRLEN = 16
+INET6_ADDRSTRLEN = 46
+def IN_CLASSA(i): return (((__int32_t)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((__int32_t)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((__int32_t)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((__int32_t)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((__int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((__int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+IPNGVERSION = 6
+IPV6_FLOWINFO_FLOWLABEL = 0x00ffffff
+IPV6_FLOWINFO_PRIORITY = 0x0f000000
+IPV6_FLOWINFO_PRIFLOW = 0x0fffffff
+IPV6_FLOWINFO_SRFLAG = 0x10000000
+IPV6_FLOWINFO_VERSION = 0xf0000000
+IPV6_PRIORITY_UNCHARACTERIZED = 0x00000000
+IPV6_PRIORITY_FILLER = 0x01000000
+IPV6_PRIORITY_UNATTENDED = 0x02000000
+IPV6_PRIORITY_RESERVED1 = 0x03000000
+IPV6_PRIORITY_BULK = 0x04000000
+IPV6_PRIORITY_RESERVED2 = 0x05000000
+IPV6_PRIORITY_INTERACTIVE = 0x06000000
+IPV6_PRIORITY_CONTROL = 0x07000000
+IPV6_PRIORITY_8 = 0x08000000
+IPV6_PRIORITY_9 = 0x09000000
+IPV6_PRIORITY_10 = 0x0a000000
+IPV6_PRIORITY_11 = 0x0b000000
+IPV6_PRIORITY_12 = 0x0c000000
+IPV6_PRIORITY_13 = 0x0d000000
+IPV6_PRIORITY_14 = 0x0e000000
+IPV6_PRIORITY_15 = 0x0f000000
+IPV6_SRFLAG_STRICT = 0x10000000
+IPV6_SRFLAG_LOOSE = 0x00000000
+IPV6_VERSION = 0x60000000
+IPV6_FLOWINFO_FLOWLABEL = 0xffffff00
+IPV6_FLOWINFO_PRIORITY = 0x0000000f
+IPV6_FLOWINFO_PRIFLOW = 0xffffff0f
+IPV6_FLOWINFO_SRFLAG = 0x00000010
+IPV6_FLOWINFO_VERSION = 0x000000f0
+IPV6_PRIORITY_UNCHARACTERIZED = 0x00000000
+IPV6_PRIORITY_FILLER = 0x00000001
+IPV6_PRIORITY_UNATTENDED = 0x00000002
+IPV6_PRIORITY_RESERVED1 = 0x00000003
+IPV6_PRIORITY_BULK = 0x00000004
+IPV6_PRIORITY_RESERVED2 = 0x00000005
+IPV6_PRIORITY_INTERACTIVE = 0x00000006
+IPV6_PRIORITY_CONTROL = 0x00000007
+IPV6_PRIORITY_8 = 0x00000008
+IPV6_PRIORITY_9 = 0x00000009
+IPV6_PRIORITY_10 = 0x0000000a
+IPV6_PRIORITY_11 = 0x0000000b
+IPV6_PRIORITY_12 = 0x0000000c
+IPV6_PRIORITY_13 = 0x0000000d
+IPV6_PRIORITY_14 = 0x0000000e
+IPV6_PRIORITY_15 = 0x0000000f
+IPV6_SRFLAG_STRICT = 0x00000010
+IPV6_SRFLAG_LOOSE = 0x00000000
+IPV6_VERSION = 0x00000060
+def IPV6_GET_FLOWLABEL(x): return (ntohl(x) & 0x00ffffff)
+
+def IPV6_GET_PRIORITY(x): return ((ntohl(x) >> 24) & 0xf)
+
+def IPV6_GET_VERSION(x): return ((ntohl(x) >> 28) & 0xf)
+
+def IPV6_SET_FLOWLABEL(x): return (htonl(x) & IPV6_FLOWINFO_FLOWLABEL)
+
+def IPV6_SET_PRIORITY(x): return (htonl((x & 0xf) << 24))
+
+def CLR_ADDR6(a): return \
+
+def IS_ANYSOCKADDR(a): return \
+
+def IS_ANYADDR6(a): return \
+
+def IS_COMPATSOCKADDR(a): return \
+
+def IS_COMPATADDR6(a): return \
+
+def IS_LOOPSOCKADDR(a): return \
+
+def IS_LOOPADDR6(a): return \
+
+def IS_IPV4SOCKADDR(a): return \
+
+def IS_IPV4ADDR6(a): return \
+
+def IS_LOOPSOCKADDR(a): return \
+
+def IS_LOOPADDR6(a): return \
+
+def IS_IPV4SOCKADDR(a): return \
+
+def IS_IPV4ADDR6(a): return \
+
+def IS_LOCALADDR6(a): return ((a).s6_addr8[0] == 0xfe)
+
+def IS_LINKLADDR6(a): return \
+
+def IS_SITELADDR6(a): return \
+
+def IS_MULTIADDR6(a): return ((a).s6_addr8[0] == 0xff)
+
+def MADDR6_FLAGS(a): return ((a).s6_addr8[1] >> 4)
+
+MADDR6_FLG_WK = 0
+MADDR6_FLG_TS = 1
+def MADDR6_SCOPE(a): return ((a).s6_addr8[1] & 0x0f)
+
+MADDR6_SCP_NODE = 0x1
+MADDR6_SCP_LINK = 0x2
+MADDR6_SCP_SITE = 0x5
+MADDR6_SCP_ORG = 0x8
+MADDR6_SCP_GLO = 0xe
+MADDR6_ALLNODES = 1
+MADDR6_ALLROUTERS = 2
+MADDR6_ALLHOSTS = 3
+def IN6_IS_ADDR_UNSPECIFIED(p): return IS_ANYADDR6(*p)
+
+def IN6_IS_ADDR_LOOPBACK(p): return IS_LOOPADDR6(*p)
+
+def IN6_IS_ADDR_MULTICAST(p): return IS_MULTIADDR6(*p)
+
+def IN6_IS_ADDR_LINKLOCAL(p): return IS_LINKLADDR6(*p)
+
+def IN6_IS_ADDR_SITELOCAL(p): return IS_SITELADDR6(*p)
+
+def IN6_IS_ADDR_V4MAPPED(p): return IS_IPV4ADDR6(*p)
+
+def IN6_IS_ADDR_V4COMPAT(p): return IS_COMPATADDR6(*p)
+
+def IN6_IS_ADDR_MC_NODELOCAL(p): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(p): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(p): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(p): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(p): return \
+
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 20
+IP_MULTICAST_TTL = 21
+IP_MULTICAST_LOOP = 22
+IP_ADD_MEMBERSHIP = 23
+IP_DROP_MEMBERSHIP = 24
+IP_MULTICAST_VIF = 25
+IP_RSVP_VIF_ON = 26
+IP_RSVP_VIF_OFF = 27
+IP_RSVP_ON = 28
+IP_SENDSRCADDR = 36
+IPV6_UNICAST_HOPS = IP_TTL
+IPV6_MULTICAST_IF = IP_MULTICAST_IF
+IPV6_MULTICAST_HOPS = IP_MULTICAST_TTL
+IPV6_MULTICAST_LOOP = IP_MULTICAST_LOOP
+IPV6_ADD_MEMBERSHIP = IP_ADD_MEMBERSHIP
+IPV6_DROP_MEMBERSHIP = IP_DROP_MEMBERSHIP
+IPV6_SENDIF = 40
+IPV6_NOPROBE = 42
+IPV6_RECVPKTINFO = 43
+IPV6_PKTINFO = 44
+IP_RECVTTL = 45
+IPV6_RECVHOPS = IP_RECVTTL
+IPV6_CHECKSUM = 46
+ICMP6_FILTER = 47
+IPV6_HOPLIMIT = 48
+IPV6_HOPOPTS = 49
+IPV6_DSTOPTS = 50
+IPV6_RTHDR = 51
+IPV6_PKTOPTIONS = 52
+IPV6_NEXTHOP = 53
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
diff --git a/lib-python/2.2/plat-irix6/IOCTL.py b/lib-python/2.2/plat-irix6/IOCTL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/IOCTL.py
@@ -0,0 +1,233 @@
+# These lines were mostly generated by h2py.py (see demo/scripts)
+# from <sys/ioctl.h>, <sys/termio.h> and <termios.h> on Irix 4.0.2
+# with some manual changes to cope with imperfections in h2py.py.
+# The applicability on other systems is not clear; especially non-SYSV
+# systems may have a totally different set of ioctls.
+
+IOCTYPE = 0xff00
+LIOC = (ord('l')<<8)
+LIOCGETP = (LIOC|1)
+LIOCSETP = (LIOC|2)
+LIOCGETS = (LIOC|5)
+LIOCSETS = (LIOC|6)
+DIOC = (ord('d')<<8)
+DIOCGETC = (DIOC|1)
+DIOCGETB = (DIOC|2)
+DIOCSETE = (DIOC|3)
+IOCPARM_MASK = 0x7f
+IOC_VOID = 0x20000000
+IOC_OUT = 0x40000000
+IOC_IN = 0x80000000
+IOC_INOUT = (IOC_IN|IOC_OUT)
+int = 'i'
+short = 'h'
+long = 'l'
+def sizeof(t): import struct; return struct.calcsize(t)
+def _IO(x,y): return (IOC_VOID|((x)<<8)|y)
+def _IOR(x,y,t): return (IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+def _IOW(x,y,t): return (IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+# this should be _IORW, but stdio got there first
+def _IOWR(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
+FIONREAD = _IOR(ord('f'), 127, int)
+FIONBIO = _IOW(ord('f'), 126, int)
+FIOASYNC = _IOW(ord('f'), 125, int)
+FIOSETOWN = _IOW(ord('f'), 124, int)
+FIOGETOWN = _IOR(ord('f'), 123, int)
+NCC = 8
+NCC_PAD = 7
+NCC_EXT = 16
+NCCS = (NCC+NCC_PAD+NCC_EXT)
+VINTR = 0
+VQUIT = 1
+VERASE = 2
+VKILL = 3
+VEOF = 4
+VEOL = 5
+VEOL2 = 6
+VMIN = VEOF
+VTIME = VEOL
+VSWTCH = 7
+VLNEXT = (NCC+NCC_PAD+0)
+VWERASE = (NCC+NCC_PAD+1)
+VRPRNT = (NCC+NCC_PAD+2)
+VFLUSHO = (NCC+NCC_PAD+3)
+VSTOP = (NCC+NCC_PAD+4)
+VSTART = (NCC+NCC_PAD+5)
+CNUL = '\0'
+CDEL = '\377'
+CESC = '\\'
+CINTR = '\177'
+CQUIT = '\34'
+CBRK = '\377'
+def CTRL(c): return ord(c) & 0x0f
+CERASE = CTRL('H')
+CKILL = CTRL('U')
+CEOF = CTRL('d')
+CEOT = CEOF
+CSTART = CTRL('q')
+CSTOP = CTRL('s')
+CSWTCH = CTRL('z')
+CSUSP = CSWTCH
+CNSWTCH = 0
+CLNEXT = CTRL('v')
+CWERASE = CTRL('w')
+CFLUSHO = CTRL('o')
+CFLUSH = CFLUSHO
+CRPRNT = CTRL('r')
+CDSUSP = CTRL('y')
+IGNBRK = 0000001
+BRKINT = 0000002
+IGNPAR = 0000004
+PARMRK = 0000010
+INPCK = 0000020
+ISTRIP = 0000040
+INLCR = 0000100
+IGNCR = 0000200
+ICRNL = 0000400
+IUCLC = 0001000
+IXON = 0002000
+IXANY = 0004000
+IXOFF = 0010000
+IBLKMD = 0020000
+OPOST = 0000001
+OLCUC = 0000002
+ONLCR = 0000004
+OCRNL = 0000010
+ONOCR = 0000020
+ONLRET = 0000040
+OFILL = 0000100
+OFDEL = 0000200
+NLDLY = 0000400
+NL0 = 0
+NL1 = 0000400
+CRDLY = 0003000
+CR0 = 0
+CR1 = 0001000
+CR2 = 0002000
+CR3 = 0003000
+TABDLY = 0014000
+TAB0 = 0
+TAB1 = 0004000
+TAB2 = 0010000
+TAB3 = 0014000
+BSDLY = 0020000
+BS0 = 0
+BS1 = 0020000
+VTDLY = 0040000
+VT0 = 0
+VT1 = 0040000
+FFDLY = 0100000
+FF0 = 0
+FF1 = 0100000
+CBAUD = 0000017
+B0 = 0
+B50 = 0000001
+B75 = 0000002
+B110 = 0000003
+B134 = 0000004
+B150 = 0000005
+B200 = 0000006
+B300 = 0000007
+B600 = 0000010
+B1200 = 0000011
+B1800 = 0000012
+B2400 = 0000013
+B4800 = 0000014
+B9600 = 0000015
+B19200 = 0000016
+EXTA = 0000016
+B38400 = 0000017
+EXTB = 0000017
+CSIZE = 0000060
+CS5 = 0
+CS6 = 0000020
+CS7 = 0000040
+CS8 = 0000060
+CSTOPB = 0000100
+CREAD = 0000200
+PARENB = 0000400
+PARODD = 0001000
+HUPCL = 0002000
+CLOCAL = 0004000
+LOBLK = 0040000
+ISIG = 0000001
+ICANON = 0000002
+XCASE = 0000004
+ECHO = 0000010
+ECHOE = 0000020
+ECHOK = 0000040
+ECHONL = 0000100
+NOFLSH = 0000200
+IIEXTEN = 0000400
+ITOSTOP = 0001000
+SSPEED = B9600
+IOCTYPE = 0xff00
+TIOC = (ord('T')<<8)
+oTCGETA = (TIOC|1)
+oTCSETA = (TIOC|2)
+oTCSETAW = (TIOC|3)
+oTCSETAF = (TIOC|4)
+TCSBRK = (TIOC|5)
+TCXONC = (TIOC|6)
+TCFLSH = (TIOC|7)
+TCGETA = (TIOC|8)
+TCSETA = (TIOC|9)
+TCSETAW = (TIOC|10)
+TCSETAF = (TIOC|11)
+TIOCFLUSH = (TIOC|12)
+TCDSET = (TIOC|32)
+TCBLKMD = (TIOC|33)
+TIOCPKT = (TIOC|112)
+TIOCPKT_DATA = 0x00
+TIOCPKT_FLUSHREAD = 0x01
+TIOCPKT_FLUSHWRITE = 0x02
+TIOCPKT_NOSTOP = 0x10
+TIOCPKT_DOSTOP = 0x20
+TIOCNOTTY = (TIOC|113)
+TIOCSTI = (TIOC|114)
+TIOCSPGRP = _IOW(ord('t'), 118, int)
+TIOCGPGRP = _IOR(ord('t'), 119, int)
+TIOCCONS = _IOW(ord('t'), 120, int)
+struct_winsize = 'hhhh'
+TIOCGWINSZ = _IOR(ord('t'), 104, struct_winsize)
+TIOCSWINSZ = _IOW(ord('t'), 103, struct_winsize)
+TFIOC = (ord('F')<<8)
+oFIONREAD = (TFIOC|127)
+LDIOC = (ord('D')<<8)
+LDOPEN = (LDIOC|0)
+LDCLOSE = (LDIOC|1)
+LDCHG = (LDIOC|2)
+LDGETT = (LDIOC|8)
+LDSETT = (LDIOC|9)
+TERM_NONE = 0
+TERM_TEC = 1
+TERM_V61 = 2
+TERM_V10 = 3
+TERM_TEX = 4
+TERM_D40 = 5
+TERM_H45 = 6
+TERM_D42 = 7
+TM_NONE = 0000
+TM_SNL = 0001
+TM_ANL = 0002
+TM_LCF = 0004
+TM_CECHO = 0010
+TM_CINVIS = 0020
+TM_SET = 0200
+LDISC0 = 0
+LDISC1 = 1
+NTTYDISC = LDISC1
+VSUSP = VSWTCH
+TCSANOW = 0
+TCSADRAIN = 1
+TCSAFLUSH = 2
+TCIFLUSH = 0
+TCOFLUSH = 1
+TCIOFLUSH = 2
+TCOOFF = 0
+TCOON = 1
+TCIOFF = 2
+TCION = 3
+TO_STOP = LOBLK
+IEXTEN = IIEXTEN
+TOSTOP = ITOSTOP
diff --git a/lib-python/2.2/plat-irix6/SV.py b/lib-python/2.2/plat-irix6/SV.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/SV.py
@@ -0,0 +1,120 @@
+NTSC_XMAX = 640
+NTSC_YMAX = 480
+PAL_XMAX = 768
+PAL_YMAX = 576
+BLANKING_BUFFER_SIZE = 2
+
+MAX_SOURCES = 2
+
+# mode parameter for Bind calls
+IN_OFF = 0				# No Video
+IN_OVER = 1				# Video over graphics
+IN_UNDER = 2				# Video under graphics
+IN_REPLACE = 3				# Video replaces entire win
+
+# mode parameters for LoadMap calls.  Specifies buffer, always 256 entries
+INPUT_COLORMAP = 0			# tuples of 8-bit RGB
+CHROMA_KEY_MAP = 1			# tuples of 8-bit RGB
+COLOR_SPACE_MAP = 2			# tuples of 8-bit RGB
+GAMMA_MAP = 3				# tuples of 24-bit red values
+
+# mode parameters for UseExclusive calls
+INPUT = 0
+OUTPUT = 1
+IN_OUT = 2
+
+# Format constants for the capture routines
+RGB8_FRAMES = 0				# noninterleaved 8 bit 3:2:3 RBG fields
+RGB32_FRAMES = 1			# 32-bit 8:8:8 RGB frames
+YUV411_FRAMES = 2			# interleaved, 8:2:2 YUV format
+YUV411_FRAMES_AND_BLANKING_BUFFER = 3
+
+#
+# sv.SetParam is passed variable length argument lists,
+# consisting of <name, value> pairs.   The following
+# constants identify argument names.
+#
+_NAME_BASE = 1000
+SOURCE = (_NAME_BASE + 0)
+SOURCE1 = 0
+SOURCE2 = 1
+SOURCE3 = 2
+COLOR = (_NAME_BASE + 1)
+DEFAULT_COLOR = 0
+USER_COLOR = 1
+MONO = 2
+OUTPUTMODE = (_NAME_BASE + 2)
+LIVE_OUTPUT = 0
+STILL24_OUT = 1
+FREEZE = (_NAME_BASE + 3)
+DITHER = (_NAME_BASE + 4)
+OUTPUT_FILTER = (_NAME_BASE + 5)
+HUE = (_NAME_BASE + 6)
+GENLOCK = (_NAME_BASE + 7)
+GENLOCK_OFF = 0
+GENLOCK_ON = 1
+GENLOCK_HOUSE = 2
+BROADCAST = (_NAME_BASE + 8)
+NTSC = 0
+PAL = 1
+VIDEO_MODE = (_NAME_BASE + 9)
+COMP = 0
+SVIDEO = 1
+INPUT_BYPASS = (_NAME_BASE + 10)
+FIELDDROP = (_NAME_BASE + 11)
+SLAVE = (_NAME_BASE + 12)
+APERTURE_FACTOR = (_NAME_BASE + 13)
+AFACTOR_0 = 0
+AFACTOR_QTR = 1
+AFACTOR_HLF = 2
+AFACTOR_ONE = 3
+CORING = (_NAME_BASE + 14)
+COR_OFF = 0
+COR_1LSB = 1
+COR_2LSB = 2
+COR_3LSB = 3
+APERTURE_BANDPASS = (_NAME_BASE + 15)
+ABAND_F0 = 0
+ABAND_F1 = 1
+ABAND_F2 = 2
+ABAND_F3 = 3
+PREFILTER = (_NAME_BASE + 16)
+CHROMA_TRAP = (_NAME_BASE + 17)
+CK_THRESHOLD = (_NAME_BASE + 18)
+PAL_SENSITIVITY = (_NAME_BASE + 19)
+GAIN_CONTROL = (_NAME_BASE + 20)
+GAIN_SLOW = 0
+GAIN_MEDIUM = 1
+GAIN_FAST = 2
+GAIN_FROZEN = 3
+AUTO_CKILL = (_NAME_BASE + 21)
+VTR_MODE = (_NAME_BASE + 22)
+VTR_INPUT = 0
+CAMERA_INPUT = 1
+LUMA_DELAY = (_NAME_BASE + 23)
+VNOISE = (_NAME_BASE + 24)
+VNOISE_NORMAL = 0
+VNOISE_SEARCH = 1
+VNOISE_AUTO = 2
+VNOISE_BYPASS = 3
+CHCV_PAL = (_NAME_BASE + 25)
+CHCV_NTSC = (_NAME_BASE + 26)
+CCIR_LEVELS = (_NAME_BASE + 27)
+STD_CHROMA = (_NAME_BASE + 28)
+DENC_VTBYPASS = (_NAME_BASE + 29)
+FAST_TIMECONSTANT = (_NAME_BASE + 30)
+GENLOCK_DELAY = (_NAME_BASE + 31)
+PHASE_SYNC = (_NAME_BASE + 32)
+VIDEO_OUTPUT = (_NAME_BASE + 33)
+CHROMA_PHASEOUT = (_NAME_BASE + 34)
+CHROMA_CENTER = (_NAME_BASE + 35)
+YUV_TO_RGB_INVERT = (_NAME_BASE + 36)
+SOURCE1_BROADCAST = (_NAME_BASE + 37)
+SOURCE1_MODE = (_NAME_BASE + 38)
+SOURCE2_BROADCAST = (_NAME_BASE + 39)
+SOURCE2_MODE = (_NAME_BASE + 40)
+SOURCE3_BROADCAST = (_NAME_BASE + 41)
+SOURCE3_MODE = (_NAME_BASE + 42)
+SIGNAL_STD = (_NAME_BASE + 43)
+NOSIGNAL = 2
+SIGNAL_COLOR = (_NAME_BASE + 44)
diff --git a/lib-python/2.2/plat-irix6/WAIT.py b/lib-python/2.2/plat-irix6/WAIT.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/WAIT.py
@@ -0,0 +1,335 @@
+# Generated by h2py from /usr/include/sys/wait.h
+
+# Included from standards.h
+def _W_INT(i): return (i)
+
+WUNTRACED = 0004
+WNOHANG = 0100
+_WSTOPPED = 0177
+def WIFEXITED(stat): return ((_W_INT(stat)&0377)==0)
+
+def WEXITSTATUS(stat): return ((_W_INT(stat)>>8)&0377)
+
+def WTERMSIG(stat): return (_W_INT(stat)&0177)
+
+def WSTOPSIG(stat): return ((_W_INT(stat)>>8)&0377)
+
+WEXITED = 0001
+WTRAPPED = 0002
+WSTOPPED = 0004
+WCONTINUED = 0010
+WNOWAIT = 0200
+WOPTMASK = (WEXITED|WTRAPPED|WSTOPPED|WCONTINUED|WNOHANG|WNOWAIT)
+WSTOPFLG = 0177
+WCONTFLG = 0177777
+WCOREFLAG = 0200
+WSIGMASK = 0177
+def WWORD(stat): return (_W_INT(stat)&0177777)
+
+def WIFCONTINUED(stat): return (WWORD(stat)==WCONTFLG)
+
+def WCOREDUMP(stat): return (_W_INT(stat) & WCOREFLAG)
+
+
+# Included from sys/types.h
+
+# Included from sgidefs.h
+_MIPS_ISA_MIPS1 = 1
+_MIPS_ISA_MIPS2 = 2
+_MIPS_ISA_MIPS3 = 3
+_MIPS_ISA_MIPS4 = 4
+_MIPS_SIM_ABI32 = 1
+_MIPS_SIM_NABI32 = 2
+_MIPS_SIM_ABI64 = 3
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/bsd_types.h
+
+# Included from sys/mkdev.h
+ONBITSMAJOR = 7
+ONBITSMINOR = 8
+OMAXMAJ = 0x7f
+OMAXMIN = 0xff
+NBITSMAJOR = 14
+NBITSMINOR = 18
+MAXMAJ = 0x1ff
+MAXMIN = 0x3ffff
+OLDDEV = 0
+NEWDEV = 1
+MKDEV_VER = NEWDEV
+def major(dev): return __major(MKDEV_VER, dev)
+
+def minor(dev): return __minor(MKDEV_VER, dev)
+
+
+# Included from sys/select.h
+FD_SETSIZE = 1024
+__NBBY = 8
+
+# Included from string.h
+NULL = 0L
+NBBY = 8
+
+# Included from sys/procset.h
+P_INITPID = 1
+P_INITUID = 0
+P_INITPGID = 0
+
+# Included from sys/signal.h
+SIGHUP = 1
+SIGINT = 2
+SIGQUIT = 3
+SIGILL = 4
+SIGTRAP = 5
+SIGIOT = 6
+SIGABRT = 6
+SIGEMT = 7
+SIGFPE = 8
+SIGKILL = 9
+SIGBUS = 10
+SIGSEGV = 11
+SIGSYS = 12
+SIGPIPE = 13
+SIGALRM = 14
+SIGTERM = 15
+SIGUSR1 = 16
+SIGUSR2 = 17
+SIGCLD = 18
+SIGCHLD = 18
+SIGPWR = 19
+SIGWINCH = 20
+SIGURG = 21
+SIGPOLL = 22
+SIGIO = 22
+SIGSTOP = 23
+SIGTSTP = 24
+SIGCONT = 25
+SIGTTIN = 26
+SIGTTOU = 27
+SIGVTALRM = 28
+SIGPROF = 29
+SIGXCPU = 30
+SIGXFSZ = 31
+SIG32 = 32
+SIGCKPT = 33
+SIGRTMIN = 49
+SIGRTMAX = 64
+SIGPTINTR = 47
+SIGPTRESCHED = 48
+__sigargs = int
+SIGEV_NONE = 128
+SIGEV_SIGNAL = 129
+SIGEV_CALLBACK = 130
+
+# Included from sys/siginfo.h
+ILL_ILLOPC = 1
+ILL_ILLOPN = 2
+ILL_ILLADR = 3
+ILL_ILLTRP = 4
+ILL_PRVOPC = 5
+ILL_PRVREG = 6
+ILL_COPROC = 7
+ILL_BADSTK = 8
+NSIGILL = 8
+FPE_INTDIV = 1
+FPE_INTOVF = 2
+FPE_FLTDIV = 3
+FPE_FLTOVF = 4
+FPE_FLTUND = 5
+FPE_FLTRES = 6
+FPE_FLTINV = 7
+FPE_FLTSUB = 8
+NSIGFPE = 8
+SEGV_MAPERR = 1
+SEGV_ACCERR = 2
+NSIGSEGV = 2
+BUS_ADRALN = 1
+BUS_ADRERR = 2
+BUS_OBJERR = 3
+NSIGBUS = 3
+TRAP_BRKPT = 1
+TRAP_TRACE = 2
+NSIGTRAP = 2
+CLD_EXITED = 1
+CLD_KILLED = 2
+CLD_DUMPED = 3
+CLD_TRAPPED = 4
+CLD_STOPPED = 5
+CLD_CONTINUED = 6
+NSIGCLD = 6
+POLL_IN = 1
+POLL_OUT = 2
+POLL_MSG = 3
+POLL_ERR = 4
+POLL_PRI = 5
+POLL_HUP = 6
+NSIGPOLL = 6
+SI_MAXSZ = 128
+SI_USER = 0
+SI_KILL = SI_USER
+SI_QUEUE = -1
+SI_ASYNCIO = -2
+SI_TIMER = -3
+SI_MESGQ = -4
+SIG_NOP = 0
+SIG_BLOCK = 1
+SIG_UNBLOCK = 2
+SIG_SETMASK = 3
+SIG_SETMASK32 = 256
+SA_ONSTACK = 0x00000001
+SA_RESETHAND = 0x00000002
+SA_RESTART = 0x00000004
+SA_SIGINFO = 0x00000008
+SA_NODEFER = 0x00000010
+SA_NOCLDWAIT = 0x00010000
+SA_NOCLDSTOP = 0x00020000
+_SA_BSDCALL = 0x10000000
+MINSIGSTKSZ = 512
+SIGSTKSZ = 8192
+SS_ONSTACK = 0x00000001
+SS_DISABLE = 0x00000002
+
+# Included from sys/ucontext.h
+NGREG = 36
+NGREG = 37
+GETCONTEXT = 0
+SETCONTEXT = 1
+UC_SIGMASK = 001
+UC_STACK = 002
+UC_CPU = 004
+UC_MAU = 010
+UC_MCONTEXT = (UC_CPU|UC_MAU)
+UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
+CTX_R0 = 0
+CTX_AT = 1
+CTX_V0 = 2
+CTX_V1 = 3
+CTX_A0 = 4
+CTX_A1 = 5
+CTX_A2 = 6
+CTX_A3 = 7
+CTX_T0 = 8
+CTX_T1 = 9
+CTX_T2 = 10
+CTX_T3 = 11
+CTX_T4 = 12
+CTX_T5 = 13
+CTX_T6 = 14
+CTX_T7 = 15
+CTX_A4 = 8
+CTX_A5 = 9
+CTX_A6 = 10
+CTX_A7 = 11
+CTX_T0 = 12
+CTX_T1 = 13
+CTX_T2 = 14
+CTX_T3 = 15
+CTX_S0 = 16
+CTX_S1 = 17
+CTX_S2 = 18
+CTX_S3 = 19
+CTX_S4 = 20
+CTX_S5 = 21
+CTX_S6 = 22
+CTX_S7 = 23
+CTX_T8 = 24
+CTX_T9 = 25
+CTX_K0 = 26
+CTX_K1 = 27
+CTX_GP = 28
+CTX_SP = 29
+CTX_S8 = 30
+CTX_RA = 31
+CTX_MDLO = 32
+CTX_MDHI = 33
+CTX_CAUSE = 34
+CTX_EPC = 35
+CTX_SR = 36
+CXT_R0 = CTX_R0
+CXT_AT = CTX_AT
+CXT_V0 = CTX_V0
+CXT_V1 = CTX_V1
+CXT_A0 = CTX_A0
+CXT_A1 = CTX_A1
+CXT_A2 = CTX_A2
+CXT_A3 = CTX_A3
+CXT_T0 = CTX_T0
+CXT_T1 = CTX_T1
+CXT_T2 = CTX_T2
+CXT_T3 = CTX_T3
+CXT_T4 = CTX_T4
+CXT_T5 = CTX_T5
+CXT_T6 = CTX_T6
+CXT_T7 = CTX_T7
+CXT_S0 = CTX_S0
+CXT_S1 = CTX_S1
+CXT_S2 = CTX_S2
+CXT_S3 = CTX_S3
+CXT_S4 = CTX_S4
+CXT_S5 = CTX_S5
+CXT_S6 = CTX_S6
+CXT_S7 = CTX_S7
+CXT_T8 = CTX_T8
+CXT_T9 = CTX_T9
+CXT_K0 = CTX_K0
+CXT_K1 = CTX_K1
+CXT_GP = CTX_GP
+CXT_SP = CTX_SP
+CXT_S8 = CTX_S8
+CXT_RA = CTX_RA
+CXT_MDLO = CTX_MDLO
+CXT_MDHI = CTX_MDHI
+CXT_CAUSE = CTX_CAUSE
+CXT_EPC = CTX_EPC
+CXT_SR = CTX_SR
+SV_ONSTACK = 0x0001
+SV_INTERRUPT = 0x0002
+NUMBSDSIGS = (32)
+def sigmask(sig): return (1L << ((sig)-1))
+
+def sigmask(sig): return (1L << ((sig)-1))
+
+SIG_ERR = (-1)
+SIG_IGN = (1)
+SIG_HOLD = (2)
+SIG_DFL = (0)
+NSIG = 65
+MAXSIG = (NSIG-1)
+NUMSIGS = (NSIG-1)
+BRK_USERBP = 0
+BRK_KERNELBP = 1
+BRK_ABORT = 2
+BRK_BD_TAKEN = 3
+BRK_BD_NOTTAKEN = 4
+BRK_SSTEPBP = 5
+BRK_OVERFLOW = 6
+BRK_DIVZERO = 7
+BRK_RANGE = 8
+BRK_PSEUDO_OP_BIT = 0x80
+BRK_PSEUDO_OP_MAX = 0x3
+BRK_CACHE_SYNC = 0x80
+BRK_SWASH_FLUSH = 0x81
+BRK_SWASH_SWTCH = 0x82
+BRK_MULOVF = 1023
+
+# Included from sys/resource.h
+PRIO_MIN = -20
+PRIO_MAX = 20
+PRIO_PROCESS = 0
+PRIO_PGRP = 1
+PRIO_USER = 2
+RUSAGE_SELF = 0
+RUSAGE_CHILDREN = -1
+RLIMIT_CPU = 0
+RLIMIT_FSIZE = 1
+RLIMIT_DATA = 2
+RLIMIT_STACK = 3
+RLIMIT_CORE = 4
+RLIMIT_NOFILE = 5
+RLIMIT_VMEM = 6
+RLIMIT_RSS = 7
+RLIMIT_AS = RLIMIT_VMEM
+RLIM_NLIMITS = 8
+RLIM32_INFINITY = 0x7fffffff
+RLIM_INFINITY = 0x7fffffff
diff --git a/lib-python/2.2/plat-irix6/cddb.py b/lib-python/2.2/plat-irix6/cddb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/cddb.py
@@ -0,0 +1,206 @@
+# This file implements a class which forms an interface to the .cddb
+# directory that is maintained by SGI's cdman program.
+#
+# Usage is as follows:
+#
+# import readcd
+# r = readcd.Readcd()
+# c = Cddb(r.gettrackinfo())
+#
+# Now you can use c.artist, c.title and c.track[trackno] (where trackno
+# starts at 1).  When the CD is not recognized, all values will be the empty
+# string.
+# It is also possible to set the above mentioned variables to new values.
+# You can then use c.write() to write out the changed values to the
+# .cdplayerrc file.
+
+import string, posix, os
+
+_cddbrc = '.cddb'
+_DB_ID_NTRACKS = 5
+_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ at _=+abcdefghijklmnopqrstuvwxyz'
+def _dbid(v):
+	# Encode v as a single character from _dbid_map; values beyond the
+	# map's range fall back to a two-digit zero-filled decimal string.
+	if v >= len(_dbid_map):
+		return string.zfill(v, 2)
+	else:
+		return _dbid_map[v]
+
+def tochash(toc):
+	# Compute the database hash string for a table of contents.
+	# toc is either a string of 4-digit groups (minutes/seconds pairs,
+	# starting at offset 2) or an already-built list of
+	# (start, (min, sec)) tuples.
+	# NOTE(review): 'min' and 'hash' shadow builtins, and 'sec / 60' is
+	# Python 2 integer division -- both intentional in this 2.2-era code;
+	# left as-is because this file is a verbatim CPython 2.2 copy.
+	if type(toc) == type(''):
+		tracklist = []
+		for i in range(2, len(toc), 4):
+			tracklist.append((None,
+				  (int(toc[i:i+2]),
+				   int(toc[i+2:i+4]))))
+	else:
+		tracklist = toc
+	ntracks = len(tracklist)
+	hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
+	if ntracks <= _DB_ID_NTRACKS:
+		nidtracks = ntracks
+	else:
+		# Too many tracks to encode individually: fold the total
+		# running time into the hash and encode one fewer track.
+		nidtracks = _DB_ID_NTRACKS - 1
+		min = 0
+		sec = 0
+		for track in tracklist:
+			start, length = track
+			min = min + length[0]
+			sec = sec + length[1]
+		min = min + sec / 60
+		sec = sec % 60
+		hash = hash + _dbid(min) + _dbid(sec)
+	for i in range(nidtracks):
+		start, length = tracklist[i]
+		hash = hash + _dbid(length[0]) + _dbid(length[1])
+	return hash
+	
+	
+class Cddb:
+	"""Interface to the .cddb directory maintained by SGI's cdman.
+
+	After construction, self.artist, self.title and self.track[trackno]
+	(trackno starting at 1) hold the database entries; all are empty
+	strings when the CD is not recognized.  write() stores changed
+	values back into a <id>.rdb file.
+	"""
+	def __init__(self, tracklist):
+		# Search path: $CDDB_PATH (comma separated) or $HOME/.cddb.
+		if os.environ.has_key('CDDB_PATH'):
+			path = os.environ['CDDB_PATH']
+			cddb_path = path.split(',')
+		else:
+			home = os.environ['HOME']
+			cddb_path = [home + '/' + _cddbrc]
+
+		# Sets self.id and self.toc (used below and by the parser).
+		self._get_id(tracklist)
+
+		# NOTE(review): 'dir' and 'file' shadow builtins -- left as-is,
+		# this file is a verbatim CPython 2.2 copy.
+		for dir in cddb_path:
+			file = dir + '/' + self.id + '.rdb'
+			try:
+				f = open(file, 'r')
+				self.file = file
+				break
+			except IOError:
+				pass
+		# First two id characters encode the track count in hex.
+		ntracks = int(self.id[:2], 16)
+		self.artist = ''
+		self.title = ''
+		self.track = [None] + [''] * ntracks
+		self.trackartist = [None] + [''] * ntracks
+		self.notes = []
+		# No database file found: leave everything at the defaults.
+		if not hasattr(self, 'file'):
+			return
+		import re
+		# rdb lines look like "name1.name2:<tab/space>value".
+		reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
+		while 1:
+			line = f.readline()
+			if not line:
+				break
+			match = reg.match(line)
+			if not match:
+				print 'syntax error in ' + file
+				continue
+			name1, name2, value = match.group(1, 2, 3)
+			if name1 == 'album':
+				if name2 == 'artist':
+					self.artist = value
+				elif name2 == 'title':
+					self.title = value
+				elif name2 == 'toc':
+					# self.toc was set by _get_id above;
+					# warn if the file disagrees.
+					if not self.toc:
+						self.toc = value
+					if self.toc != value:
+						print 'toc\'s don\'t match'
+				elif name2 == 'notes':
+					self.notes.append(value)
+			elif name1[:5] == 'track':
+				try:
+					trackno = int(name1[5:])
+				except ValueError:
+					print 'syntax error in ' + file
+					continue
+				if trackno > ntracks:
+					print 'track number ' + `trackno` + \
+						  ' in file ' + file + \
+						  ' out of range'
+					continue
+				if name2 == 'title':
+					self.track[trackno] = value
+				elif name2 == 'artist':
+					self.trackartist[trackno] = value
+		f.close()
+		for i in range(2, len(self.track)):
+			track = self.track[i]
+			# if track title starts with `,', use initial part
+			# of previous track's title
+			if track and track[0] == ',':
+				try:
+					off = self.track[i - 1].index(',')
+				except ValueError:
+					pass
+				else:
+					self.track[i] = self.track[i-1][:off] \
+							+ track
+
+	def _get_id(self, tracklist):
+		# fill in self.id and self.toc.
+		# if the argument is a string ending in .rdb, the part
+		# up to the suffix is taken as the id.
+		if type(tracklist) == type(''):
+			if tracklist[-4:] == '.rdb':
+				self.id = tracklist[:-4]
+				self.toc = ''
+				return
+			t = []
+			for i in range(2, len(tracklist), 4):
+				t.append((None, \
+					  (int(tracklist[i:i+2]), \
+					   int(tracklist[i+2:i+4]))))
+			tracklist = t
+		ntracks = len(tracklist)
+		self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
+		if ntracks <= _DB_ID_NTRACKS:
+			nidtracks = ntracks
+		else:
+			# Same total-time folding as tochash() above.
+			nidtracks = _DB_ID_NTRACKS - 1
+			min = 0
+			sec = 0
+			for track in tracklist:
+				start, length = track
+				min = min + length[0]
+				sec = sec + length[1]
+			min = min + sec / 60
+			sec = sec % 60
+			self.id = self.id + _dbid(min) + _dbid(sec)
+		for i in range(nidtracks):
+			start, length = tracklist[i]
+			self.id = self.id + _dbid(length[0]) + _dbid(length[1])
+		self.toc = string.zfill(ntracks, 2)
+		for track in tracklist:
+			start, length = track
+			self.toc = self.toc + string.zfill(length[0], 2) + \
+				  string.zfill(length[1], 2)
+
+	def write(self):
+		# Write the entry to $CDDB_WRITE_DIR (or $HOME/.cddb),
+		# keeping a "~" backup of any existing file.
+		import posixpath
+		if os.environ.has_key('CDDB_WRITE_DIR'):
+			dir = os.environ['CDDB_WRITE_DIR']
+		else:
+			dir = os.environ['HOME'] + '/' + _cddbrc
+		file = dir + '/' + self.id + '.rdb'
+		if posixpath.exists(file):
+			# make backup copy
+			posix.rename(file, file + '~')
+		f = open(file, 'w')
+		f.write('album.title:\t' + self.title + '\n')
+		f.write('album.artist:\t' + self.artist + '\n')
+		f.write('album.toc:\t' + self.toc + '\n')
+		for note in self.notes:
+			f.write('album.notes:\t' + note + '\n')
+		prevpref = None
+		for i in range(1, len(self.track)):
+			if self.trackartist[i]:
+				f.write('track'+`i`+'.artist:\t'+self.trackartist[i]+'\n')
+			track = self.track[i]
+			# Re-abbreviate titles sharing a common ",".
+			# prefix with the previous track (inverse of the
+			# expansion done at the end of __init__).
+			try:
+				off = track.index(',')
+			except ValueError:
+				prevpref = None
+			else:
+				if prevpref and track[:off] == prevpref:
+					track = track[off:]
+				else:
+					prevpref = track[:off]
+			f.write('track' + `i` + '.title:\t' + track + '\n')
+		f.close()
diff --git a/lib-python/2.2/plat-irix6/cdplayer.py b/lib-python/2.2/plat-irix6/cdplayer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/cdplayer.py
@@ -0,0 +1,89 @@
+# This file implements a class which forms an interface to the .cdplayerrc
+# file that is maintained by SGI's cdplayer program.
+#
+# Usage is as follows:
+#
+# import readcd
+# r = readcd.Readcd()
+# c = Cdplayer(r.gettrackinfo())
+#
+# Now you can use c.artist, c.title and c.track[trackno] (where trackno
+# starts at 1).  When the CD is not recognized, all values will be the empty
+# string.
+# It is also possible to set the above mentioned variables to new values.
+# You can then use c.write() to write out the changed values to the
+# .cdplayerrc file.
+
+cdplayerrc = '.cdplayerrc'
+
<code block>
+class Cdplayer:
+	"""Interface to the ~/.cdplayerrc file maintained by SGI's cdplayer.
+
+	self.artist, self.title and self.track[trackno] (trackno starting
+	at 1) hold the stored values, or empty strings when the CD is not
+	recognized.  write() saves changes back to the rc file.
+	"""
+	def __init__(self, tracklist):
+		import string
+		self.artist = ''
+		self.title = ''
+		# A string argument is 4-digit (min, sec) groups from offset 2.
+		if type(tracklist) == type(''):
+			t = []
+			for i in range(2, len(tracklist), 4):
+				t.append((None, \
+					  (int(tracklist[i:i+2]), \
+					   int(tracklist[i+2:i+4]))))
+			tracklist = t
+		self.track = [None] + [''] * len(tracklist)
+		# The id is 'd' + zero-filled track count + per-track times.
+		self.id = 'd' + string.zfill(len(tracklist), 2)
+		for track in tracklist:
+			start, length = track
+			self.id = self.id + string.zfill(length[0], 2) + \
+				  string.zfill(length[1], 2)
+		try:
+			import posix
+			f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
+		except IOError:
+			return
+		import re
+		reg = re.compile(r'^([^:]*):\t(.*)')
+		s = self.id + '.'
+		l = len(s)
+		while 1:
+			line = f.readline()
+			if line == '':
+				break
+			if line[:l] == s:
+				line = line[l:]
+				match = reg.match(line)
+				if not match:
+					print 'syntax error in ~/' + cdplayerrc
+					continue
+				name, value = match.group(1, 2)
+				if name == 'title':
+					self.title = value
+				elif name == 'artist':
+					self.artist = value
+				elif name[:5] == 'track':
+					# Keys are written as 'track.N' (see
+					# write()); name[6:] skips the dot.
+					trackno = int(name[6:])
+					self.track[trackno] = value
+		f.close()
+
+	def write(self):
+		# Rewrite the rc file: copy every line that does not belong
+		# to this disc's id, then append this disc's entries, then
+		# atomically replace the old file.
+		import posix
+		filename = posix.environ['HOME'] + '/' + cdplayerrc
+		try:
+			old = open(filename, 'r')
+		except IOError:
+			# No existing rc file: read from an empty source.
+			old = open('/dev/null', 'r')
+		new = open(filename + '.new', 'w')
+		s = self.id + '.'
+		l = len(s)
+		while 1:
+			line = old.readline()
+			if line == '':
+				break
+			if line[:l] != s:
+				new.write(line)
+		new.write(self.id + '.title:\t' + self.title + '\n')
+		new.write(self.id + '.artist:\t' + self.artist + '\n')
+		for i in range(1, len(self.track)):
+			new.write(self.id + '.track.' + `i` + ':\t' + \
+				  self.track[i] + '\n')
+		old.close()
+		new.close()
+		posix.rename(filename + '.new', filename)
diff --git a/lib-python/2.2/plat-irix6/flp.doc b/lib-python/2.2/plat-irix6/flp.doc
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/flp.doc
@@ -0,0 +1,117 @@
+.SH
+Module flp
+.LP
+The flp module loads fl-forms from fd files, as generated
+by fdesign. The module is designed to be flexible enough to allow
+almost anything to be done with the loaded form.
+.LP
+The flp module defines 
+two types of functions: functions to parse fd files and functions to
+create the forms from the templates returned by the parse functions.
+There are fairly low-level create functions that create single objects,
+and convenience routines that create complete forms, including callbacks,
+etc.
+.LP
+The exception flp.error is raised whenever an error occurs while parsing a forms
+definition file or creating a form.
+.SH 2
+Parsing functions
+.LP
+There are two parsing functions, parse_form() and parse_forms(). They
+take the following form:
+.LP
+.ft C
+ftuple = parse_form(filename, formname)
+.br
+ftdict = parse_forms(filename)
+.IP
+Parse_form parses a single form, and returns a tuple (ftmp, otmplist).
+Ftmp is a template for a form, otmplist is a list of templates for
+objects. See below for a description of these templates.
+.IP
+Parse_forms parses all forms in an fd file. It returns a dictionary of
+(ftmp, otmplist) tuples, indexed by formname.
+.IP
+Filename is the name of the forms definition file to inspect. The functions
+append '.fd' if needed, and use 'sys.path' to locate the file.
+.IP
+Formname is the name of the form to load. This argument is mandatory,
+even if the file only contains one form.
+.LP
+The form template and object template are structures that contain all
+the information read from the fd file, in 'natural' form. A form
+template record contains the following fields:
+.IP
+.nf
+"Name", the name of the form;
+"Width", the width of the form;
+"Height", the height of the form; and
+"Numberofobjects", the number of objects in the form.
+.LP
+An object template contains the following fields:
+.IP
+.nf
+"Class", the class of object (eg. FL.BUTTON);
+"Type", the sub-class (eg. FL.NORMALBUTTON);
+"Box", a list with four members: [x, y, width, height];
+"Boxtype", the type of box (eg. FL.DOWNBOX);
+"Colors", a list with the two object colors;
+"Alignment", the label alignment (eg. FL.ALIGNLEFT); 
+"Style", the label style (eg. FL.BOLDSTYLE);
+"Lcol", the label color;
+"Label", a string containing the label;
+"Name", a string containing the name of the object;
+"Callback", a string containing the callback routine name; and
+"Argument", a string containing the callback routine extra argument.
+.SH
+Low-level create routines.
+.LP
+The three low-level creation routines are called as follows:
+.LP
+.ft C
+form = create_form(form_template)
+.IP
+Create an fl form from a form template. Returns the form created.
+.LP
+.ft C
+obj = create_object(form, obj_template)
+.IP
+Create an object in an fl form. Return the new object.
+An error is raised if the object has a callback routine.
+.SH
+High-level create routines.
+.LP
+The 'standard' way to handle forms in python is to define a class
+that contains the form and all the objects (insofar as they are named),
+and that defines all the callback functions, and use an instance of
+this class to handle the form interaction.
+Flp contains three routines that simplify handling this paradigm:
+.LP
+.ft C
+create_full_form(instance, ftuple)
+.IP
+This routine takes an instance of your form-handling class and an
+ftuple (as returned by the parsing routines) as parameters. It inserts
+the form into the instance, defines all object names and arranges that
+the callback methods are called. All the names inserted into the
+instance are the same as the names used for the objects, etc. in the
+fd file.
+.LP
+.ft C
+merge_full_form(instance, form, ftuple)
+.IP
+This function does the same as create_full_form, only it does not create
+the form itself nor the 'background box' that fdesign automatically
+adds to each form. This is useful if your class inherits a superclass
+that already defines a skeleton form (with 'OK' and 'Cancel' buttons,
+for instance), and you want to merge the new form into that existing
+form. The 'form' parameter is the form to which the new objects are
+added.
+.LP
+If you use the paradigm sketched here but need slightly more control
+over object creation there is a routine that creates a single object
+and inserts its name (and arranges for the callback routine to be
+called):
+.LP
+.ft C
+create_object_instance(instance, form, obj_template)
diff --git a/lib-python/2.2/plat-irix6/flp.py b/lib-python/2.2/plat-irix6/flp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/flp.py
@@ -0,0 +1,450 @@
+#
+# flp - Module to load fl forms from fd files
+#
+# Jack Jansen, December 1991
+#
+import os
+import sys
+import FL
+
+SPLITLINE = '--------------------'
+FORMLINE = '=============== FORM ==============='
+ENDLINE = '=============================='
+
+class error(Exception):
+    pass
+
+##################################################################
+#    Part 1 - The parsing routines                               #
+##################################################################
+
+#
+# Externally visible function. Load form.
+#
+def parse_form(filename, formname):
+    forms = checkcache(filename)
+    if forms is None:
+        forms = parse_forms(filename)
+    if forms.has_key(formname):
+        return forms[formname]
+    else:
+        raise error, 'No such form in fd file'
+
+#
+# Externally visible function. Load all forms.
+#
+def parse_forms(filename):
+    forms = checkcache(filename)
+    if forms is not None: return forms
+    fp = _open_formfile(filename)
+    nforms = _parse_fd_header(fp)
+    forms = {}
+    for i in range(nforms):
+        form = _parse_fd_form(fp, None)
+        forms[form[0].Name] = form
+    writecache(filename, forms)
+    return forms
+
+#
+# Internal: see if a cached version of the file exists
+#
+MAGIC = '.fdc'
+_internal_cache = {}                    # Used by frozen scripts only
+def checkcache(filename):
+    # Return the cached forms dictionary for filename, or None when no
+    # valid cache exists.  Checks, in order: the in-memory cache used by
+    # frozen scripts, then the on-disk '<file>.fdc' marshal cache, whose
+    # magic word and recorded mtime must match the .fd file.
+    if _internal_cache.has_key(filename):
+        altforms = _internal_cache[filename]
+        return _unpack_cache(altforms)
+    import marshal
+    # Resolve the real .fd path (may differ from the argument).
+    fp, filename = _open_formfile2(filename)
+    fp.close()
+    cachename = filename + 'c'
+    try:
+        fp = open(cachename, 'r')
+    except IOError:
+        #print 'flp: no cache file', cachename
+        return None
+    try:
+        if fp.read(4) != MAGIC:
+            print 'flp: bad magic word in cache file', cachename
+            return None
+        cache_mtime = rdlong(fp)
+        file_mtime = getmtime(filename)
+        if cache_mtime != file_mtime:
+            #print 'flp: outdated cache file', cachename
+            return None
+        #print 'flp: valid cache file', cachename
+        altforms = marshal.load(fp)
+        return _unpack_cache(altforms)
+    finally:
+        # Close the cache file on every exit path above.
+        fp.close()
+
+def _unpack_cache(altforms):
+        forms = {}
+        for name in altforms.keys():
+            altobj, altlist = altforms[name]
+            obj = _newobj()
+            obj.make(altobj)
+            list = []
+            for altobj in altlist:
+                nobj = _newobj()
+                nobj.make(altobj)
+                list.append(nobj)
+            forms[name] = obj, list
+        return forms
+
+def rdlong(fp):
+    # Read a 4-byte big-endian integer from fp; None on short read.
+    s = fp.read(4)
+    if len(s) != 4: return None
+    a, b, c, d = s[0], s[1], s[2], s[3]
+    return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
+
+def wrlong(fp, x):
+    # Write x to fp as a 4-byte big-endian integer (inverse of rdlong).
+    a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
+    fp.write(chr(a) + chr(b) + chr(c) + chr(d))
+
+def getmtime(filename):
+    # Return filename's modification time, or None if it cannot be
+    # stat'ed (used for cache-freshness comparison in checkcache).
+    import os
+    from stat import ST_MTIME
+    try:
+        return os.stat(filename)[ST_MTIME]
+    except os.error:
+        return None
+
+#
+# Internal: write cached version of the form (parsing is too slow!)
+#
+def writecache(filename, forms):
+    import marshal
+    fp, filename = _open_formfile2(filename)
+    fp.close()
+    cachename = filename + 'c'
+    try:
+        fp = open(cachename, 'w')
+    except IOError:
+        print 'flp: can\'t create cache file', cachename
+        return # Never mind
+    fp.write('\0\0\0\0') # Seek back and write MAGIC when done
+    wrlong(fp, getmtime(filename))
+    altforms = _pack_cache(forms)
+    marshal.dump(altforms, fp)
+    fp.seek(0)
+    fp.write(MAGIC)
+    fp.close()
+    #print 'flp: wrote cache file', cachename
+
+#
+# External: print some statements that set up the internal cache.
+# This is for use with the "freeze" script.  You should call
+# flp.freeze(filename) for all forms used by the script, and collect
+# the output on a file in a module file named "frozenforms.py".  Then
+# in the main program of the script import frozenforms.
+# (Don't forget to take this out when using the unfrozen version of
+# the script!)
+#
+def freeze(filename):
+    forms = parse_forms(filename)
+    altforms = _pack_cache(forms)
+    print 'import flp'
+    print 'flp._internal_cache[', `filename`, '] =', altforms
+
+#
+# Internal: create the data structure to be placed in the cache
+#
+def _pack_cache(forms):
+    altforms = {}
+    for name in forms.keys():
+        obj, list = forms[name]
+        altobj = obj.__dict__
+        altlist = []
+        for obj in list: altlist.append(obj.__dict__)
+        altforms[name] = altobj, altlist
+    return altforms
+
+#
+# Internal: Locate form file (using PYTHONPATH) and open file
+#
+def _open_formfile(filename):
+    return _open_formfile2(filename)[0]
+
+def _open_formfile2(filename):
+    # Locate and open a forms file; returns (fp, resolved filename).
+    # Appends '.fd' if missing; absolute paths are opened directly,
+    # relative names are searched along sys.path.
+    # NOTE(review): if sys.path were empty, 'fp' would be unbound below
+    # and this would raise NameError instead of flp.error -- cannot
+    # happen in practice, left as-is (verbatim CPython 2.2 copy).
+    if filename[-3:] != '.fd':
+        filename = filename + '.fd'
+    if filename[0] == '/':
+        try:
+            fp = open(filename,'r')
+        except IOError:
+            fp = None
+    else:
+        for pc in sys.path:
+            pn = os.path.join(pc, filename)
+            try:
+                fp = open(pn, 'r')
+                filename = pn
+                break
+            except IOError:
+                fp = None
+    if fp is None:
+        raise error, 'Cannot find forms file ' + filename
+    return fp, filename
+
+#
+# Internal: parse the fd file header, return number of forms
+#
+def _parse_fd_header(file):
+    # First read the magic header line
+    datum = _parse_1_line(file)
+    if datum != ('Magic', 12321):
+        raise error, 'Not a forms definition file'
+    # Now skip until we know number of forms
+    while 1:
+        datum = _parse_1_line(file)
+        if type(datum) == type(()) and datum[0] == 'Numberofforms':
+            break
+    return datum[1]
+#
+# Internal: parse fd form, or skip if name doesn't match.
+# the special value None means 'always parse it'.
+#
+def _parse_fd_form(file, name):
+    datum = _parse_1_line(file)
+    if datum != FORMLINE:
+        raise error, 'Missing === FORM === line'
+    form = _parse_object(file)
+    if form.Name == name or name is None:
+        objs = []
+        for j in range(form.Numberofobjects):
+            obj = _parse_object(file)
+            objs.append(obj)
+        return (form, objs)
+    else:
+        for j in range(form.Numberofobjects):
+            _skip_object(file)
+    return None
+
+#
+# Internal class: a convenient place to store object info fields
+#
+class _newobj:
+    def add(self, name, value):
+        self.__dict__[name] = value
+    def make(self, dict):
+        for name in dict.keys():
+            self.add(name, dict[name])
+
+#
+# Internal parsing routines.
+#
+def _parse_string(str):
+    if '\\' in str:
+        s = '\'' + str + '\''
+        try:
+            return eval(s)
+        except:
+            pass
+    return str
+
+def _parse_num(str):
+    return eval(str)
+
+def _parse_numlist(str):
+    slist = str.split()
+    nlist = []
+    for i in slist:
+        nlist.append(_parse_num(i))
+    return nlist
+
+# This dictionary maps item names to parsing routines.
+# If no routine is given '_parse_num' is default.
+# NOTE(review): 'Name' appears twice in this literal; harmless since both
+# entries map to _parse_string (the later one wins).  Kept verbatim.
+_parse_func = { \
+        'Name':         _parse_string, \
+        'Box':          _parse_numlist, \
+        'Colors':       _parse_numlist, \
+        'Label':        _parse_string, \
+        'Name':         _parse_string, \
+        'Callback':     _parse_string, \
+        'Argument':     _parse_string }
+
+# This function parses a line, and returns either
+# a string or a tuple (name,value)
+
+import re
+prog = re.compile('^([^:]*): *(.*)')
+
+def _parse_line(line):
+    match = prog.match(line)
+    if not match:
+        return line
+    name, value = match.group(1, 2)
+    if name[0] == 'N':
+            name = ''.join(name.split())
+            name = name.lower()
+    name = name.capitalize()
+    try:
+        pf = _parse_func[name]
+    except KeyError:
+        pf = _parse_num
+    value = pf(value)
+    return (name, value)
+
+def _readline(file):
+    line = file.readline()
+    if not line:
+        raise EOFError
+    return line[:-1]
+        
+def _parse_1_line(file):
+    line = _readline(file)
+    while line == '':
+        line = _readline(file)
+    return _parse_line(line)
+
+def _skip_object(file):
+    # Skip one object definition without parsing it: read until a
+    # separator line.  If the separator was a FORM line, seek back so
+    # the caller can re-read it.
+    line = ''
+    while not line in (SPLITLINE, FORMLINE, ENDLINE):
+        pos = file.tell()
+        line = _readline(file)
+    if line == FORMLINE:
+        file.seek(pos)
+
+def _parse_object(file):
+    obj = _newobj()
+    while 1:
+        pos = file.tell()
+        datum = _parse_1_line(file)
+        if datum in (SPLITLINE, FORMLINE, ENDLINE):
+            if datum == FORMLINE:
+                file.seek(pos)
+            return obj
+        if type(datum) is not type(()) or len(datum) != 2:
+            raise error, 'Parse error, illegal line in object: '+datum
+        obj.add(datum[0], datum[1])
+
+#################################################################
+#   Part 2 - High-level object/form creation routines            #
+#################################################################
+
+#
+# External - Create a form and link to an instance variable.
+#
+def create_full_form(inst, (fdata, odatalist)):
+    form = create_form(fdata)
+    exec 'inst.'+fdata.Name+' = form\n'
+    for odata in odatalist:
+        create_object_instance(inst, form, odata)
+
+#
+# External - Merge a form into an existing form in an instance
+# variable.
+#
+def merge_full_form(inst, form, (fdata, odatalist)):
+    exec 'inst.'+fdata.Name+' = form\n'
+    if odatalist[0].Class != FL.BOX:
+        raise error, 'merge_full_form() expects FL.BOX as first obj'
+    for odata in odatalist[1:]:
+        create_object_instance(inst, form, odata)
+
+
+#################################################################
+#   Part 3 - Low-level object/form creation routines            #
+#################################################################
+
+#
+# External Create_form - Create form from parameters
+#
+def create_form(fdata):
+    import fl
+    return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
+
+#
+# External create_object - Create an object. Make sure there are
+# no callbacks. Returns the object created.
+#
+def create_object(form, odata):
+    obj = _create_object(form, odata)
+    if odata.Callback:
+        raise error, 'Creating free object with callback'
+    return obj
+#
+# External create_object_instance - Create object in an instance.
+#
+def create_object_instance(inst, form, odata):
+    obj = _create_object(form, odata)
+    if odata.Callback:
+        cbfunc = eval('inst.'+odata.Callback)
+        obj.set_call_back(cbfunc, odata.Argument)
+    if odata.Name:
+        exec 'inst.' + odata.Name + ' = obj\n'
+#
+# Internal _create_object: Create the object and fill options
+#
+def _create_object(form, odata):
+    crfunc = _select_crfunc(form, odata.Class)
+    obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
+            odata.Box[3], odata.Label)
+    if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
+        obj.boxtype = odata.Boxtype
+        obj.col1 = odata.Colors[0]
+        obj.col2 = odata.Colors[1]
+        obj.align = odata.Alignment
+        obj.lstyle = odata.Style
+        obj.lsize = odata.Size
+        obj.lcol = odata.Lcol
+    return obj
+#
+# Internal crfunc: helper function that returns correct create function
+#
+def _select_crfunc(fm, cl):
+    if cl == FL.BEGIN_GROUP: return fm.bgn_group
+    elif cl == FL.END_GROUP: return fm.end_group
+    elif cl == FL.BITMAP: return fm.add_bitmap
+    elif cl == FL.BOX: return fm.add_box
+    elif cl == FL.BROWSER: return fm.add_browser
+    elif cl == FL.BUTTON: return fm.add_button
+    elif cl == FL.CHART: return fm.add_chart
+    elif cl == FL.CHOICE: return fm.add_choice
+    elif cl == FL.CLOCK: return fm.add_clock
+    elif cl == FL.COUNTER: return fm.add_counter
+    elif cl == FL.DIAL: return fm.add_dial
+    elif cl == FL.FREE: return fm.add_free
+    elif cl == FL.INPUT: return fm.add_input
+    elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
+    elif cl == FL.MENU: return fm.add_menu
+    elif cl == FL.POSITIONER: return fm.add_positioner
+    elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
+    elif cl == FL.SLIDER: return fm.add_slider
+    elif cl == FL.VALSLIDER: return fm.add_valslider
+    elif cl == FL.TEXT: return fm.add_text
+    elif cl == FL.TIMER: return fm.add_timer
+    else:
+        raise error, 'Unknown object type: ' + `cl`
+
+
+def test():
+    import time
+    t0 = time.time()
+    if len(sys.argv) == 2:
+        forms = parse_forms(sys.argv[1])
+        t1 = time.time()
+        print 'parse time:', 0.001*(t1-t0), 'sec.'
+        keys = forms.keys()
+        keys.sort()
+        for i in keys:
+            _printform(forms[i])
+    elif len(sys.argv) == 3:
+        form = parse_form(sys.argv[1], sys.argv[2])
+        t1 = time.time()
+        print 'parse time:', round(t1-t0, 3), 'sec.'
+        _printform(form)
+    else:
+        print 'Usage: test fdfile [form]'
+
+def _printform(form):
+    f = form[0]
+    objs = form[1]
+    print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
+    for i in objs:
+        print '  Obj ', i.Name, ' type ', i.Class, i.Type
+        print '    Box ', i.Box, ' btype ', i.Boxtype
+        print '    Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
+        print '    cols ', i.Colors
+        print '    cback ', i.Callback, i.Argument
diff --git a/lib-python/2.2/plat-irix6/jpeg.py b/lib-python/2.2/plat-irix6/jpeg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/jpeg.py
@@ -0,0 +1,111 @@
+# Implement 'jpeg' interface using SGI's compression library
+
+# XXX Options 'smooth' and 'optimize' are ignored.
+
+# XXX It appears that compressing grayscale images doesn't work right;
+# XXX the resulting file causes weirdness.
+
+class error(Exception):
+	pass
+
+options = {'quality': 75, 'optimize': 0, 'smooth': 0, 'forcegray': 0}
+
+comp = None
+decomp = None
+
+def compress(imgdata, width, height, bytesperpixel):
+	# JPEG-compress raw image data via SGI's cl library; returns the
+	# compressed bytes.  The compressor handle is created lazily and
+	# reused across calls.
+	# NOTE(review): 'format' is only assigned for bytesperpixel 1 or 4;
+	# any other value raises NameError below.  Kept verbatim (CPython
+	# 2.2 copy).
+	global comp
+	import cl
+	if comp is None: comp = cl.OpenCompressor(cl.JPEG)
+	if bytesperpixel == 1:
+		format = cl.GRAYSCALE
+	elif bytesperpixel == 4:
+		format = cl.RGBX
+	if options['forcegray']:
+		iformat = cl.GRAYSCALE
+	else:
+		iformat = cl.YUV
+	# XXX How to support 'optimize'?
+	params = [cl.IMAGE_WIDTH, width, cl.IMAGE_HEIGHT, height,
+		  cl.ORIGINAL_FORMAT, format,
+		  cl.ORIENTATION, cl.BOTTOM_UP,
+		  cl.QUALITY_FACTOR, options['quality'],
+		  cl.INTERNAL_FORMAT, iformat,
+		 ]
+	comp.SetParams(params)
+	jpegdata = comp.Compress(1, imgdata)
+	return jpegdata
+
+def decompress(jpegdata):
+	global decomp
+	import cl
+	if decomp is None: decomp = cl.OpenDecompressor(cl.JPEG)
+	headersize = decomp.ReadHeader(jpegdata)
+	params = [cl.IMAGE_WIDTH, 0, cl.IMAGE_HEIGHT, 0, cl.INTERNAL_FORMAT, 0]
+	decomp.GetParams(params)
+	width, height, format = params[1], params[3], params[5]
+	if format == cl.GRAYSCALE or options['forcegray']:
+		format = cl.GRAYSCALE
+		bytesperpixel = 1
+	else:
+		format = cl.RGBX
+		bytesperpixel = 4
+	# XXX How to support 'smooth'?
+	params = [cl.ORIGINAL_FORMAT, format,
+		  cl.ORIENTATION, cl.BOTTOM_UP,
+		  cl.FRAME_BUFFER_SIZE, width*height*bytesperpixel]
+	decomp.SetParams(params)
+	imgdata = decomp.Decompress(1, jpegdata)
+	return imgdata, width, height, bytesperpixel
+
+def setoption(name, value):
+	# Set a module option ('quality', 'optimize', 'smooth', 'forcegray');
+	# only numeric values are accepted, and the British spelling
+	# 'forcegrey' is mapped to 'forcegray'.
+	if type(value) is not type(0):
+		raise TypeError, 'jpeg.setoption: numeric options only'
+	if name == 'forcegrey':
+		name = 'forcegray'
+	if not options.has_key(name):
+		raise KeyError, 'jpeg.setoption: unknown option name'
+	options[name] = int(value)
+
+def test():
+	import sys
+	if sys.argv[1:2] == ['-g']:
+		del sys.argv[1]
+		setoption('forcegray', 1)
+	if not sys.argv[1:]:
+		sys.argv.append('/usr/local/images/data/jpg/asterix.jpg')
+	for file in sys.argv[1:]:
+		show(file)
+
+def show(file):
+	import gl, GL, DEVICE
+	jpegdata = open(file, 'r').read()
+	imgdata, width, height, bytesperpixel = decompress(jpegdata)
+	gl.foreground()
+	gl.prefsize(width, height)
+	win = gl.winopen(file)
+	if bytesperpixel == 1:
+		gl.cmode()
+		gl.pixmode(GL.PM_SIZE, 8)
+		gl.gconfig()
+		for i in range(256):
+			gl.mapcolor(i, i, i, i)
+	else:
+		gl.RGBmode()
+		gl.pixmode(GL.PM_SIZE, 32)
+		gl.gconfig()
+	gl.qdevice(DEVICE.REDRAW)
+	gl.qdevice(DEVICE.ESCKEY)
+	gl.qdevice(DEVICE.WINQUIT)
+	gl.qdevice(DEVICE.WINSHUT)
+	gl.lrectwrite(0, 0, width-1, height-1, imgdata)
+	while 1:
+		dev, val = gl.qread()
+		if dev in (DEVICE.ESCKEY, DEVICE.WINSHUT, DEVICE.WINQUIT):
+			break
+		if dev == DEVICE.REDRAW:
+			gl.lrectwrite(0, 0, width-1, height-1, imgdata)
+	gl.winclose(win)
+	# Now test the compression and write the result to a fixed filename
+	newjpegdata = compress(imgdata, width, height, bytesperpixel)
+	open('/tmp/j.jpg', 'w').write(newjpegdata)
diff --git a/lib-python/2.2/plat-irix6/panel.py b/lib-python/2.2/plat-irix6/panel.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/panel.py
@@ -0,0 +1,281 @@
+# Module 'panel'
+#
+# Support for the Panel library.
+# Uses built-in module 'pnl'.
+# Applications should use 'panel.function' instead of 'pnl.function';
+# most 'pnl' functions are transparently exported by 'panel',
+# but dopanel() is overridden and you have to use this version
+# if you want to use callbacks.
+
+
+import pnl
+
+
+debug = 0
+
+
+# Test if an object is a list.
+#
+def is_list(x):
+	return type(x) == type([])
+
+
+# Reverse a list.
+#
+def reverse(list):
+	res = []
+	for item in list:
+		res.insert(0, item)
+	return res
+
+
+# Get an attribute of a list, which may itself be another list.
+# Don't use 'prop' for name.
+#
+def getattrlist(list, name):
+	for item in list:
+		if item and is_list(item) and item[0] == name:
+			return item[1:]
+	return []
+
+
+# Get a property of a list, which may itself be another list.
+#
+def getproplist(list, name):
+	for item in list:
+		if item and is_list(item) and item[0] == 'prop':
+			if len(item) > 1 and item[1] == name:
+				return item[2:]
+	return []
+
+
+# Test if an actuator description contains the property 'end-of-group'
+#
+def is_endgroup(list):
+	x = getproplist(list, 'end-of-group')
+	return (x and x[0] == '#t')
+
+
+# Neatly display an actuator definition given as S-expression
+# the prefix string is printed before each line.
+#
+def show_actuator(prefix, a):
+	for item in a:
+		if not is_list(item):
+			print prefix, item
+		elif item and item[0] == 'al':
+			print prefix, 'Subactuator list:'
+			for a in item[1:]:
+				show_actuator(prefix + '    ', a)
+		elif len(item) == 2:
+			print prefix, item[0], '=>', item[1]
+		elif len(item) == 3 and item[0] == 'prop':
+			print prefix, 'Prop', item[1], '=>',
+			print item[2]
+		else:
+			print prefix, '?', item
+
+
+# Neatly display a panel.
+#
+def show_panel(prefix, p):
+	for item in p:
+		if not is_list(item):
+			print prefix, item
+		elif item and item[0] == 'al':
+			print prefix, 'Actuator list:'
+			for a in item[1:]:
+				show_actuator(prefix + '    ', a)
+		elif len(item) == 2:
+			print prefix, item[0], '=>', item[1]
+		elif len(item) == 3 and item[0] == 'prop':
+			print prefix, 'Prop', item[1], '=>',
+			print item[2]
+		else:
+			print prefix, '?', item
+
+
+# Exception raised by build_actuator or build_panel.
+#
+panel_error = 'panel error'
+
+
+# Dummy callback used to initialize the callbacks.
+#
+def dummy_callback(arg):
+	pass
+
+
+# Assign attributes to members of the target.
+# Attribute names in exclist are ignored.
+# The member name is the attribute name prefixed with the prefix.
+#
+def assign_members(target, attrlist, exclist, prefix):
+	for item in attrlist:
+		if is_list(item) and len(item) == 2 and item[0] not in exclist:
+			name, value = item[0], item[1]
+			ok = 1
+			if value[0] in '-0123456789':
+				value = eval(value)
+			elif value[0] == '"':
+				value = value[1:-1]
+			elif value == 'move-then-resize':
+				# Strange default set by Panel Editor...
+				ok = 0
+			else:
+				print 'unknown value', value, 'for', name
+				ok = 0
+			if ok:
+				lhs = 'target.' + prefix + name
+				stmt = lhs + '=' + `value`
+				if debug: print 'exec', stmt
+				try:
+					exec stmt + '\n'
+				except KeyboardInterrupt: # Don't catch this!
+					raise KeyboardInterrupt
+				except:
+					print 'assign failed:', stmt
+
+
+# Build a real actuator from an actuator description.
+# Return a pair (actuator, name).
+#
+def build_actuator(descr):
+	namelist = getattrlist(descr, 'name')
+	if namelist:
+		# Assume it is a string
+		actuatorname = namelist[0][1:-1]
+	else:
+		actuatorname = ''
+	type = descr[0]
+	if type[:4] == 'pnl_': type = type[4:]
+	act = pnl.mkact(type)
+	act.downfunc = act.activefunc = act.upfunc = dummy_callback
+	#
+	assign_members(act, descr[1:], ['al', 'data', 'name'], '')
+	#
+	# Treat actuator-specific data
+	#
+	datalist = getattrlist(descr, 'data')
+	prefix = ''
+	if type[-4:] == 'puck':
+		prefix = 'puck_'
+	elif type == 'mouse':
+		prefix = 'mouse_'
+	assign_members(act, datalist, [], prefix)
+	#
+	return act, actuatorname
+
+
+# Build all sub-actuators and add them to the super-actuator.
+# The super-actuator must already have been added to the panel.
+# Sub-actuators with defined names are added as members to the panel
+# so they can be referenced as p.name.
+#
+# Note: I have no idea how panel.endgroup() works when applied
+# to a sub-actuator.
+#
+def build_subactuators(panel, super_act, al):
+	#
+	# This is nearly the same loop as below in build_panel(),
+	# except a call is made to addsubact() instead of addact().
+	#
+	for a in al:
+		act, name = build_actuator(a)
+		act.addsubact(super_act)
+		if name:
+			stmt = 'panel.' + name + ' = act'
+			if debug: print 'exec', stmt
+			exec stmt + '\n'
+		if is_endgroup(a):
+			panel.endgroup()
+		sub_al = getattrlist(a, 'al')
+		if sub_al:
+			build_subactuators(panel, act, sub_al)
+	#
+	# Fix the actuator to which we just added subactuators.
+	# This can't hurt (I hope) and is needed for the scroll actuator.
+	#
+	super_act.fixact()
+
+
+# Build a real panel from a panel definition.
+# Return a panel object p, where for each named actuator a, p.name is a
+# reference to a.
+#
+def build_panel(descr):
+	#
+	# Sanity check
+	#
+	if (not descr) or descr[0] != 'panel':
+		raise panel_error, 'panel description must start with "panel"'
+	#
+	if debug: show_panel('', descr)
+	#
+	# Create an empty panel
+	#
+	panel = pnl.mkpanel()
+	#
+	# Assign panel attributes
+	#
+	assign_members(panel, descr[1:], ['al'], '')
+	#
+	# Look for actuator list
+	#
+	al = getattrlist(descr, 'al')
+	#
+	# The order in which actuators are created is important
+	# because of the endgroup() operator.
+	# Unfortunately the Panel Editor outputs the actuator list
+	# in reverse order, so we reverse it here.
+	#
+	al = reverse(al)
+	#
+	for a in al:
+		act, name = build_actuator(a)
+		act.addact(panel)
+		if name:
+			stmt = 'panel.' + name + ' = act'
+			exec stmt + '\n'
+		if is_endgroup(a):
+			panel.endgroup()
+		sub_al = getattrlist(a, 'al')
+		if sub_al:
+			build_subactuators(panel, act, sub_al)
+	#
+	return panel
+
+
+# Wrapper around pnl.dopanel() which calls call-back functions.
+#
+def my_dopanel():
+	# Extract only the first 4 elements to allow for future expansion
+	a, down, active, up = pnl.dopanel()[:4]
+	if down:
+		down.downfunc(down)
+	if active:
+		active.activefunc(active)
+	if up:
+		up.upfunc(up)
+	return a
+
+
+# Create one or more panels from a description file (S-expressions)
+# generated by the Panel Editor.
+# 
+def defpanellist(file):
+	import panelparser
+	descrlist = panelparser.parse_file(open(file, 'r'))
+	panellist = []
+	for descr in descrlist:
+		panellist.append(build_panel(descr))
+	return panellist
+
+
+# Import everything from built-in method pnl, so the user can always
+# use panel.foo() instead of pnl.foo().
+# This gives *no* performance penalty once this module is imported.
+#
+from pnl import *			# for export
+
+dopanel = my_dopanel			# override pnl.dopanel
diff --git a/lib-python/2.2/plat-irix6/panelparser.py b/lib-python/2.2/plat-irix6/panelparser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/panelparser.py
@@ -0,0 +1,128 @@
+# Module 'panelparser'
+#
+# Parse S-expressions output by the Panel Editor
+# (which is written in Scheme so it can't help writing S-expressions).
+#
+# See notes at end of file.
+
+
+whitespace = ' \t\n'
+operators = '()\''
+separators = operators + whitespace + ';' + '"'
+
+
+# Tokenize a string.
+# Return a list of tokens (strings).
+#
+def tokenize_string(s):
+	tokens = []
+	while s:
+		c = s[:1]
+		if c in whitespace:
+			s = s[1:]
+		elif c == ';':
+			s = ''
+		elif c == '"':
+			n = len(s)
+			i = 1
+			while i < n:
+				c = s[i]
+				i = i+1
+				if c == '"': break
+				if c == '\\': i = i+1
+			tokens.append(s[:i])
+			s = s[i:]
+		elif c in operators:
+			tokens.append(c)
+			s = s[1:]
+		else:
+			n = len(s)
+			i = 1
+			while i < n:
+				if s[i] in separators: break
+				i = i+1
+			tokens.append(s[:i])
+			s = s[i:]
+	return tokens
+
+
+# Tokenize a whole file (given as file object, not as file name).
+# Return a list of tokens (strings).
+#
+def tokenize_file(fp):
+	tokens = []
+	while 1:
+		line = fp.readline()
+		if not line: break
+		tokens = tokens + tokenize_string(line)
+	return tokens
+
+
+# Exception raised by parse_expr.
+#
+syntax_error = 'syntax error'
+
+
+# Parse an S-expression.
+# Input is a list of tokens as returned by tokenize_*().
+# Return a pair (expr, tokens)
+# where expr is a list representing the s-expression,
+# and tokens contains the remaining tokens.
+# May raise syntax_error.
+#
+def parse_expr(tokens):
+	if (not tokens) or tokens[0] != '(':
+		raise syntax_error, 'expected "("'
+	tokens = tokens[1:]
+	expr = []
+	while 1:
+		if not tokens:
+			raise syntax_error, 'missing ")"'
+		if tokens[0] == ')':
+			return expr, tokens[1:]
+		elif tokens[0] == '(':
+			subexpr, tokens = parse_expr(tokens)
+			expr.append(subexpr)
+		else:
+			expr.append(tokens[0])
+			tokens = tokens[1:]
+
+
+# Parse a file (given as file object, not as file name).
+# Return a list of parsed S-expressions found at the top level.
+#
+def parse_file(fp):
+	tokens = tokenize_file(fp)
+	exprlist = []
+	while tokens:
+		expr, tokens = parse_expr(tokens)
+		exprlist.append(expr)
+	return exprlist
+
+
+# EXAMPLE:
+#
+# The input
+#	'(hip (hop hur-ray))'
+#
+# passed to tokenize_string() returns the token list
+#	['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
+#
+# When this is passed to parse_expr() it returns the expression
+#	['hip', ['hop', 'hur-ray']]
+# plus an empty token list (because there are no tokens left).
+#
+# When a file containing the example is passed to parse_file() it returns
+# a list whose only element is the output of parse_expr() above:
+#	[['hip', ['hop', 'hur-ray']]]
+
+
+# TOKENIZING:
+#
+# Comments start with semicolon (;) and continue till the end of the line.
+#
+# Tokens are separated by whitespace, except the following characters
+# always form a separate token (outside strings):
+#	( ) '
+# Strings are enclosed in double quotes (") and backslash (\) is used
+# as escape character in strings.
diff --git a/lib-python/2.2/plat-irix6/readcd.doc b/lib-python/2.2/plat-irix6/readcd.doc
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/readcd.doc
@@ -0,0 +1,104 @@
+Interface to CD-ROM player.
+
+This module implements an interface to the built-in cd module.  The
+intention is to provide a more user-friendly interface than the
+built-in module.
+
+The module defines a class Readcd with several methods.  The
+initialization of the class will try to open the CD player.  This
+means that initialization will fail if the CD player is already in
+use.  A RuntimeError will be raised by the cd module in that case.
+
+The way to work with this module is as follows.  The user specifies
+the parts of the CD that are to be read and he specifies callback
+functions which are to be called by the system.  At some point he can
+tell the system to play.  The specified parts of the CD will then be
+read and the callbacks will be called.
+
+Initialization.
+===============
+
+r = readcd.Readcd([cd-player [, mode]])
+
+The optional arguments are the name of the CD device and the mode.
+When "mode" is not specified, it defaults to 'r' (which is the only
+possible value); when "cd-player" also isn't specified, it defaults
+to "None" which indicates the default CD player.
+
+Methods.
+========
+
+eject() -- Eject the CD from the player.
+
+reset() -- Reset the list of data stretches to be played.
+
+appendtrack(track) -- Append the specified track to the list of music
+stretches.
+
+appendstretch(first, last) -- Append the stretch from "first" to "last"
+to the list of music stretches.  Both "first" and "last" can be in one
+of four forms.  "None": for "first", the beginning of the CD, for
+"last" the end of the CD; a single integer: a track number--playing
+starts at the beginning of the track or ends at the end of the
+specified track; a three-tuple: the absolute time from the start of
+the CD in minutes, seconds, frames; a four-tuple: track number and
+relative time within the track in minutes, seconds, frames.
+
+settracks(tracklist) -- The argument is a list of integers.  The list
+of stretches is set to argument list.  The old list is discarded.
+
+setcallback(type, func, arg) -- Set a callback function for "type".
+The function will be called as func(arg, type, data) where "arg" is
+the third argument of setcallback, "type" is the type of callback,
+"data" is type-dependent data.  See the CDsetcallback(3) manual page
+for more information.  The possible "type" arguments are defined in
+the CD module.
+
+removecallback(type) -- Remove the callback for "type".
+
+gettrackinfo([tracklist]) -- Return a list of tuples.  Each tuple
+consists of start and length information of a track.  The start and
+length information consist of three-tuples with minutes, seconds and
+frames.  The optional tracklist argument gives a list of interesting
+track numbers.  If no tracklist is specified, information about all
+tracks is returned.
+
+getstatus() -- Return the status information of the CD.
+
+play() -- Play the preprogrammed stretches of music from the CD.  When
+nothing was programmed, the whole CD is played.
+
+Specifying stretches.
+=====================
+
+There are three methods available to specify a stretch of music to be
+played.  The easiest way is to use "settracks(tracklist)" with which
+a list of tracks can be specified.  "settracks(tracklist)" is
+equivalent to the sequence
+	reset()
+	for track in tracklist:
+		appendtrack(track)
+
+The next method is "appendtrack(track)" with which a whole track can be
+added to the list of music to be played.  "appendtrack(track)" is
+equivalent to "appendstretch(track, track)".
+
+The most complete method is "appendstretch(first, last)".  Using this
+method, it is possible to specify any stretch of music.
+
+When two consecutive tracks are played, it is possible to choose
+whether the pause that may be between the tracks is played as well or
+whether the pause should be skipped.  When the end of a stretch is
+specified using a track number and the next stretch starts at the
+beginning of the following track and that was also specified using the
+track number (that is, both were specified as integers, not as tuples),
+the pause is played.  When either value was specified using absolute
+time or track-relative time (that is, as three-tuple or as
+four-tuple), the pause will not be played.
+
+Errors.
+=======
+
+When an error occurs, an exception will be raised.  Depending on where
+the error occurs, the exception may either be "readcd.Error" or
+"RuntimeError".
diff --git a/lib-python/2.2/plat-irix6/readcd.py b/lib-python/2.2/plat-irix6/readcd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/readcd.py
@@ -0,0 +1,244 @@
+# Class interface to the CD module.
+
+import cd, CD
+
+class Error(Exception):
+	pass
+class _Stop(Exception):
+	pass
+
+def _doatime(self, cb_type, data):
+	if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
+##		print 'done with list entry',`self.listindex`
+		raise _Stop
+	func, arg = self.callbacks[cb_type]
+	if func:
+		func(arg, cb_type, data)
+
+def _dopnum(self, cb_type, data):
+	if data > self.end:
+##		print 'done with list entry',`self.listindex`
+		raise _Stop
+	func, arg = self.callbacks[cb_type]
+	if func:
+		func(arg, cb_type, data)
+
+class Readcd:
+	def __init__(self, *arg):
+		if len(arg) == 0:
+			self.player = cd.open()
+		elif len(arg) == 1:
+			self.player = cd.open(arg[0])
+		elif len(arg) == 2:
+			self.player = cd.open(arg[0], arg[1])
+		else:
+			raise Error, 'bad __init__ call'
+		self.list = []
+		self.callbacks = [(None, None)] * 8
+		self.parser = cd.createparser()
+		self.playing = 0
+		self.end = 0
+		self.status = None
+		self.trackinfo = None
+
+	def eject(self):
+		self.player.eject()
+		self.list = []
+		self.end = 0
+		self.listindex = 0
+		self.status = None
+		self.trackinfo = None
+		if self.playing:
+##			print 'stop playing from eject'
+			raise _Stop
+
+	def pmsf2msf(self, track, min, sec, frame):
+		if not self.status:
+			self.cachestatus()
+		if track < self.status[5] or track > self.status[6]:
+			raise Error, 'track number out of range'
+		if not self.trackinfo:
+			self.cacheinfo()
+		start, total = self.trackinfo[track]
+		start = ((start[0] * 60) + start[1]) * 75 + start[2]
+		total = ((total[0] * 60) + total[1]) * 75 + total[2]
+		block = ((min * 60) + sec) * 75 + frame
+		if block > total:
+			raise Error, 'out of range'
+		block = start + block
+		min, block = divmod(block, 75*60)
+		sec, frame = divmod(block, 75)
+		return min, sec, frame
+
+	def reset(self):
+		self.list = []
+
+	def appendtrack(self, track):
+		self.appendstretch(track, track)
+				
+	def appendstretch(self, start, end):
+		if not self.status:
+			self.cachestatus()
+		if not start:
+			start = 1
+		if not end:
+			end = self.status[6]
+		if type(end) == type(0):
+			if end < self.status[5] or end > self.status[6]:
+				raise Error, 'range error'
+		else:
+			l = len(end)
+			if l == 4:
+				prog, min, sec, frame = end
+				if prog < self.status[5] or prog > self.status[6]:
+					raise Error, 'range error'
+				end = self.pmsf2msf(prog, min, sec, frame)
+			elif l != 3:
+				raise Error, 'syntax error'
+		if type(start) == type(0):
+			if start < self.status[5] or start > self.status[6]:
+				raise Error, 'range error'
+			if len(self.list) > 0:
+				s, e = self.list[-1]
+				if type(e) == type(0):
+					if start == e+1:
+						start = s
+						del self.list[-1]
+		else:
+			l = len(start)
+			if l == 4:
+				prog, min, sec, frame = start
+				if prog < self.status[5] or prog > self.status[6]:
+					raise Error, 'range error'
+				start = self.pmsf2msf(prog, min, sec, frame)
+			elif l != 3:
+				raise Error, 'syntax error'
+		self.list.append((start, end))
+
+	def settracks(self, list):
+		self.list = []
+		for track in list:
+			self.appendtrack(track)
+
+	def setcallback(self, cb_type, func, arg):
+		if cb_type < 0 or cb_type >= 8:
+			raise Error, 'type out of range'
+		self.callbacks[cb_type] = (func, arg)
+		if self.playing:
+			start, end = self.list[self.listindex]
+			if type(end) == type(0):
+				if cb_type != CD.PNUM:
+					self.parser.setcallback(cb_type, func, arg)
+			else:
+				if cb_type != CD.ATIME:
+					self.parser.setcallback(cb_type, func, arg)
+
+	def removecallback(self, cb_type):
+		if cb_type < 0 or cb_type >= 8:
+			raise Error, 'type out of range'
+		self.callbacks[cb_type] = (None, None)
+		if self.playing:
+			start, end = self.list[self.listindex]
+			if type(end) == type(0):
+				if cb_type != CD.PNUM:
+					self.parser.removecallback(cb_type)
+			else:
+				if cb_type != CD.ATIME:
+					self.parser.removecallback(cb_type)
+
+	def gettrackinfo(self, *arg):
+		if not self.status:
+			self.cachestatus()
+		if not self.trackinfo:
+			self.cacheinfo()
+		if len(arg) == 0:
+			return self.trackinfo[self.status[5]:self.status[6]+1]
+		result = []
+		for i in arg:
+			if i < self.status[5] or i > self.status[6]:
+				raise Error, 'range error'
+			result.append(self.trackinfo[i])
+		return result
+
+	def cacheinfo(self):
+		if not self.status:
+			self.cachestatus()
+		self.trackinfo = []
+		for i in range(self.status[5]):
+			self.trackinfo.append(None)
+		for i in range(self.status[5], self.status[6]+1):
+			self.trackinfo.append(self.player.gettrackinfo(i))
+
+	def cachestatus(self):
+		self.status = self.player.getstatus()
+		if self.status[0] == CD.NODISC:
+			self.status = None
+			raise Error, 'no disc in player'
+
+	def getstatus(self):
+		return self.player.getstatus()
+
+	def play(self):
+		if not self.status:
+			self.cachestatus()
+		size = self.player.bestreadsize()
+		self.listindex = 0
+		self.playing = 0
+		for i in range(8):
+			func, arg = self.callbacks[i]
+			if func:
+				self.parser.setcallback(i, func, arg)
+			else:
+				self.parser.removecallback(i)
+		if len(self.list) == 0:
+			for i in range(self.status[5], self.status[6]+1):
+				self.appendtrack(i)
+		try:
+			while 1:
+				if not self.playing:
+					if self.listindex >= len(self.list):
+						return
+					start, end = self.list[self.listindex]
+					if type(start) == type(0):
+						dummy = self.player.seektrack(
+							start)
+					else:
+						min, sec, frame = start
+						dummy = self.player.seek(
+							min, sec, frame)
+					if type(end) == type(0):
+						self.parser.setcallback(
+							CD.PNUM, _dopnum, self)
+						self.end = end
+						func, arg = \
+						      self.callbacks[CD.ATIME]
+						if func:
+							self.parser.setcallback(CD.ATIME, func, arg)
+						else:
+							self.parser.removecallback(CD.ATIME)
+					else:
+						min, sec, frame = end
+						self.parser.setcallback(
+							CD.ATIME, _doatime,
+							self)
+						self.end = (min * 60 + sec) * \
+							   75 + frame
+						func, arg = \
+						      self.callbacks[CD.PNUM]
+						if func:
+							self.parser.setcallback(CD.PNUM, func, arg)
+						else:
+							self.parser.removecallback(CD.PNUM)
+					self.playing = 1
+				data = self.player.readda(size)
+				if data == '':
+					self.playing = 0
+					self.listindex = self.listindex + 1
+					continue
+				try:
+					self.parser.parseframe(data)
+				except _Stop:
+					self.playing = 0
+					self.listindex = self.listindex + 1
+		finally:
+			self.playing = 0
diff --git a/lib-python/2.2/plat-irix6/regen b/lib-python/2.2/plat-irix6/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/regen
@@ -0,0 +1,11 @@
+#! /bin/sh
+case `uname -sr` in
+'IRIX '[456].*)	;;
+'IRIX64 '[456].*)	;;
+*)	echo Probably not on an IRIX system 1>&2
+	exit 1;;
+esac
+set -v
+h2py /usr/include/sys/file.h
+h2py -i '(u_long)' /usr/include/netinet/in.h
+h2py /usr/include/errno.h
diff --git a/lib-python/2.2/plat-irix6/torgb.py b/lib-python/2.2/plat-irix6/torgb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-irix6/torgb.py
@@ -0,0 +1,98 @@
+# Convert "arbitrary" image files to rgb files (SGI's image format).
+# Input may be compressed.
+# The uncompressed file type may be PBM, PGM, PPM, GIF, TIFF, or Sun raster.
+# An exception is raised if the file is not of a recognized type.
+# Returned filename is either the input filename or a temporary filename;
+# in the latter case the caller must ensure that it is removed.
+# Other temporary files used are removed by the function.
+
+import os
+import tempfile
+import pipes
+import imghdr
+
+table = {}
+
+t = pipes.Template()
+t.append('fromppm $IN $OUT', 'ff')
+table['ppm'] = t
+
+t = pipes.Template()
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['pnm'] = t
+table['pgm'] = t
+table['pbm'] = t
+
+t = pipes.Template()
+t.append('fromgif $IN $OUT', 'ff')
+table['gif'] = t
+
+t = pipes.Template()
+t.append('tifftopnm', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['tiff'] = t
+
+t = pipes.Template()
+t.append('rasttopnm', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['rast'] = t
+
+t = pipes.Template()
+t.append('djpeg', '--')
+t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
+t.append('fromppm $IN $OUT', 'ff')
+table['jpeg'] = t
+
+uncompress = pipes.Template()
+uncompress.append('uncompress', '--')
+
+
+class error(Exception):
+	pass
+
+def torgb(filename):
+	temps = []
+	ret = None
+	try:
+		ret = _torgb(filename, temps)
+	finally:
+		for temp in temps[:]:
+			if temp != ret:
+				try:
+					os.unlink(temp)
+				except os.error:
+					pass
+				temps.remove(temp)
+	return ret
+
+def _torgb(filename, temps):
+	if filename[-2:] == '.Z':
+		fname = tempfile.mktemp()
+		temps.append(fname)
+		sts = uncompress.copy(filename, fname)
+		if sts:
+			raise error, filename + ': uncompress failed'
+	else:
+		fname = filename
+	try:
+		ftype = imghdr.what(fname)
+	except IOError, msg:
+		if type(msg) == type(()) and len(msg) == 2 and \
+			type(msg[0]) == type(0) and type(msg[1]) == type(''):
+			msg = msg[1]
+		if type(msg) is not type(''):
+			msg = `msg`
+		raise error, filename + ': ' + msg
+	if ftype == 'rgb':
+		return fname
+	if ftype is None or not table.has_key(ftype):
+		raise error, \
+			filename + ': unsupported image file type ' + `ftype`
+	temp = tempfile.mktemp()
+	sts = table[ftype].copy(fname, temp)
+	if sts:
+		raise error, filename + ': conversion to rgb failed'
+	return temp
diff --git a/lib-python/2.2/plat-linux1/IN.py b/lib-python/2.2/plat-linux1/IN.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-linux1/IN.py
@@ -0,0 +1,239 @@
+# Generated by h2py from /usr/include/netinet/in.h
+_NETINET_IN_H = 1
+
+# Included from features.h
+_FEATURES_H = 1
+_GNU_SOURCE = 1
+__USE_ANSI = 1
+__FAVOR_BSD = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 2
+__USE_POSIX = 1
+__USE_POSIX2 = 1
+__USE_MISC = 1
+__USE_BSD = 1
+__USE_SVID = 1
+__USE_GNU = 1
+__GNU_LIBRARY__ = 1
+
+# Included from sys/cdefs.h
+_SYS_CDEFS_H = 1
+def __P(args): return args	 
+
+def __P(args): return args
+
+def __P(args): return ()	 
+
+def __STRING(x): return #x
+
+def __STRING(x): return "x"
+
+
+# Included from sys/socket.h
+
+# Included from linux/socket.h
+
+# Included from asm/socket.h
+FIOSETOWN = 0x8901
+SIOCSPGRP = 0x8902
+FIOGETOWN = 0x8903
+SIOCGPGRP = 0x8904
+SIOCATMARK = 0x8905
+SIOCGSTAMP = 0x8906
+SOL_SOCKET = 1
+SO_DEBUG = 1
+SO_REUSEADDR = 2
+SO_TYPE = 3
+SO_ERROR = 4
+SO_DONTROUTE = 5
+SO_BROADCAST = 6
+SO_SNDBUF = 7
+SO_RCVBUF = 8
+SO_KEEPALIVE = 9
+SO_OOBINLINE = 10
+SO_NO_CHECK = 11
+SO_PRIORITY = 12
+SO_LINGER = 13
+
+# Included from linux/sockios.h
+SIOCADDRT = 0x890B
+SIOCDELRT = 0x890C
+SIOCGIFNAME = 0x8910
+SIOCSIFLINK = 0x8911
+SIOCGIFCONF = 0x8912
+SIOCGIFFLAGS = 0x8913
+SIOCSIFFLAGS = 0x8914
+SIOCGIFADDR = 0x8915
+SIOCSIFADDR = 0x8916
+SIOCGIFDSTADDR = 0x8917
+SIOCSIFDSTADDR = 0x8918
+SIOCGIFBRDADDR = 0x8919
+SIOCSIFBRDADDR = 0x891a
+SIOCGIFNETMASK = 0x891b
+SIOCSIFNETMASK = 0x891c
+SIOCGIFMETRIC = 0x891d
+SIOCSIFMETRIC = 0x891e
+SIOCGIFMEM = 0x891f
+SIOCSIFMEM = 0x8920
+SIOCGIFMTU = 0x8921
+SIOCSIFMTU = 0x8922
+SIOCSIFHWADDR = 0x8924
+SIOCGIFENCAP = 0x8925
+SIOCSIFENCAP = 0x8926
+SIOCGIFHWADDR = 0x8927
+SIOCGIFSLAVE = 0x8929
+SIOCSIFSLAVE = 0x8930
+SIOCADDMULTI = 0x8931
+SIOCDELMULTI = 0x8932
+OLD_SIOCDARP = 0x8950
+OLD_SIOCGARP = 0x8951
+OLD_SIOCSARP = 0x8952
+SIOCDARP = 0x8953
+SIOCGARP = 0x8954
+SIOCSARP = 0x8955
+SIOCDRARP = 0x8960
+SIOCGRARP = 0x8961
+SIOCSRARP = 0x8962
+SIOCGIFMAP = 0x8970
+SIOCSIFMAP = 0x8971
+SIOCDEVPRIVATE = 0x89F0
+SIOCPROTOPRIVATE = 0x89E0
+
+# Included from linux/uio.h
+MAX_IOVEC = 8
+SOCK_STREAM = 1
+SOCK_DGRAM = 2
+SOCK_RAW = 3
+SOCK_RDM = 4
+SOCK_SEQPACKET = 5
+SOCK_PACKET = 10
+AF_UNSPEC = 0
+AF_UNIX = 1
+AF_INET = 2
+AF_AX25 = 3
+AF_IPX = 4
+AF_APPLETALK = 5
+AF_NETROM = 6
+AF_BRIDGE = 7
+AF_AAL5 = 8
+AF_X25 = 9
+AF_INET6 = 10
+AF_MAX = 12
+PF_UNSPEC = AF_UNSPEC
+PF_UNIX = AF_UNIX
+PF_INET = AF_INET
+PF_AX25 = AF_AX25
+PF_IPX = AF_IPX
+PF_APPLETALK = AF_APPLETALK
+PF_NETROM = AF_NETROM
+PF_BRIDGE = AF_BRIDGE
+PF_AAL5 = AF_AAL5
+PF_X25 = AF_X25
+PF_INET6 = AF_INET6
+PF_MAX = AF_MAX
+SOMAXCONN = 128
+MSG_OOB = 1
+MSG_PEEK = 2
+MSG_DONTROUTE = 4
+SOL_IP = 0
+SOL_IPX = 256
+SOL_AX25 = 257
+SOL_ATALK = 258
+SOL_NETROM = 259
+SOL_TCP = 6
+SOL_UDP = 17
+IP_TOS = 1
+IPTOS_LOWDELAY = 0x10
+IPTOS_THROUGHPUT = 0x08
+IPTOS_RELIABILITY = 0x04
+IP_TTL = 2
+IP_HDRINCL = 3
+IP_OPTIONS = 4
+IP_MULTICAST_IF = 32
+IP_MULTICAST_TTL = 33
+IP_MULTICAST_LOOP = 34
+IP_ADD_MEMBERSHIP = 35
+IP_DROP_MEMBERSHIP = 36
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IPX_TYPE = 1
+TCP_NODELAY = 1
+TCP_MAXSEG = 2
+SOPRI_INTERACTIVE = 0
+SOPRI_NORMAL = 1
+SOPRI_BACKGROUND = 2
+
+# Included from sys/types.h
+
+# Included from linux/types.h
+__FD_SETSIZE = 256
+
+# Included from asm/types.h
+def __FD_ZERO(fdsetp): return \
+
+
+# Included from sys/bitypes.h
+
+# Included from pthread/mit/posix.h
+
+# Included from pthread/mit/types.h
+
+# Included from pthread/mit/xtypes.h
+
+# Included from pthread/mit/sys/types.h
+IMPLINK_IP = 155
+IMPLINK_LOWEXPER = 156
+IMPLINK_HIGHEXPER = 158
+
+# Included from linux/in.h
+__SOCK_SIZE__ = 16
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = (0xffffffff & ~IN_CLASSA_NET)
+IN_CLASSA_MAX = 128
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = (0xffffffff & ~IN_CLASSB_NET)
+IN_CLASSB_MAX = 65536
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = (0xffffffff & ~IN_CLASSC_NET)
+def IN_MULTICAST(a): return IN_CLASSD(a)
+
+IN_MULTICAST_NET = 0xF0000000
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+INADDR_LOOPBACK = 0x7f000001
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+
+# Included from asm/byteorder.h
+__LITTLE_ENDIAN = 1234
+def __constant_ntohl(x): return \
+
+def __constant_ntohs(x): return \
+
+def __htonl(x): return __ntohl(x)
+
+def __htons(x): return __ntohs(x)
+
+def __constant_htonl(x): return __constant_ntohl(x)
+
+def __constant_htons(x): return __constant_ntohs(x)
+
+def ntohl(x): return \
+
+def ntohs(x): return \
+
+def htonl(x): return \
+
+def htons(x): return \
+
+def LOOPBACK(x): return (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+def MULTICAST(x): return (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
+
diff --git a/lib-python/2.2/plat-linux1/regen b/lib-python/2.2/plat-linux1/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-linux1/regen
@@ -0,0 +1,8 @@
+#! /bin/sh
+case `uname` in
+Linux*)	;;
+*)	echo Probably not on a Linux system 1>&2
+	exit 1;;
+esac
+set -v
+h2py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-linux2/CDROM.py b/lib-python/2.2/plat-linux2/CDROM.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-linux2/CDROM.py
@@ -0,0 +1,207 @@
+# Generated by h2py from /usr/include/linux/cdrom.h
+
+CDROMPAUSE = 0x5301
+CDROMRESUME = 0x5302
+CDROMPLAYMSF = 0x5303
+CDROMPLAYTRKIND = 0x5304
+CDROMREADTOCHDR = 0x5305
+CDROMREADTOCENTRY = 0x5306
+CDROMSTOP = 0x5307
+CDROMSTART = 0x5308
+CDROMEJECT = 0x5309
+CDROMVOLCTRL = 0x530a
+CDROMSUBCHNL = 0x530b
+CDROMREADMODE2 = 0x530c
+CDROMREADMODE1 = 0x530d
+CDROMREADAUDIO = 0x530e
+CDROMEJECT_SW = 0x530f
+CDROMMULTISESSION = 0x5310
+CDROM_GET_MCN = 0x5311
+CDROM_GET_UPC = CDROM_GET_MCN
+CDROMRESET = 0x5312
+CDROMVOLREAD = 0x5313
+CDROMREADRAW = 0x5314
+CDROMREADCOOKED = 0x5315
+CDROMSEEK = 0x5316
+CDROMPLAYBLK = 0x5317
+CDROMREADALL = 0x5318
+CDROMGETSPINDOWN = 0x531d
+CDROMSETSPINDOWN = 0x531e
+CDROMCLOSETRAY = 0x5319
+CDROM_SET_OPTIONS = 0x5320
+CDROM_CLEAR_OPTIONS = 0x5321
+CDROM_SELECT_SPEED = 0x5322
+CDROM_SELECT_DISC = 0x5323
+CDROM_MEDIA_CHANGED = 0x5325
+CDROM_DRIVE_STATUS = 0x5326
+CDROM_DISC_STATUS = 0x5327
+CDROM_CHANGER_NSLOTS = 0x5328
+CDROM_LOCKDOOR = 0x5329
+CDROM_DEBUG = 0x5330
+CDROM_GET_CAPABILITY = 0x5331
+CDROMAUDIOBUFSIZ = 0x5382
+DVD_READ_STRUCT = 0x5390
+DVD_WRITE_STRUCT = 0x5391
+DVD_AUTH = 0x5392
+CDROM_SEND_PACKET = 0x5393
+CDROM_NEXT_WRITABLE = 0x5394
+CDROM_LAST_WRITTEN = 0x5395
+CDROM_PACKET_SIZE = 12
+CGC_DATA_UNKNOWN = 0
+CGC_DATA_WRITE = 1
+CGC_DATA_READ = 2
+CGC_DATA_NONE = 3
+CD_MINS = 74
+CD_SECS = 60
+CD_FRAMES = 75
+CD_SYNC_SIZE = 12
+CD_MSF_OFFSET = 150
+CD_CHUNK_SIZE = 24
+CD_NUM_OF_CHUNKS = 98
+CD_FRAMESIZE_SUB = 96
+CD_HEAD_SIZE = 4
+CD_SUBHEAD_SIZE = 8
+CD_EDC_SIZE = 4
+CD_ZERO_SIZE = 8
+CD_ECC_SIZE = 276
+CD_FRAMESIZE = 2048
+CD_FRAMESIZE_RAW = 2352
+CD_FRAMESIZE_RAWER = 2646
+CD_FRAMESIZE_RAW1 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE)
+CD_FRAMESIZE_RAW0 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE)
+CD_XA_HEAD = (CD_HEAD_SIZE+CD_SUBHEAD_SIZE)
+CD_XA_TAIL = (CD_EDC_SIZE+CD_ECC_SIZE)
+CD_XA_SYNC_HEAD = (CD_SYNC_SIZE+CD_XA_HEAD)
+CDROM_LBA = 0x01
+CDROM_MSF = 0x02
+CDROM_DATA_TRACK = 0x04
+CDROM_LEADOUT = 0xAA
+CDROM_AUDIO_INVALID = 0x00
+CDROM_AUDIO_PLAY = 0x11
+CDROM_AUDIO_PAUSED = 0x12
+CDROM_AUDIO_COMPLETED = 0x13
+CDROM_AUDIO_ERROR = 0x14
+CDROM_AUDIO_NO_STATUS = 0x15
+CDC_CLOSE_TRAY = 0x1
+CDC_OPEN_TRAY = 0x2
+CDC_LOCK = 0x4
+CDC_SELECT_SPEED = 0x8
+CDC_SELECT_DISC = 0x10
+CDC_MULTI_SESSION = 0x20
+CDC_MCN = 0x40
+CDC_MEDIA_CHANGED = 0x80
+CDC_PLAY_AUDIO = 0x100
+CDC_RESET = 0x200
+CDC_IOCTLS = 0x400
+CDC_DRIVE_STATUS = 0x800
+CDC_GENERIC_PACKET = 0x1000
+CDC_CD_R = 0x2000
+CDC_CD_RW = 0x4000
+CDC_DVD = 0x8000
+CDC_DVD_R = 0x10000
+CDC_DVD_RAM = 0x20000
+CDS_NO_INFO = 0
+CDS_NO_DISC = 1
+CDS_TRAY_OPEN = 2
+CDS_DRIVE_NOT_READY = 3
+CDS_DISC_OK = 4
+CDS_AUDIO = 100
+CDS_DATA_1 = 101
+CDS_DATA_2 = 102
+CDS_XA_2_1 = 103
+CDS_XA_2_2 = 104
+CDS_MIXED = 105
+CDO_AUTO_CLOSE = 0x1
+CDO_AUTO_EJECT = 0x2
+CDO_USE_FFLAGS = 0x4
+CDO_LOCK = 0x8
+CDO_CHECK_TYPE = 0x10
+CD_PART_MAX = 64
+CD_PART_MASK = (CD_PART_MAX - 1)
+GPCMD_BLANK = 0xa1
+GPCMD_CLOSE_TRACK = 0x5b
+GPCMD_FLUSH_CACHE = 0x35
+GPCMD_FORMAT_UNIT = 0x04
+GPCMD_GET_CONFIGURATION = 0x46
+GPCMD_GET_EVENT_STATUS_NOTIFICATION = 0x4a
+GPCMD_GET_PERFORMANCE = 0xac
+GPCMD_INQUIRY = 0x12
+GPCMD_LOAD_UNLOAD = 0xa6
+GPCMD_MECHANISM_STATUS = 0xbd
+GPCMD_MODE_SELECT_10 = 0x55
+GPCMD_MODE_SENSE_10 = 0x5a
+GPCMD_PAUSE_RESUME = 0x4b
+GPCMD_PLAY_AUDIO_10 = 0x45
+GPCMD_PLAY_AUDIO_MSF = 0x47
+GPCMD_PLAY_AUDIO_TI = 0x48
+GPCMD_PLAY_CD = 0xbc
+GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e
+GPCMD_READ_10 = 0x28
+GPCMD_READ_12 = 0xa8
+GPCMD_READ_CDVD_CAPACITY = 0x25
+GPCMD_READ_CD = 0xbe
+GPCMD_READ_CD_MSF = 0xb9
+GPCMD_READ_DISC_INFO = 0x51
+GPCMD_READ_DVD_STRUCTURE = 0xad
+GPCMD_READ_FORMAT_CAPACITIES = 0x23
+GPCMD_READ_HEADER = 0x44
+GPCMD_READ_TRACK_RZONE_INFO = 0x52
+GPCMD_READ_SUBCHANNEL = 0x42
+GPCMD_READ_TOC_PMA_ATIP = 0x43
+GPCMD_REPAIR_RZONE_TRACK = 0x58
+GPCMD_REPORT_KEY = 0xa4
+GPCMD_REQUEST_SENSE = 0x03
+GPCMD_RESERVE_RZONE_TRACK = 0x53
+GPCMD_SCAN = 0xba
+GPCMD_SEEK = 0x2b
+GPCMD_SEND_DVD_STRUCTURE = 0xad
+GPCMD_SEND_EVENT = 0xa2
+GPCMD_SEND_KEY = 0xa3
+GPCMD_SEND_OPC = 0x54
+GPCMD_SET_READ_AHEAD = 0xa7
+GPCMD_SET_STREAMING = 0xb6
+GPCMD_START_STOP_UNIT = 0x1b
+GPCMD_STOP_PLAY_SCAN = 0x4e
+GPCMD_TEST_UNIT_READY = 0x00
+GPCMD_VERIFY_10 = 0x2f
+GPCMD_WRITE_10 = 0x2a
+GPCMD_WRITE_AND_VERIFY_10 = 0x2e
+GPCMD_SET_SPEED = 0xbb
+GPCMD_PLAYAUDIO_TI = 0x48
+GPCMD_GET_MEDIA_STATUS = 0xda
+GPMODE_R_W_ERROR_PAGE = 0x01
+GPMODE_WRITE_PARMS_PAGE = 0x05
+GPMODE_AUDIO_CTL_PAGE = 0x0e
+GPMODE_POWER_PAGE = 0x1a
+GPMODE_FAULT_FAIL_PAGE = 0x1c
+GPMODE_TO_PROTECT_PAGE = 0x1d
+GPMODE_CAPABILITIES_PAGE = 0x2a
+GPMODE_ALL_PAGES = 0x3f
+GPMODE_CDROM_PAGE = 0x0d
+DVD_STRUCT_PHYSICAL = 0x00
+DVD_STRUCT_COPYRIGHT = 0x01
+DVD_STRUCT_DISCKEY = 0x02
+DVD_STRUCT_BCA = 0x03
+DVD_STRUCT_MANUFACT = 0x04
+DVD_LAYERS = 4
+DVD_LU_SEND_AGID = 0
+DVD_HOST_SEND_CHALLENGE = 1
+DVD_LU_SEND_KEY1 = 2
+DVD_LU_SEND_CHALLENGE = 3
+DVD_HOST_SEND_KEY2 = 4
+DVD_AUTH_ESTABLISHED = 5
+DVD_AUTH_FAILURE = 6
+DVD_LU_SEND_TITLE_KEY = 7
+DVD_LU_SEND_ASF = 8
+DVD_INVALIDATE_AGID = 9
+DVD_LU_SEND_RPC_STATE = 10
+DVD_HOST_SEND_RPC_STATE = 11
+DVD_CPM_NO_COPYRIGHT = 0
+DVD_CPM_COPYRIGHTED = 1
+DVD_CP_SEC_NONE = 0
+DVD_CP_SEC_EXIST = 1
+DVD_CGMS_UNRESTRICTED = 0
+DVD_CGMS_SINGLE = 2
+DVD_CGMS_RESTRICTED = 3
+
+CDROM_MAX_SLOTS = 256
diff --git a/lib-python/2.2/plat-linux2/DLFCN.py b/lib-python/2.2/plat-linux2/DLFCN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-linux2/DLFCN.py
@@ -0,0 +1,83 @@
+# Generated by h2py from /usr/include/dlfcn.h
+_DLFCN_H = 1
+
+# Included from features.h
+_FEATURES_H = 1
+__USE_ANSI = 1
+__FAVOR_BSD = 1
+_ISOC99_SOURCE = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 199506L
+_XOPEN_SOURCE = 600
+_XOPEN_SOURCE_EXTENDED = 1
+_LARGEFILE64_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+__USE_ISOC99 = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 2
+_POSIX_C_SOURCE = 199506L
+__USE_POSIX = 1
+__USE_POSIX2 = 1
+__USE_POSIX199309 = 1
+__USE_POSIX199506 = 1
+__USE_XOPEN = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_UNIX98 = 1
+_LARGEFILE_SOURCE = 1
+__USE_XOPEN2K = 1
+__USE_ISOC99 = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_FILE_OFFSET64 = 1
+__USE_MISC = 1
+__USE_BSD = 1
+__USE_SVID = 1
+__USE_GNU = 1
+__USE_REENTRANT = 1
+__STDC_IEC_559__ = 1
+__STDC_IEC_559_COMPLEX__ = 1
+__STDC_ISO_10646__ = 200009L
+__GNU_LIBRARY__ = 6
+__GLIBC__ = 2
+__GLIBC_MINOR__ = 2
+
+# Included from sys/cdefs.h
+_SYS_CDEFS_H = 1
+def __PMT(args): return args
+
+def __P(args): return args
+
+def __PMT(args): return args
+
+def __STRING(x): return #x
+
+__flexarr = []
+__flexarr = [0]
+__flexarr = []
+__flexarr = [1]
+def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
+
+def __attribute__(xyz): return  
+
+def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
+
+def __attribute_format_arg__(x): return  
+
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_EXTERN_INLINES = 1
+
+# Included from gnu/stubs.h
+
+# Included from bits/dlfcn.h
+RTLD_LAZY = 0x00001
+RTLD_NOW = 0x00002
+RTLD_BINDING_MASK = 0x3
+RTLD_NOLOAD = 0x00004
+RTLD_GLOBAL = 0x00100
+RTLD_LOCAL = 0
+RTLD_NODELETE = 0x01000
diff --git a/lib-python/2.2/plat-linux2/IN.py b/lib-python/2.2/plat-linux2/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-linux2/IN.py
@@ -0,0 +1,603 @@
+# Generated by h2py from /usr/include/netinet/in.h
+_NETINET_IN_H = 1
+
+# Included from features.h
+_FEATURES_H = 1
+__USE_ANSI = 1
+__FAVOR_BSD = 1
+_ISOC99_SOURCE = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 199506L
+_XOPEN_SOURCE = 600
+_XOPEN_SOURCE_EXTENDED = 1
+_LARGEFILE64_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+__USE_ISOC99 = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 2
+_POSIX_C_SOURCE = 199506L
+__USE_POSIX = 1
+__USE_POSIX2 = 1
+__USE_POSIX199309 = 1
+__USE_POSIX199506 = 1
+__USE_XOPEN = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_UNIX98 = 1
+_LARGEFILE_SOURCE = 1
+__USE_XOPEN2K = 1
+__USE_ISOC99 = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_FILE_OFFSET64 = 1
+__USE_MISC = 1
+__USE_BSD = 1
+__USE_SVID = 1
+__USE_GNU = 1
+__USE_REENTRANT = 1
+__STDC_IEC_559__ = 1
+__STDC_IEC_559_COMPLEX__ = 1
+__STDC_ISO_10646__ = 200009L
+__GNU_LIBRARY__ = 6
+__GLIBC__ = 2
+__GLIBC_MINOR__ = 2
+
+# Included from sys/cdefs.h
+_SYS_CDEFS_H = 1
+def __PMT(args): return args
+
+def __P(args): return args
+
+def __PMT(args): return args
+
+def __STRING(x): return #x
+
+__flexarr = []
+__flexarr = [0]
+__flexarr = []
+__flexarr = [1]
+def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
+
+def __attribute__(xyz): return  
+
+def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
+
+def __attribute_format_arg__(x): return  
+
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_EXTERN_INLINES = 1
+
+# Included from gnu/stubs.h
+
+# Included from stdint.h
+_STDINT_H = 1
+
+# Included from bits/wchar.h
+_BITS_WCHAR_H = 1
+__WCHAR_MIN = (-2147483647l - 1l)
+__WCHAR_MAX = (2147483647l)
+
+# Included from bits/wordsize.h
+__WORDSIZE = 32
+def __INT64_C(c): return c ## L
+
+def __UINT64_C(c): return c ## UL
+
+def __INT64_C(c): return c ## LL
+
+def __UINT64_C(c): return c ## ULL
+
+INT8_MIN = (-128)
+INT16_MIN = (-32767-1)
+INT32_MIN = (-2147483647-1)
+INT8_MAX = (127)
+INT16_MAX = (32767)
+INT32_MAX = (2147483647)
+UINT8_MAX = (255)
+UINT16_MAX = (65535)
+INT_LEAST8_MIN = (-128)
+INT_LEAST16_MIN = (-32767-1)
+INT_LEAST32_MIN = (-2147483647-1)
+INT_LEAST8_MAX = (127)
+INT_LEAST16_MAX = (32767)
+INT_LEAST32_MAX = (2147483647)
+UINT_LEAST8_MAX = (255)
+UINT_LEAST16_MAX = (65535)
+INT_FAST8_MIN = (-128)
+INT_FAST16_MIN = (-9223372036854775807L-1)
+INT_FAST32_MIN = (-9223372036854775807L-1)
+INT_FAST16_MIN = (-2147483647-1)
+INT_FAST32_MIN = (-2147483647-1)
+INT_FAST8_MAX = (127)
+INT_FAST16_MAX = (9223372036854775807L)
+INT_FAST32_MAX = (9223372036854775807L)
+INT_FAST16_MAX = (2147483647)
+INT_FAST32_MAX = (2147483647)
+UINT_FAST8_MAX = (255)
+INTPTR_MIN = (-9223372036854775807L-1)
+INTPTR_MAX = (9223372036854775807L)
+INTPTR_MIN = (-2147483647-1)
+INTPTR_MAX = (2147483647)
+PTRDIFF_MIN = (-9223372036854775807L-1)
+PTRDIFF_MAX = (9223372036854775807L)
+PTRDIFF_MIN = (-2147483647-1)
+PTRDIFF_MAX = (2147483647)
+SIG_ATOMIC_MIN = (-2147483647-1)
+SIG_ATOMIC_MAX = (2147483647)
+WCHAR_MIN = __WCHAR_MIN
+WCHAR_MAX = __WCHAR_MAX
+def INT8_C(c): return c
+
+def INT16_C(c): return c
+
+def INT32_C(c): return c
+
+def INT64_C(c): return c ## L
+
+def INT64_C(c): return c ## LL
+
+def UINT8_C(c): return c ## U
+
+def UINT16_C(c): return c ## U
+
+def UINT32_C(c): return c ## U
+
+def UINT64_C(c): return c ## UL
+
+def UINT64_C(c): return c ## ULL
+
+def INTMAX_C(c): return c ## L
+
+def UINTMAX_C(c): return c ## UL
+
+def INTMAX_C(c): return c ## LL
+
+def UINTMAX_C(c): return c ## ULL
+
+
+# Included from bits/types.h
+_BITS_TYPES_H = 1
+__FD_SETSIZE = 1024
+
+# Included from bits/pthreadtypes.h
+_BITS_PTHREADTYPES_H = 1
+
+# Included from bits/sched.h
+SCHED_OTHER = 0
+SCHED_FIFO = 1
+SCHED_RR = 2
+CSIGNAL = 0x000000ff
+CLONE_VM = 0x00000100
+CLONE_FS = 0x00000200
+CLONE_FILES = 0x00000400
+CLONE_SIGHAND = 0x00000800
+CLONE_PID = 0x00001000
+CLONE_PTRACE = 0x00002000
+CLONE_VFORK = 0x00004000
+__defined_schedparam = 1
+def IN_CLASSA(a): return ((((in_addr_t)(a)) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = (0xffffffff & ~IN_CLASSA_NET)
+IN_CLASSA_MAX = 128
+def IN_CLASSB(a): return ((((in_addr_t)(a)) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = (0xffffffff & ~IN_CLASSB_NET)
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(a): return ((((in_addr_t)(a)) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = (0xffffffff & ~IN_CLASSC_NET)
+def IN_CLASSD(a): return ((((in_addr_t)(a)) & 0xf0000000) == 0xe0000000)
+
+def IN_MULTICAST(a): return IN_CLASSD(a)
+
+def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & 0xe0000000) == 0xe0000000)
+
+def IN_BADCLASS(a): return ((((in_addr_t)(a)) & 0xf0000000) == 0xf0000000)
+
+IN_LOOPBACKNET = 127
+INET_ADDRSTRLEN = 16
+INET6_ADDRSTRLEN = 46
+
+# Included from bits/socket.h
+
+# Included from limits.h
+_LIBC_LIMITS_H_ = 1
+MB_LEN_MAX = 16
+_LIMITS_H = 1
+CHAR_BIT = 8
+SCHAR_MIN = (-128)
+SCHAR_MAX = 127
+UCHAR_MAX = 255
+CHAR_MIN = 0
+CHAR_MAX = UCHAR_MAX
+CHAR_MIN = SCHAR_MIN
+CHAR_MAX = SCHAR_MAX
+SHRT_MIN = (-32768)
+SHRT_MAX = 32767
+USHRT_MAX = 65535
+INT_MAX = 2147483647
+LONG_MAX = 9223372036854775807L
+LONG_MAX = 2147483647L
+LONG_MIN = (-LONG_MAX - 1L)
+
+# Included from bits/posix1_lim.h
+_BITS_POSIX1_LIM_H = 1
+_POSIX_AIO_LISTIO_MAX = 2
+_POSIX_AIO_MAX = 1
+_POSIX_ARG_MAX = 4096
+_POSIX_CHILD_MAX = 6
+_POSIX_DELAYTIMER_MAX = 32
+_POSIX_LINK_MAX = 8
+_POSIX_MAX_CANON = 255
+_POSIX_MAX_INPUT = 255
+_POSIX_MQ_OPEN_MAX = 8
+_POSIX_MQ_PRIO_MAX = 32
+_POSIX_NGROUPS_MAX = 0
+_POSIX_OPEN_MAX = 16
+_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
+_POSIX_NAME_MAX = 14
+_POSIX_PATH_MAX = 256
+_POSIX_PIPE_BUF = 512
+_POSIX_RTSIG_MAX = 8
+_POSIX_SEM_NSEMS_MAX = 256
+_POSIX_SEM_VALUE_MAX = 32767
+_POSIX_SIGQUEUE_MAX = 32
+_POSIX_SSIZE_MAX = 32767
+_POSIX_STREAM_MAX = 8
+_POSIX_TZNAME_MAX = 6
+_POSIX_QLIMIT = 1
+_POSIX_HIWAT = _POSIX_PIPE_BUF
+_POSIX_UIO_MAXIOV = 16
+_POSIX_TTY_NAME_MAX = 9
+_POSIX_TIMER_MAX = 32
+_POSIX_LOGIN_NAME_MAX = 9
+_POSIX_CLOCKRES_MIN = 20000000
+
+# Included from bits/local_lim.h
+
+# Included from linux/limits.h
+NR_OPEN = 1024
+NGROUPS_MAX = 32
+ARG_MAX = 131072
+CHILD_MAX = 999
+OPEN_MAX = 256
+LINK_MAX = 127
+MAX_CANON = 255
+MAX_INPUT = 255
+NAME_MAX = 255
+PATH_MAX = 4095
+PIPE_BUF = 4096
+RTSIG_MAX = 32
+_POSIX_THREAD_KEYS_MAX = 128
+PTHREAD_KEYS_MAX = 1024
+_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
+PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
+_POSIX_THREAD_THREADS_MAX = 64
+PTHREAD_THREADS_MAX = 1024
+AIO_PRIO_DELTA_MAX = 20
+PTHREAD_STACK_MIN = 16384
+TIMER_MAX = 256
+SSIZE_MAX = INT_MAX
+NGROUPS_MAX = _POSIX_NGROUPS_MAX
+
+# Included from bits/posix2_lim.h
+_BITS_POSIX2_LIM_H = 1
+_POSIX2_BC_BASE_MAX = 99
+_POSIX2_BC_DIM_MAX = 2048
+_POSIX2_BC_SCALE_MAX = 99
+_POSIX2_BC_STRING_MAX = 1000
+_POSIX2_COLL_WEIGHTS_MAX = 2
+_POSIX2_EXPR_NEST_MAX = 32
+_POSIX2_LINE_MAX = 2048
+_POSIX2_RE_DUP_MAX = 255
+_POSIX2_CHARCLASS_NAME_MAX = 14
+BC_BASE_MAX = _POSIX2_BC_BASE_MAX
+BC_DIM_MAX = _POSIX2_BC_DIM_MAX
+BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
+BC_STRING_MAX = _POSIX2_BC_STRING_MAX
+COLL_WEIGHTS_MAX = 255
+EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
+LINE_MAX = _POSIX2_LINE_MAX
+CHARCLASS_NAME_MAX = 2048
+RE_DUP_MAX = (0x7fff)
+
+# Included from bits/xopen_lim.h
+_XOPEN_LIM_H = 1
+
+# Included from bits/stdio_lim.h
+L_tmpnam = 20
+TMP_MAX = 238328
+FILENAME_MAX = 4095
+L_ctermid = 9
+L_cuserid = 9
+FOPEN_MAX = 16
+IOV_MAX = 1024
+_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
+NL_ARGMAX = _POSIX_ARG_MAX
+NL_LANGMAX = _POSIX2_LINE_MAX
+NL_MSGMAX = INT_MAX
+NL_NMAX = INT_MAX
+NL_SETMAX = INT_MAX
+NL_TEXTMAX = INT_MAX
+NZERO = 20
+WORD_BIT = 16
+WORD_BIT = 32
+WORD_BIT = 64
+WORD_BIT = 16
+WORD_BIT = 32
+WORD_BIT = 64
+WORD_BIT = 32
+LONG_BIT = 32
+LONG_BIT = 64
+LONG_BIT = 32
+LONG_BIT = 64
+LONG_BIT = 64
+LONG_BIT = 32
+from TYPES import *
+PF_UNSPEC = 0
+PF_LOCAL = 1
+PF_UNIX = PF_LOCAL
+PF_FILE = PF_LOCAL
+PF_INET = 2
+PF_AX25 = 3
+PF_IPX = 4
+PF_APPLETALK = 5
+PF_NETROM = 6
+PF_BRIDGE = 7
+PF_ATMPVC = 8
+PF_X25 = 9
+PF_INET6 = 10
+PF_ROSE = 11
+PF_DECnet = 12
+PF_NETBEUI = 13
+PF_SECURITY = 14
+PF_KEY = 15
+PF_NETLINK = 16
+PF_ROUTE = PF_NETLINK
+PF_PACKET = 17
+PF_ASH = 18
+PF_ECONET = 19
+PF_ATMSVC = 20
+PF_SNA = 22
+PF_IRDA = 23
+PF_PPPOX = 24
+PF_WANPIPE = 25
+PF_BLUETOOTH = 31
+PF_MAX = 32
+AF_UNSPEC = PF_UNSPEC
+AF_LOCAL = PF_LOCAL
+AF_UNIX = PF_UNIX
+AF_FILE = PF_FILE
+AF_INET = PF_INET
+AF_AX25 = PF_AX25
+AF_IPX = PF_IPX
+AF_APPLETALK = PF_APPLETALK
+AF_NETROM = PF_NETROM
+AF_BRIDGE = PF_BRIDGE
+AF_ATMPVC = PF_ATMPVC
+AF_X25 = PF_X25
+AF_INET6 = PF_INET6
+AF_ROSE = PF_ROSE
+AF_DECnet = PF_DECnet
+AF_NETBEUI = PF_NETBEUI
+AF_SECURITY = PF_SECURITY
+AF_KEY = PF_KEY
+AF_NETLINK = PF_NETLINK
+AF_ROUTE = PF_ROUTE
+AF_PACKET = PF_PACKET
+AF_ASH = PF_ASH
+AF_ECONET = PF_ECONET
+AF_ATMSVC = PF_ATMSVC
+AF_SNA = PF_SNA
+AF_IRDA = PF_IRDA
+AF_PPPOX = PF_PPPOX
+AF_WANPIPE = PF_WANPIPE
+AF_BLUETOOTH = PF_BLUETOOTH
+AF_MAX = PF_MAX
+SOL_RAW = 255
+SOL_DECNET = 261
+SOL_X25 = 262
+SOL_PACKET = 263
+SOL_ATM = 264
+SOL_AAL = 265
+SOL_IRDA = 266
+SOMAXCONN = 128
+
+# Included from bits/sockaddr.h
+_BITS_SOCKADDR_H = 1
+def __SOCKADDR_COMMON(sa_prefix): return \
+
+_SS_SIZE = 128
+def CMSG_FIRSTHDR(mhdr): return \
+
+
+# Included from asm/socket.h
+
+# Included from asm/sockios.h
+FIOSETOWN = 0x8901
+SIOCSPGRP = 0x8902
+FIOGETOWN = 0x8903
+SIOCGPGRP = 0x8904
+SIOCATMARK = 0x8905
+SIOCGSTAMP = 0x8906
+SOL_SOCKET = 1
+SO_DEBUG = 1
+SO_REUSEADDR = 2
+SO_TYPE = 3
+SO_ERROR = 4
+SO_DONTROUTE = 5
+SO_BROADCAST = 6
+SO_SNDBUF = 7
+SO_RCVBUF = 8
+SO_KEEPALIVE = 9
+SO_OOBINLINE = 10
+SO_NO_CHECK = 11
+SO_PRIORITY = 12
+SO_LINGER = 13
+SO_BSDCOMPAT = 14
+SO_PASSCRED = 16
+SO_PEERCRED = 17
+SO_RCVLOWAT = 18
+SO_SNDLOWAT = 19
+SO_RCVTIMEO = 20
+SO_SNDTIMEO = 21
+SO_SECURITY_AUTHENTICATION = 22
+SO_SECURITY_ENCRYPTION_TRANSPORT = 23
+SO_SECURITY_ENCRYPTION_NETWORK = 24
+SO_BINDTODEVICE = 25
+SO_ATTACH_FILTER = 26
+SO_DETACH_FILTER = 27
+SO_PEERNAME = 28
+SO_TIMESTAMP = 29
+SCM_TIMESTAMP = SO_TIMESTAMP
+SO_ACCEPTCONN = 30
+SOCK_STREAM = 1
+SOCK_DGRAM = 2
+SOCK_RAW = 3
+SOCK_RDM = 4
+SOCK_SEQPACKET = 5
+SOCK_PACKET = 10
+SOCK_MAX = (SOCK_PACKET+1)
+
+# Included from bits/in.h
+IP_TOS = 1
+IP_TTL = 2
+IP_HDRINCL = 3
+IP_OPTIONS = 4
+IP_ROUTER_ALERT = 5
+IP_RECVOPTS = 6
+IP_RETOPTS = 7
+IP_PKTINFO = 8
+IP_PKTOPTIONS = 9
+IP_PMTUDISC = 10
+IP_MTU_DISCOVER = 10
+IP_RECVERR = 11
+IP_RECVTTL = 12
+IP_RECVTOS = 13
+IP_MULTICAST_IF = 32
+IP_MULTICAST_TTL = 33
+IP_MULTICAST_LOOP = 34
+IP_ADD_MEMBERSHIP = 35
+IP_DROP_MEMBERSHIP = 36
+IP_RECVRETOPTS = IP_RETOPTS
+IP_PMTUDISC_DONT = 0
+IP_PMTUDISC_WANT = 1
+IP_PMTUDISC_DO = 2
+SOL_IP = 0
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IPV6_ADDRFORM = 1
+IPV6_PKTINFO = 2
+IPV6_HOPOPTS = 3
+IPV6_DSTOPTS = 4
+IPV6_RTHDR = 5
+IPV6_PKTOPTIONS = 6
+IPV6_CHECKSUM = 7
+IPV6_HOPLIMIT = 8
+IPV6_NEXTHOP = 9
+IPV6_AUTHHDR = 10
+IPV6_UNICAST_HOPS = 16
+IPV6_MULTICAST_IF = 17
+IPV6_MULTICAST_HOPS = 18
+IPV6_MULTICAST_LOOP = 19
+IPV6_JOIN_GROUP = 20
+IPV6_LEAVE_GROUP = 21
+IPV6_ROUTER_ALERT = 22
+IPV6_MTU_DISCOVER = 23
+IPV6_MTU = 24
+IPV6_RECVERR = 25
+IPV6_RXHOPOPTS = IPV6_HOPOPTS
+IPV6_RXDSTOPTS = IPV6_DSTOPTS
+IPV6_ADD_MEMBERSHIP = IPV6_JOIN_GROUP
+IPV6_DROP_MEMBERSHIP = IPV6_LEAVE_GROUP
+IPV6_PMTUDISC_DONT = 0
+IPV6_PMTUDISC_WANT = 1
+IPV6_PMTUDISC_DO = 2
+SOL_IPV6 = 41
+SOL_ICMPV6 = 58
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
+
+# Included from endian.h
+_ENDIAN_H = 1
+__LITTLE_ENDIAN = 1234
+__BIG_ENDIAN = 4321
+__PDP_ENDIAN = 3412
+
+# Included from bits/endian.h
+__BYTE_ORDER = __LITTLE_ENDIAN
+__FLOAT_WORD_ORDER = __BYTE_ORDER
+LITTLE_ENDIAN = __LITTLE_ENDIAN
+BIG_ENDIAN = __BIG_ENDIAN
+PDP_ENDIAN = __PDP_ENDIAN
+BYTE_ORDER = __BYTE_ORDER
+
+# Included from bits/byteswap.h
+def __bswap_constant_16(x): return \
+
+def __bswap_16(x): return \
+
+def __bswap_16(x): return __bswap_constant_16 (x)
+
+def __bswap_constant_32(x): return \
+
+def __bswap_32(x): return \
+
+def __bswap_32(x): return \
+
+def __bswap_32(x): return __bswap_constant_32 (x)
+
+def __bswap_constant_64(x): return \
+
+def __bswap_64(x): return \
+
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+def ntohl(x): return __bswap_32 (x)
+
+def ntohs(x): return __bswap_16 (x)
+
+def htonl(x): return __bswap_32 (x)
+
+def htons(x): return __bswap_16 (x)
+
+def IN6_IS_ADDR_UNSPECIFIED(a): return \
+
+def IN6_IS_ADDR_LOOPBACK(a): return \
+
+def IN6_IS_ADDR_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_V4MAPPED(a): return \
+
+def IN6_IS_ADDR_V4COMPAT(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
diff --git a/lib-python/2.2/plat-linux2/TYPES.py b/lib-python/2.2/plat-linux2/TYPES.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-linux2/TYPES.py
@@ -0,0 +1,171 @@
+# Generated by h2py from /usr/include/sys/types.h
+_SYS_TYPES_H = 1
+
+# Included from features.h
+_FEATURES_H = 1
+__USE_ANSI = 1
+__FAVOR_BSD = 1
+_ISOC99_SOURCE = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 199506L
+_XOPEN_SOURCE = 600
+_XOPEN_SOURCE_EXTENDED = 1
+_LARGEFILE64_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+_BSD_SOURCE = 1
+_SVID_SOURCE = 1
+__USE_ISOC99 = 1
+_POSIX_SOURCE = 1
+_POSIX_C_SOURCE = 2
+_POSIX_C_SOURCE = 199506L
+__USE_POSIX = 1
+__USE_POSIX2 = 1
+__USE_POSIX199309 = 1
+__USE_POSIX199506 = 1
+__USE_XOPEN = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_UNIX98 = 1
+_LARGEFILE_SOURCE = 1
+__USE_XOPEN2K = 1
+__USE_ISOC99 = 1
+__USE_XOPEN_EXTENDED = 1
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_FILE_OFFSET64 = 1
+__USE_MISC = 1
+__USE_BSD = 1
+__USE_SVID = 1
+__USE_GNU = 1
+__USE_REENTRANT = 1
+__STDC_IEC_559__ = 1
+__STDC_IEC_559_COMPLEX__ = 1
+__STDC_ISO_10646__ = 200009L
+__GNU_LIBRARY__ = 6
+__GLIBC__ = 2
+__GLIBC_MINOR__ = 2
+
+# Included from sys/cdefs.h
+_SYS_CDEFS_H = 1
+def __PMT(args): return args
+
+def __P(args): return args
+
+def __PMT(args): return args
+
+def __STRING(x): return #x
+
+__flexarr = []
+__flexarr = [0]
+__flexarr = []
+__flexarr = [1]
+def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
+
+def __attribute__(xyz): return  
+
+def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
+
+def __attribute_format_arg__(x): return  
+
+__USE_LARGEFILE = 1
+__USE_LARGEFILE64 = 1
+__USE_EXTERN_INLINES = 1
+
+# Included from gnu/stubs.h
+
+# Included from bits/types.h
+_BITS_TYPES_H = 1
+__FD_SETSIZE = 1024
+
+# Included from bits/pthreadtypes.h
+_BITS_PTHREADTYPES_H = 1
+
+# Included from bits/sched.h
+SCHED_OTHER = 0
+SCHED_FIFO = 1
+SCHED_RR = 2
+CSIGNAL = 0x000000ff
+CLONE_VM = 0x00000100
+CLONE_FS = 0x00000200
+CLONE_FILES = 0x00000400
+CLONE_SIGHAND = 0x00000800
+CLONE_PID = 0x00001000
+CLONE_PTRACE = 0x00002000
+CLONE_VFORK = 0x00004000
+__defined_schedparam = 1
+
+# Included from time.h
+_TIME_H = 1
+
+# Included from bits/time.h
+_BITS_TIME_H = 1
+CLOCKS_PER_SEC = 1000000l
+CLOCK_REALTIME = 0
+CLOCK_PROCESS_CPUTIME_ID = 2
+CLOCK_THREAD_CPUTIME_ID = 3
+TIMER_ABSTIME = 1
+_STRUCT_TIMEVAL = 1
+CLK_TCK = CLOCKS_PER_SEC
+__clock_t_defined = 1
+__time_t_defined = 1
+__clockid_t_defined = 1
+__timer_t_defined = 1
+__timespec_defined = 1
+def __isleap(year): return \
+
+__BIT_TYPES_DEFINED__ = 1
+
+# Included from endian.h
+_ENDIAN_H = 1
+__LITTLE_ENDIAN = 1234
+__BIG_ENDIAN = 4321
+__PDP_ENDIAN = 3412
+
+# Included from bits/endian.h
+__BYTE_ORDER = __LITTLE_ENDIAN
+__FLOAT_WORD_ORDER = __BYTE_ORDER
+LITTLE_ENDIAN = __LITTLE_ENDIAN
+BIG_ENDIAN = __BIG_ENDIAN
+PDP_ENDIAN = __PDP_ENDIAN
+BYTE_ORDER = __BYTE_ORDER
+
+# Included from sys/select.h
+_SYS_SELECT_H = 1
+
+# Included from bits/select.h
+def __FD_ZERO(fdsp): return \
+
+def __FD_ZERO(set): return \
+
+
+# Included from bits/sigset.h
+_SIGSET_H_types = 1
+_SIGSET_H_fns = 1
+def __sigmask(sig): return \
+
+def __sigemptyset(set): return \
+
+def __sigfillset(set): return \
+
+def __sigisemptyset(set): return \
+
+def __FDELT(d): return ((d) / __NFDBITS)
+
+FD_SETSIZE = __FD_SETSIZE
+def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
+
+
+# Included from sys/sysmacros.h
+_SYS_SYSMACROS_H = 1
+def major(dev): return ((int)(((dev) >> 8) & 0xff))
+
+def minor(dev): return ((int)((dev) & 0xff))
+
+def major(dev): return (((dev).__val[1] >> 8) & 0xff)
+
+def minor(dev): return ((dev).__val[1] & 0xff)
+
+def major(dev): return (((dev).__val[0] >> 8) & 0xff)
+
+def minor(dev): return ((dev).__val[0] & 0xff)
+
diff --git a/lib-python/2.2/plat-linux2/regen b/lib-python/2.2/plat-linux2/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-linux2/regen
@@ -0,0 +1,8 @@
+#! /bin/sh
+case `uname` in
+Linux*)	;;
+*)	echo Probably not on a Linux system 1>&2
+	exit 1;;
+esac
+set -v
+h2py -i '(u_long)' /usr/include/sys/types.h /usr/include/netinet/in.h /usr/include/dlfcn.h
diff --git a/lib-python/2.2/plat-netbsd1/IN.py b/lib-python/2.2/plat-netbsd1/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-netbsd1/IN.py
@@ -0,0 +1,57 @@
+# Generated by h2py from /usr/include/netinet/in.h
+IPPROTO_IP = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPIP = 4
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_TP = 29
+IPPROTO_EON = 80
+IPPROTO_ENCAP = 98
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+def __IPADDR(x): return ((u_int32_t)(x))
+
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_MAX = 128
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_MAX = 65536
+IN_CLASSC_NSHIFT = 8
+IN_CLASSD_NSHIFT = 28
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+IN_LOOPBACKNET = 127
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_RECVIF = 20
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+IPPROTO_MAXID = (IPPROTO_IDP + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_FORWSRCRT = 5
+IPCTL_DIRECTEDBCAST = 6
+IPCTL_ALLOWSRCRT = 7
+IPCTL_MAXID = 8
+def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
+
diff --git a/lib-python/2.2/plat-netbsd1/regen b/lib-python/2.2/plat-netbsd1/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-netbsd1/regen
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
+python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-next3/regen b/lib-python/2.2/plat-next3/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-next3/regen
@@ -0,0 +1,6 @@
+#! /bin/sh
+set -v
+INCLUDE="/NextDeveloper/Headers;/NextDeveloper/Headers/ansi;/NextDeveloper/Headers/bsd"
+export INCLUDE
+
+python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/bsd/netinet/in.h
diff --git a/lib-python/2.2/plat-riscos/riscosenviron.py b/lib-python/2.2/plat-riscos/riscosenviron.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-riscos/riscosenviron.py
@@ -0,0 +1,43 @@
+"""A more or less complete dictionary like interface for the RISC OS environment."""
+
+import riscos
+
+class _Environ:
+    def __init__(self, initial = None):
+        pass
+    def __repr__(self):
+        return repr(riscos.getenvdict())
+    def __cmp__(self, dict):
+        return cmp(riscos.getenvdict(), dict)
+    def __len__(self):
+        return len(riscos.getenvdict())
+    def __getitem__(self, key):
+        ret = riscos.getenv(key)
+        if ret<>None:
+            return ret
+        else:
+            raise KeyError
+    def __setitem__(self, key, item):
+        riscos.putenv(key, item)
+    def __delitem__(self, key):
+        riscos.delenv(key)
+    def clear(self):
+        # too dangerous on RISC OS
+        pass
+    def copy(self):
+        return riscos.getenvdict()
+    def keys(self): return riscos.getenvdict().keys()
+    def items(self): return riscos.getenvdict().items()
+    def values(self): return riscos.getenvdict().values()
+    def has_key(self, key):
+        value = riscos.getenv(key)
+        return value<>None
+    def update(self, dict):
+        for k, v in dict.items():
+            riscos.putenv(k, v)
+    def get(self, key, failobj=None):
+        value = riscos.getenv(key)
+        if value<>None:
+            return value
+        else:
+            return failobj
diff --git a/lib-python/2.2/plat-riscos/riscospath.py b/lib-python/2.2/plat-riscos/riscospath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-riscos/riscospath.py
@@ -0,0 +1,375 @@
+# Module 'riscospath' -- common operations on RISC OS pathnames.
+
+# contributed by Andrew Clover  ( andrew at oaktree.co.uk )
+
+# The "os.path" name is an alias for this module on RISC OS systems;
+# on other systems (e.g. Mac, Windows), os.path provides the same
+# operations in a manner specific to that platform, and is an alias
+# to another module (e.g. macpath, ntpath).
+
+"""
+Instead of importing this module directly, import os and refer to this module
+as os.path.
+"""
+
+
+# Imports - make an error-generating swi object if the swi module is not
+# available (ie. we are not running on RISC OS Python)
+
+import os, stat, string
+
# When not running on RISC OS the 'swi' extension module is absent; install
# a stub whose every call raises, so importing riscospath still succeeds but
# the SWI-backed functions fail with a clear message.
try:
    import swi
except ImportError:
    class _swi:
        def swi(*a):
            raise AttributeError, 'This function only available under RISC OS'
        block= swi
    swi= _swi()

# Boolean-style constants (bool does not exist in Python 2.2).
[_false, _true]= range(2)

# Leading symbols that make a path absolute: root, URD, library, CSD, PSD.
_roots= ['$', '&', '%', '@', '\\']


# _allowMOSFSNames
# After importing riscospath, set _allowMOSFSNames true if you want the module
# to understand the "-SomeFS-" notation left over from the old BBC Master MOS,
# as well as the standard "SomeFS:" notation. Set this to be fully backwards
# compatible but remember that "-SomeFS-" can also be a perfectly valid file
# name so care must be taken when splitting and joining paths.

_allowMOSFSNames= _false
+
+
+## Path manipulation, RISC OS stylee.
+
def _split(p):
    """
  split filing system name (including special field) and drive specifier from rest
  of path. This is needed by many riscospath functions.

  Returns a 3-tuple (fs, drive, rest) whose concatenation is the input p.
  """
    # Recognise the old "-SomeFS-" notation only when explicitly enabled.
    dash= _allowMOSFSNames and p[:1]=='-'
    if dash:
        q= string.find(p, '-', 1)+1
    else:
        if p[:1]==':':
            q= 0
        else:
            q= string.find(p, ':')+1 # q= index of start of non-FS portion of path
    s= string.find(p, '#')
    if s==-1 or s>q:
        s= q # find end of main FS name, not including special field
    else:
        for c in p[dash:s]:
            if c not in string.ascii_letters:
                q= 0
                break # disallow invalid non-special-field characters in FS name
    r= q
    # A ':' immediately after the FS name introduces a drive specifier that
    # runs up to (and includes) the following '.', or to the end of p.
    if p[q:q+1]==':':
        r= string.find(p, '.', q+1)+1
        if r==0:
            r= len(p) # find end of drive name (if any) following FS name (if any)
    return (p[:q], p[q:r], p[r:])
+
+
def normcase(p):
    """
  Normalize the case of a pathname. This converts to lowercase as the native
  RISC OS filesystems are case-insensitive. However, not all filesystems have
  to be, and there's no simple way to find out what type an FS is.
  """
    # str.lower() replaces the string.lower() function, which has been
    # deprecated in favour of string methods since Python 2.0.
    return p.lower()
+
+
def isabs(p):
    """
  Return whether a path is absolute.  Under RISC OS a filesystem specifier
  alone does not make a path absolute, but a drive name or number does, and
  so does a leading root symbol ($, &, %, @ or \\).  So an "absolute" path
  may still depend on the current device, and a "relative" one may name a
  completely different device.
  """
    fs, drive, rest = _split(p)
    if rest[:1] in _roots:
        return _true
    return drive != ''
+
+
def join(a, *p):
    """
  Join path elements with the '.' directory separator.  An element that is
  absolute, or that names a filesystem or drive, replaces everything joined
  so far.
  """
    result = a
    for part in p:
        fs, drive, rest = _split(part)
        restart = result == '' or fs != '' or drive != ''
        if restart or rest[:1] in _roots:
            # Absolute or FS/drive-changing part: discard what came before.
            result = part
        elif result[-1] == ':':
            # A bare FS/drive prefix: append with no separator.
            result = result + part
        else:
            result = result + '.' + part
    return result
+
+
def split(p):
    """
  Split a path into head (everything up to the last '.') and tail (the rest).
  The FS name must still be dealt with separately since a special field may
  contain '.'.  Returns ('', p) when the path portion contains no '.'.
  """
    (fs, drive, path)= _split(p)
    # str.rfind replaces the string.rfind() function, deprecated in favour
    # of string methods since Python 2.0.
    q= path.rfind('.')
    if q!=-1:
        return (fs+drive+path[:q], path[q+1:])
    return ('', p)
+
+
def splitext(p):
    """
  Split a path into root and extension. This assumes the 'using slash for dot
  and dot for slash with foreign files' convention common in RISC OS is in
  force, so the extension is whatever follows the last '/' of the final
  path component.
  """
    (tail, head)= split(p)
    if '/' in head:
        # str.rfind replaces the deprecated string.rfind() function.
        q= len(head)-head.rfind('/')
        return (p[:-q], p[-q:])
    return (p, '')
+
+
def splitdrive(p):
    """
  Split a pathname into a drive specification (including the FS name) and the
  rest of the path.  The terminating dot of the drive name is included in the
  drive specification.
  """
    parts = _split(p)
    return (parts[0] + parts[1], p)
+
+
def basename(p):
    """
  Return the tail (basename) component of a path.
  """
    head, tail = split(p)
    return tail
+
+
def dirname(p):
    """
  Return the head (dirname) component of a path.
  """
    head, tail = split(p)
    return head
+
+
def commonprefix(ps):
    """
  Return the longest common prefix of all list elements.  Purely string-based;
  it does not respect path-part boundaries.  Returns '' for an empty list.
  """
    if len(ps)==0:
        return ''
    prefix= ps[0]
    for p in ps[1:]:
        # The prefix can never be longer than the shortest string seen so far.
        prefix= prefix[:len(p)]
        for i in range(len(prefix)):
            # '!=' replaces the '<>' operator, which was removed in Python 3.
            if prefix[i] != p[i]:
                prefix= prefix[:i]
                if i==0:
                    return ''
                break
    return prefix
+
+
+## File access functions. Why are we in os.path?
+
def getsize(p):
    """
  Return the size in bytes of a file, as reported by os.stat().
  """
    return os.stat(p)[stat.ST_SIZE]
+
+
def getmtime(p):
    """
  Return the last-modification time of a file, as reported by os.stat().
  """
    return os.stat(p)[stat.ST_MTIME]

# The access time is served by the same implementation here.
getatime= getmtime
+
+
+# RISC OS-specific file access functions
+
def exists(p):
    """
  Test whether a path exists.
  """
    # OS_File reason code 5 returns an object type in R0; zero means the
    # object was not found.  Any SWI error is mapped to "does not exist".
    try:
        return swi.swi('OS_File', '5s;i', p)!=0
    except swi.error:
        return 0
+
+
def isdir(p):
    """
  Is a path a directory? Includes image files.
  """
    # Object types 2 (directory) and 3 (image file) both count as
    # directories here; SWI errors are treated as "not a directory".
    try:
        return swi.swi('OS_File', '5s;i', p) in [2, 3]
    except swi.error:
        return 0
+
+
def isfile(p):
    """
  Test whether a path is a file, including image files.
  """
    # Object types 1 (file) and 3 (image file) both count as files;
    # SWI errors are treated as "not a file".
    try:
        return swi.swi('OS_File', '5s;i', p) in [1, 3]
    except swi.error:
        return 0
+
+
def islink(p):
    """
  RISC OS has no links or mounts.
  """
    # Always false; kept as a function so callers can stay platform-neutral.
    return _false

# Mount points do not exist either, so ismount shares the implementation.
ismount= islink
+
+
+# Same-file testing.
+
+# samefile works on filename comparison since there is no ST_DEV and ST_INO is
+# not reliably unique (esp. directories). First it has to normalise the
+# pathnames, which it can do 'properly' using OS_FSControl since samefile can
+# assume it's running on RISC OS (unlike normpath).
+
def samefile(fa, fb):
    """
  Test whether two pathnames reference the same actual file.

  Both names are canonicalised via OS_FSControl 37 into a 512-byte buffer
  and the resulting strings compared (ST_INO is not reliably unique here;
  see the note above).
  """
    l= 512
    b= swi.block(l)
    swi.swi('OS_FSControl', 'isb..i', 37, fa, b, l)
    fa= b.ctrlstring()
    swi.swi('OS_FSControl', 'isb..i', 37, fb, b, l)
    fb= b.ctrlstring()
    return fa==fb
+
+
def sameopenfile(a, b):
    """
  Test whether two open file descriptors reference the same file, by
  comparing the inode numbers reported by os.fstat().
  """
    ino_a = os.fstat(a)[stat.ST_INO]
    ino_b = os.fstat(b)[stat.ST_INO]
    return ino_a == ino_b
+
+
+## Path canonicalisation
+
+# 'user directory' is taken as meaning the User Root Directory, which is in
+# practice never used, for anything.
+
def expanduser(p):
    """
  Expand a leading '@' (User Root Directory) in a path.

  Returns p unchanged unless the path portion starts with '@'.  The URD of
  the path's filesystem is looked up via OS_FSControl 54, falling back to
  the CSD, then to '$'.
  """
    (fs, drive, path)= _split(p)
    l= 512
    b= swi.block(l)

    if path[:1]!='@':
        return p
    if fs=='':
        # No FS given: ask OS_Args for the current FS number, then
        # OS_FSControl 33 for its name.
        fsno= swi.swi('OS_Args', '00;i')
        swi.swi('OS_FSControl', 'iibi', 33, fsno, b, l)
        fsname= b.ctrlstring()
    else:
        # Strip the '-...-' or trailing-':' decoration from the FS name.
        if fs[:1]=='-':
            fsname= fs[1:-1]
        else:
            fsname= fs[:-1]
        fsname= string.split(fsname, '#', 1)[0] # remove special field from fs
    x= swi.swi('OS_FSControl', 'ib2s.i;.....i', 54, b, fsname, l)
    if x<l:
        urd= b.tostring(0, l-x-1)
    else: # no URD! try CSD
        x= swi.swi('OS_FSControl', 'ib0s.i;.....i', 54, b, fsname, l)
        if x<l:
            urd= b.tostring(0, l-x-1)
        else: # no CSD! use root
            urd= '$'
    return fsname+':'+urd+path[1:]
+
+# Environment variables are in angle brackets.
+
def expandvars(p):
    """
  Expand environment variables using OS_GSTrans.

  Variables appear in angle brackets; the translated result is read back
  from a 512-byte buffer.
  """
    l= 512
    b= swi.block(l)
    return b.tostring(0, swi.swi('OS_GSTrans', 'sbi;..i', p, b, l))
+
+
# Return an absolute path. RISC OS' osfscontrol_canonicalise_path does this among others
# NOTE(review): os.expand is provided by the RISC OS build of the os module
# only — it does not exist on other platforms; confirm before reusing.
abspath = os.expand


# realpath is a no-op on systems without islink support
realpath = abspath
+
+
+# Normalize a path. Only special path element under RISC OS is "^" for "..".
+
def normpath(p):
    """
  Normalize path, eliminating up-directory ^s.

  The path is consumed from the right via split(); '^' elements cancel the
  element to their left, and any surplus '^'s are re-attached at the front.
  """
    (fs, drive, path)= _split(p)
    rhs= ''
    ups= 0
    while path!='':
        (path, el)= split(path)
        if el=='^':
            ups= ups+1
        else:
            if ups>0:
                # This element is cancelled by a '^' seen to its right.
                ups= ups-1
            else:
                if rhs=='':
                    rhs= el
                else:
                    rhs= el+'.'+rhs
    while ups>0:
        # Surplus up-directory markers survive at the front of the result.
        ups= ups-1
        rhs= '^.'+rhs
    return fs+drive+rhs
+
+
+# Directory tree walk.
+# Independent of host system. Why am I in os.path?
+
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""

    # Unreadable directories are silently skipped.
    try:
        names= os.listdir(top)
    except os.error:
        return
    func(arg, top, names)
    for name in names:
        name= join(top, name)
        # islink is always false on RISC OS; the test is kept for symmetry
        # with the other platforms' implementations.
        if isdir(name) and not islink(name):
            walk(name, func, arg)
diff --git a/lib-python/2.2/plat-riscos/rourl2path.py b/lib-python/2.2/plat-riscos/rourl2path.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-riscos/rourl2path.py
@@ -0,0 +1,69 @@
+"""riscos specific module for conversion between pathnames and URLs.
+Based on macurl2path.
+Do not import directly, use urllib instead."""
+
+import string
+import urllib
+import os
+
__all__ = ["url2pathname","pathname2url"]

# Translation table that swaps '/' and '.' (URL separators vs RISC OS ones).
# NOTE(review): the two-argument string.maketrans is Python 2 only.
__slash_dot = string.maketrans("/.", "./")
+
def url2pathname(url):
    """Convert a 'file:' URL to a RISC OS path.

    Raises RuntimeError for any other scheme or for a URL with a non-empty
    host part.  '.' and '..' components are collapsed, a leading '..'
    becomes the RISC OS parent symbol '^', and finally '/' and '.' are
    swapped to match RISC OS conventions.
    """
    tp = urllib.splittype(url)[0]
    # '!=' replaces the '<>' operator (removed in Python 3); the call form
    # of raise works in both Python 2 and 3.
    if tp and tp != 'file':
        raise RuntimeError('Cannot convert non-local URL to pathname')
    # Turn starting /// into /; an empty hostname means the current host.
    if url[:3] == '///':
        url = url[2:]
    elif url[:2] == '//':
        raise RuntimeError('Cannot convert non-local URL to pathname')
    # str.split replaces the deprecated string.split() function.
    components = url.split('/')
    if not components[0]:
        if '$' in components:
            del components[0]
        else:
            components[0] = '$'
    # Remove '.' and embedded '..' components in place.
    i = 0
    while i < len(components):
        if components[i] == '.':
            del components[i]
        elif components[i] == '..' and i > 0 and \
                                  components[i-1] not in ('', '..'):
            del components[i-1:i+1]
            i -= 1
        elif components[i] == '..':
            # A leading '..' maps to the RISC OS parent symbol '^'.
            components[i] = '^'
            i += 1
        elif components[i] == '' and i > 0 and components[i-1] != '':
            del components[i]
        else:
            i += 1
    # Unquote each component and swap '/' with '.' for RISC OS.
    components = [urllib.unquote(c).translate(__slash_dot) for c in components]
    return '.'.join(components)
+
def pathname2url(pathname):
    "Convert a RISC OS path name to a file url."
    # Swap '.' and '/' back, then percent-quote; '/', '$' and ':' are left
    # unescaped.  NOTE(review): urllib.quote is the Python 2 spelling.
    return urllib.quote('///' + pathname.translate(__slash_dot), "/$:")
+
def test():
    # Smoke test: print round-trip conversions for sample URLs and paths.
    # (Backquote repr and the print statement are Python 2 syntax.)
    for url in ["index.html",
                "/SCSI::SCSI4/$/Anwendung/Comm/Apps/!Fresco/Welcome",
                "/SCSI::SCSI4/$/Anwendung/Comm/Apps/../!Fresco/Welcome",
                "../index.html",
                "bar/index.html",
                "/foo/bar/index.html",
                "/foo/bar/",
                "/"]:
        print `url`, '->', `url2pathname(url)`
    print "*******************************************************"
    for path in ["SCSI::SCSI4.$.Anwendung",
                 "PythonApp:Lib",
                 "PythonApp:Lib.rourl2path/py"]:
        print `path`, '->', `pathname2url(path)`

if __name__ == '__main__':
    test()
diff --git a/lib-python/2.2/plat-sunos4/IN.py b/lib-python/2.2/plat-sunos4/IN.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos4/IN.py
@@ -0,0 +1,59 @@
# Generated by h2py from /usr/include/netinet/in.h
# Machine-generated constants (protocol numbers, well-known ports and
# address-class masks); regenerate with the accompanying 'regen' script
# rather than editing by hand.
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_ECHO = 7
IPPORT_DISCARD = 9
IPPORT_SYSTAT = 11
IPPORT_DAYTIME = 13
IPPORT_NETSTAT = 15
IPPORT_FTP = 21
IPPORT_TELNET = 23
IPPORT_SMTP = 25
IPPORT_TIMESERVER = 37
IPPORT_NAMESERVER = 42
IPPORT_WHOIS = 43
IPPORT_MTP = 57
IPPORT_TFTP = 69
IPPORT_RJE = 77
IPPORT_FINGER = 79
IPPORT_TTYLINK = 87
IPPORT_SUPDUP = 95
IPPORT_EXECSERVER = 512
IPPORT_LOGINSERVER = 513
IPPORT_CMDSERVER = 514
IPPORT_EFSSERVER = 520
IPPORT_BIFFUDP = 512
IPPORT_WHOSERVER = 513
IPPORT_ROUTESERVER = 520
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IMPLINK_IP = 155
IMPLINK_LOWEXPER = 156
IMPLINK_HIGHEXPER = 158
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
INADDR_ANY = 0x00000000
INADDR_LOOPBACK = 0x7F000001
INADDR_BROADCAST = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
diff --git a/lib-python/2.2/plat-sunos4/SUNAUDIODEV.py b/lib-python/2.2/plat-sunos4/SUNAUDIODEV.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos4/SUNAUDIODEV.py
@@ -0,0 +1,38 @@
# Symbolic constants for use with sunaudiodev module
# The names are the same as in audioio.h with the leading AUDIO_
# removed.
# Hand-maintained constant table; values mirror the C header.

# Not all values are supported on all releases of SunOS.

# Encoding types, for fields i_encoding and o_encoding

ENCODING_NONE = 0			# no encoding assigned
ENCODING_ULAW = 1			# u-law encoding
ENCODING_ALAW = 2			# A-law encoding
ENCODING_LINEAR = 3			# Linear PCM encoding

# Gain ranges for i_gain, o_gain and monitor_gain

MIN_GAIN = 0				# minimum gain value
MAX_GAIN = 255				# maximum gain value

# Balance values for i_balance and o_balance

LEFT_BALANCE = 0			# left channel only
MID_BALANCE = 32			# equal left/right channel
RIGHT_BALANCE = 64			# right channel only
BALANCE_SHIFT = 3

# Port names for i_port and o_port

PORT_A = 1
PORT_B = 2
PORT_C = 3
PORT_D = 4

SPEAKER = 0x01				# output to built-in speaker
HEADPHONE = 0x02			# output to headphone jack
LINE_OUT = 0x04				# output to line out

MICROPHONE = 0x01			# input from microphone
LINE_IN = 0x02				# input from line in
diff --git a/lib-python/2.2/plat-sunos4/WAIT.py b/lib-python/2.2/plat-sunos4/WAIT.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos4/WAIT.py
@@ -0,0 +1,13 @@
# Generated by h2py from /usr/include/sys/wait.h
# Machine-generated wait(2) option flags and status masks.  Literals such
# as 0004 use the Python 2 octal notation.
WUNTRACED = 0004
WNOHANG = 0100
WEXITED = 0001
WTRAPPED = 0002
WSTOPPED = WUNTRACED
WCONTINUED = 0010
WNOWAIT = 0200
WOPTMASK = (WEXITED|WTRAPPED|WSTOPPED|WCONTINUED|WNOHANG|WNOWAIT)
WSTOPFLG = 0177
WCONTFLG = 0177777
WCOREFLG = 0200
WSIGMASK = 0177
diff --git a/lib-python/2.2/plat-sunos4/regen b/lib-python/2.2/plat-sunos4/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos4/regen
@@ -0,0 +1,9 @@
#! /bin/sh
# Regenerate the SunOS 4 platform constant modules (WAIT.py, IN.py) by
# running h2py over the system headers.  Refuses to run when uname does
# not report a SunOS 4.x kernel.
case `uname -sr` in
'SunOS 4.'*)	;;
*)	echo Probably not on a SunOS 4 system 1>&2
	exit 1;;
esac
set -v
h2py /usr/include/sys/wait.h
h2py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/lib-python/2.2/plat-sunos5/CDIO.py b/lib-python/2.2/plat-sunos5/CDIO.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/CDIO.py
@@ -0,0 +1,73 @@
# Generated by h2py from /usr/include/sys/cdio.h
# Machine-generated CD-ROM ioctl request codes, block sizes and SCSI
# command opcodes; do not edit by hand.
CDROM_LBA = 0x01
CDROM_MSF = 0x02
CDROM_DATA_TRACK = 0x04
CDROM_LEADOUT = 0xAA
CDROM_AUDIO_INVALID = 0x00
CDROM_AUDIO_PLAY = 0x11
CDROM_AUDIO_PAUSED = 0x12
CDROM_AUDIO_COMPLETED = 0x13
CDROM_AUDIO_ERROR = 0x14
CDROM_AUDIO_NO_STATUS = 0x15
CDROM_DA_NO_SUBCODE = 0x00
CDROM_DA_SUBQ = 0x01
CDROM_DA_ALL_SUBCODE = 0x02
CDROM_DA_SUBCODE_ONLY = 0x03
CDROM_XA_DATA = 0x00
CDROM_XA_SECTOR_DATA = 0x01
CDROM_XA_DATA_W_ERROR = 0x02
CDROM_BLK_512 = 512
CDROM_BLK_1024 = 1024
CDROM_BLK_2048 = 2048
CDROM_BLK_2056 = 2056
CDROM_BLK_2336 = 2336
CDROM_BLK_2340 = 2340
CDROM_BLK_2352 = 2352
CDROM_BLK_2368 = 2368
CDROM_BLK_2448 = 2448
CDROM_BLK_2646 = 2646
CDROM_BLK_2647 = 2647
CDROM_BLK_SUBCODE = 96
CDROM_NORMAL_SPEED = 0x00
CDROM_DOUBLE_SPEED = 0x01
CDROM_QUAD_SPEED = 0x03
CDROM_TWELVE_SPEED = 0x0C
CDROM_MAXIMUM_SPEED = 0xff
CDIOC = (0x04 << 8)
CDROMPAUSE = (CDIOC|151)
CDROMRESUME = (CDIOC|152)
CDROMPLAYMSF = (CDIOC|153)
CDROMPLAYTRKIND = (CDIOC|154)
CDROMREADTOCHDR = (CDIOC|155)
CDROMREADTOCENTRY = (CDIOC|156)
CDROMSTOP = (CDIOC|157)
CDROMSTART = (CDIOC|158)
CDROMEJECT = (CDIOC|159)
CDROMVOLCTRL = (CDIOC|160)
CDROMSUBCHNL = (CDIOC|161)
CDROMREADMODE2 = (CDIOC|162)
CDROMREADMODE1 = (CDIOC|163)
CDROMREADOFFSET = (CDIOC|164)
CDROMGBLKMODE = (CDIOC|165)
CDROMSBLKMODE = (CDIOC|166)
CDROMCDDA = (CDIOC|167)
CDROMCDXA = (CDIOC|168)
CDROMSUBCODE = (CDIOC|169)
CDROMGDRVSPEED = (CDIOC|170)
CDROMSDRVSPEED = (CDIOC|171)
SCMD_READ_TOC = 0x43
SCMD_PLAYAUDIO_MSF = 0x47
SCMD_PLAYAUDIO_TI = 0x48
SCMD_PAUSE_RESUME = 0x4B
SCMD_READ_SUBCHANNEL = 0x42
SCMD_PLAYAUDIO10 = 0x45
SCMD_PLAYTRACK_REL10 = 0x49
SCMD_READ_HEADER = 0x44
SCMD_PLAYAUDIO12 = 0xA5
SCMD_PLAYTRACK_REL12 = 0xA9
SCMD_CD_PLAYBACK_CONTROL = 0xC9
SCMD_CD_PLAYBACK_STATUS = 0xC4
SCMD_READ_CDDA = 0xD8
SCMD_READ_CDXA = 0xDB
SCMD_READ_ALL_SUBCODES = 0xDF
CDROM_MODE2_SIZE = 2336
diff --git a/lib-python/2.2/plat-sunos5/DLFCN.py b/lib-python/2.2/plat-sunos5/DLFCN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/DLFCN.py
@@ -0,0 +1,27 @@
# Generated by h2py from /usr/include/dlfcn.h
# Machine-generated dlopen()/dlinfo() mode flags; do not edit by hand.
# The star-import pulls in the sibling generated TYPES module.
from TYPES import *
RTLD_LAZY = 0x00001
RTLD_NOW = 0x00002
RTLD_NOLOAD = 0x00004
RTLD_GLOBAL = 0x00100
RTLD_LOCAL = 0x00000
RTLD_PARENT = 0x00200
RTLD_GROUP = 0x00400
RTLD_WORLD = 0x00800
RTLD_NODELETE = 0x01000
RTLD_CONFGEN = 0x10000
RTLD_REL_RELATIVE = 0x00001
RTLD_REL_EXEC = 0x00002
RTLD_REL_DEPENDS = 0x00004
RTLD_REL_PRELOAD = 0x00008
RTLD_REL_SELF = 0x00010
RTLD_REL_WEAK = 0x00020
RTLD_REL_ALL = 0x00fff
RTLD_MEMORY = 0x01000
RTLD_STRIP = 0x02000
RTLD_NOHEAP = 0x04000
RTLD_CONFSET = 0x10000
RTLD_DI_LMID = 1
RTLD_DI_LINKMAP = 2
RTLD_DI_CONFIGADDR = 3
RTLD_DI_MAX = 3
diff --git a/lib-python/2.2/plat-sunos5/IN.py b/lib-python/2.2/plat-sunos5/IN.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/IN.py
@@ -0,0 +1,1421 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from sys/feature_tests.h
+
+# Included from sys/isa_defs.h
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 8
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+_ALIGNMENT_REQUIRED = 1
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 4
+_DOUBLE_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 4
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 4
+_ALIGNMENT_REQUIRED = 0
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_ALIGNMENT_REQUIRED = 1
+_LONG_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 8
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 8
+_LONG_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+_POSIX_C_SOURCE = 1
+_LARGEFILE64_SOURCE = 1
+_LARGEFILE_SOURCE = 1
+_FILE_OFFSET_BITS = 64
+_FILE_OFFSET_BITS = 32
+_POSIX_C_SOURCE = 199506L
+_POSIX_PTHREAD_SEMANTICS = 1
+_XOPEN_VERSION = 500
+_XOPEN_VERSION = 4
+_XOPEN_VERSION = 3
+from TYPES import *
+
+# Included from sys/stream.h
+
+# Included from sys/vnode.h
+from TYPES import *
+
+# Included from sys/t_lock.h
+
+# Included from sys/machlock.h
+from TYPES import *
+LOCK_HELD_VALUE = 0xff
+def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
+
+def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
+
+CLOCK_LEVEL = 10
+LOCK_LEVEL = 10
+DISP_LEVEL = (LOCK_LEVEL + 1)
+PTR24_LSB = 5
+PTR24_MSB = (PTR24_LSB + 24)
+PTR24_ALIGN = 32
+PTR24_BASE = 0xe0000000
+
+# Included from sys/param.h
+from TYPES import *
+_POSIX_VDISABLE = 0
+MAX_INPUT = 512
+MAX_CANON = 256
+UID_NOBODY = 60001
+GID_NOBODY = UID_NOBODY
+UID_NOACCESS = 60002
+MAX_TASKID = 999999
+MAX_MAXPID = 999999
+DEFAULT_MAXPID = 999999
+DEFAULT_JUMPPID = 100000
+DEFAULT_MAXPID = 30000
+DEFAULT_JUMPPID = 0
+MAXUID = 2147483647
+MAXPROJID = MAXUID
+MAXLINK = 32767
+NMOUNT = 40
+CANBSIZ = 256
+NOFILE = 20
+NGROUPS_UMIN = 0
+NGROUPS_UMAX = 32
+NGROUPS_MAX_DEFAULT = 16
+NZERO = 20
+NULL = 0L
+NULL = 0
+CMASK = 022
+CDLIMIT = (1L<<11)
+NBPS = 0x20000
+NBPSCTR = 512
+UBSIZE = 512
+SCTRSHFT = 9
+SYSNAME = 9
+PREMOTE = 39
+MAXPATHLEN = 1024
+MAXSYMLINKS = 20
+MAXNAMELEN = 256
+NADDR = 13
+PIPE_BUF = 5120
+PIPE_MAX = 5120
+NBBY = 8
+MAXBSIZE = 8192
+DEV_BSIZE = 512
+DEV_BSHIFT = 9
+MAXFRAG = 8
+MAXOFF32_T = 0x7fffffff
+MAXOFF_T = 0x7fffffffffffffffl
+MAXOFFSET_T = 0x7fffffffffffffffl
+MAXOFF_T = 0x7fffffffl
+MAXOFFSET_T = 0x7fffffff
+def btodb(bytes): return   \
+
+def dbtob(db): return   \
+
+def lbtodb(bytes): return   \
+
+def ldbtob(db): return   \
+
+NCARGS32 = 0x100000
+NCARGS64 = 0x200000
+NCARGS = NCARGS64
+NCARGS = NCARGS32
+FSHIFT = 8
+FSCALE = (1<<FSHIFT)
+def DELAY(n): return drv_usecwait(n)
+
+def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
+
+def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
+
+def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
+
+def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
+
+def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
+
+def ptob(x): return ((x) << PAGESHIFT)
+
+def btop(x): return (((x) >> PAGESHIFT))
+
+def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
+
+def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
+
+def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
+
+_AIO_LISTIO_MAX = (4096)
+_AIO_MAX = (-1)
+_MQ_OPEN_MAX = (32)
+_MQ_PRIO_MAX = (32)
+_SEM_NSEMS_MAX = INT_MAX
+_SEM_VALUE_MAX = INT_MAX
+
+# Included from sys/unistd.h
+_CS_PATH = 65
+_CS_LFS_CFLAGS = 68
+_CS_LFS_LDFLAGS = 69
+_CS_LFS_LIBS = 70
+_CS_LFS_LINTFLAGS = 71
+_CS_LFS64_CFLAGS = 72
+_CS_LFS64_LDFLAGS = 73
+_CS_LFS64_LIBS = 74
+_CS_LFS64_LINTFLAGS = 75
+_CS_XBS5_ILP32_OFF32_CFLAGS = 700
+_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
+_CS_XBS5_ILP32_OFF32_LIBS = 702
+_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
+_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
+_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
+_CS_XBS5_ILP32_OFFBIG_LIBS = 707
+_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
+_CS_XBS5_LP64_OFF64_CFLAGS = 709
+_CS_XBS5_LP64_OFF64_LDFLAGS = 710
+_CS_XBS5_LP64_OFF64_LIBS = 711
+_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
+_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
+_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
+_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
+_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
+_SC_ARG_MAX = 1
+_SC_CHILD_MAX = 2
+_SC_CLK_TCK = 3
+_SC_NGROUPS_MAX = 4
+_SC_OPEN_MAX = 5
+_SC_JOB_CONTROL = 6
+_SC_SAVED_IDS = 7
+_SC_VERSION = 8
+_SC_PASS_MAX = 9
+_SC_LOGNAME_MAX = 10
+_SC_PAGESIZE = 11
+_SC_XOPEN_VERSION = 12
+_SC_NPROCESSORS_CONF = 14
+_SC_NPROCESSORS_ONLN = 15
+_SC_STREAM_MAX = 16
+_SC_TZNAME_MAX = 17
+_SC_AIO_LISTIO_MAX = 18
+_SC_AIO_MAX = 19
+_SC_AIO_PRIO_DELTA_MAX = 20
+_SC_ASYNCHRONOUS_IO = 21
+_SC_DELAYTIMER_MAX = 22
+_SC_FSYNC = 23
+_SC_MAPPED_FILES = 24
+_SC_MEMLOCK = 25
+_SC_MEMLOCK_RANGE = 26
+_SC_MEMORY_PROTECTION = 27
+_SC_MESSAGE_PASSING = 28
+_SC_MQ_OPEN_MAX = 29
+_SC_MQ_PRIO_MAX = 30
+_SC_PRIORITIZED_IO = 31
+_SC_PRIORITY_SCHEDULING = 32
+_SC_REALTIME_SIGNALS = 33
+_SC_RTSIG_MAX = 34
+_SC_SEMAPHORES = 35
+_SC_SEM_NSEMS_MAX = 36
+_SC_SEM_VALUE_MAX = 37
+_SC_SHARED_MEMORY_OBJECTS = 38
+_SC_SIGQUEUE_MAX = 39
+_SC_SIGRT_MIN = 40
+_SC_SIGRT_MAX = 41
+_SC_SYNCHRONIZED_IO = 42
+_SC_TIMERS = 43
+_SC_TIMER_MAX = 44
+_SC_2_C_BIND = 45
+_SC_2_C_DEV = 46
+_SC_2_C_VERSION = 47
+_SC_2_FORT_DEV = 48
+_SC_2_FORT_RUN = 49
+_SC_2_LOCALEDEF = 50
+_SC_2_SW_DEV = 51
+_SC_2_UPE = 52
+_SC_2_VERSION = 53
+_SC_BC_BASE_MAX = 54
+_SC_BC_DIM_MAX = 55
+_SC_BC_SCALE_MAX = 56
+_SC_BC_STRING_MAX = 57
+_SC_COLL_WEIGHTS_MAX = 58
+_SC_EXPR_NEST_MAX = 59
+_SC_LINE_MAX = 60
+_SC_RE_DUP_MAX = 61
+_SC_XOPEN_CRYPT = 62
+_SC_XOPEN_ENH_I18N = 63
+_SC_XOPEN_SHM = 64
+_SC_2_CHAR_TERM = 66
+_SC_XOPEN_XCU_VERSION = 67
+_SC_ATEXIT_MAX = 76
+_SC_IOV_MAX = 77
+_SC_XOPEN_UNIX = 78
+_SC_PAGE_SIZE = _SC_PAGESIZE
+_SC_T_IOV_MAX = 79
+_SC_PHYS_PAGES = 500
+_SC_AVPHYS_PAGES = 501
+_SC_COHER_BLKSZ = 503
+_SC_SPLIT_CACHE = 504
+_SC_ICACHE_SZ = 505
+_SC_DCACHE_SZ = 506
+_SC_ICACHE_LINESZ = 507
+_SC_DCACHE_LINESZ = 508
+_SC_ICACHE_BLKSZ = 509
+_SC_DCACHE_BLKSZ = 510
+_SC_DCACHE_TBLKSZ = 511
+_SC_ICACHE_ASSOC = 512
+_SC_DCACHE_ASSOC = 513
+_SC_MAXPID = 514
+_SC_STACK_PROT = 515
+_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
+_SC_GETGR_R_SIZE_MAX = 569
+_SC_GETPW_R_SIZE_MAX = 570
+_SC_LOGIN_NAME_MAX = 571
+_SC_THREAD_KEYS_MAX = 572
+_SC_THREAD_STACK_MIN = 573
+_SC_THREAD_THREADS_MAX = 574
+_SC_TTY_NAME_MAX = 575
+_SC_THREADS = 576
+_SC_THREAD_ATTR_STACKADDR = 577
+_SC_THREAD_ATTR_STACKSIZE = 578
+_SC_THREAD_PRIORITY_SCHEDULING = 579
+_SC_THREAD_PRIO_INHERIT = 580
+_SC_THREAD_PRIO_PROTECT = 581
+_SC_THREAD_PROCESS_SHARED = 582
+_SC_THREAD_SAFE_FUNCTIONS = 583
+_SC_XOPEN_LEGACY = 717
+_SC_XOPEN_REALTIME = 718
+_SC_XOPEN_REALTIME_THREADS = 719
+_SC_XBS5_ILP32_OFF32 = 720
+_SC_XBS5_ILP32_OFFBIG = 721
+_SC_XBS5_LP64_OFF64 = 722
+_SC_XBS5_LPBIG_OFFBIG = 723
+_PC_LINK_MAX = 1
+_PC_MAX_CANON = 2
+_PC_MAX_INPUT = 3
+_PC_NAME_MAX = 4
+_PC_PATH_MAX = 5
+_PC_PIPE_BUF = 6
+_PC_NO_TRUNC = 7
+_PC_VDISABLE = 8
+_PC_CHOWN_RESTRICTED = 9
+_PC_ASYNC_IO = 10
+_PC_PRIO_IO = 11
+_PC_SYNC_IO = 12
+_PC_FILESIZEBITS = 67
+_PC_LAST = 67
+_POSIX_VERSION = 199506L
+_POSIX2_VERSION = 199209L
+_POSIX2_C_VERSION = 199209L
+_XOPEN_XCU_VERSION = 4
+_XOPEN_REALTIME = 1
+_XOPEN_ENH_I18N = 1
+_XOPEN_SHM = 1
+_POSIX2_C_BIND = 1
+_POSIX2_CHAR_TERM = 1
+_POSIX2_LOCALEDEF = 1
+_POSIX2_C_DEV = 1
+_POSIX2_SW_DEV = 1
+_POSIX2_UPE = 1
+
+# Included from sys/mutex.h
+from TYPES import *
+def MUTEX_HELD(x): return (mutex_owned(x))
+
+
+# Included from sys/rwlock.h
+from TYPES import *
+def RW_READ_HELD(x): return (rw_read_held((x)))
+
+def RW_WRITE_HELD(x): return (rw_write_held((x)))
+
+def RW_LOCK_HELD(x): return (rw_lock_held((x)))
+
+def RW_ISWRITER(x): return (rw_iswriter(x))
+
+
+# Included from sys/semaphore.h
+
+# Included from sys/thread.h
+from TYPES import *
+
+# Included from sys/klwp.h
+from TYPES import *
+
+# Included from sys/condvar.h
+from TYPES import *
+
+# Included from sys/time.h
+
+# Included from sys/types32.h
+
+# Included from sys/int_types.h
+TIME32_MAX = INT32_MAX
+TIME32_MIN = INT32_MIN
+def TIMEVAL_OVERFLOW(tv): return \
+
+from TYPES import *
+DST_NONE = 0
+DST_USA = 1
+DST_AUST = 2
+DST_WET = 3
+DST_MET = 4
+DST_EET = 5
+DST_CAN = 6
+DST_GB = 7
+DST_RUM = 8
+DST_TUR = 9
+DST_AUSTALT = 10
+ITIMER_REAL = 0
+ITIMER_VIRTUAL = 1
+ITIMER_PROF = 2
+ITIMER_REALPROF = 3
+def ITIMERVAL_OVERFLOW(itv): return \
+
+SEC = 1
+MILLISEC = 1000
+MICROSEC = 1000000
+NANOSEC = 1000000000
+
+# Included from sys/time_impl.h
+def TIMESPEC_OVERFLOW(ts): return \
+
+def ITIMERSPEC_OVERFLOW(it): return \
+
+__CLOCK_REALTIME0 = 0
+CLOCK_VIRTUAL = 1
+CLOCK_PROF = 2
+__CLOCK_REALTIME3 = 3
+CLOCK_HIGHRES = 4
+CLOCK_MAX = 5
+CLOCK_REALTIME = __CLOCK_REALTIME3
+CLOCK_REALTIME = __CLOCK_REALTIME0
+TIMER_RELTIME = 0x0
+TIMER_ABSTIME = 0x1
+def TICK_TO_SEC(tick): return ((tick) / hz)
+
+def SEC_TO_TICK(sec): return ((sec) * hz)
+
+def TICK_TO_MSEC(tick): return \
+
+def MSEC_TO_TICK(msec): return \
+
+def MSEC_TO_TICK_ROUNDUP(msec): return \
+
+def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
+
+def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
+
+def USEC_TO_TICK_ROUNDUP(usec): return \
+
+def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
+
+def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
+
+def NSEC_TO_TICK_ROUNDUP(nsec): return \
+
+def TIMEVAL_TO_TICK(tvp): return \
+
+def TIMESTRUC_TO_TICK(tsp): return \
+
+
+# Included from time.h
+from TYPES import *
+
+# Included from iso/time_iso.h
+NULL = 0L
+NULL = 0
+CLOCKS_PER_SEC = 1000000
+
+# Included from sys/select.h
+FD_SETSIZE = 65536
+FD_SETSIZE = 1024
+_NBBY = 8
+NBBY = _NBBY
+def FD_ZERO(p): return bzero((p), sizeof (*(p)))
+
+
+# Included from sys/signal.h
+
+# Included from sys/iso/signal_iso.h
+SIGHUP = 1
+SIGINT = 2
+SIGQUIT = 3
+SIGILL = 4
+SIGTRAP = 5
+SIGIOT = 6
+SIGABRT = 6
+SIGEMT = 7
+SIGFPE = 8
+SIGKILL = 9
+SIGBUS = 10
+SIGSEGV = 11
+SIGSYS = 12
+SIGPIPE = 13
+SIGALRM = 14
+SIGTERM = 15
+SIGUSR1 = 16
+SIGUSR2 = 17
+SIGCLD = 18
+SIGCHLD = 18
+SIGPWR = 19
+SIGWINCH = 20
+SIGURG = 21
+SIGPOLL = 22
+SIGIO = SIGPOLL
+SIGSTOP = 23
+SIGTSTP = 24
+SIGCONT = 25
+SIGTTIN = 26
+SIGTTOU = 27
+SIGVTALRM = 28
+SIGPROF = 29
+SIGXCPU = 30
+SIGXFSZ = 31
+SIGWAITING = 32
+SIGLWP = 33
+SIGFREEZE = 34
+SIGTHAW = 35
+SIGCANCEL = 36
+SIGLOST = 37
+_SIGRTMIN = 38
+_SIGRTMAX = 45
+SIG_BLOCK = 1
+SIG_UNBLOCK = 2
+SIG_SETMASK = 3
+SIGNO_MASK = 0xFF
+SIGDEFER = 0x100
+SIGHOLD = 0x200
+SIGRELSE = 0x400
+SIGIGNORE = 0x800
+SIGPAUSE = 0x1000
+
+# Included from sys/siginfo.h
+from TYPES import *
+SIGEV_NONE = 1
+SIGEV_SIGNAL = 2
+SIGEV_THREAD = 3
+SI_NOINFO = 32767
+SI_USER = 0
+SI_LWP = (-1)
+SI_QUEUE = (-2)
+SI_TIMER = (-3)
+SI_ASYNCIO = (-4)
+SI_MESGQ = (-5)
+
+# Included from sys/machsig.h
+ILL_ILLOPC = 1
+ILL_ILLOPN = 2
+ILL_ILLADR = 3
+ILL_ILLTRP = 4
+ILL_PRVOPC = 5
+ILL_PRVREG = 6
+ILL_COPROC = 7
+ILL_BADSTK = 8
+NSIGILL = 8
+EMT_TAGOVF = 1
+EMT_CPCOVF = 2
+NSIGEMT = 2
+FPE_INTDIV = 1
+FPE_INTOVF = 2
+FPE_FLTDIV = 3
+FPE_FLTOVF = 4
+FPE_FLTUND = 5
+FPE_FLTRES = 6
+FPE_FLTINV = 7
+FPE_FLTSUB = 8
+NSIGFPE = 8
+SEGV_MAPERR = 1
+SEGV_ACCERR = 2
+NSIGSEGV = 2
+BUS_ADRALN = 1
+BUS_ADRERR = 2
+BUS_OBJERR = 3
+NSIGBUS = 3
+TRAP_BRKPT = 1
+TRAP_TRACE = 2
+TRAP_RWATCH = 3
+TRAP_WWATCH = 4
+TRAP_XWATCH = 5
+NSIGTRAP = 5
+CLD_EXITED = 1
+CLD_KILLED = 2
+CLD_DUMPED = 3
+CLD_TRAPPED = 4
+CLD_STOPPED = 5
+CLD_CONTINUED = 6
+NSIGCLD = 6
+POLL_IN = 1
+POLL_OUT = 2
+POLL_MSG = 3
+POLL_ERR = 4
+POLL_PRI = 5
+POLL_HUP = 6
+NSIGPOLL = 6
+PROF_SIG = 1
+NSIGPROF = 1
+SI_MAXSZ = 256
+SI_MAXSZ = 128
+
+# Included from sys/time_std_impl.h
+from TYPES import *
+SI32_MAXSZ = 128
+def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
+
+SA_NOCLDSTOP = 0x00020000
+SA_ONSTACK = 0x00000001
+SA_RESETHAND = 0x00000002
+SA_RESTART = 0x00000004
+SA_SIGINFO = 0x00000008
+SA_NODEFER = 0x00000010
+SA_NOCLDWAIT = 0x00010000
+SA_WAITSIG = 0x00010000
+NSIG = 46
+MAXSIG = 45
+S_SIGNAL = 1
+S_SIGSET = 2
+S_SIGACTION = 3
+S_NONE = 4
+MINSIGSTKSZ = 2048
+SIGSTKSZ = 8192
+SS_ONSTACK = 0x00000001
+SS_DISABLE = 0x00000002
+SN_PROC = 1
+SN_CANCEL = 2
+SN_SEND = 3
+
+# Included from sys/ucontext.h
+from TYPES import *
+
+# Included from sys/regset.h
+REG_CCR = (0)
+REG_PSR = (0)
+REG_PSR = (0)
+REG_PC = (1)
+REG_nPC = (2)
+REG_Y = (3)
+REG_G1 = (4)
+REG_G2 = (5)
+REG_G3 = (6)
+REG_G4 = (7)
+REG_G5 = (8)
+REG_G6 = (9)
+REG_G7 = (10)
+REG_O0 = (11)
+REG_O1 = (12)
+REG_O2 = (13)
+REG_O3 = (14)
+REG_O4 = (15)
+REG_O5 = (16)
+REG_O6 = (17)
+REG_O7 = (18)
+REG_ASI = (19)
+REG_FPRS = (20)
+REG_PS = REG_PSR
+REG_SP = REG_O6
+REG_R0 = REG_O0
+REG_R1 = REG_O1
+_NGREG = 21
+_NGREG = 19
+NGREG = _NGREG
+_NGREG32 = 19
+_NGREG64 = 21
+SPARC_MAXREGWINDOW = 31
+MAXFPQ = 16
+XRS_ID = 0x78727300
+
+# Included from v7/sys/privregs.h
+
+# Included from v7/sys/psr.h
+PSR_CWP = 0x0000001F
+PSR_ET = 0x00000020
+PSR_PS = 0x00000040
+PSR_S = 0x00000080
+PSR_PIL = 0x00000F00
+PSR_EF = 0x00001000
+PSR_EC = 0x00002000
+PSR_RSV = 0x000FC000
+PSR_ICC = 0x00F00000
+PSR_C = 0x00100000
+PSR_V = 0x00200000
+PSR_Z = 0x00400000
+PSR_N = 0x00800000
+PSR_VER = 0x0F000000
+PSR_IMPL = 0xF0000000
+PSL_ALLCC = PSR_ICC
+PSL_USER = (PSR_S)
+PSL_USERMASK = (PSR_ICC)
+PSL_UBITS = (PSR_ICC|PSR_EF)
+def USERMODE(ps): return (((ps) & PSR_PS) == 0)
+
+
+# Included from sys/fsr.h
+FSR_CEXC = 0x0000001f
+FSR_AEXC = 0x000003e0
+FSR_FCC = 0x00000c00
+FSR_PR = 0x00001000
+FSR_QNE = 0x00002000
+FSR_FTT = 0x0001c000
+FSR_VER = 0x000e0000
+FSR_TEM = 0x0f800000
+FSR_RP = 0x30000000
+FSR_RD = 0xc0000000
+FSR_VER_SHIFT = 17
+FSR_FCC1 = 0x00000003
+FSR_FCC2 = 0x0000000C
+FSR_FCC3 = 0x00000030
+FSR_CEXC_NX = 0x00000001
+FSR_CEXC_DZ = 0x00000002
+FSR_CEXC_UF = 0x00000004
+FSR_CEXC_OF = 0x00000008
+FSR_CEXC_NV = 0x00000010
+FSR_AEXC_NX = (0x1 << 5)
+FSR_AEXC_DZ = (0x2 << 5)
+FSR_AEXC_UF = (0x4 << 5)
+FSR_AEXC_OF = (0x8 << 5)
+FSR_AEXC_NV = (0x10 << 5)
+FTT_NONE = 0
+FTT_IEEE = 1
+FTT_UNFIN = 2
+FTT_UNIMP = 3
+FTT_SEQ = 4
+FTT_ALIGN = 5
+FTT_DFAULT = 6
+FSR_FTT_SHIFT = 14
+FSR_FTT_IEEE = (FTT_IEEE   << FSR_FTT_SHIFT)
+FSR_FTT_UNFIN = (FTT_UNFIN  << FSR_FTT_SHIFT)
+FSR_FTT_UNIMP = (FTT_UNIMP  << FSR_FTT_SHIFT)
+FSR_FTT_SEQ = (FTT_SEQ    << FSR_FTT_SHIFT)
+FSR_FTT_ALIGN = (FTT_ALIGN  << FSR_FTT_SHIFT)
+FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
+FSR_TEM_NX = (0x1 << 23)
+FSR_TEM_DZ = (0x2 << 23)
+FSR_TEM_UF = (0x4 << 23)
+FSR_TEM_OF = (0x8 << 23)
+FSR_TEM_NV = (0x10 << 23)
+RP_DBLEXT = 0
+RP_SINGLE = 1
+RP_DOUBLE = 2
+RP_RESERVED = 3
+RD_NEAR = 0
+RD_ZER0 = 1
+RD_POSINF = 2
+RD_NEGINF = 3
+FPRS_DL = 0x1
+FPRS_DU = 0x2
+FPRS_FEF = 0x4
+PIL_MAX = 0xf
+def SAVE_GLOBALS(RP): return \
+
+def RESTORE_GLOBALS(RP): return \
+
+def SAVE_OUTS(RP): return \
+
+def RESTORE_OUTS(RP): return \
+
+def SAVE_WINDOW(SBP): return \
+
+def RESTORE_WINDOW(SBP): return \
+
+def STORE_FPREGS(FP): return \
+
+def LOAD_FPREGS(FP): return \
+
+_SPARC_MAXREGWINDOW = 31
+_XRS_ID = 0x78727300
+GETCONTEXT = 0
+SETCONTEXT = 1
+UC_SIGMASK = 001
+UC_STACK = 002
+UC_CPU = 004
+UC_MAU = 010
+UC_FPU = UC_MAU
+UC_INTR = 020
+UC_ASR = 040
+UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
+UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
+_SIGQUEUE_MAX = 32
+_SIGNOTIFY_MAX = 32
+
+# Included from sys/pcb.h
+INSTR_VALID = 0x02
+NORMAL_STEP = 0x04
+WATCH_STEP = 0x08
+CPC_OVERFLOW = 0x10
+ASYNC_HWERR = 0x20
+STEP_NONE = 0
+STEP_REQUESTED = 1
+STEP_ACTIVE = 2
+STEP_WASACTIVE = 3
+
+# Included from sys/msacct.h
+LMS_USER = 0
+LMS_SYSTEM = 1
+LMS_TRAP = 2
+LMS_TFAULT = 3
+LMS_DFAULT = 4
+LMS_KFAULT = 5
+LMS_USER_LOCK = 6
+LMS_SLEEP = 7
+LMS_WAIT_CPU = 8
+LMS_STOPPED = 9
+NMSTATES = 10
+
+# Included from sys/lwp.h
+
+# Included from sys/synch.h
+from TYPES import *
+USYNC_THREAD = 0x00
+USYNC_PROCESS = 0x01
+LOCK_NORMAL = 0x00
+LOCK_ERRORCHECK = 0x02
+LOCK_RECURSIVE = 0x04
+USYNC_PROCESS_ROBUST = 0x08
+LOCK_PRIO_NONE = 0x00
+LOCK_PRIO_INHERIT = 0x10
+LOCK_PRIO_PROTECT = 0x20
+LOCK_STALL_NP = 0x00
+LOCK_ROBUST_NP = 0x40
+LOCK_OWNERDEAD = 0x1
+LOCK_NOTRECOVERABLE = 0x2
+LOCK_INITED = 0x4
+LOCK_UNMAPPED = 0x8
+LWP_DETACHED = 0x00000040
+LWP_SUSPENDED = 0x00000080
+__LWP_ASLWP = 0x00000100
+MAXSYSARGS = 8
+NORMALRETURN = 0
+JUSTRETURN = 1
+LWP_USER = 0x01
+LWP_SYS = 0x02
+TS_FREE = 0x00
+TS_SLEEP = 0x01
+TS_RUN = 0x02
+TS_ONPROC = 0x04
+TS_ZOMB = 0x08
+TS_STOPPED = 0x10
+T_INTR_THREAD = 0x0001
+T_WAKEABLE = 0x0002
+T_TOMASK = 0x0004
+T_TALLOCSTK = 0x0008
+T_WOULDBLOCK = 0x0020
+T_DONTBLOCK = 0x0040
+T_DONTPEND = 0x0080
+T_SYS_PROF = 0x0100
+T_WAITCVSEM = 0x0200
+T_WATCHPT = 0x0400
+T_PANIC = 0x0800
+TP_HOLDLWP = 0x0002
+TP_TWAIT = 0x0004
+TP_LWPEXIT = 0x0008
+TP_PRSTOP = 0x0010
+TP_CHKPT = 0x0020
+TP_EXITLWP = 0x0040
+TP_PRVSTOP = 0x0080
+TP_MSACCT = 0x0100
+TP_STOPPING = 0x0200
+TP_WATCHPT = 0x0400
+TP_PAUSE = 0x0800
+TP_CHANGEBIND = 0x1000
+TS_LOAD = 0x0001
+TS_DONT_SWAP = 0x0002
+TS_SWAPENQ = 0x0004
+TS_ON_SWAPQ = 0x0008
+TS_CSTART = 0x0100
+TS_UNPAUSE = 0x0200
+TS_XSTART = 0x0400
+TS_PSTART = 0x0800
+TS_RESUME = 0x1000
+TS_CREATE = 0x2000
+TS_ALLSTART = \
+	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
+def CPR_VSTOPPED(t): return \
+
+def THREAD_TRANSITION(tp): return thread_transition(tp);
+
+def THREAD_STOP(tp): return \
+
+def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
+
+def SEMA_HELD(x): return (sema_held((x)))
+
+NO_LOCKS_HELD = 1
+NO_COMPETING_THREADS = 1
+
+# Included from sys/cred.h
+
+# Included from sys/uio.h
+from TYPES import *
+
+# Included from sys/resource.h
+from TYPES import *
+PRIO_PROCESS = 0
+PRIO_PGRP = 1
+PRIO_USER = 2
+RLIMIT_CPU = 0
+RLIMIT_FSIZE = 1
+RLIMIT_DATA = 2
+RLIMIT_STACK = 3
+RLIMIT_CORE = 4
+RLIMIT_NOFILE = 5
+RLIMIT_VMEM = 6
+RLIMIT_AS = RLIMIT_VMEM
+RLIM_NLIMITS = 7
+RLIM_INFINITY = (-3l)
+RLIM_SAVED_MAX = (-2l)
+RLIM_SAVED_CUR = (-1l)
+RLIM_INFINITY = 0x7fffffff
+RLIM_SAVED_MAX = 0x7ffffffe
+RLIM_SAVED_CUR = 0x7ffffffd
+RLIM32_INFINITY = 0x7fffffff
+RLIM32_SAVED_MAX = 0x7ffffffe
+RLIM32_SAVED_CUR = 0x7ffffffd
+
+# Included from sys/model.h
+
+# Included from sys/debug.h
+def ASSERT64(x): return ASSERT(x)
+
+def ASSERT32(x): return ASSERT(x)
+
+DATAMODEL_MASK = 0x0FF00000
+DATAMODEL_ILP32 = 0x00100000
+DATAMODEL_LP64 = 0x00200000
+DATAMODEL_NONE = 0
+DATAMODEL_NATIVE = DATAMODEL_LP64
+DATAMODEL_NATIVE = DATAMODEL_ILP32
+def STRUCT_SIZE(handle): return \
+
+def STRUCT_BUF(handle): return ((handle).ptr.m64)
+
+def SIZEOF_PTR(umodel): return \
+
+def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
+
+def STRUCT_BUF(handle): return ((handle).ptr)
+
+def SIZEOF_PTR(umodel): return sizeof (caddr_t)
+
+def lwp_getdatamodel(t): return DATAMODEL_ILP32
+
+RUSAGE_SELF = 0
+RUSAGE_CHILDREN = -1
+
+# Included from vm/seg_enum.h
+
+# Included from sys/buf.h
+
+# Included from sys/kstat.h
+from TYPES import *
+KSTAT_STRLEN = 31
+def KSTAT_ENTER(k): return \
+
+def KSTAT_EXIT(k): return \
+
+KSTAT_TYPE_RAW = 0
+KSTAT_TYPE_NAMED = 1
+KSTAT_TYPE_INTR = 2
+KSTAT_TYPE_IO = 3
+KSTAT_TYPE_TIMER = 4
+KSTAT_NUM_TYPES = 5
+KSTAT_FLAG_VIRTUAL = 0x01
+KSTAT_FLAG_VAR_SIZE = 0x02
+KSTAT_FLAG_WRITABLE = 0x04
+KSTAT_FLAG_PERSISTENT = 0x08
+KSTAT_FLAG_DORMANT = 0x10
+KSTAT_FLAG_INVALID = 0x20
+KSTAT_READ = 0
+KSTAT_WRITE = 1
+KSTAT_DATA_CHAR = 0
+KSTAT_DATA_INT32 = 1
+KSTAT_DATA_UINT32 = 2
+KSTAT_DATA_INT64 = 3
+KSTAT_DATA_UINT64 = 4
+KSTAT_DATA_LONG = KSTAT_DATA_INT32
+KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
+KSTAT_DATA_LONG = KSTAT_DATA_INT64
+KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
+KSTAT_DATA_LONG = 7
+KSTAT_DATA_ULONG = 8
+KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
+KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
+KSTAT_DATA_FLOAT = 5
+KSTAT_DATA_DOUBLE = 6
+KSTAT_INTR_HARD = 0
+KSTAT_INTR_SOFT = 1
+KSTAT_INTR_WATCHDOG = 2
+KSTAT_INTR_SPURIOUS = 3
+KSTAT_INTR_MULTSVC = 4
+KSTAT_NUM_INTRS = 5
+B_BUSY = 0x0001
+B_DONE = 0x0002
+B_ERROR = 0x0004
+B_PAGEIO = 0x0010
+B_PHYS = 0x0020
+B_READ = 0x0040
+B_WRITE = 0x0100
+B_KERNBUF = 0x0008
+B_WANTED = 0x0080
+B_AGE = 0x000200
+B_ASYNC = 0x000400
+B_DELWRI = 0x000800
+B_STALE = 0x001000
+B_DONTNEED = 0x002000
+B_REMAPPED = 0x004000
+B_FREE = 0x008000
+B_INVAL = 0x010000
+B_FORCE = 0x020000
+B_HEAD = 0x040000
+B_NOCACHE = 0x080000
+B_TRUNC = 0x100000
+B_SHADOW = 0x200000
+B_RETRYWRI = 0x400000
+def notavail(bp): return \
+
+def BWRITE(bp): return \
+
+def BWRITE2(bp): return \
+
+VROOT = 0x01
+VNOCACHE = 0x02
+VNOMAP = 0x04
+VDUP = 0x08
+VNOSWAP = 0x10
+VNOMOUNT = 0x20
+VISSWAP = 0x40
+VSWAPLIKE = 0x80
+VVFSLOCK = 0x100
+VVFSWAIT = 0x200
+VVMLOCK = 0x400
+VDIROPEN = 0x800
+VVMEXEC = 0x1000
+VPXFS = 0x2000
+AT_TYPE = 0x0001
+AT_MODE = 0x0002
+AT_UID = 0x0004
+AT_GID = 0x0008
+AT_FSID = 0x0010
+AT_NODEID = 0x0020
+AT_NLINK = 0x0040
+AT_SIZE = 0x0080
+AT_ATIME = 0x0100
+AT_MTIME = 0x0200
+AT_CTIME = 0x0400
+AT_RDEV = 0x0800
+AT_BLKSIZE = 0x1000
+AT_NBLOCKS = 0x2000
+AT_VCODE = 0x4000
+AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
+			AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
+			AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
+AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
+			AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
+AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
+AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
+			AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
+VSUID = 04000
+VSGID = 02000
+VSVTX = 01000
+VREAD = 00400
+VWRITE = 00200
+VEXEC = 00100
+MODEMASK = 07777
+PERMMASK = 00777
+def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
+
+VSA_ACL = 0x0001
+VSA_ACLCNT = 0x0002
+VSA_DFACL = 0x0004
+VSA_DFACLCNT = 0x0008
+LOOKUP_DIR = 0x01
+DUMP_ALLOC = 0
+DUMP_FREE = 1
+DUMP_SCAN = 2
+ATTR_UTIME = 0x01
+ATTR_EXEC = 0x02
+ATTR_COMM = 0x04
+ATTR_HINT = 0x08
+ATTR_REAL = 0x10
+
+# Included from sys/poll.h
+POLLIN = 0x0001
+POLLPRI = 0x0002
+POLLOUT = 0x0004
+POLLRDNORM = 0x0040
+POLLWRNORM = POLLOUT
+POLLRDBAND = 0x0080
+POLLWRBAND = 0x0100
+POLLNORM = POLLRDNORM
+POLLERR = 0x0008
+POLLHUP = 0x0010
+POLLNVAL = 0x0020
+POLLREMOVE = 0x0800
+POLLRDDATA = 0x0200
+POLLNOERR = 0x0400
+POLLCLOSED = 0x8000
+
+# Included from sys/strmdep.h
+def str_aligned(X): return (((ulong_t)(X) & (sizeof (long) - 1)) == 0)
+
+
+# Included from sys/strft.h
+tdelta_t_sz = 12
+FTEV_MASK = 0x1FFF
+FTEV_ISWR = 0x8000
+FTEV_CS = 0x4000
+FTEV_PS = 0x2000
+FTEV_QMASK = 0x1F00
+FTEV_ALLOCMASK = 0x1FF8
+FTEV_ALLOCB = 0x0000
+FTEV_ESBALLOC = 0x0001
+FTEV_DESBALLOC = 0x0002
+FTEV_ESBALLOCA = 0x0003
+FTEV_DESBALLOCA = 0x0004
+FTEV_ALLOCBIG = 0x0005
+FTEV_ALLOCBW = 0x0006
+FTEV_FREEB = 0x0008
+FTEV_DUPB = 0x0009
+FTEV_COPYB = 0x000A
+FTEV_CALLER = 0x000F
+FTEV_PUT = 0x0100
+FTEV_FSYNCQ = 0x0103
+FTEV_DSYNCQ = 0x0104
+FTEV_PUTQ = 0x0105
+FTEV_GETQ = 0x0106
+FTEV_RMVQ = 0x0107
+FTEV_INSQ = 0x0108
+FTEV_PUTBQ = 0x0109
+FTEV_FLUSHQ = 0x010A
+FTEV_REPLYQ = 0x010B
+FTEV_PUTNEXT = 0x010D
+FTEV_RWNEXT = 0x010E
+FTEV_QWINNER = 0x010F
+FTEV_GEWRITE = 0x0101
+def FTFLW_HASH(h): return (((unsigned)(h))%ftflw_hash_sz)
+
+FTBLK_EVNTS = 0x9
+QENAB = 0x00000001
+QWANTR = 0x00000002
+QWANTW = 0x00000004
+QFULL = 0x00000008
+QREADR = 0x00000010
+QUSE = 0x00000020
+QNOENB = 0x00000040
+QBACK = 0x00000100
+QHLIST = 0x00000200
+QPAIR = 0x00000800
+QPERQ = 0x00001000
+QPERMOD = 0x00002000
+QMTSAFE = 0x00004000
+QMTOUTPERIM = 0x00008000
+QMT_TYPEMASK = (QPAIR|QPERQ|QPERMOD|QMTSAFE|QMTOUTPERIM)
+QINSERVICE = 0x00010000
+QWCLOSE = 0x00020000
+QEND = 0x00040000
+QWANTWSYNC = 0x00080000
+QSYNCSTR = 0x00100000
+QISDRV = 0x00200000
+QHOT = 0x00400000
+QNEXTHOT = 0x00800000
+_QINSERTING = 0x04000000
+_QREMOVING = 0x08000000
+Q_SQQUEUED = 0x01
+Q_SQDRAINING = 0x02
+QB_FULL = 0x01
+QB_WANTW = 0x02
+QB_BACK = 0x04
+NBAND = 256
+STRUIOT_NONE = -1
+STRUIOT_DONTCARE = 0
+STRUIOT_STANDARD = 1
+STRUIOT_IP = 2
+DBLK_REFMIN = 0x01
+STRUIO_SPEC = 0x01
+STRUIO_DONE = 0x02
+STRUIO_IP = 0x04
+STRUIO_ZC = 0x08
+STRUIO_ICK = 0x10
+MSGMARK = 0x01
+MSGNOLOOP = 0x02
+MSGDELIM = 0x04
+MSGNOGET = 0x08
+MSGMARKNEXT = 0x10
+MSGNOTMARKNEXT = 0x20
+M_DATA = 0x00
+M_PROTO = 0x01
+M_BREAK = 0x08
+M_PASSFP = 0x09
+M_EVENT = 0x0a
+M_SIG = 0x0b
+M_DELAY = 0x0c
+M_CTL = 0x0d
+M_IOCTL = 0x0e
+M_SETOPTS = 0x10
+M_RSE = 0x11
+M_IOCACK = 0x81
+M_IOCNAK = 0x82
+M_PCPROTO = 0x83
+M_PCSIG = 0x84
+M_READ = 0x85
+M_FLUSH = 0x86
+M_STOP = 0x87
+M_START = 0x88
+M_HANGUP = 0x89
+M_ERROR = 0x8a
+M_COPYIN = 0x8b
+M_COPYOUT = 0x8c
+M_IOCDATA = 0x8d
+M_PCRSE = 0x8e
+M_STOPI = 0x8f
+M_STARTI = 0x90
+M_PCEVENT = 0x91
+M_UNHANGUP = 0x92
+QNORM = 0x00
+QPCTL = 0x80
+IOC_MODELS = DATAMODEL_MASK
+IOC_ILP32 = DATAMODEL_ILP32
+IOC_LP64 = DATAMODEL_LP64
+IOC_NATIVE = DATAMODEL_NATIVE
+IOC_NONE = DATAMODEL_NONE
+STRCANON = 0x01
+RECOPY = 0x02
+SO_ALL = 0x003f
+SO_READOPT = 0x0001
+SO_WROFF = 0x0002
+SO_MINPSZ = 0x0004
+SO_MAXPSZ = 0x0008
+SO_HIWAT = 0x0010
+SO_LOWAT = 0x0020
+SO_MREADON = 0x0040
+SO_MREADOFF = 0x0080
+SO_NDELON = 0x0100
+SO_NDELOFF = 0x0200
+SO_ISTTY = 0x0400
+SO_ISNTTY = 0x0800
+SO_TOSTOP = 0x1000
+SO_TONSTOP = 0x2000
+SO_BAND = 0x4000
+SO_DELIM = 0x8000
+SO_NODELIM = 0x010000
+SO_STRHOLD = 0x020000
+SO_ERROPT = 0x040000
+SO_COPYOPT = 0x080000
+SO_MAXBLK = 0x100000
+DEF_IOV_MAX = 16
+INFOD_FIRSTBYTES = 0x02
+INFOD_BYTES = 0x04
+INFOD_COUNT = 0x08
+INFOD_COPYOUT = 0x10
+MODOPEN = 0x1
+CLONEOPEN = 0x2
+CONSOPEN = 0x4
+OPENFAIL = -1
+BPRI_LO = 1
+BPRI_MED = 2
+BPRI_HI = 3
+BPRI_FT = 4
+INFPSZ = -1
+FLUSHALL = 1
+FLUSHDATA = 0
+STRHIGH = 5120
+STRLOW = 1024
+MAXIOCBSZ = 1024
+PERIM_INNER = 1
+PERIM_OUTER = 2
+def datamsg(type): return \
+
+def straln(a): return (caddr_t)((intptr_t)(a) & ~(sizeof (int)-1))
+
+
+# Included from sys/byteorder.h
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_ENCAP = 4
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_IPV6 = 41
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_RSVP = 46
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_HELLO = 63
+IPPROTO_ND = 77
+IPPROTO_EON = 80
+IPPROTO_PIM = 103
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_ECHO = 7
+IPPORT_DISCARD = 9
+IPPORT_SYSTAT = 11
+IPPORT_DAYTIME = 13
+IPPORT_NETSTAT = 15
+IPPORT_FTP = 21
+IPPORT_TELNET = 23
+IPPORT_SMTP = 25
+IPPORT_TIMESERVER = 37
+IPPORT_NAMESERVER = 42
+IPPORT_WHOIS = 43
+IPPORT_MTP = 57
+IPPORT_BOOTPS = 67
+IPPORT_BOOTPC = 68
+IPPORT_TFTP = 69
+IPPORT_RJE = 77
+IPPORT_FINGER = 79
+IPPORT_TTYLINK = 87
+IPPORT_SUPDUP = 95
+IPPORT_EXECSERVER = 512
+IPPORT_LOGINSERVER = 513
+IPPORT_CMDSERVER = 514
+IPPORT_EFSSERVER = 520
+IPPORT_BIFFUDP = 512
+IPPORT_WHOSERVER = 513
+IPPORT_ROUTESERVER = 520
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 5000
+IMPLINK_IP = 155
+IMPLINK_LOWEXPER = 156
+IMPLINK_HIGHEXPER = 158
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_MAX = 128
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_MAX = 65536
+IN_CLASSC_NSHIFT = 8
+IN_CLASSD_NSHIFT = 28
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+IN_LOOPBACKNET = 127
+def IN_SET_LOOPBACK_ADDR(a): return \
+
+def IN6_IS_ADDR_UNSPECIFIED(addr): return \
+
+def IN6_IS_ADDR_LOOPBACK(addr): return \
+
+def IN6_IS_ADDR_LOOPBACK(addr): return \
+
+def IN6_IS_ADDR_MULTICAST(addr): return \
+
+def IN6_IS_ADDR_MULTICAST(addr): return \
+
+def IN6_IS_ADDR_LINKLOCAL(addr): return \
+
+def IN6_IS_ADDR_LINKLOCAL(addr): return \
+
+def IN6_IS_ADDR_SITELOCAL(addr): return \
+
+def IN6_IS_ADDR_SITELOCAL(addr): return \
+
+def IN6_IS_ADDR_V4MAPPED(addr): return \
+
+def IN6_IS_ADDR_V4MAPPED(addr): return \
+
+def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
+
+def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
+
+def IN6_IS_ADDR_V4COMPAT(addr): return \
+
+def IN6_IS_ADDR_V4COMPAT(addr): return \
+
+def IN6_IS_ADDR_MC_RESERVED(addr): return \
+
+def IN6_IS_ADDR_MC_RESERVED(addr): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(addr): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(addr): return \
+
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 0x10
+IP_MULTICAST_TTL = 0x11
+IP_MULTICAST_LOOP = 0x12
+IP_ADD_MEMBERSHIP = 0x13
+IP_DROP_MEMBERSHIP = 0x14
+IP_SEC_OPT = 0x22
+IPSEC_PREF_NEVER = 0x01
+IPSEC_PREF_REQUIRED = 0x02
+IPSEC_PREF_UNIQUE = 0x04
+IP_ADD_PROXY_ADDR = 0x40
+IP_BOUND_IF = 0x41
+IP_UNSPEC_SRC = 0x42
+IP_REUSEADDR = 0x104
+IP_DONTROUTE = 0x105
+IP_BROADCAST = 0x106
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IPV6_RTHDR_TYPE_0 = 0
+IPV6_UNICAST_HOPS = 0x5
+IPV6_MULTICAST_IF = 0x6
+IPV6_MULTICAST_HOPS = 0x7
+IPV6_MULTICAST_LOOP = 0x8
+IPV6_JOIN_GROUP = 0x9
+IPV6_LEAVE_GROUP = 0xa
+IPV6_ADD_MEMBERSHIP = 0x9
+IPV6_DROP_MEMBERSHIP = 0xa
+IPV6_PKTINFO = 0xb
+IPV6_HOPLIMIT = 0xc
+IPV6_NEXTHOP = 0xd
+IPV6_HOPOPTS = 0xe
+IPV6_DSTOPTS = 0xf
+IPV6_RTHDR = 0x10
+IPV6_RTHDRDSTOPTS = 0x11
+IPV6_RECVPKTINFO = 0x12
+IPV6_RECVHOPLIMIT = 0x13
+IPV6_RECVHOPOPTS = 0x14
+IPV6_RECVDSTOPTS = 0x15
+IPV6_RECVRTHDR = 0x16
+IPV6_RECVRTHDRDSTOPTS = 0x17
+IPV6_CHECKSUM = 0x18
+IPV6_BOUND_IF = 0x41
+IPV6_UNSPEC_SRC = 0x42
+INET_ADDRSTRLEN = 16
+INET6_ADDRSTRLEN = 46
+IPV6_PAD1_OPT = 0
diff --git a/lib-python/2.2/plat-sunos5/STROPTS.py b/lib-python/2.2/plat-sunos5/STROPTS.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/STROPTS.py
@@ -0,0 +1,1813 @@
+# Generated by h2py from /usr/include/sys/stropts.h
+
+# Included from sys/feature_tests.h
+
+# Included from sys/isa_defs.h
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 8
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+_ALIGNMENT_REQUIRED = 1
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 4
+_DOUBLE_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 4
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 4
+_ALIGNMENT_REQUIRED = 0
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_ALIGNMENT_REQUIRED = 1
+_LONG_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 8
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 8
+_LONG_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+_POSIX_C_SOURCE = 1
+_LARGEFILE64_SOURCE = 1
+_LARGEFILE_SOURCE = 1
+_FILE_OFFSET_BITS = 64
+_FILE_OFFSET_BITS = 32
+_POSIX_C_SOURCE = 199506L
+_POSIX_PTHREAD_SEMANTICS = 1
+_XOPEN_VERSION = 500
+_XOPEN_VERSION = 4
+_XOPEN_VERSION = 3
+from TYPES import *
+
+# Included from sys/conf.h
+
+# Included from sys/t_lock.h
+
+# Included from sys/machlock.h
+from TYPES import *
+LOCK_HELD_VALUE = 0xff
+def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
+
+def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
+
+CLOCK_LEVEL = 10
+LOCK_LEVEL = 10
+DISP_LEVEL = (LOCK_LEVEL + 1)
+PTR24_LSB = 5
+PTR24_MSB = (PTR24_LSB + 24)
+PTR24_ALIGN = 32
+PTR24_BASE = 0xe0000000
+
+# Included from sys/param.h
+from TYPES import *
+_POSIX_VDISABLE = 0
+MAX_INPUT = 512
+MAX_CANON = 256
+UID_NOBODY = 60001
+GID_NOBODY = UID_NOBODY
+UID_NOACCESS = 60002
+MAX_TASKID = 999999
+MAX_MAXPID = 999999
+DEFAULT_MAXPID = 999999
+DEFAULT_JUMPPID = 100000
+DEFAULT_MAXPID = 30000
+DEFAULT_JUMPPID = 0
+MAXUID = 2147483647
+MAXPROJID = MAXUID
+MAXLINK = 32767
+NMOUNT = 40
+CANBSIZ = 256
+NOFILE = 20
+NGROUPS_UMIN = 0
+NGROUPS_UMAX = 32
+NGROUPS_MAX_DEFAULT = 16
+NZERO = 20
+NULL = 0L
+NULL = 0
+CMASK = 022
+CDLIMIT = (1L<<11)
+NBPS = 0x20000
+NBPSCTR = 512
+UBSIZE = 512
+SCTRSHFT = 9
+SYSNAME = 9
+PREMOTE = 39
+MAXPATHLEN = 1024
+MAXSYMLINKS = 20
+MAXNAMELEN = 256
+NADDR = 13
+PIPE_BUF = 5120
+PIPE_MAX = 5120
+NBBY = 8
+MAXBSIZE = 8192
+DEV_BSIZE = 512
+DEV_BSHIFT = 9
+MAXFRAG = 8
+MAXOFF32_T = 0x7fffffff
+MAXOFF_T = 0x7fffffffffffffffl
+MAXOFFSET_T = 0x7fffffffffffffffl
+MAXOFF_T = 0x7fffffffl
+MAXOFFSET_T = 0x7fffffff
+def btodb(bytes): return   \
+
+def dbtob(db): return   \
+
+def lbtodb(bytes): return   \
+
+def ldbtob(db): return   \
+
+NCARGS32 = 0x100000
+NCARGS64 = 0x200000
+NCARGS = NCARGS64
+NCARGS = NCARGS32
+FSHIFT = 8
+FSCALE = (1<<FSHIFT)
+def DELAY(n): return drv_usecwait(n)
+
+def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
+
+def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
+
+def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
+
+def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
+
+def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
+
+def ptob(x): return ((x) << PAGESHIFT)
+
+def btop(x): return (((x) >> PAGESHIFT))
+
+def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
+
+def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
+
+def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
+
+_AIO_LISTIO_MAX = (4096)
+_AIO_MAX = (-1)
+_MQ_OPEN_MAX = (32)
+_MQ_PRIO_MAX = (32)
+_SEM_NSEMS_MAX = INT_MAX
+_SEM_VALUE_MAX = INT_MAX
+
+# Included from sys/unistd.h
+_CS_PATH = 65
+_CS_LFS_CFLAGS = 68
+_CS_LFS_LDFLAGS = 69
+_CS_LFS_LIBS = 70
+_CS_LFS_LINTFLAGS = 71
+_CS_LFS64_CFLAGS = 72
+_CS_LFS64_LDFLAGS = 73
+_CS_LFS64_LIBS = 74
+_CS_LFS64_LINTFLAGS = 75
+_CS_XBS5_ILP32_OFF32_CFLAGS = 700
+_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
+_CS_XBS5_ILP32_OFF32_LIBS = 702
+_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
+_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
+_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
+_CS_XBS5_ILP32_OFFBIG_LIBS = 707
+_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
+_CS_XBS5_LP64_OFF64_CFLAGS = 709
+_CS_XBS5_LP64_OFF64_LDFLAGS = 710
+_CS_XBS5_LP64_OFF64_LIBS = 711
+_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
+_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
+_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
+_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
+_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
+_SC_ARG_MAX = 1
+_SC_CHILD_MAX = 2
+_SC_CLK_TCK = 3
+_SC_NGROUPS_MAX = 4
+_SC_OPEN_MAX = 5
+_SC_JOB_CONTROL = 6
+_SC_SAVED_IDS = 7
+_SC_VERSION = 8
+_SC_PASS_MAX = 9
+_SC_LOGNAME_MAX = 10
+_SC_PAGESIZE = 11
+_SC_XOPEN_VERSION = 12
+_SC_NPROCESSORS_CONF = 14
+_SC_NPROCESSORS_ONLN = 15
+_SC_STREAM_MAX = 16
+_SC_TZNAME_MAX = 17
+_SC_AIO_LISTIO_MAX = 18
+_SC_AIO_MAX = 19
+_SC_AIO_PRIO_DELTA_MAX = 20
+_SC_ASYNCHRONOUS_IO = 21
+_SC_DELAYTIMER_MAX = 22
+_SC_FSYNC = 23
+_SC_MAPPED_FILES = 24
+_SC_MEMLOCK = 25
+_SC_MEMLOCK_RANGE = 26
+_SC_MEMORY_PROTECTION = 27
+_SC_MESSAGE_PASSING = 28
+_SC_MQ_OPEN_MAX = 29
+_SC_MQ_PRIO_MAX = 30
+_SC_PRIORITIZED_IO = 31
+_SC_PRIORITY_SCHEDULING = 32
+_SC_REALTIME_SIGNALS = 33
+_SC_RTSIG_MAX = 34
+_SC_SEMAPHORES = 35
+_SC_SEM_NSEMS_MAX = 36
+_SC_SEM_VALUE_MAX = 37
+_SC_SHARED_MEMORY_OBJECTS = 38
+_SC_SIGQUEUE_MAX = 39
+_SC_SIGRT_MIN = 40
+_SC_SIGRT_MAX = 41
+_SC_SYNCHRONIZED_IO = 42
+_SC_TIMERS = 43
+_SC_TIMER_MAX = 44
+_SC_2_C_BIND = 45
+_SC_2_C_DEV = 46
+_SC_2_C_VERSION = 47
+_SC_2_FORT_DEV = 48
+_SC_2_FORT_RUN = 49
+_SC_2_LOCALEDEF = 50
+_SC_2_SW_DEV = 51
+_SC_2_UPE = 52
+_SC_2_VERSION = 53
+_SC_BC_BASE_MAX = 54
+_SC_BC_DIM_MAX = 55
+_SC_BC_SCALE_MAX = 56
+_SC_BC_STRING_MAX = 57
+_SC_COLL_WEIGHTS_MAX = 58
+_SC_EXPR_NEST_MAX = 59
+_SC_LINE_MAX = 60
+_SC_RE_DUP_MAX = 61
+_SC_XOPEN_CRYPT = 62
+_SC_XOPEN_ENH_I18N = 63
+_SC_XOPEN_SHM = 64
+_SC_2_CHAR_TERM = 66
+_SC_XOPEN_XCU_VERSION = 67
+_SC_ATEXIT_MAX = 76
+_SC_IOV_MAX = 77
+_SC_XOPEN_UNIX = 78
+_SC_PAGE_SIZE = _SC_PAGESIZE
+_SC_T_IOV_MAX = 79
+_SC_PHYS_PAGES = 500
+_SC_AVPHYS_PAGES = 501
+_SC_COHER_BLKSZ = 503
+_SC_SPLIT_CACHE = 504
+_SC_ICACHE_SZ = 505
+_SC_DCACHE_SZ = 506
+_SC_ICACHE_LINESZ = 507
+_SC_DCACHE_LINESZ = 508
+_SC_ICACHE_BLKSZ = 509
+_SC_DCACHE_BLKSZ = 510
+_SC_DCACHE_TBLKSZ = 511
+_SC_ICACHE_ASSOC = 512
+_SC_DCACHE_ASSOC = 513
+_SC_MAXPID = 514
+_SC_STACK_PROT = 515
+_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
+_SC_GETGR_R_SIZE_MAX = 569
+_SC_GETPW_R_SIZE_MAX = 570
+_SC_LOGIN_NAME_MAX = 571
+_SC_THREAD_KEYS_MAX = 572
+_SC_THREAD_STACK_MIN = 573
+_SC_THREAD_THREADS_MAX = 574
+_SC_TTY_NAME_MAX = 575
+_SC_THREADS = 576
+_SC_THREAD_ATTR_STACKADDR = 577
+_SC_THREAD_ATTR_STACKSIZE = 578
+_SC_THREAD_PRIORITY_SCHEDULING = 579
+_SC_THREAD_PRIO_INHERIT = 580
+_SC_THREAD_PRIO_PROTECT = 581
+_SC_THREAD_PROCESS_SHARED = 582
+_SC_THREAD_SAFE_FUNCTIONS = 583
+_SC_XOPEN_LEGACY = 717
+_SC_XOPEN_REALTIME = 718
+_SC_XOPEN_REALTIME_THREADS = 719
+_SC_XBS5_ILP32_OFF32 = 720
+_SC_XBS5_ILP32_OFFBIG = 721
+_SC_XBS5_LP64_OFF64 = 722
+_SC_XBS5_LPBIG_OFFBIG = 723
+_PC_LINK_MAX = 1
+_PC_MAX_CANON = 2
+_PC_MAX_INPUT = 3
+_PC_NAME_MAX = 4
+_PC_PATH_MAX = 5
+_PC_PIPE_BUF = 6
+_PC_NO_TRUNC = 7
+_PC_VDISABLE = 8
+_PC_CHOWN_RESTRICTED = 9
+_PC_ASYNC_IO = 10
+_PC_PRIO_IO = 11
+_PC_SYNC_IO = 12
+_PC_FILESIZEBITS = 67
+_PC_LAST = 67
+_POSIX_VERSION = 199506L
+_POSIX2_VERSION = 199209L
+_POSIX2_C_VERSION = 199209L
+_XOPEN_XCU_VERSION = 4
+_XOPEN_REALTIME = 1
+_XOPEN_ENH_I18N = 1
+_XOPEN_SHM = 1
+_POSIX2_C_BIND = 1
+_POSIX2_CHAR_TERM = 1
+_POSIX2_LOCALEDEF = 1
+_POSIX2_C_DEV = 1
+_POSIX2_SW_DEV = 1
+_POSIX2_UPE = 1
+
+# Included from sys/mutex.h
+from TYPES import *
+def MUTEX_HELD(x): return (mutex_owned(x))
+
+
+# Included from sys/rwlock.h
+from TYPES import *
+def RW_READ_HELD(x): return (rw_read_held((x)))
+
+def RW_WRITE_HELD(x): return (rw_write_held((x)))
+
+def RW_LOCK_HELD(x): return (rw_lock_held((x)))
+
+def RW_ISWRITER(x): return (rw_iswriter(x))
+
+
+# Included from sys/semaphore.h
+
+# Included from sys/thread.h
+from TYPES import *
+
+# Included from sys/klwp.h
+from TYPES import *
+
+# Included from sys/condvar.h
+from TYPES import *
+
+# Included from sys/time.h
+
+# Included from sys/types32.h
+
+# Included from sys/int_types.h
+TIME32_MAX = INT32_MAX
+TIME32_MIN = INT32_MIN
+def TIMEVAL_OVERFLOW(tv): return \
+
+from TYPES import *
+DST_NONE = 0
+DST_USA = 1
+DST_AUST = 2
+DST_WET = 3
+DST_MET = 4
+DST_EET = 5
+DST_CAN = 6
+DST_GB = 7
+DST_RUM = 8
+DST_TUR = 9
+DST_AUSTALT = 10
+ITIMER_REAL = 0
+ITIMER_VIRTUAL = 1
+ITIMER_PROF = 2
+ITIMER_REALPROF = 3
+def ITIMERVAL_OVERFLOW(itv): return \
+
+SEC = 1
+MILLISEC = 1000
+MICROSEC = 1000000
+NANOSEC = 1000000000
+
+# Included from sys/time_impl.h
+def TIMESPEC_OVERFLOW(ts): return \
+
+def ITIMERSPEC_OVERFLOW(it): return \
+
+__CLOCK_REALTIME0 = 0
+CLOCK_VIRTUAL = 1
+CLOCK_PROF = 2
+__CLOCK_REALTIME3 = 3
+CLOCK_HIGHRES = 4
+CLOCK_MAX = 5
+CLOCK_REALTIME = __CLOCK_REALTIME3
+CLOCK_REALTIME = __CLOCK_REALTIME0
+TIMER_RELTIME = 0x0
+TIMER_ABSTIME = 0x1
+def TICK_TO_SEC(tick): return ((tick) / hz)
+
+def SEC_TO_TICK(sec): return ((sec) * hz)
+
+def TICK_TO_MSEC(tick): return \
+
+def MSEC_TO_TICK(msec): return \
+
+def MSEC_TO_TICK_ROUNDUP(msec): return \
+
+def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
+
+def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
+
+def USEC_TO_TICK_ROUNDUP(usec): return \
+
+def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
+
+def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
+
+def NSEC_TO_TICK_ROUNDUP(nsec): return \
+
+def TIMEVAL_TO_TICK(tvp): return \
+
+def TIMESTRUC_TO_TICK(tsp): return \
+
+
+# Included from time.h
+from TYPES import *
+
+# Included from iso/time_iso.h
+NULL = 0L
+NULL = 0
+CLOCKS_PER_SEC = 1000000
+
+# Included from sys/select.h
+FD_SETSIZE = 65536
+FD_SETSIZE = 1024
+_NBBY = 8
+NBBY = _NBBY
+def FD_ZERO(p): return bzero((p), sizeof (*(p)))
+
+
+# Included from sys/signal.h
+
+# Included from sys/iso/signal_iso.h
+SIGHUP = 1
+SIGINT = 2
+SIGQUIT = 3
+SIGILL = 4
+SIGTRAP = 5
+SIGIOT = 6
+SIGABRT = 6
+SIGEMT = 7
+SIGFPE = 8
+SIGKILL = 9
+SIGBUS = 10
+SIGSEGV = 11
+SIGSYS = 12
+SIGPIPE = 13
+SIGALRM = 14
+SIGTERM = 15
+SIGUSR1 = 16
+SIGUSR2 = 17
+SIGCLD = 18
+SIGCHLD = 18
+SIGPWR = 19
+SIGWINCH = 20
+SIGURG = 21
+SIGPOLL = 22
+SIGIO = SIGPOLL
+SIGSTOP = 23
+SIGTSTP = 24
+SIGCONT = 25
+SIGTTIN = 26
+SIGTTOU = 27
+SIGVTALRM = 28
+SIGPROF = 29
+SIGXCPU = 30
+SIGXFSZ = 31
+SIGWAITING = 32
+SIGLWP = 33
+SIGFREEZE = 34
+SIGTHAW = 35
+SIGCANCEL = 36
+SIGLOST = 37
+_SIGRTMIN = 38
+_SIGRTMAX = 45
+SIG_BLOCK = 1
+SIG_UNBLOCK = 2
+SIG_SETMASK = 3
+SIGNO_MASK = 0xFF
+SIGDEFER = 0x100
+SIGHOLD = 0x200
+SIGRELSE = 0x400
+SIGIGNORE = 0x800
+SIGPAUSE = 0x1000
+
+# Included from sys/siginfo.h
+from TYPES import *
+SIGEV_NONE = 1
+SIGEV_SIGNAL = 2
+SIGEV_THREAD = 3
+SI_NOINFO = 32767
+SI_USER = 0
+SI_LWP = (-1)
+SI_QUEUE = (-2)
+SI_TIMER = (-3)
+SI_ASYNCIO = (-4)
+SI_MESGQ = (-5)
+
+# Included from sys/machsig.h
+ILL_ILLOPC = 1
+ILL_ILLOPN = 2
+ILL_ILLADR = 3
+ILL_ILLTRP = 4
+ILL_PRVOPC = 5
+ILL_PRVREG = 6
+ILL_COPROC = 7
+ILL_BADSTK = 8
+NSIGILL = 8
+EMT_TAGOVF = 1
+EMT_CPCOVF = 2
+NSIGEMT = 2
+FPE_INTDIV = 1
+FPE_INTOVF = 2
+FPE_FLTDIV = 3
+FPE_FLTOVF = 4
+FPE_FLTUND = 5
+FPE_FLTRES = 6
+FPE_FLTINV = 7
+FPE_FLTSUB = 8
+NSIGFPE = 8
+SEGV_MAPERR = 1
+SEGV_ACCERR = 2
+NSIGSEGV = 2
+BUS_ADRALN = 1
+BUS_ADRERR = 2
+BUS_OBJERR = 3
+NSIGBUS = 3
+TRAP_BRKPT = 1
+TRAP_TRACE = 2
+TRAP_RWATCH = 3
+TRAP_WWATCH = 4
+TRAP_XWATCH = 5
+NSIGTRAP = 5
+CLD_EXITED = 1
+CLD_KILLED = 2
+CLD_DUMPED = 3
+CLD_TRAPPED = 4
+CLD_STOPPED = 5
+CLD_CONTINUED = 6
+NSIGCLD = 6
+POLL_IN = 1
+POLL_OUT = 2
+POLL_MSG = 3
+POLL_ERR = 4
+POLL_PRI = 5
+POLL_HUP = 6
+NSIGPOLL = 6
+PROF_SIG = 1
+NSIGPROF = 1
+SI_MAXSZ = 256
+SI_MAXSZ = 128
+
+# Included from sys/time_std_impl.h
+from TYPES import *
+SI32_MAXSZ = 128
+def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
+
+SA_NOCLDSTOP = 0x00020000
+SA_ONSTACK = 0x00000001
+SA_RESETHAND = 0x00000002
+SA_RESTART = 0x00000004
+SA_SIGINFO = 0x00000008
+SA_NODEFER = 0x00000010
+SA_NOCLDWAIT = 0x00010000
+SA_WAITSIG = 0x00010000
+NSIG = 46
+MAXSIG = 45
+S_SIGNAL = 1
+S_SIGSET = 2
+S_SIGACTION = 3
+S_NONE = 4
+MINSIGSTKSZ = 2048
+SIGSTKSZ = 8192
+SS_ONSTACK = 0x00000001
+SS_DISABLE = 0x00000002
+SN_PROC = 1
+SN_CANCEL = 2
+SN_SEND = 3
+
+# Included from sys/ucontext.h
+from TYPES import *
+
+# Included from sys/regset.h
+REG_CCR = (0)
+REG_PSR = (0)
+REG_PSR = (0)
+REG_PC = (1)
+REG_nPC = (2)
+REG_Y = (3)
+REG_G1 = (4)
+REG_G2 = (5)
+REG_G3 = (6)
+REG_G4 = (7)
+REG_G5 = (8)
+REG_G6 = (9)
+REG_G7 = (10)
+REG_O0 = (11)
+REG_O1 = (12)
+REG_O2 = (13)
+REG_O3 = (14)
+REG_O4 = (15)
+REG_O5 = (16)
+REG_O6 = (17)
+REG_O7 = (18)
+REG_ASI = (19)
+REG_FPRS = (20)
+REG_PS = REG_PSR
+REG_SP = REG_O6
+REG_R0 = REG_O0
+REG_R1 = REG_O1
+_NGREG = 21
+_NGREG = 19
+NGREG = _NGREG
+_NGREG32 = 19
+_NGREG64 = 21
+SPARC_MAXREGWINDOW = 31
+MAXFPQ = 16
+XRS_ID = 0x78727300
+
+# Included from v7/sys/privregs.h
+
+# Included from v7/sys/psr.h
+PSR_CWP = 0x0000001F
+PSR_ET = 0x00000020
+PSR_PS = 0x00000040
+PSR_S = 0x00000080
+PSR_PIL = 0x00000F00
+PSR_EF = 0x00001000
+PSR_EC = 0x00002000
+PSR_RSV = 0x000FC000
+PSR_ICC = 0x00F00000
+PSR_C = 0x00100000
+PSR_V = 0x00200000
+PSR_Z = 0x00400000
+PSR_N = 0x00800000
+PSR_VER = 0x0F000000
+PSR_IMPL = 0xF0000000
+PSL_ALLCC = PSR_ICC
+PSL_USER = (PSR_S)
+PSL_USERMASK = (PSR_ICC)
+PSL_UBITS = (PSR_ICC|PSR_EF)
+def USERMODE(ps): return (((ps) & PSR_PS) == 0)
+
+
+# Included from sys/fsr.h
+FSR_CEXC = 0x0000001f
+FSR_AEXC = 0x000003e0
+FSR_FCC = 0x00000c00
+FSR_PR = 0x00001000
+FSR_QNE = 0x00002000
+FSR_FTT = 0x0001c000
+FSR_VER = 0x000e0000
+FSR_TEM = 0x0f800000
+FSR_RP = 0x30000000
+FSR_RD = 0xc0000000
+FSR_VER_SHIFT = 17
+FSR_FCC1 = 0x00000003
+FSR_FCC2 = 0x0000000C
+FSR_FCC3 = 0x00000030
+FSR_CEXC_NX = 0x00000001
+FSR_CEXC_DZ = 0x00000002
+FSR_CEXC_UF = 0x00000004
+FSR_CEXC_OF = 0x00000008
+FSR_CEXC_NV = 0x00000010
+FSR_AEXC_NX = (0x1 << 5)
+FSR_AEXC_DZ = (0x2 << 5)
+FSR_AEXC_UF = (0x4 << 5)
+FSR_AEXC_OF = (0x8 << 5)
+FSR_AEXC_NV = (0x10 << 5)
+FTT_NONE = 0
+FTT_IEEE = 1
+FTT_UNFIN = 2
+FTT_UNIMP = 3
+FTT_SEQ = 4
+FTT_ALIGN = 5
+FTT_DFAULT = 6
+FSR_FTT_SHIFT = 14
+FSR_FTT_IEEE = (FTT_IEEE   << FSR_FTT_SHIFT)
+FSR_FTT_UNFIN = (FTT_UNFIN  << FSR_FTT_SHIFT)
+FSR_FTT_UNIMP = (FTT_UNIMP  << FSR_FTT_SHIFT)
+FSR_FTT_SEQ = (FTT_SEQ    << FSR_FTT_SHIFT)
+FSR_FTT_ALIGN = (FTT_ALIGN  << FSR_FTT_SHIFT)
+FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
+FSR_TEM_NX = (0x1 << 23)
+FSR_TEM_DZ = (0x2 << 23)
+FSR_TEM_UF = (0x4 << 23)
+FSR_TEM_OF = (0x8 << 23)
+FSR_TEM_NV = (0x10 << 23)
+RP_DBLEXT = 0
+RP_SINGLE = 1
+RP_DOUBLE = 2
+RP_RESERVED = 3
+RD_NEAR = 0
+RD_ZER0 = 1
+RD_POSINF = 2
+RD_NEGINF = 3
+FPRS_DL = 0x1
+FPRS_DU = 0x2
+FPRS_FEF = 0x4
+PIL_MAX = 0xf
+def SAVE_GLOBALS(RP): return \
+
+def RESTORE_GLOBALS(RP): return \
+
+def SAVE_OUTS(RP): return \
+
+def RESTORE_OUTS(RP): return \
+
+def SAVE_WINDOW(SBP): return \
+
+def RESTORE_WINDOW(SBP): return \
+
+def STORE_FPREGS(FP): return \
+
+def LOAD_FPREGS(FP): return \
+
+_SPARC_MAXREGWINDOW = 31
+_XRS_ID = 0x78727300
+GETCONTEXT = 0
+SETCONTEXT = 1
+UC_SIGMASK = 001
+UC_STACK = 002
+UC_CPU = 004
+UC_MAU = 010
+UC_FPU = UC_MAU
+UC_INTR = 020
+UC_ASR = 040
+UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
+UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
+_SIGQUEUE_MAX = 32
+_SIGNOTIFY_MAX = 32
+
+# Included from sys/pcb.h
+INSTR_VALID = 0x02
+NORMAL_STEP = 0x04
+WATCH_STEP = 0x08
+CPC_OVERFLOW = 0x10
+ASYNC_HWERR = 0x20
+STEP_NONE = 0
+STEP_REQUESTED = 1
+STEP_ACTIVE = 2
+STEP_WASACTIVE = 3
+
+# Included from sys/msacct.h
+LMS_USER = 0
+LMS_SYSTEM = 1
+LMS_TRAP = 2
+LMS_TFAULT = 3
+LMS_DFAULT = 4
+LMS_KFAULT = 5
+LMS_USER_LOCK = 6
+LMS_SLEEP = 7
+LMS_WAIT_CPU = 8
+LMS_STOPPED = 9
+NMSTATES = 10
+
+# Included from sys/lwp.h
+
+# Included from sys/synch.h
+from TYPES import *
+USYNC_THREAD = 0x00
+USYNC_PROCESS = 0x01
+LOCK_NORMAL = 0x00
+LOCK_ERRORCHECK = 0x02
+LOCK_RECURSIVE = 0x04
+USYNC_PROCESS_ROBUST = 0x08
+LOCK_PRIO_NONE = 0x00
+LOCK_PRIO_INHERIT = 0x10
+LOCK_PRIO_PROTECT = 0x20
+LOCK_STALL_NP = 0x00
+LOCK_ROBUST_NP = 0x40
+LOCK_OWNERDEAD = 0x1
+LOCK_NOTRECOVERABLE = 0x2
+LOCK_INITED = 0x4
+LOCK_UNMAPPED = 0x8
+LWP_DETACHED = 0x00000040
+LWP_SUSPENDED = 0x00000080
+__LWP_ASLWP = 0x00000100
+MAXSYSARGS = 8
+NORMALRETURN = 0
+JUSTRETURN = 1
+LWP_USER = 0x01
+LWP_SYS = 0x02
+TS_FREE = 0x00
+TS_SLEEP = 0x01
+TS_RUN = 0x02
+TS_ONPROC = 0x04
+TS_ZOMB = 0x08
+TS_STOPPED = 0x10
+T_INTR_THREAD = 0x0001
+T_WAKEABLE = 0x0002
+T_TOMASK = 0x0004
+T_TALLOCSTK = 0x0008
+T_WOULDBLOCK = 0x0020
+T_DONTBLOCK = 0x0040
+T_DONTPEND = 0x0080
+T_SYS_PROF = 0x0100
+T_WAITCVSEM = 0x0200
+T_WATCHPT = 0x0400
+T_PANIC = 0x0800
+TP_HOLDLWP = 0x0002
+TP_TWAIT = 0x0004
+TP_LWPEXIT = 0x0008
+TP_PRSTOP = 0x0010
+TP_CHKPT = 0x0020
+TP_EXITLWP = 0x0040
+TP_PRVSTOP = 0x0080
+TP_MSACCT = 0x0100
+TP_STOPPING = 0x0200
+TP_WATCHPT = 0x0400
+TP_PAUSE = 0x0800
+TP_CHANGEBIND = 0x1000
+TS_LOAD = 0x0001
+TS_DONT_SWAP = 0x0002
+TS_SWAPENQ = 0x0004
+TS_ON_SWAPQ = 0x0008
+TS_CSTART = 0x0100
+TS_UNPAUSE = 0x0200
+TS_XSTART = 0x0400
+TS_PSTART = 0x0800
+TS_RESUME = 0x1000
+TS_CREATE = 0x2000
+TS_ALLSTART = \
+	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
+def CPR_VSTOPPED(t): return \
+
+def THREAD_TRANSITION(tp): return thread_transition(tp);
+
+def THREAD_STOP(tp): return \
+
+def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
+
+def SEMA_HELD(x): return (sema_held((x)))
+
+NO_LOCKS_HELD = 1
+NO_COMPETING_THREADS = 1
+FMNAMESZ = 8
+
+# Included from sys/systm.h
+from TYPES import *
+
+# Included from sys/proc.h
+
+# Included from sys/cred.h
+
+# Included from sys/user.h
+from TYPES import *
+
+# Included from sys/resource.h
+from TYPES import *
+PRIO_PROCESS = 0
+PRIO_PGRP = 1
+PRIO_USER = 2
+RLIMIT_CPU = 0
+RLIMIT_FSIZE = 1
+RLIMIT_DATA = 2
+RLIMIT_STACK = 3
+RLIMIT_CORE = 4
+RLIMIT_NOFILE = 5
+RLIMIT_VMEM = 6
+RLIMIT_AS = RLIMIT_VMEM
+RLIM_NLIMITS = 7
+RLIM_INFINITY = (-3l)
+RLIM_SAVED_MAX = (-2l)
+RLIM_SAVED_CUR = (-1l)
+RLIM_INFINITY = 0x7fffffff
+RLIM_SAVED_MAX = 0x7ffffffe
+RLIM_SAVED_CUR = 0x7ffffffd
+RLIM32_INFINITY = 0x7fffffff
+RLIM32_SAVED_MAX = 0x7ffffffe
+RLIM32_SAVED_CUR = 0x7ffffffd
+
+# Included from sys/model.h
+
+# Included from sys/debug.h
+def ASSERT64(x): return ASSERT(x)
+
+def ASSERT32(x): return ASSERT(x)
+
+DATAMODEL_MASK = 0x0FF00000
+DATAMODEL_ILP32 = 0x00100000
+DATAMODEL_LP64 = 0x00200000
+DATAMODEL_NONE = 0
+DATAMODEL_NATIVE = DATAMODEL_LP64
+DATAMODEL_NATIVE = DATAMODEL_ILP32
+def STRUCT_SIZE(handle): return \
+
+def STRUCT_BUF(handle): return ((handle).ptr.m64)
+
+def SIZEOF_PTR(umodel): return \
+
+def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
+
+def STRUCT_BUF(handle): return ((handle).ptr)
+
+def SIZEOF_PTR(umodel): return sizeof (caddr_t)
+
+def lwp_getdatamodel(t): return DATAMODEL_ILP32
+
+RUSAGE_SELF = 0
+RUSAGE_CHILDREN = -1
+
+# Included from sys/auxv.h
+AT_NULL = 0
+AT_IGNORE = 1
+AT_EXECFD = 2
+AT_PHDR = 3
+AT_PHENT = 4
+AT_PHNUM = 5
+AT_PAGESZ = 6
+AT_BASE = 7
+AT_FLAGS = 8
+AT_ENTRY = 9
+AT_DCACHEBSIZE = 10
+AT_ICACHEBSIZE = 11
+AT_UCACHEBSIZE = 12
+AT_SUN_UID = 2000
+AT_SUN_RUID = 2001
+AT_SUN_GID = 2002
+AT_SUN_RGID = 2003
+AT_SUN_LDELF = 2004
+AT_SUN_LDSHDR = 2005
+AT_SUN_LDNAME = 2006
+AT_SUN_LPAGESZ = 2007
+AT_SUN_PLATFORM = 2008
+AT_SUN_HWCAP = 2009
+AT_SUN_IFLUSH = 2010
+AT_SUN_CPU = 2011
+AT_SUN_EMUL_ENTRY = 2012
+AT_SUN_EMUL_EXECFD = 2013
+AT_SUN_EXECNAME = 2014
+AT_SUN_MMU = 2015
+
+# Included from sys/errno.h
+EPERM = 1
+ENOENT = 2
+ESRCH = 3
+EINTR = 4
+EIO = 5
+ENXIO = 6
+E2BIG = 7
+ENOEXEC = 8
+EBADF = 9
+ECHILD = 10
+EAGAIN = 11
+ENOMEM = 12
+EACCES = 13
+EFAULT = 14
+ENOTBLK = 15
+EBUSY = 16
+EEXIST = 17
+EXDEV = 18
+ENODEV = 19
+ENOTDIR = 20
+EISDIR = 21
+EINVAL = 22
+ENFILE = 23
+EMFILE = 24
+ENOTTY = 25
+ETXTBSY = 26
+EFBIG = 27
+ENOSPC = 28
+ESPIPE = 29
+EROFS = 30
+EMLINK = 31
+EPIPE = 32
+EDOM = 33
+ERANGE = 34
+ENOMSG = 35
+EIDRM = 36
+ECHRNG = 37
+EL2NSYNC = 38
+EL3HLT = 39
+EL3RST = 40
+ELNRNG = 41
+EUNATCH = 42
+ENOCSI = 43
+EL2HLT = 44
+EDEADLK = 45
+ENOLCK = 46
+ECANCELED = 47
+ENOTSUP = 48
+EDQUOT = 49
+EBADE = 50
+EBADR = 51
+EXFULL = 52
+ENOANO = 53
+EBADRQC = 54
+EBADSLT = 55
+EDEADLOCK = 56
+EBFONT = 57
+EOWNERDEAD = 58
+ENOTRECOVERABLE = 59
+ENOSTR = 60
+ENODATA = 61
+ETIME = 62
+ENOSR = 63
+ENONET = 64
+ENOPKG = 65
+EREMOTE = 66
+ENOLINK = 67
+EADV = 68
+ESRMNT = 69
+ECOMM = 70
+EPROTO = 71
+ELOCKUNMAPPED = 72
+ENOTACTIVE = 73
+EMULTIHOP = 74
+EBADMSG = 77
+ENAMETOOLONG = 78
+EOVERFLOW = 79
+ENOTUNIQ = 80
+EBADFD = 81
+EREMCHG = 82
+ELIBACC = 83
+ELIBBAD = 84
+ELIBSCN = 85
+ELIBMAX = 86
+ELIBEXEC = 87
+EILSEQ = 88
+ENOSYS = 89
+ELOOP = 90
+ERESTART = 91
+ESTRPIPE = 92
+ENOTEMPTY = 93
+EUSERS = 94
+ENOTSOCK = 95
+EDESTADDRREQ = 96
+EMSGSIZE = 97
+EPROTOTYPE = 98
+ENOPROTOOPT = 99
+EPROTONOSUPPORT = 120
+ESOCKTNOSUPPORT = 121
+EOPNOTSUPP = 122
+EPFNOSUPPORT = 123
+EAFNOSUPPORT = 124
+EADDRINUSE = 125
+EADDRNOTAVAIL = 126
+ENETDOWN = 127
+ENETUNREACH = 128
+ENETRESET = 129
+ECONNABORTED = 130
+ECONNRESET = 131
+ENOBUFS = 132
+EISCONN = 133
+ENOTCONN = 134
+ESHUTDOWN = 143
+ETOOMANYREFS = 144
+ETIMEDOUT = 145
+ECONNREFUSED = 146
+EHOSTDOWN = 147
+EHOSTUNREACH = 148
+EWOULDBLOCK = EAGAIN
+EALREADY = 149
+EINPROGRESS = 150
+ESTALE = 151
+PSARGSZ = 80
+PSCOMSIZ = 14
+MAXCOMLEN = 16
+__KERN_NAUXV_IMPL = 19
+__KERN_NAUXV_IMPL = 21
+__KERN_NAUXV_IMPL = 21
+PSARGSZ = 80
+
+# Included from sys/watchpoint.h
+from TYPES import *
+
+# Included from vm/seg_enum.h
+
+# Included from sys/copyops.h
+from TYPES import *
+
+# Included from sys/buf.h
+
+# Included from sys/kstat.h
+from TYPES import *
+KSTAT_STRLEN = 31
+def KSTAT_ENTER(k): return \
+
+def KSTAT_EXIT(k): return \
+
+KSTAT_TYPE_RAW = 0
+KSTAT_TYPE_NAMED = 1
+KSTAT_TYPE_INTR = 2
+KSTAT_TYPE_IO = 3
+KSTAT_TYPE_TIMER = 4
+KSTAT_NUM_TYPES = 5
+KSTAT_FLAG_VIRTUAL = 0x01
+KSTAT_FLAG_VAR_SIZE = 0x02
+KSTAT_FLAG_WRITABLE = 0x04
+KSTAT_FLAG_PERSISTENT = 0x08
+KSTAT_FLAG_DORMANT = 0x10
+KSTAT_FLAG_INVALID = 0x20
+KSTAT_READ = 0
+KSTAT_WRITE = 1
+KSTAT_DATA_CHAR = 0
+KSTAT_DATA_INT32 = 1
+KSTAT_DATA_UINT32 = 2
+KSTAT_DATA_INT64 = 3
+KSTAT_DATA_UINT64 = 4
+KSTAT_DATA_LONG = KSTAT_DATA_INT32
+KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
+KSTAT_DATA_LONG = KSTAT_DATA_INT64
+KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
+KSTAT_DATA_LONG = 7
+KSTAT_DATA_ULONG = 8
+KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
+KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
+KSTAT_DATA_FLOAT = 5
+KSTAT_DATA_DOUBLE = 6
+KSTAT_INTR_HARD = 0
+KSTAT_INTR_SOFT = 1
+KSTAT_INTR_WATCHDOG = 2
+KSTAT_INTR_SPURIOUS = 3
+KSTAT_INTR_MULTSVC = 4
+KSTAT_NUM_INTRS = 5
+B_BUSY = 0x0001
+B_DONE = 0x0002
+B_ERROR = 0x0004
+B_PAGEIO = 0x0010
+B_PHYS = 0x0020
+B_READ = 0x0040
+B_WRITE = 0x0100
+B_KERNBUF = 0x0008
+B_WANTED = 0x0080
+B_AGE = 0x000200
+B_ASYNC = 0x000400
+B_DELWRI = 0x000800
+B_STALE = 0x001000
+B_DONTNEED = 0x002000
+B_REMAPPED = 0x004000
+B_FREE = 0x008000
+B_INVAL = 0x010000
+B_FORCE = 0x020000
+B_HEAD = 0x040000
+B_NOCACHE = 0x080000
+B_TRUNC = 0x100000
+B_SHADOW = 0x200000
+B_RETRYWRI = 0x400000
+def notavail(bp): return \
+
+def BWRITE(bp): return \
+
+def BWRITE2(bp): return \
+
+
+# Included from sys/aio_req.h
+
+# Included from sys/uio.h
+from TYPES import *
+WP_NOWATCH = 0x01
+WP_SETPROT = 0x02
+
+# Included from sys/timer.h
+from TYPES import *
+_TIMER_MAX = 32
+ITLK_LOCKED = 0x01
+ITLK_WANTED = 0x02
+ITLK_REMOVE = 0x04
+IT_PERLWP = 0x01
+IT_SIGNAL = 0x02
+
+# Included from sys/utrap.h
+UT_INSTRUCTION_DISABLED = 1
+UT_INSTRUCTION_ERROR = 2
+UT_INSTRUCTION_PROTECTION = 3
+UT_ILLTRAP_INSTRUCTION = 4
+UT_ILLEGAL_INSTRUCTION = 5
+UT_PRIVILEGED_OPCODE = 6
+UT_FP_DISABLED = 7
+UT_FP_EXCEPTION_IEEE_754 = 8
+UT_FP_EXCEPTION_OTHER = 9
+UT_TAG_OVERFLOW = 10
+UT_DIVISION_BY_ZERO = 11
+UT_DATA_EXCEPTION = 12
+UT_DATA_ERROR = 13
+UT_DATA_PROTECTION = 14
+UT_MEM_ADDRESS_NOT_ALIGNED = 15
+UT_PRIVILEGED_ACTION = 16
+UT_ASYNC_DATA_ERROR = 17
+UT_TRAP_INSTRUCTION_16 = 18
+UT_TRAP_INSTRUCTION_17 = 19
+UT_TRAP_INSTRUCTION_18 = 20
+UT_TRAP_INSTRUCTION_19 = 21
+UT_TRAP_INSTRUCTION_20 = 22
+UT_TRAP_INSTRUCTION_21 = 23
+UT_TRAP_INSTRUCTION_22 = 24
+UT_TRAP_INSTRUCTION_23 = 25
+UT_TRAP_INSTRUCTION_24 = 26
+UT_TRAP_INSTRUCTION_25 = 27
+UT_TRAP_INSTRUCTION_26 = 28
+UT_TRAP_INSTRUCTION_27 = 29
+UT_TRAP_INSTRUCTION_28 = 30
+UT_TRAP_INSTRUCTION_29 = 31
+UT_TRAP_INSTRUCTION_30 = 32
+UT_TRAP_INSTRUCTION_31 = 33
+UTRAP_V8P_FP_DISABLED = UT_FP_DISABLED
+UTRAP_V8P_MEM_ADDRESS_NOT_ALIGNED = UT_MEM_ADDRESS_NOT_ALIGNED
+UT_PRECISE_MAXTRAPS = 33
+
+# Included from sys/refstr.h
+
+# Included from sys/task.h
+from TYPES import *
+TASK_NORMAL = 0x0
+TASK_FINAL = 0x1
+TASK_FINALITY = 0x1
+
+# Included from sys/id_space.h
+from TYPES import *
+
+# Included from sys/vmem.h
+from TYPES import *
+VM_SLEEP = 0x00000000
+VM_NOSLEEP = 0x00000001
+VM_PANIC = 0x00000002
+VM_KMFLAGS = 0x000000ff
+VM_BESTFIT = 0x00000100
+VMEM_ALLOC = 0x01
+VMEM_FREE = 0x02
+VMEM_SPAN = 0x10
+ISP_NORMAL = 0x0
+ISP_RESERVE = 0x1
+
+# Included from sys/exacct_impl.h
+from TYPES import *
+
+# Included from sys/kmem.h
+from TYPES import *
+KM_SLEEP = 0x0000
+KM_NOSLEEP = 0x0001
+KM_PANIC = 0x0002
+KM_VMFLAGS = 0x00ff
+KM_FLAGS = 0xffff
+KMC_NOTOUCH = 0x00010000
+KMC_NODEBUG = 0x00020000
+KMC_NOMAGAZINE = 0x00040000
+KMC_NOHASH = 0x00080000
+KMC_QCACHE = 0x00100000
+_ISA_IA32 = 0
+_ISA_IA64 = 1
+SSLEEP = 1
+SRUN = 2
+SZOMB = 3
+SSTOP = 4
+SIDL = 5
+SONPROC = 6
+CLDPEND = 0x0001
+CLDCONT = 0x0002
+SSYS = 0x00000001
+STRC = 0x00000002
+SLOAD = 0x00000008
+SLOCK = 0x00000010
+SPREXEC = 0x00000020
+SPROCTR = 0x00000040
+SPRFORK = 0x00000080
+SKILLED = 0x00000100
+SULOAD = 0x00000200
+SRUNLCL = 0x00000400
+SBPTADJ = 0x00000800
+SKILLCL = 0x00001000
+SOWEUPC = 0x00002000
+SEXECED = 0x00004000
+SPASYNC = 0x00008000
+SJCTL = 0x00010000
+SNOWAIT = 0x00020000
+SVFORK = 0x00040000
+SVFWAIT = 0x00080000
+EXITLWPS = 0x00100000
+HOLDFORK = 0x00200000
+SWAITSIG = 0x00400000
+HOLDFORK1 = 0x00800000
+COREDUMP = 0x01000000
+SMSACCT = 0x02000000
+ASLWP = 0x04000000
+SPRLOCK = 0x08000000
+NOCD = 0x10000000
+HOLDWATCH = 0x20000000
+SMSFORK = 0x40000000
+SDOCORE = 0x80000000
+FORREAL = 0
+JUSTLOOKING = 1
+SUSPEND_NORMAL = 0
+SUSPEND_PAUSE = 1
+NOCLASS = (-1)
+
+# Included from sys/dditypes.h
+DDI_DEVICE_ATTR_V0 = 0x0001
+DDI_NEVERSWAP_ACC = 0x00
+DDI_STRUCTURE_LE_ACC = 0x01
+DDI_STRUCTURE_BE_ACC = 0x02
+DDI_STRICTORDER_ACC = 0x00
+DDI_UNORDERED_OK_ACC = 0x01
+DDI_MERGING_OK_ACC = 0x02
+DDI_LOADCACHING_OK_ACC = 0x03
+DDI_STORECACHING_OK_ACC = 0x04
+DDI_DATA_SZ01_ACC = 1
+DDI_DATA_SZ02_ACC = 2
+DDI_DATA_SZ04_ACC = 4
+DDI_DATA_SZ08_ACC = 8
+VERS_ACCHDL = 0x0001
+DEVID_NONE = 0
+DEVID_SCSI3_WWN = 1
+DEVID_SCSI_SERIAL = 2
+DEVID_FAB = 3
+DEVID_ENCAP = 4
+DEVID_MAXTYPE = 4
+
+# Included from sys/varargs.h
+
+# Included from sys/va_list.h
+VA_ALIGN = 8
+def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
+
+VA_ALIGN = 8
+def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
+
+NSYSCALL = 256
+SE_32RVAL1 = 0x0
+SE_32RVAL2 = 0x1
+SE_64RVAL = 0x2
+SE_RVAL_MASK = 0x3
+SE_LOADABLE = 0x08
+SE_LOADED = 0x10
+SE_NOUNLOAD = 0x20
+SE_ARGC = 0x40
+
+# Included from sys/devops.h
+from TYPES import *
+
+# Included from sys/poll.h
+POLLIN = 0x0001
+POLLPRI = 0x0002
+POLLOUT = 0x0004
+POLLRDNORM = 0x0040
+POLLWRNORM = POLLOUT
+POLLRDBAND = 0x0080
+POLLWRBAND = 0x0100
+POLLNORM = POLLRDNORM
+POLLERR = 0x0008
+POLLHUP = 0x0010
+POLLNVAL = 0x0020
+POLLREMOVE = 0x0800
+POLLRDDATA = 0x0200
+POLLNOERR = 0x0400
+POLLCLOSED = 0x8000
+
+# Included from vm/as.h
+
+# Included from vm/seg.h
+
+# Included from sys/vnode.h
+from TYPES import *
+VROOT = 0x01
+VNOCACHE = 0x02
+VNOMAP = 0x04
+VDUP = 0x08
+VNOSWAP = 0x10
+VNOMOUNT = 0x20
+VISSWAP = 0x40
+VSWAPLIKE = 0x80
+VVFSLOCK = 0x100
+VVFSWAIT = 0x200
+VVMLOCK = 0x400
+VDIROPEN = 0x800
+VVMEXEC = 0x1000
+VPXFS = 0x2000
+AT_TYPE = 0x0001
+AT_MODE = 0x0002
+AT_UID = 0x0004
+AT_GID = 0x0008
+AT_FSID = 0x0010
+AT_NODEID = 0x0020
+AT_NLINK = 0x0040
+AT_SIZE = 0x0080
+AT_ATIME = 0x0100
+AT_MTIME = 0x0200
+AT_CTIME = 0x0400
+AT_RDEV = 0x0800
+AT_BLKSIZE = 0x1000
+AT_NBLOCKS = 0x2000
+AT_VCODE = 0x4000
+AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
+			AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
+			AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
+AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
+			AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
+AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
+AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
+			AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
+VSUID = 04000
+VSGID = 02000
+VSVTX = 01000
+VREAD = 00400
+VWRITE = 00200
+VEXEC = 00100
+MODEMASK = 07777
+PERMMASK = 00777
+def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
+
+VSA_ACL = 0x0001
+VSA_ACLCNT = 0x0002
+VSA_DFACL = 0x0004
+VSA_DFACLCNT = 0x0008
+LOOKUP_DIR = 0x01
+DUMP_ALLOC = 0
+DUMP_FREE = 1
+DUMP_SCAN = 2
+ATTR_UTIME = 0x01
+ATTR_EXEC = 0x02
+ATTR_COMM = 0x04
+ATTR_HINT = 0x08
+ATTR_REAL = 0x10
+
+# Included from vm/faultcode.h
+FC_HWERR = 0x1
+FC_ALIGN = 0x2
+FC_OBJERR = 0x3
+FC_PROT = 0x4
+FC_NOMAP = 0x5
+FC_NOSUPPORT = 0x6
+def FC_MAKE_ERR(e): return (((e) << 8) | FC_OBJERR)
+
+def FC_CODE(fc): return ((fc) & 0xff)
+
+def FC_ERRNO(fc): return ((unsigned)(fc) >> 8)
+
+
+# Included from vm/hat.h
+from TYPES import *
+
+# Included from vm/page.h
+PAGE_HASHAVELEN = 4
+PAGE_HASHVPSHIFT = 6
+PG_EXCL = 0x0001
+PG_WAIT = 0x0002
+PG_PHYSCONTIG = 0x0004
+PG_MATCH_COLOR = 0x0008
+PG_NORELOC = 0x0010
+PG_FREE_LIST = 1
+PG_CACHE_LIST = 2
+PG_LIST_TAIL = 0
+PG_LIST_HEAD = 1
+def page_next_raw(PP): return page_nextn_raw((PP), 1)
+
+PAGE_IO_INUSE = 0x1
+PAGE_IO_WANTED = 0x2
+PGREL_NOTREL = 0x1
+PGREL_CLEAN = 0x2
+PGREL_MOD = 0x3
+P_FREE = 0x80
+P_NORELOC = 0x40
+def PP_SETAGED(pp): return ASSERT(PP_ISAGED(pp))
+
+HAT_FLAGS_RESV = 0xFF000000
+HAT_LOAD = 0x00
+HAT_LOAD_LOCK = 0x01
+HAT_LOAD_ADV = 0x04
+HAT_LOAD_CONTIG = 0x10
+HAT_LOAD_NOCONSIST = 0x20
+HAT_LOAD_SHARE = 0x40
+HAT_LOAD_REMAP = 0x80
+HAT_RELOAD_SHARE = 0x100
+HAT_PLAT_ATTR_MASK = 0xF00000
+HAT_PROT_MASK = 0x0F
+HAT_NOFAULT = 0x10
+HAT_NOSYNC = 0x20
+HAT_STRICTORDER = 0x0000
+HAT_UNORDERED_OK = 0x0100
+HAT_MERGING_OK = 0x0200
+HAT_LOADCACHING_OK = 0x0300
+HAT_STORECACHING_OK = 0x0400
+HAT_ORDER_MASK = 0x0700
+HAT_NEVERSWAP = 0x0000
+HAT_STRUCTURE_BE = 0x1000
+HAT_STRUCTURE_LE = 0x2000
+HAT_ENDIAN_MASK = 0x3000
+HAT_COW = 0x0001
+HAT_UNLOAD = 0x00
+HAT_UNLOAD_NOSYNC = 0x02
+HAT_UNLOAD_UNLOCK = 0x04
+HAT_UNLOAD_OTHER = 0x08
+HAT_UNLOAD_UNMAP = 0x10
+HAT_SYNC_DONTZERO = 0x00
+HAT_SYNC_ZERORM = 0x01
+HAT_SYNC_STOPON_REF = 0x02
+HAT_SYNC_STOPON_MOD = 0x04
+HAT_SYNC_STOPON_RM = (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
+HAT_DUP_ALL = 1
+HAT_DUP_COW = 2
+HAT_MAP = 0x00
+HAT_ADV_PGUNLOAD = 0x00
+HAT_FORCE_PGUNLOAD = 0x01
+P_MOD = 0x1
+P_REF = 0x2
+P_RO = 0x4
+def hat_ismod(pp): return (hat_page_getattr(pp, P_MOD))
+
+def hat_isref(pp): return (hat_page_getattr(pp, P_REF))
+
+def hat_isro(pp): return (hat_page_getattr(pp, P_RO))
+
+def hat_setmod(pp): return (hat_page_setattr(pp, P_MOD))
+
+def hat_setref(pp): return (hat_page_setattr(pp, P_REF))
+
+def hat_setrefmod(pp): return (hat_page_setattr(pp, P_REF|P_MOD))
+
+def hat_clrmod(pp): return (hat_page_clrattr(pp, P_MOD))
+
+def hat_clrref(pp): return (hat_page_clrattr(pp, P_REF))
+
+def hat_clrrefmod(pp): return (hat_page_clrattr(pp, P_REF|P_MOD))
+
+def hat_page_is_mapped(pp): return (hat_page_getshare(pp))
+
+HAT_DONTALLOC = 0
+HAT_ALLOC = 1
+HRM_SHIFT = 4
+HRM_BYTES = (1 << HRM_SHIFT)
+HRM_PAGES = ((HRM_BYTES * NBBY) / 2)
+HRM_PGPERBYTE = (NBBY/2)
+HRM_PGBYTEMASK = (HRM_PGPERBYTE-1)
+HRM_HASHSIZE = 0x200
+HRM_HASHMASK = (HRM_HASHSIZE - 1)
+HRM_BLIST_INCR = 0x200
+HRM_SWSMONID = 1
+SSL_NLEVELS = 4
+SSL_BFACTOR = 4
+SSL_LOG2BF = 2
+SEGP_ASYNC_FLUSH = 0x1
+SEGP_FORCE_WIRED = 0x2
+SEGP_SUCCESS = 0
+SEGP_FAIL = 1
+def seg_pages(seg): return \
+
+IE_NOMEM = -1
+AS_PAGLCK = 0x80
+AS_CLAIMGAP = 0x40
+AS_UNMAPWAIT = 0x20
+def AS_TYPE_64BIT(as): return \
+
+AS_LREP_LINKEDLIST = 0
+AS_LREP_SKIPLIST = 1
+AS_MUTATION_THRESH = 225
+AH_DIR = 0x1
+AH_LO = 0x0
+AH_HI = 0x1
+AH_CONTAIN = 0x2
+
+# Included from sys/ddidmareq.h
+DMA_UNIT_8 = 1
+DMA_UNIT_16 = 2
+DMA_UNIT_32 = 4
+DMALIM_VER0 = ((0x86000000) + 0)
+DDI_DMA_FORCE_PHYSICAL = 0x0100
+DMA_ATTR_V0 = 0
+DMA_ATTR_VERSION = DMA_ATTR_V0
+DDI_DMA_CALLBACK_RUNOUT = 0
+DDI_DMA_CALLBACK_DONE = 1
+DDI_DMA_WRITE = 0x0001
+DDI_DMA_READ = 0x0002
+DDI_DMA_RDWR = (DDI_DMA_READ | DDI_DMA_WRITE)
+DDI_DMA_REDZONE = 0x0004
+DDI_DMA_PARTIAL = 0x0008
+DDI_DMA_CONSISTENT = 0x0010
+DDI_DMA_EXCLUSIVE = 0x0020
+DDI_DMA_STREAMING = 0x0040
+DDI_DMA_SBUS_64BIT = 0x2000
+DDI_DMA_MAPPED = 0
+DDI_DMA_MAPOK = 0
+DDI_DMA_PARTIAL_MAP = 1
+DDI_DMA_DONE = 2
+DDI_DMA_NORESOURCES = -1
+DDI_DMA_NOMAPPING = -2
+DDI_DMA_TOOBIG = -3
+DDI_DMA_TOOSMALL = -4
+DDI_DMA_LOCKED = -5
+DDI_DMA_BADLIMITS = -6
+DDI_DMA_STALE = -7
+DDI_DMA_BADATTR = -8
+DDI_DMA_INUSE = -9
+DDI_DMA_SYNC_FORDEV = 0x0
+DDI_DMA_SYNC_FORCPU = 0x1
+DDI_DMA_SYNC_FORKERNEL = 0x2
+
+# Included from sys/ddimapreq.h
+
+# Included from sys/mman.h
+PROT_READ = 0x1
+PROT_WRITE = 0x2
+PROT_EXEC = 0x4
+PROT_USER = 0x8
+PROT_ZFOD = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
+PROT_ALL = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
+PROT_NONE = 0x0
+MAP_SHARED = 1
+MAP_PRIVATE = 2
+MAP_TYPE = 0xf
+MAP_FIXED = 0x10
+MAP_NORESERVE = 0x40
+MAP_ANON = 0x100
+MAP_ANONYMOUS = MAP_ANON
+MAP_RENAME = 0x20
+PROC_TEXT = (PROT_EXEC | PROT_READ)
+PROC_DATA = (PROT_READ | PROT_WRITE | PROT_EXEC)
+SHARED = 0x10
+PRIVATE = 0x20
+VALID_ATTR = (PROT_READ|PROT_WRITE|PROT_EXEC|SHARED|PRIVATE)
+PROT_EXCL = 0x20
+_MAP_LOW32 = 0x80
+_MAP_NEW = 0x80000000
+from TYPES import *
+MADV_NORMAL = 0
+MADV_RANDOM = 1
+MADV_SEQUENTIAL = 2
+MADV_WILLNEED = 3
+MADV_DONTNEED = 4
+MADV_FREE = 5
+MS_OLDSYNC = 0x0
+MS_SYNC = 0x4
+MS_ASYNC = 0x1
+MS_INVALIDATE = 0x2
+MC_SYNC = 1
+MC_LOCK = 2
+MC_UNLOCK = 3
+MC_ADVISE = 4
+MC_LOCKAS = 5
+MC_UNLOCKAS = 6
+MCL_CURRENT = 0x1
+MCL_FUTURE = 0x2
+DDI_MAP_VERSION = 0x0001
+DDI_MF_USER_MAPPING = 0x1
+DDI_MF_KERNEL_MAPPING = 0x2
+DDI_MF_DEVICE_MAPPING = 0x4
+DDI_ME_GENERIC = (-1)
+DDI_ME_UNIMPLEMENTED = (-2)
+DDI_ME_NORESOURCES = (-3)
+DDI_ME_UNSUPPORTED = (-4)
+DDI_ME_REGSPEC_RANGE = (-5)
+DDI_ME_RNUMBER_RANGE = (-6)
+DDI_ME_INVAL = (-7)
+
+# Included from sys/ddipropdefs.h
+def CELLS_1275_TO_BYTES(n): return ((n) * PROP_1275_CELL_SIZE)
+
+def BYTES_TO_1275_CELLS(n): return ((n) / PROP_1275_CELL_SIZE)
+
+PH_FROM_PROM = 0x01
+DDI_PROP_SUCCESS = 0
+DDI_PROP_NOT_FOUND = 1
+DDI_PROP_UNDEFINED = 2
+DDI_PROP_NO_MEMORY = 3
+DDI_PROP_INVAL_ARG = 4
+DDI_PROP_BUF_TOO_SMALL = 5
+DDI_PROP_CANNOT_DECODE = 6
+DDI_PROP_CANNOT_ENCODE = 7
+DDI_PROP_END_OF_DATA = 8
+DDI_PROP_FOUND_1275 = 255
+PROP_1275_INT_SIZE = 4
+DDI_PROP_DONTPASS = 0x0001
+DDI_PROP_CANSLEEP = 0x0002
+DDI_PROP_SYSTEM_DEF = 0x0004
+DDI_PROP_NOTPROM = 0x0008
+DDI_PROP_DONTSLEEP = 0x0010
+DDI_PROP_STACK_CREATE = 0x0020
+DDI_PROP_UNDEF_IT = 0x0040
+DDI_PROP_HW_DEF = 0x0080
+DDI_PROP_TYPE_INT = 0x0100
+DDI_PROP_TYPE_STRING = 0x0200
+DDI_PROP_TYPE_BYTE = 0x0400
+DDI_PROP_TYPE_COMPOSITE = 0x0800
+DDI_PROP_TYPE_ANY = (DDI_PROP_TYPE_INT	|	\
+					DDI_PROP_TYPE_STRING	|	\
+					DDI_PROP_TYPE_BYTE	|	\
+					DDI_PROP_TYPE_COMPOSITE)
+DDI_PROP_TYPE_MASK = (DDI_PROP_TYPE_INT	|	\
+					DDI_PROP_TYPE_STRING	|	\
+					DDI_PROP_TYPE_BYTE	|	\
+					DDI_PROP_TYPE_COMPOSITE)
+DDI_RELATIVE_ADDRESSING = "relative-addressing"
+DDI_GENERIC_ADDRESSING = "generic-addressing"
+
+# Included from sys/ddidevmap.h
+KMEM_PAGEABLE = 0x100
+KMEM_NON_PAGEABLE = 0x200
+UMEM_LOCKED = 0x400
+UMEM_TRASH = 0x800
+DEVMAP_OPS_REV = 1
+DEVMAP_DEFAULTS = 0x00
+DEVMAP_MAPPING_INVALID = 0x01
+DEVMAP_ALLOW_REMAP = 0x02
+DEVMAP_USE_PAGESIZE = 0x04
+DEVMAP_SETUP_FLAGS = \
+	(DEVMAP_MAPPING_INVALID | DEVMAP_ALLOW_REMAP | DEVMAP_USE_PAGESIZE)
+DEVMAP_SETUP_DONE = 0x100
+DEVMAP_LOCK_INITED = 0x200
+DEVMAP_FAULTING = 0x400
+DEVMAP_LOCKED = 0x800
+DEVMAP_FLAG_LARGE = 0x1000
+DDI_UMEM_SLEEP = 0x0
+DDI_UMEM_NOSLEEP = 0x01
+DDI_UMEM_PAGEABLE = 0x02
+DDI_UMEM_TRASH = 0x04
+DDI_UMEMLOCK_READ = 0x01
+DDI_UMEMLOCK_WRITE = 0x02
+
+# Included from sys/nexusdefs.h
+
+# Included from sys/nexusintr.h
+BUSO_REV = 4
+BUSO_REV_3 = 3
+BUSO_REV_4 = 4
+DEVO_REV = 3
+CB_REV = 1
+DDI_IDENTIFIED = (0)
+DDI_NOT_IDENTIFIED = (-1)
+DDI_PROBE_FAILURE = ENXIO
+DDI_PROBE_DONTCARE = 0
+DDI_PROBE_PARTIAL = 1
+DDI_PROBE_SUCCESS = 2
+MAPDEV_REV = 1
+from TYPES import *
+D_NEW = 0x00
+_D_OLD = 0x01
+D_TAPE = 0x08
+D_MTSAFE = 0x0020
+_D_QNEXTLESS = 0x0040
+_D_MTOCSHARED = 0x0080
+D_MTOCEXCL = 0x0800
+D_MTPUTSHARED = 0x1000
+D_MTPERQ = 0x2000
+D_MTQPAIR = 0x4000
+D_MTPERMOD = 0x6000
+D_MTOUTPERIM = 0x8000
+_D_MTCBSHARED = 0x10000
+D_MTINNER_MOD = (D_MTPUTSHARED|_D_MTOCSHARED|_D_MTCBSHARED)
+D_MTOUTER_MOD = (D_MTOCEXCL)
+D_MP = D_MTSAFE
+D_64BIT = 0x200
+D_SYNCSTR = 0x400
+D_DEVMAP = 0x100
+D_HOTPLUG = 0x4
+SNDZERO = 0x001
+SNDPIPE = 0x002
+RNORM = 0x000
+RMSGD = 0x001
+RMSGN = 0x002
+RMODEMASK = 0x003
+RPROTDAT = 0x004
+RPROTDIS = 0x008
+RPROTNORM = 0x010
+RPROTMASK = 0x01c
+RFLUSHMASK = 0x020
+RFLUSHPCPROT = 0x020
+RERRNORM = 0x001
+RERRNONPERSIST = 0x002
+RERRMASK = (RERRNORM|RERRNONPERSIST)
+WERRNORM = 0x004
+WERRNONPERSIST = 0x008
+WERRMASK = (WERRNORM|WERRNONPERSIST)
+FLUSHR = 0x01
+FLUSHW = 0x02
+FLUSHRW = 0x03
+FLUSHBAND = 0x04
+MAPINOK = 0x01
+NOMAPIN = 0x02
+REMAPOK = 0x04
+NOREMAP = 0x08
+S_INPUT = 0x0001
+S_HIPRI = 0x0002
+S_OUTPUT = 0x0004
+S_MSG = 0x0008
+S_ERROR = 0x0010
+S_HANGUP = 0x0020
+S_RDNORM = 0x0040
+S_WRNORM = S_OUTPUT
+S_RDBAND = 0x0080
+S_WRBAND = 0x0100
+S_BANDURG = 0x0200
+RS_HIPRI = 0x01
+STRUIO_POSTPONE = 0x08
+STRUIO_MAPIN = 0x10
+MSG_HIPRI = 0x01
+MSG_ANY = 0x02
+MSG_BAND = 0x04
+MSG_XPG4 = 0x08
+MSG_IPEEK = 0x10
+MSG_DISCARDTAIL = 0x20
+MSG_HOLDSIG = 0x40
+MSG_IGNERROR = 0x80
+MSG_DELAYERROR = 0x100
+MSG_IGNFLOW = 0x200
+MSG_NOMARK = 0x400
+MORECTL = 1
+MOREDATA = 2
+MUXID_ALL = (-1)
+ANYMARK = 0x01
+LASTMARK = 0x02
+_INFTIM = -1
+INFTIM = _INFTIM
diff --git a/lib-python/2.2/plat-sunos5/SUNAUDIODEV.py b/lib-python/2.2/plat-sunos5/SUNAUDIODEV.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/SUNAUDIODEV.py
@@ -0,0 +1,40 @@
+# Symbolic constants for use with sunaudiodev module
+# The names are the same as in audioio.h with the leading AUDIO_
+# removed.
+
+# Not all values are supported on all releases of SunOS.
+
+# Encoding types, for fields i_encoding and o_encoding
+
+ENCODING_NONE = 0			# no encoding assigned
+ENCODING_ULAW = 1			# u-law encoding
+ENCODING_ALAW = 2			# A-law encoding
+ENCODING_LINEAR = 3			# Linear PCM encoding
+
+# Gain ranges for i_gain, o_gain and monitor_gain
+
+MIN_GAIN = 0				# minimum gain value
+MAX_GAIN = 255				# maximum gain value
+
+# Balance values for i_balance and o_balance
+
+LEFT_BALANCE = 0			# left channel only
+MID_BALANCE = 32			# equal left/right channel
+RIGHT_BALANCE = 64			# right channel only
+BALANCE_SHIFT = 3
+
+# Port names for i_port and o_port
+
+PORT_A = 1
+PORT_B = 2
+PORT_C = 3
+PORT_D = 4
+
+SPEAKER = 0x01				# output to built-in speaker
+HEADPHONE = 0x02			# output to headphone jack
+LINE_OUT = 0x04				# output to line out
+
+MICROPHONE = 0x01			# input from microphone
+LINE_IN = 0x02				# input from line in
+CD = 0x04                               # input from on-board CD inputs
+INTERNAL_CD_IN = CD                     # input from internal CDROM
diff --git a/lib-python/2.2/plat-sunos5/TYPES.py b/lib-python/2.2/plat-sunos5/TYPES.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/TYPES.py
@@ -0,0 +1,314 @@
+# Generated by h2py from /usr/include/sys/types.h
+
+# Included from sys/isa_defs.h
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 8
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+_ALIGNMENT_REQUIRED = 1
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 4
+_DOUBLE_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 4
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 4
+_ALIGNMENT_REQUIRED = 0
+_CHAR_ALIGNMENT = 1
+_SHORT_ALIGNMENT = 2
+_INT_ALIGNMENT = 4
+_LONG_LONG_ALIGNMENT = 8
+_DOUBLE_ALIGNMENT = 8
+_ALIGNMENT_REQUIRED = 1
+_LONG_ALIGNMENT = 4
+_LONG_DOUBLE_ALIGNMENT = 8
+_POINTER_ALIGNMENT = 4
+_MAX_ALIGNMENT = 8
+_LONG_ALIGNMENT = 8
+_LONG_DOUBLE_ALIGNMENT = 16
+_POINTER_ALIGNMENT = 8
+_MAX_ALIGNMENT = 16
+
+# Included from sys/feature_tests.h
+_POSIX_C_SOURCE = 1
+_LARGEFILE64_SOURCE = 1
+_LARGEFILE_SOURCE = 1
+_FILE_OFFSET_BITS = 64
+_FILE_OFFSET_BITS = 32
+_POSIX_C_SOURCE = 199506L
+_POSIX_PTHREAD_SEMANTICS = 1
+_XOPEN_VERSION = 500
+_XOPEN_VERSION = 4
+_XOPEN_VERSION = 3
+
+# Included from sys/machtypes.h
+
+# Included from sys/inttypes.h
+
+# Included from sys/int_types.h
+
+# Included from sys/int_limits.h
+INT8_MAX = (127)
+INT16_MAX = (32767)
+INT32_MAX = (2147483647)
+INTMAX_MAX = INT32_MAX
+INT_LEAST8_MAX = INT8_MAX
+INT_LEAST16_MAX = INT16_MAX
+INT_LEAST32_MAX = INT32_MAX
+INT8_MIN = (-128)
+INT16_MIN = (-32767-1)
+INT32_MIN = (-2147483647-1)
+INTMAX_MIN = INT32_MIN
+INT_LEAST8_MIN = INT8_MIN
+INT_LEAST16_MIN = INT16_MIN
+INT_LEAST32_MIN = INT32_MIN
+
+# Included from sys/int_const.h
+def INT8_C(c): return (c)
+
+def INT16_C(c): return (c)
+
+def INT32_C(c): return (c)
+
+def INT64_C(c): return __CONCAT__(c,l)
+
+def INT64_C(c): return __CONCAT__(c,ll)
+
+def UINT8_C(c): return __CONCAT__(c,u)
+
+def UINT16_C(c): return __CONCAT__(c,u)
+
+def UINT32_C(c): return __CONCAT__(c,u)
+
+def UINT64_C(c): return __CONCAT__(c,ul)
+
+def UINT64_C(c): return __CONCAT__(c,ull)
+
+def INTMAX_C(c): return __CONCAT__(c,l)
+
+def UINTMAX_C(c): return __CONCAT__(c,ul)
+
+def INTMAX_C(c): return __CONCAT__(c,ll)
+
+def UINTMAX_C(c): return __CONCAT__(c,ull)
+
+def INTMAX_C(c): return (c)
+
+def UINTMAX_C(c): return (c)
+
+
+# Included from sys/int_fmtio.h
+PRId8 = "d"
+PRId16 = "d"
+PRId32 = "d"
+PRId64 = "ld"
+PRId64 = "lld"
+PRIdLEAST8 = "d"
+PRIdLEAST16 = "d"
+PRIdLEAST32 = "d"
+PRIdLEAST64 = "ld"
+PRIdLEAST64 = "lld"
+PRIi8 = "i"
+PRIi16 = "i"
+PRIi32 = "i"
+PRIi64 = "li"
+PRIi64 = "lli"
+PRIiLEAST8 = "i"
+PRIiLEAST16 = "i"
+PRIiLEAST32 = "i"
+PRIiLEAST64 = "li"
+PRIiLEAST64 = "lli"
+PRIo8 = "o"
+PRIo16 = "o"
+PRIo32 = "o"
+PRIo64 = "lo"
+PRIo64 = "llo"
+PRIoLEAST8 = "o"
+PRIoLEAST16 = "o"
+PRIoLEAST32 = "o"
+PRIoLEAST64 = "lo"
+PRIoLEAST64 = "llo"
+PRIx8 = "x"
+PRIx16 = "x"
+PRIx32 = "x"
+PRIx64 = "lx"
+PRIx64 = "llx"
+PRIxLEAST8 = "x"
+PRIxLEAST16 = "x"
+PRIxLEAST32 = "x"
+PRIxLEAST64 = "lx"
+PRIxLEAST64 = "llx"
+PRIX8 = "X"
+PRIX16 = "X"
+PRIX32 = "X"
+PRIX64 = "lX"
+PRIX64 = "llX"
+PRIXLEAST8 = "X"
+PRIXLEAST16 = "X"
+PRIXLEAST32 = "X"
+PRIXLEAST64 = "lX"
+PRIXLEAST64 = "llX"
+PRIu8 = "u"
+PRIu16 = "u"
+PRIu32 = "u"
+PRIu64 = "lu"
+PRIu64 = "llu"
+PRIuLEAST8 = "u"
+PRIuLEAST16 = "u"
+PRIuLEAST32 = "u"
+PRIuLEAST64 = "lu"
+PRIuLEAST64 = "llu"
+SCNd16 = "hd"
+SCNd32 = "d"
+SCNd64 = "ld"
+SCNd64 = "lld"
+SCNi16 = "hi"
+SCNi32 = "i"
+SCNi64 = "li"
+SCNi64 = "lli"
+SCNo16 = "ho"
+SCNo32 = "o"
+SCNo64 = "lo"
+SCNo64 = "llo"
+SCNu16 = "hu"
+SCNu32 = "u"
+SCNu64 = "lu"
+SCNu64 = "llu"
+SCNx16 = "hx"
+SCNx32 = "x"
+SCNx64 = "lx"
+SCNx64 = "llx"
+PRIdMAX = "ld"
+PRIoMAX = "lo"
+PRIxMAX = "lx"
+PRIuMAX = "lu"
+PRIdMAX = "lld"
+PRIoMAX = "llo"
+PRIxMAX = "llx"
+PRIuMAX = "llu"
+PRIdMAX = "d"
+PRIoMAX = "o"
+PRIxMAX = "x"
+PRIuMAX = "u"
+SCNiMAX = "li"
+SCNdMAX = "ld"
+SCNoMAX = "lo"
+SCNxMAX = "lx"
+SCNiMAX = "lli"
+SCNdMAX = "lld"
+SCNoMAX = "llo"
+SCNxMAX = "llx"
+SCNiMAX = "i"
+SCNdMAX = "d"
+SCNoMAX = "o"
+SCNxMAX = "x"
+
+# Included from sys/types32.h
+SHRT_MIN = (-32768)
+SHRT_MAX = 32767
+USHRT_MAX = 65535
+INT_MIN = (-2147483647-1)
+INT_MAX = 2147483647
+LONG_MIN = (-9223372036854775807L-1L)
+LONG_MAX = 9223372036854775807L
+LONG_MIN = (-2147483647L-1L)
+LONG_MAX = 2147483647L
+P_MYID = (-1)
+
+# Included from sys/select.h
+
+# Included from sys/time.h
+TIME32_MAX = INT32_MAX
+TIME32_MIN = INT32_MIN
+def TIMEVAL_OVERFLOW(tv): return \
+
+from TYPES import *
+DST_NONE = 0
+DST_USA = 1
+DST_AUST = 2
+DST_WET = 3
+DST_MET = 4
+DST_EET = 5
+DST_CAN = 6
+DST_GB = 7
+DST_RUM = 8
+DST_TUR = 9
+DST_AUSTALT = 10
+ITIMER_REAL = 0
+ITIMER_VIRTUAL = 1
+ITIMER_PROF = 2
+ITIMER_REALPROF = 3
+def ITIMERVAL_OVERFLOW(itv): return \
+
+SEC = 1
+MILLISEC = 1000
+MICROSEC = 1000000
+NANOSEC = 1000000000
+
+# Included from sys/time_impl.h
+def TIMESPEC_OVERFLOW(ts): return \
+
+def ITIMERSPEC_OVERFLOW(it): return \
+
+__CLOCK_REALTIME0 = 0
+CLOCK_VIRTUAL = 1
+CLOCK_PROF = 2
+__CLOCK_REALTIME3 = 3
+CLOCK_HIGHRES = 4
+CLOCK_MAX = 5
+CLOCK_REALTIME = __CLOCK_REALTIME3
+CLOCK_REALTIME = __CLOCK_REALTIME0
+TIMER_RELTIME = 0x0
+TIMER_ABSTIME = 0x1
+
+# Included from sys/mutex.h
+from TYPES import *
+def MUTEX_HELD(x): return (mutex_owned(x))
+
+def TICK_TO_SEC(tick): return ((tick) / hz)
+
+def SEC_TO_TICK(sec): return ((sec) * hz)
+
+def TICK_TO_MSEC(tick): return \
+
+def MSEC_TO_TICK(msec): return \
+
+def MSEC_TO_TICK_ROUNDUP(msec): return \
+
+def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
+
+def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
+
+def USEC_TO_TICK_ROUNDUP(usec): return \
+
+def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
+
+def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
+
+def NSEC_TO_TICK_ROUNDUP(nsec): return \
+
+def TIMEVAL_TO_TICK(tvp): return \
+
+def TIMESTRUC_TO_TICK(tsp): return \
+
+
+# Included from time.h
+from TYPES import *
+
+# Included from iso/time_iso.h
+NULL = 0L
+NULL = 0
+CLOCKS_PER_SEC = 1000000
+FD_SETSIZE = 65536
+FD_SETSIZE = 1024
+_NBBY = 8
+NBBY = _NBBY
+def FD_ZERO(p): return bzero((p), sizeof (*(p)))
+
diff --git a/lib-python/2.2/plat-sunos5/regen b/lib-python/2.2/plat-sunos5/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-sunos5/regen
@@ -0,0 +1,9 @@
+#! /bin/sh
+case `uname -sr` in
+'SunOS 5.'*)	;;
+*)	echo Probably not on a Solaris 2 system 1>&2
+	exit 1;;
+esac
+set -v
+h2py -i '(u_long)' /usr/include/sys/types.h /usr/include/netinet/in.h /usr/include/sys/stropts.h /usr/include/dlfcn.h
+
diff --git a/lib-python/2.2/plat-unixware7/IN.py b/lib-python/2.2/plat-unixware7/IN.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-unixware7/IN.py
@@ -0,0 +1,836 @@
+# Generated by h2py from /usr/include/netinet/in.h
+
+# Included from netinet/in_f.h
+def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
+
+def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_ANY = 0x00000000
+INADDR_LOOPBACK = 0x7f000001
+INADDR_BROADCAST = 0xffffffff
+INADDR_NONE = 0xffffffff
+IN_LOOPBACKNET = 127
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_ALLRTRS_GROUP = 0xe0000002
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+
+# Included from netinet/in6.h
+
+# Included from sys/types.h
+def quad_low(x): return x.val[0]
+
+ADT_EMASKSIZE = 8
+SHRT_MIN = -32768
+SHRT_MAX = 32767
+INT_MIN = (-2147483647-1)
+INT_MAX = 2147483647
+LONG_MIN = (-2147483647-1)
+LONG_MAX = 2147483647
+OFF32_MAX = LONG_MAX
+ISTAT_ASSERTED = 0
+ISTAT_ASSUMED = 1
+ISTAT_NONE = 2
+OFF_MAX = OFF32_MAX
+CLOCK_MAX = LONG_MAX
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/select.h
+FD_SETSIZE = 4096
+NBBY = 8
+NULL = 0
+
+# Included from sys/bitypes.h
+
+# Included from netinet/in6_f.h
+def IN6_IS_ADDR_UNSPECIFIED(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0)
+
+def IN6_SET_ADDR_UNSPECIFIED(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0)
+
+def IN6_IS_ADDR_ANY(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0)
+
+def IN6_SET_ADDR_ANY(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0)
+
+def IN6_IS_ADDR_LOOPBACK(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0x01000000)
+
+def IN6_SET_ADDR_LOOPBACK(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0x01000000)
+
+IN6_MC_FLAG_PERMANENT = 0x0
+IN6_MC_FLAG_TRANSIENT = 0x1
+IN6_MC_SCOPE_NODELOCAL = 0x1
+IN6_MC_SCOPE_LINKLOCAL = 0x2
+IN6_MC_SCOPE_SITELOCAL = 0x5
+IN6_MC_SCOPE_ORGLOCAL = 0x8
+IN6_MC_SCOPE_GLOBAL = 0xE
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+
+# Included from sys/convsa.h
+__NETLIB_UW211_SVR4 = 1
+__NETLIB_UW211_XPG4 = 2
+__NETLIB_GEMINI_SVR4 = 3
+__NETLIB_GEMINI_XPG4 = 4
+__NETLIB_FP1_SVR4 = 5
+__NETLIB_FP1_XPG4 = 6
+__NETLIB_BASE_VERSION__ = __NETLIB_UW211_SVR4
+__NETLIB_VERSION__ = __NETLIB_FP1_SVR4
+__NETLIB_VERSION__ = __NETLIB_FP1_XPG4
+__NETLIB_VERSION__ = __NETLIB_GEMINI_SVR4
+__NETLIB_VERSION__ = __NETLIB_GEMINI_XPG4
+__NETLIB_VERSION__ = __NETLIB_UW211_SVR4
+__NETLIB_VERSION__ = __NETLIB_UW211_XPG4
+__NETLIB_VERSION__ = __NETLIB_FP1_XPG4
+
+# Included from sys/byteorder.h
+LITTLE_ENDIAN = 1234
+BIG_ENDIAN = 4321
+PDP_ENDIAN = 3412
+
+# Included from sys/byteorder_f.h
+BYTE_ORDER = LITTLE_ENDIAN
+def htonl(hl): return __htonl(hl)
+
+def ntohl(nl): return __ntohl(nl)
+
+def htons(hs): return __htons(hs)
+
+def ntohs(ns): return __ntohs(ns)
+
+def ntohl(x): return (x)
+
+def ntohs(x): return (x)
+
+def htonl(x): return (x)
+
+def htons(x): return (x)
+
+def __NETLIB_VERSION_IS_XPG4(version): return (((version) % 2) == 0)
+
+def __NETLIB_VERSION_HAS_SALEN(version): return ((version) >= __NETLIB_GEMINI_SVR4)
+
+def __NETLIB_VERSION_IS_IKS(version): return ((version) >= __NETLIB_FP1_SVR4)
+
+def SA_FAMILY_GET(sa): return \
+
+INET6_ADDRSTRLEN = 46
+IPV6_UNICAST_HOPS = 3
+IPV6_ADDRFORM = 24
+IPV6_MULTICAST_HOPS = 25
+IPV6_MULTICAST_IF = 26
+IPV6_MULTICAST_LOOP = 27
+IPV6_ADD_MEMBERSHIP = 28
+IPV6_DROP_MEMBERSHIP = 29
+
+# Included from sys/insrem.h
+def LIST_INIT(head): return \
+
+def LIST_INIT(head): return \
+
+def remque(a): return REMQUE(a)
+
+
+# Included from sys/socket.h
+
+# Included from sys/uio.h
+SHUT_RD = 0
+SHUT_WR = 1
+SHUT_RDWR = 2
+
+# Included from sys/netconfig.h
+
+# Included from sys/cdefs.h
+def __P(protos): return protos		 
+
+def __STRING(x): return #x
+
+def __P(protos): return ()		 
+
+def __STRING(x): return "x"
+
+NETCONFIG = "/etc/netconfig"
+NETPATH = "NETPATH"
+NC_TPI_CLTS = 1
+NC_TPI_COTS = 2
+NC_TPI_COTS_ORD = 3
+NC_TPI_RAW = 4
+NC_NOFLAG = 00
+NC_VISIBLE = 01
+NC_BROADCAST = 02
+NC_NOPROTOFMLY = "-"
+NC_LOOPBACK = "loopback"
+NC_INET = "inet"
+NC_INET6 = "inet6"
+NC_IMPLINK = "implink"
+NC_PUP = "pup"
+NC_CHAOS = "chaos"
+NC_NS = "ns"
+NC_NBS = "nbs"
+NC_ECMA = "ecma"
+NC_DATAKIT = "datakit"
+NC_CCITT = "ccitt"
+NC_SNA = "sna"
+NC_DECNET = "decnet"
+NC_DLI = "dli"
+NC_LAT = "lat"
+NC_HYLINK = "hylink"
+NC_APPLETALK = "appletalk"
+NC_NIT = "nit"
+NC_IEEE802 = "ieee802"
+NC_OSI = "osi"
+NC_X25 = "x25"
+NC_OSINET = "osinet"
+NC_GOSIP = "gosip"
+NC_NETWARE = "netware"
+NC_NOPROTO = "-"
+NC_TCP = "tcp"
+NC_UDP = "udp"
+NC_ICMP = "icmp"
+NC_IPX = "ipx"
+NC_SPX = "spx"
+NC_TPI_CLTS = 1
+NC_TPI_COTS = 2
+NC_TPI_COTS_ORD = 3
+NC_TPI_RAW = 4
+SOCK_STREAM = 2
+SOCK_DGRAM = 1
+SOCK_RAW = 4
+SOCK_RDM = 5
+SOCK_SEQPACKET = 6
+SO_DEBUG = 0x0001
+SO_ACCEPTCONN = 0x0002
+SO_REUSEADDR = 0x0004
+SO_KEEPALIVE = 0x0008
+SO_DONTROUTE = 0x0010
+SO_BROADCAST = 0x0020
+SO_USELOOPBACK = 0x0040
+SO_LINGER = 0x0080
+SO_OOBINLINE = 0x0100
+SO_ORDREL = 0x0200
+SO_IMASOCKET = 0x0400
+SO_MGMT = 0x0800
+SO_REUSEPORT = 0x1000
+SO_LISTENING = 0x2000
+SO_RDWR = 0x4000
+SO_SEMA = 0x8000
+SO_DONTLINGER = (~SO_LINGER)
+SO_SNDBUF = 0x1001
+SO_RCVBUF = 0x1002
+SO_SNDLOWAT = 0x1003
+SO_RCVLOWAT = 0x1004
+SO_SNDTIMEO = 0x1005
+SO_RCVTIMEO = 0x1006
+SO_ERROR = 0x1007
+SO_TYPE = 0x1008
+SO_PROTOTYPE = 0x1009
+SO_ALLRAW = 0x100a
+SOL_SOCKET = 0xffff
+AF_UNSPEC = 0
+AF_UNIX = 1
+AF_LOCAL = AF_UNIX
+AF_INET = 2
+AF_IMPLINK = 3
+AF_PUP = 4
+AF_CHAOS = 5
+AF_NS = 6
+AF_NBS = 7
+AF_ECMA = 8
+AF_DATAKIT = 9
+AF_CCITT = 10
+AF_SNA = 11
+AF_DECnet = 12
+AF_DLI = 13
+AF_LAT = 14
+AF_HYLINK = 15
+AF_APPLETALK = 16
+AF_NIT = 17
+AF_802 = 18
+AF_OSI = 19
+AF_ISO = AF_OSI
+AF_X25 = 20
+AF_OSINET = 21
+AF_GOSIP = 22
+AF_YNET = 23
+AF_ROUTE = 24
+AF_LINK = 25
+pseudo_AF_XTP = 26
+AF_INET6 = 27
+AF_MAX = 27
+AF_INET_BSWAP = 0x0200
+PF_UNSPEC = AF_UNSPEC
+PF_UNIX = AF_UNIX
+PF_LOCAL = AF_LOCAL
+PF_INET = AF_INET
+PF_IMPLINK = AF_IMPLINK
+PF_PUP = AF_PUP
+PF_CHAOS = AF_CHAOS
+PF_NS = AF_NS
+PF_NBS = AF_NBS
+PF_ECMA = AF_ECMA
+PF_DATAKIT = AF_DATAKIT
+PF_CCITT = AF_CCITT
+PF_SNA = AF_SNA
+PF_DECnet = AF_DECnet
+PF_DLI = AF_DLI
+PF_LAT = AF_LAT
+PF_HYLINK = AF_HYLINK
+PF_APPLETALK = AF_APPLETALK
+PF_NIT = AF_NIT
+PF_802 = AF_802
+PF_OSI = AF_OSI
+PF_ISO = PF_OSI
+PF_X25 = AF_X25
+PF_OSINET = AF_OSINET
+PF_GOSIP = AF_GOSIP
+PF_YNET = AF_YNET
+PF_ROUTE = AF_ROUTE
+PF_LINK = AF_LINK
+pseudo_PF_XTP = pseudo_AF_XTP
+PF_INET6 = AF_INET6
+PF_MAX = AF_MAX
+SOMAXCONN = 5
+SCM_RIGHTS = 1
+MSG_OOB = 0x1
+MSG_PEEK = 0x2
+MSG_DONTROUTE = 0x4
+MSG_CTRUNC = 0x8
+MSG_TRUNC = 0x10
+MSG_EOR = 0x30
+MSG_WAITALL = 0x20
+MSG_MAXIOVLEN = 16
+def OPTLEN(x): return ((((x) + sizeof(long) - 1) / sizeof(long)) * sizeof(long))
+
+GIARG = 0x1
+CONTI = 0x2
+GITAB = 0x4
+SOCKETSYS = 88
+SOCKETSYS = 83
+SO_ACCEPT = 1
+SO_BIND = 2
+SO_CONNECT = 3
+SO_GETPEERNAME = 4
+SO_GETSOCKNAME = 5
+SO_GETSOCKOPT = 6
+SO_LISTEN = 7
+SO_RECV = 8
+SO_RECVFROM = 9
+SO_SEND = 10
+SO_SENDTO = 11
+SO_SETSOCKOPT = 12
+SO_SHUTDOWN = 13
+SO_SOCKET = 14
+SO_SOCKPOLL = 15
+SO_GETIPDOMAIN = 16
+SO_SETIPDOMAIN = 17
+SO_ADJTIME = 18
+
+# Included from sys/stream.h
+
+# Included from sys/cred.h
+
+# Included from sys/ksynch.h
+
+# Included from sys/dl.h
+SIGNBIT = 0x80000000
+
+# Included from sys/ipl.h
+
+# Included from sys/disp_p.h
+
+# Included from sys/trap.h
+DIVERR = 0
+SGLSTP = 1
+NMIFLT = 2
+BPTFLT = 3
+INTOFLT = 4
+BOUNDFLT = 5
+INVOPFLT = 6
+NOEXTFLT = 7
+DBLFLT = 8
+EXTOVRFLT = 9
+INVTSSFLT = 10
+SEGNPFLT = 11
+STKFLT = 12
+GPFLT = 13
+PGFLT = 14
+EXTERRFLT = 16
+ALIGNFLT = 17
+MCEFLT = 18
+USERFLT = 0x100
+TRP_PREEMPT = 0x200
+TRP_UNUSED = 0x201
+PF_ERR_MASK = 0x01
+PF_ERR_PAGE = 0
+PF_ERR_PROT = 1
+PF_ERR_WRITE = 2
+PF_ERR_USER = 4
+EVT_STRSCHED = 0x04
+EVT_GLOBCALLOUT = 0x08
+EVT_LCLCALLOUT = 0x10
+EVT_SOFTINTMASK = (EVT_STRSCHED|EVT_GLOBCALLOUT|EVT_LCLCALLOUT)
+PL0 = 0
+PL1 = 1
+PL2 = 2
+PL3 = 3
+PL4 = 4
+PL5 = 5
+PL6 = 6
+PLHI = 8
+PL7 = PLHI
+PLBASE = PL0
+PLTIMEOUT = PL1
+PLDISK = PL5
+PLSTR = PL6
+PLTTY = PLSTR
+PLMIN = PL0
+PLMIN = PL1
+MAX_INTR_LEVELS = 10
+MAX_INTR_NESTING = 50
+STRSCHED = EVT_STRSCHED
+GLOBALSOFTINT = EVT_GLOBCALLOUT
+LOCALSOFTINT = EVT_LCLCALLOUT
+
+# Included from sys/ksynch_p.h
+def GET_TIME(timep): return \
+
+LK_THRESHOLD = 500000
+
+# Included from sys/list.h
+
+# Included from sys/listasm.h
+def remque_null(e): return \
+
+def LS_ISEMPTY(listp): return \
+
+LK_BASIC = 0x1
+LK_SLEEP = 0x2
+LK_NOSTATS = 0x4
+def CYCLES_SINCE(c): return CYCLES_BETWEEN((c), CYCLES())
+
+LSB_NLKDS = 92
+EVT_RUNRUN = 0x01
+EVT_KPRUNRUN = 0x02
+SP_UNLOCKED = 0
+SP_LOCKED = 1
+KS_LOCKTEST = 0x01
+KS_MPSTATS = 0x02
+KS_DEINITED = 0x04
+KS_NVLTTRACE = 0x08
+RWS_READ = (ord('r'))
+RWS_WRITE = (ord('w'))
+RWS_UNLOCKED = (ord('u'))
+RWS_BUSY = (ord('b'))
+def SLEEP_LOCKOWNED(lkp): return \
+
+def SLEEP_DISOWN(lkp): return \
+
+KS_NOPRMPT = 0x00000001
+__KS_LOCKTEST = KS_LOCKTEST
+__KS_LOCKTEST = 0
+__KS_MPSTATS = KS_MPSTATS
+__KS_MPSTATS = 0
+__KS_NVLTTRACE = KS_NVLTTRACE
+__KS_NVLTTRACE = 0
+KSFLAGS = (__KS_LOCKTEST|__KS_MPSTATS|__KS_NVLTTRACE)
+KSVUNIPROC = 1
+KSVMPDEBUG = 2
+KSVMPNODEBUG = 3
+KSVFLAG = KSVUNIPROC
+KSVFLAG = KSVMPDEBUG
+KSVFLAG = KSVMPNODEBUG
+
+# Included from sys/ksinline.h
+_A_SP_LOCKED = 1
+_A_SP_UNLOCKED = 0
+_A_INVPL = -1
+def _ATOMIC_INT_INCR(atomic_intp): return \
+
+def _ATOMIC_INT_DECR(atomic_intp): return \
+
+def ATOMIC_INT_READ(atomic_intp): return _ATOMIC_INT_READ(atomic_intp)
+
+def ATOMIC_INT_INCR(atomic_intp): return _ATOMIC_INT_INCR(atomic_intp)
+
+def ATOMIC_INT_DECR(atomic_intp): return _ATOMIC_INT_DECR(atomic_intp)
+
+def FSPIN_INIT(lp): return  
+
+def FSPIN_LOCK(l): return DISABLE()
+
+def FSPIN_TRYLOCK(l): return (DISABLE(), B_TRUE)
+
+def FSPIN_UNLOCK(l): return ENABLE()
+
+def LOCK_DEINIT(lp): return  
+
+def LOCK_DEALLOC(lp): return  
+
+def LOCK_OWNED(lp): return (B_TRUE)
+
+def RW_DEINIT(lp): return  
+
+def RW_DEALLOC(lp): return  
+
+def RW_OWNED(lp): return (B_TRUE)
+
+def IS_LOCKED(lockp): return B_FALSE
+
+def LOCK_PLMIN(lockp): return \
+
+def TRYLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
+
+def LOCK_SH_PLMIN(lockp): return LOCK_PLMIN(lockp)
+
+def RW_RDLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
+
+def RW_WRLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
+
+def LOCK_DEINIT(l): return  
+
+def LOCK_PLMIN(lockp): return LOCK((lockp), PLMIN)
+
+def TRYLOCK_PLMIN(lockp): return TRYLOCK((lockp), PLMIN)
+
+def LOCK_SH_PLMIN(lockp): return LOCK_SH((lockp), PLMIN)
+
+def RW_RDLOCK_PLMIN(lockp): return RW_RDLOCK((lockp), PLMIN)
+
+def RW_WRLOCK_PLMIN(lockp): return RW_WRLOCK((lockp), PLMIN)
+
+def FSPIN_IS_LOCKED(fsp): return B_FALSE
+
+def SPIN_IS_LOCKED(lockp): return B_FALSE
+
+def FSPIN_OWNED(l): return (B_TRUE)
+
+CR_MLDREAL = 0x00000001
+CR_RDUMP = 0x00000002
+def crhold(credp): return crholdn((credp), 1)	 
+
+def crfree(credp): return crfreen((credp), 1)	 
+
+
+# Included from sys/strmdep.h
+def str_aligned(X): return (((uint)(X) & (sizeof(int) - 1)) == 0)
+
+
+# Included from sys/engine.h
+
+# Included from sys/clock.h
+
+# Included from sys/time.h
+DST_NONE = 0
+DST_USA = 1
+DST_AUST = 2
+DST_WET = 3
+DST_MET = 4
+DST_EET = 5
+DST_CAN = 6
+DST_GB = 7
+DST_RUM = 8
+DST_TUR = 9
+DST_AUSTALT = 10
+ITIMER_REAL = 0
+ITIMER_VIRTUAL = 1
+ITIMER_PROF = 2
+FD_SETSIZE = 4096
+FD_NBBY = 8
+
+# Included from time.h
+NULL = 0
+CLOCKS_PER_SEC = 1000000
+
+# Included from sys/clock_p.h
+CGBITS = 4
+IDBITS = 28
+def toid_unpackcg(idval): return (((idval) >> IDBITS) & 0xf)
+
+def toid_unpackid(idval): return ((idval) & 0xfffffff)
+
+def toid_unpackcg(idval): return 0
+
+def toid_unpackid(idval): return (idval)
+
+NCALLOUT_HASH = 1024
+CALLOUT_MAXVAL = 0x7fffffff
+TO_PERIODIC = 0x80000000
+TO_IMMEDIATE = 0x80000000
+SEC = 1
+MILLISEC = 1000
+MICROSEC = 1000000
+NANOSEC = 1000000000
+SECHR = (60*60)
+SECDAY = (24*SECHR)
+SECYR = (365*SECDAY)
+def TIME_OWNED_R(cgnum): return (B_TRUE)
+
+LOOPSECONDS = 1800
+LOOPMICROSECONDS = (LOOPSECONDS * MICROSEC)
+def TICKS_SINCE(t): return TICKS_BETWEEN(t, TICKS())
+
+MAXRQS = 2
+E_OFFLINE = 0x01
+E_BAD = 0x02
+E_SHUTDOWN = 0x04
+E_DRIVER = 0x08
+E_DEFAULTKEEP = 0x100
+E_DRIVERBOUND = 0x200
+E_EXCLUSIVE = 0x400
+E_CGLEADER = 0x800
+E_NOWAY = (E_OFFLINE|E_BAD|E_SHUTDOWN)
+E_BOUND = 0x01
+E_GLOBAL = 0x00
+E_UNAVAIL = -1
+ENGINE_ONLINE = 1
+def PROCESSOR_UNMAP(e): return ((e) - engine)
+
+BOOTENG = 0
+QMOVED = 0x0001
+QWANTR = 0x0002
+QWANTW = 0x0004
+QFULL = 0x0008
+QREADR = 0x0010
+QUSE = 0x0020
+QNOENB = 0x0040
+QUP = 0x0080
+QBACK = 0x0100
+QINTER = 0x0200
+QPROCSON = 0x0400
+QTOENAB = 0x0800
+QFREEZE = 0x1000
+QBOUND = 0x2000
+QDEFCNT = 0x4000
+QENAB = 0x0001
+QSVCBUSY = 0x0002
+STRM_PUTCNT_TABLES = 31
+def STRM_MYENG_PUTCNT(sdp): return STRM_PUTCNT(l.eng_num, sdp)
+
+QB_FULL = 0x01
+QB_WANTW = 0x02
+QB_BACK = 0x04
+NBAND = 256
+DB_WASDUPED = 0x1
+DB_2PIECE = 0x2
+STRLEAKHASHSZ = 1021
+MSGMARK = 0x01
+MSGNOLOOP = 0x02
+MSGDELIM = 0x04
+MSGNOGET = 0x08
+MSGLOG = 0x10
+M_DATA = 0x00
+M_PROTO = 0x01
+M_BREAK = 0x08
+M_PASSFP = 0x09
+M_SIG = 0x0b
+M_DELAY = 0x0c
+M_CTL = 0x0d
+M_IOCTL = 0x0e
+M_SETOPTS = 0x10
+M_RSE = 0x11
+M_TRAIL = 0x12
+M_IOCACK = 0x81
+M_IOCNAK = 0x82
+M_PCPROTO = 0x83
+M_PCSIG = 0x84
+M_READ = 0x85
+M_FLUSH = 0x86
+M_STOP = 0x87
+M_START = 0x88
+M_HANGUP = 0x89
+M_ERROR = 0x8a
+M_COPYIN = 0x8b
+M_COPYOUT = 0x8c
+M_IOCDATA = 0x8d
+M_PCRSE = 0x8e
+M_STOPI = 0x8f
+M_STARTI = 0x90
+M_PCCTL = 0x91
+M_PCSETOPTS = 0x92
+QNORM = 0x00
+QPCTL = 0x80
+STRCANON = 0x01
+RECOPY = 0x02
+SO_ALL = 0x003f
+SO_READOPT = 0x0001
+SO_WROFF = 0x0002
+SO_MINPSZ = 0x0004
+SO_MAXPSZ = 0x0008
+SO_HIWAT = 0x0010
+SO_LOWAT = 0x0020
+SO_MREADON = 0x0040
+SO_MREADOFF = 0x0080
+SO_NDELON = 0x0100
+SO_NDELOFF = 0x0200
+SO_ISTTY = 0x0400
+SO_ISNTTY = 0x0800
+SO_TOSTOP = 0x1000
+SO_TONSTOP = 0x2000
+SO_BAND = 0x4000
+SO_DELIM = 0x8000
+SO_NODELIM = 0x010000
+SO_STRHOLD = 0x020000
+SO_LOOP = 0x040000
+DRVOPEN = 0x0
+MODOPEN = 0x1
+CLONEOPEN = 0x2
+OPENFAIL = -1
+BPRI_LO = 1
+BPRI_MED = 2
+BPRI_HI = 3
+INFPSZ = -1
+FLUSHALL = 1
+FLUSHDATA = 0
+STRHIGH = 5120
+STRLOW = 1024
+MAXIOCBSZ = 1024
+def straln(a): return (caddr_t)((long)(a) & ~(sizeof(int)-1))
+
+IPM_ID = 200
+ICMPM_ID = 201
+TCPM_ID = 202
+UDPM_ID = 203
+ARPM_ID = 204
+APPM_ID = 205
+RIPM_ID = 206
+PPPM_ID = 207
+AHDLCM_ID = 208
+MHDLCRIPM_ID = 209
+HDLCM_ID = 210
+PPCID_ID = 211
+IGMPM_ID = 212
+IPIPM_ID = 213
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPIP = 4
+IPPROTO_TCP = 6
+IPPROTO_EGP = 8
+IPPROTO_PUP = 12
+IPPROTO_UDP = 17
+IPPROTO_IDP = 22
+IPPROTO_TP = 29
+IPPROTO_IPV6 = 41
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_HELLO = 63
+IPPROTO_ND = 77
+IPPROTO_EON = 80
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPORT_ECHO = 7
+IPPORT_DISCARD = 9
+IPPORT_SYSTAT = 11
+IPPORT_DAYTIME = 13
+IPPORT_NETSTAT = 15
+IPPORT_FTP = 21
+IPPORT_TELNET = 23
+IPPORT_SMTP = 25
+IPPORT_TIMESERVER = 37
+IPPORT_NAMESERVER = 42
+IPPORT_WHOIS = 43
+IPPORT_MTP = 57
+IPPORT_TFTP = 69
+IPPORT_RJE = 77
+IPPORT_FINGER = 79
+IPPORT_TTYLINK = 87
+IPPORT_SUPDUP = 95
+IPPORT_EXECSERVER = 512
+IPPORT_LOGINSERVER = 513
+IPPORT_CMDSERVER = 514
+IPPORT_EFSSERVER = 520
+IPPORT_BIFFUDP = 512
+IPPORT_WHOSERVER = 513
+IPPORT_ROUTESERVER = 520
+IPPORT_RESERVED = 1024
+IPPORT_USERRESERVED = 65535
+IPPORT_RESERVED_LOW = 512
+IPPORT_RESERVED_HIGH = 1023
+IPPORT_USERRESERVED_LOW = 32768
+IPPORT_USERRESERVED_HIGH = 65535
+INET_ADDRSTRLEN = 16
+IP_OPTIONS = 1
+IP_TOS = 2
+IP_TTL = 3
+IP_HDRINCL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_LOOP = 10
+IP_ADD_MEMBERSHIP = 11
+IP_DROP_MEMBERSHIP = 12
+IP_BROADCAST_IF = 14
+IP_RECVIFINDEX = 15
+IP_MULTICAST_TTL = 16
+MRT_INIT = 17
+MRT_DONE = 18
+MRT_ADD_VIF = 19
+MRT_DEL_VIF = 20
+MRT_ADD_MFC = 21
+MRT_DEL_MFC = 22
+MRT_VERSION = 23
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MAX_MEMBERSHIPS = 20
+INADDR_UNSPEC_GROUP = 0xe0000000
+INADDR_ALLHOSTS_GROUP = 0xe0000001
+INADDR_ALLRTRS_GROUP = 0xe0000002
+INADDR_MAX_LOCAL_GROUP = 0xe00000ff
+
+# Included from netinet/in_mp.h
+
+# Included from netinet/in_mp_ddi.h
+
+# Included from sys/inline.h
+IP_HIER_BASE = (20)
+def ASSERT_LOCK(x): return  
+
+def ASSERT_WRLOCK(x): return  
+
+def ASSERT_UNLOCK(x): return  
+
+def CANPUT(q): return canput((q))
+
+def CANPUTNEXT(q): return canputnext((q))
+
+INET_DEBUG = 1
diff --git a/lib-python/2.2/plat-unixware7/STROPTS.py b/lib-python/2.2/plat-unixware7/STROPTS.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/plat-unixware7/STROPTS.py
@@ -0,0 +1,328 @@
+# Generated by h2py from /usr/include/sys/stropts.h
+
+# Included from sys/types.h
+def quad_low(x): return x.val[0]
+
+ADT_EMASKSIZE = 8
+SHRT_MIN = -32768
+SHRT_MAX = 32767
+INT_MIN = (-2147483647-1)
+INT_MAX = 2147483647
+LONG_MIN = (-2147483647-1)
+LONG_MAX = 2147483647
+OFF32_MAX = LONG_MAX
+ISTAT_ASSERTED = 0
+ISTAT_ASSUMED = 1
+ISTAT_NONE = 2
+OFF_MAX = OFF32_MAX
+CLOCK_MAX = LONG_MAX
+P_MYID = (-1)
+P_MYHOSTID = (-1)
+
+# Included from sys/select.h
+FD_SETSIZE = 4096
+NBBY = 8
+NULL = 0
+
+# Included from sys/conf.h
+D_NEW = 0x00
+D_OLD = 0x01
+D_DMA = 0x02
+D_BLKOFF = 0x400
+D_LFS = 0x8000
+D_STR = 0x0800
+D_MOD = 0x1000
+D_PSEUDO = 0x2000
+D_RANDOM = 0x4000
+D_HOT = 0x10000
+D_SEEKNEG = 0x04
+D_TAPE = 0x08
+D_NOBRKUP = 0x10
+D_INITPUB = 0x20
+D_NOSPECMACDATA = 0x40
+D_RDWEQ = 0x80
+SECMASK = (D_INITPUB|D_NOSPECMACDATA|D_RDWEQ)
+DAF_REQDMA = 0x1
+DAF_PHYSREQ = 0x2
+DAF_PRE8 = 0x4
+DAF_STATIC = 0x8
+DAF_STR = 0x10
+D_MP = 0x100
+D_UPF = 0x200
+ROOTFS_NAMESZ = 7
+FMNAMESZ = 8
+MCD_VERSION = 1
+DI_BCBP = 0
+DI_MEDIA = 1
+
+# Included from sys/secsys.h
+ES_MACOPENLID = 1
+ES_MACSYSLID = 2
+ES_MACROOTLID = 3
+ES_PRVINFO = 4
+ES_PRVSETCNT = 5
+ES_PRVSETS = 6
+ES_MACADTLID = 7
+ES_PRVID = 8
+ES_TPGETMAJOR = 9
+SA_EXEC = 001
+SA_WRITE = 002
+SA_READ = 004
+SA_SUBSIZE = 010
+
+# Included from sys/stropts_f.h
+X_STR = (ord('S')<<8)
+X_I_BASE = (X_STR|0200)
+X_I_NREAD = (X_STR|0201)
+X_I_PUSH = (X_STR|0202)
+X_I_POP = (X_STR|0203)
+X_I_LOOK = (X_STR|0204)
+X_I_FLUSH = (X_STR|0205)
+X_I_SRDOPT = (X_STR|0206)
+X_I_GRDOPT = (X_STR|0207)
+X_I_STR = (X_STR|0210)
+X_I_SETSIG = (X_STR|0211)
+X_I_GETSIG = (X_STR|0212)
+X_I_FIND = (X_STR|0213)
+X_I_LINK = (X_STR|0214)
+X_I_UNLINK = (X_STR|0215)
+X_I_PEEK = (X_STR|0217)
+X_I_FDINSERT = (X_STR|0220)
+X_I_SENDFD = (X_STR|0221)
+X_I_RECVFD = (X_STR|0222)
+
+# Included from unistd.h
+
+# Included from sys/unistd.h
+R_OK = 004
+W_OK = 002
+X_OK = 001
+F_OK = 000
+EFF_ONLY_OK = 010
+EX_OK = 020
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+_SC_ARG_MAX = 1
+_SC_CHILD_MAX = 2
+_SC_CLK_TCK = 3
+_SC_NGROUPS_MAX = 4
+_SC_OPEN_MAX = 5
+_SC_JOB_CONTROL = 6
+_SC_SAVED_IDS = 7
+_SC_VERSION = 8
+_SC_PASS_MAX = 9
+_SC_LOGNAME_MAX = 10
+_SC_PAGESIZE = 11
+_SC_PAGE_SIZE = _SC_PAGESIZE
+_SC_XOPEN_VERSION = 12
+_SC_NACLS_MAX = 13
+_SC_NPROCESSORS_CONF = 14
+_SC_NPROCESSORS_ONLN = 15
+_SC_NPROCESSES = 39
+_SC_TOTAL_MEMORY = 40
+_SC_USEABLE_MEMORY = 41
+_SC_GENERAL_MEMORY = 42
+_SC_DEDICATED_MEMORY = 43
+_SC_NCGS_CONF = 44
+_SC_NCGS_ONLN = 45
+_SC_MAX_CPUS_PER_CG = 46
+_SC_CG_SIMPLE_IMPL = 47
+_SC_CACHE_LINE = 48
+_SC_SYSTEM_ID = 49
+_SC_THREADS = 51
+_SC_THREAD_ATTR_STACKADDR = 52
+_SC_THREAD_ATTR_STACKSIZE = 53
+_SC_THREAD_DESTRUCTOR_ITERATIONS = 54
+_SC_THREAD_KEYS_MAX = 55
+_SC_THREAD_PRIORITY_SCHEDULING = 56
+_SC_THREAD_PRIO_INHERIT = 57
+_SC_THREAD_PRIO_PROTECT = 58
+_SC_THREAD_STACK_MIN = 59
+_SC_THREAD_PROCESS_SHARED = 60
+_SC_THREAD_SAFE_FUNCTIONS = 61
+_SC_THREAD_THREADS_MAX = 62
+_SC_KERNEL_VM = 63
+_SC_TZNAME_MAX = 320
+_SC_STREAM_MAX = 321
+_SC_XOPEN_CRYPT = 323
+_SC_XOPEN_ENH_I18N = 324
+_SC_XOPEN_SHM = 325
+_SC_XOPEN_XCU_VERSION = 327
+_SC_AES_OS_VERSION = 330
+_SC_ATEXIT_MAX = 331
+_SC_2_C_BIND = 350
+_SC_2_C_DEV = 351
+_SC_2_C_VERSION = 352
+_SC_2_CHAR_TERM = 353
+_SC_2_FORT_DEV = 354
+_SC_2_FORT_RUN = 355
+_SC_2_LOCALEDEF = 356
+_SC_2_SW_DEV = 357
+_SC_2_UPE = 358
+_SC_2_VERSION = 359
+_SC_BC_BASE_MAX = 370
+_SC_BC_DIM_MAX = 371
+_SC_BC_SCALE_MAX = 372
+_SC_BC_STRING_MAX = 373
+_SC_COLL_WEIGHTS_MAX = 380
+_SC_EXPR_NEST_MAX = 381
+_SC_LINE_MAX = 382
+_SC_RE_DUP_MAX = 383
+_SC_IOV_MAX = 390
+_SC_NPROC_CONF = 391
+_SC_NPROC_ONLN = 392
+_SC_XOPEN_UNIX = 400
+_SC_SEMAPHORES = 440
+_CS_PATH = 1
+__O_CS_HOSTNAME = 2
+_CS_RELEASE = 3
+_CS_VERSION = 4
+__O_CS_MACHINE = 5
+__O_CS_ARCHITECTURE = 6
+_CS_HW_SERIAL = 7
+__O_CS_HW_PROVIDER = 8
+_CS_SRPC_DOMAIN = 9
+_CS_INITTAB_NAME = 10
+__O_CS_SYSNAME = 11
+_CS_LFS_CFLAGS = 20
+_CS_LFS_LDFLAGS = 21
+_CS_LFS_LIBS = 22
+_CS_LFS_LINTFLAGS = 23
+_CS_LFS64_CFLAGS = 24
+_CS_LFS64_LDFLAGS = 25
+_CS_LFS64_LIBS = 26
+_CS_LFS64_LINTFLAGS = 27
+_CS_ARCHITECTURE = 100
+_CS_BUSTYPES = 101
+_CS_HOSTNAME = 102
+_CS_HW_PROVIDER = 103
+_CS_KERNEL_STAMP = 104
+_CS_MACHINE = 105
+_CS_OS_BASE = 106
+_CS_OS_PROVIDER = 107
+_CS_SYSNAME = 108
+_CS_USER_LIMIT = 109
+_PC_LINK_MAX = 1
+_PC_MAX_CANON = 2
+_PC_MAX_INPUT = 3
+_PC_NAME_MAX = 4
+_PC_PATH_MAX = 5
+_PC_PIPE_BUF = 6
+_PC_NO_TRUNC = 7
+_PC_VDISABLE = 8
+_PC_CHOWN_RESTRICTED = 9
+_PC_FILESIZEBITS = 10
+_POSIX_VERSION = 199009L
+_XOPEN_VERSION = 4
+GF_PATH = "/etc/group"
+PF_PATH = "/etc/passwd"
+F_ULOCK = 0
+F_LOCK = 1
+F_TLOCK = 2
+F_TEST = 3
+_POSIX_JOB_CONTROL = 1
+_POSIX_SAVED_IDS = 1
+_POSIX_VDISABLE = 0
+NULL = 0
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+_XOPEN_UNIX = 1
+_XOPEN_ENH_I18N = 1
+_XOPEN_XPG4 = 1
+_POSIX2_C_VERSION = 199209L
+_POSIX2_VERSION = 199209L
+_XOPEN_XCU_VERSION = 4
+_POSIX_SEMAPHORES = 1
+_POSIX_THREADS = 1
+_POSIX_THREAD_ATTR_STACKADDR = 1
+_POSIX_THREAD_ATTR_STACKSIZE = 1
+_POSIX_THREAD_PRIORITY_SCHEDULING = 1
+_POSIX_THREAD_PROCESS_SHARED = 1
+_POSIX_THREAD_SAFE_FUNCTIONS = 1
+_POSIX2_C_BIND = 1
+_POSIX2_CHAR_TERM = 1
+_POSIX2_FORT_RUN = 1
+_POSIX2_LOCALEDEF = 1
+_POSIX2_UPE = 1
+_LFS_ASYNCHRONOUS_IO = 1
+_LFS_LARGEFILE = 1
+_LFS64_ASYNCHRONOUS_IO = 1
+_LFS64_LARGEFILE = 1
+_LFS64_STDIO = 1
+FMNAMESZ = 8
+SNDZERO = 0x001
+SNDPIPE = 0x002
+RNORM = 0x000
+RMSGD = 0x001
+RMSGN = 0x002
+RMODEMASK = 0x003
+RPROTDAT = 0x004
+RPROTDIS = 0x008
+RPROTNORM = 0x010
+RPROTMASK = 0x01c
+FLUSHR = 0x01
+FLUSHW = 0x02
+FLUSHRW = 0x03
+FLUSHBAND = 0x04
+S_INPUT = 0x0001
+S_HIPRI = 0x0002
+S_OUTPUT = 0x0004
+S_MSG = 0x0008
+S_ERROR = 0x0010
+S_HANGUP = 0x0020
+S_RDNORM = 0x0040
+S_WRNORM = S_OUTPUT
+S_RDBAND = 0x0080
+S_WRBAND = 0x0100
+S_BANDURG = 0x0200
+RS_HIPRI = 0x01
+MSG_HIPRI = 0x01
+MSG_ANY = 0x02
+MSG_BAND = 0x04
+MSG_DISCARD = 0x08
+MSG_PEEKIOCTL = 0x10
+MORECTL = 1
+MOREDATA = 2
+MUXID_ALL = (-1)
+ANYMARK = 0x01
+LASTMARK = 0x02
+STR = (ord('S')<<8)
+I_NREAD = (STR|01)
+I_PUSH = (STR|02)
+I_POP = (STR|03)
+I_LOOK = (STR|04)
+I_FLUSH = (STR|05)
+I_SRDOPT = (STR|06)
+I_GRDOPT = (STR|07)
+I_STR = (STR|010)
+I_SETSIG = (STR|011)
+I_GETSIG = (STR|012)
+I_FIND = (STR|013)
+I_LINK = (STR|014)
+I_UNLINK = (STR|015)
+I_PEEK = (STR|017)
+I_FDINSERT = (STR|020)
+I_SENDFD = (STR|021)
+I_RECVFD = (STR|022)
+I_E_RECVFD = (STR|016)
+I_RECVFD = (STR|016)
+I_RECVFD = (STR|022)
+I_SWROPT = (STR|023)
+I_GWROPT = (STR|024)
+I_LIST = (STR|025)
+I_PLINK = (STR|026)
+I_PUNLINK = (STR|027)
+I_FLUSHBAND = (STR|034)
+I_CKBAND = (STR|035)
+I_GETBAND = (STR|036)
+I_ATMARK = (STR|037)
+I_SETCLTIME = (STR|040)
+I_GETCLTIME = (STR|041)
+I_CANPUT = (STR|042)
+I_S_RECVFD = (STR|043)
+I_STATS = (STR|044)
+I_BIGPIPE = (STR|045)
+I_GETTP = (STR|046)
+INFTIM = -1
diff --git a/lib-python/2.2/plat-unixware7/regen b/lib-python/2.2/plat-unixware7/regen
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/plat-unixware7/regen
@@ -0,0 +1,9 @@
+#! /bin/sh
+case `uname -sr` in
+UnixWare*)	;;
+*)	echo Probably not on a UnixWare system 1>&2
+	exit 1;;
+esac
+set -v
+h2py -i '(u_long)' /usr/include/netinet/in.h
+h2py /usr/include/sys/stropts.h
diff --git a/lib-python/2.2/popen2.py b/lib-python/2.2/popen2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/popen2.py
@@ -0,0 +1,199 @@
+"""Spawn a command with pipes to its stdin, stdout, and optionally stderr.
+
+The normal os.popen(cmd, mode) call spawns a shell command and provides a
+file interface to just the input or output of the process depending on
+whether mode is 'r' or 'w'.  This module provides the functions popen2(cmd)
+and popen3(cmd) which return two or three pipes to the spawned command.
+"""
+
+import os
+import sys
+import types
+
+__all__ = ["popen2", "popen3", "popen4"]
+
+MAXFD = 256     # Max number of file descriptors (os.getdtablesize()???)
+
+_active = []
+
+def _cleanup():
+    for inst in _active[:]:
+        inst.poll()
+
+class Popen3:
+    """Class representing a child process.  Normally instances are created
+    by the factory functions popen2() and popen3()."""
+
+    sts = -1                    # Child not completed yet
+
+    def __init__(self, cmd, capturestderr=0, bufsize=-1):
+        """The parameter 'cmd' is the shell command to execute in a
+        sub-process.  The 'capturestderr' flag, if true, specifies that
+        the object should capture standard error output of the child process.
+        The default is false.  If the 'bufsize' parameter is specified, it
+        specifies the size of the I/O buffers to/from the child process."""
+        _cleanup()
+        p2cread, p2cwrite = os.pipe()
+        c2pread, c2pwrite = os.pipe()
+        if capturestderr:
+            errout, errin = os.pipe()
+        self.pid = os.fork()
+        if self.pid == 0:
+            # Child
+            os.dup2(p2cread, 0)
+            os.dup2(c2pwrite, 1)
+            if capturestderr:
+                os.dup2(errin, 2)
+            self._run_child(cmd)
+        os.close(p2cread)
+        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
+        os.close(c2pwrite)
+        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
+        if capturestderr:
+            os.close(errin)
+            self.childerr = os.fdopen(errout, 'r', bufsize)
+        else:
+            self.childerr = None
+        _active.append(self)
+
+    def _run_child(self, cmd):
+        if isinstance(cmd, types.StringTypes):
+            cmd = ['/bin/sh', '-c', cmd]
+        for i in range(3, MAXFD):
+            try:
+                os.close(i)
+            except:
+                pass
+        try:
+            os.execvp(cmd[0], cmd)
+        finally:
+            os._exit(1)
+
+    def poll(self):
+        """Return the exit status of the child process if it has finished,
+        or -1 if it hasn't finished yet."""
+        if self.sts < 0:
+            try:
+                pid, sts = os.waitpid(self.pid, os.WNOHANG)
+                if pid == self.pid:
+                    self.sts = sts
+                    _active.remove(self)
+            except os.error:
+                pass
+        return self.sts
+
+    def wait(self):
+        """Wait for and return the exit status of the child process."""
+        pid, sts = os.waitpid(self.pid, 0)
+        if pid == self.pid:
+            self.sts = sts
+            _active.remove(self)
+        return self.sts
+
+
+class Popen4(Popen3):
+    childerr = None
+
+    def __init__(self, cmd, bufsize=-1):
+        _cleanup()
+        p2cread, p2cwrite = os.pipe()
+        c2pread, c2pwrite = os.pipe()
+        self.pid = os.fork()
+        if self.pid == 0:
+            # Child
+            os.dup2(p2cread, 0)
+            os.dup2(c2pwrite, 1)
+            os.dup2(c2pwrite, 2)
+            self._run_child(cmd)
+        os.close(p2cread)
+        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
+        os.close(c2pwrite)
+        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
+        _active.append(self)
+
+
+if sys.platform[:3] == "win":
+    # Some things don't make sense on non-Unix platforms.
+    del Popen3, Popen4
+
+    def popen2(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout, child_stdin) are returned."""
+        w, r = os.popen2(cmd, mode, bufsize)
+        return r, w
+
+    def popen3(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout, child_stdin, child_stderr) are returned."""
+        w, r, e = os.popen3(cmd, mode, bufsize)
+        return r, w, e
+
+    def popen4(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout_stderr, child_stdin) are returned."""
+        w, r = os.popen4(cmd, mode, bufsize)
+        return r, w
+else:
+    def popen2(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout, child_stdin) are returned."""
+        inst = Popen3(cmd, 0, bufsize)
+        return inst.fromchild, inst.tochild
+
+    def popen3(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout, child_stdin, child_stderr) are returned."""
+        inst = Popen3(cmd, 1, bufsize)
+        return inst.fromchild, inst.tochild, inst.childerr
+
+    def popen4(cmd, bufsize=-1, mode='t'):
+        """Execute the shell command 'cmd' in a sub-process.  If 'bufsize' is
+        specified, it sets the buffer size for the I/O pipes.  The file objects
+        (child_stdout_stderr, child_stdin) are returned."""
+        inst = Popen4(cmd, bufsize)
+        return inst.fromchild, inst.tochild
+
+    __all__.extend(["Popen3", "Popen4"])
+
+def _test():
+    cmd  = "cat"
+    teststr = "ab cd\n"
+    if os.name == "nt":
+        cmd = "more"
+    # "more" doesn't act the same way across Windows flavors,
+    # sometimes adding an extra newline at the start or the
+    # end.  So we strip whitespace off both ends for comparison.
+    expected = teststr.strip()
+    print "testing popen2..."
+    r, w = popen2(cmd)
+    w.write(teststr)
+    w.close()
+    got = r.read()
+    if got.strip() != expected:
+        raise ValueError("wrote %s read %s" % (`teststr`, `got`))
+    print "testing popen3..."
+    try:
+        r, w, e = popen3([cmd])
+    except:
+        r, w, e = popen3(cmd)
+    w.write(teststr)
+    w.close()
+    got = r.read()
+    if got.strip() != expected:
+        raise ValueError("wrote %s read %s" % (`teststr`, `got`))
+    got = e.read()
+    if got:
+        raise ValueError("unexected %s on stderr" % `got`)
+    for inst in _active[:]:
+        inst.wait()
+    if _active:
+        raise ValueError("_active not empty")
+    print "All OK"
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/poplib.py b/lib-python/2.2/poplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/poplib.py
@@ -0,0 +1,335 @@
+"""A POP3 client class.
+
+Based on the J. Myers POP3 draft, Jan. 96
+"""
+
+# Author: David Ascher <david_ascher at brown.edu>
+#         [heavily stealing from nntplib.py]
+# Updated: Piers Lauder <piers at cs.su.oz.au> [Jul '97]
+# String method conversion and test jig improvements by ESR, February 2001.
+
+# Example (see the test function at the end of this file)
+
+# Imports
+
+import re, socket
+
+__all__ = ["POP3","error_proto"]
+
+# Exception raised when an error or invalid response is received;
+# it is raised with the offending response text as its argument.
+
+class error_proto(Exception): pass
+
+# Standard Port
+POP3_PORT = 110
+
+# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
+CR = '\r'
+LF = '\n'
+CRLF = CR+LF
+
+
+class POP3:
+
+    """This class supports both the minimal and optional command sets.
+    Arguments can be strings or integers (where appropriate)
+    (e.g.: retr(1) and retr('1') both work equally well.
+
+    Minimal Command Set:
+            USER name               user(name)
+            PASS string             pass_(string)
+            STAT                    stat()
+            LIST [msg]              list(msg = None)
+            RETR msg                retr(msg)
+            DELE msg                dele(msg)
+            NOOP                    noop()
+            RSET                    rset()
+            QUIT                    quit()
+
+    Optional Commands (some servers support these):
+            RPOP name               rpop(name)
+            APOP name digest        apop(name, digest)
+            TOP msg n               top(msg, n)
+            UIDL [msg]              uidl(msg = None)
+
+    Raises one exception: 'error_proto'.
+
+    Instantiate with:
+            POP3(hostname, port=110)
+
+    NB:     the POP protocol locks the mailbox from user
+            authorization until QUIT, so be sure to get in, suck
+            the messages, and quit, each time you access the
+            mailbox.
+
+            POP is a line-based protocol, which means large mail
+            messages consume lots of python cycles reading them
+            line-by-line.
+
+            If it's available on your mail server, use IMAP4
+            instead, it doesn't suffer from the two problems
+            above.
+    """
+
+
+    def __init__(self, host, port = POP3_PORT):
+        self.host = host
+        self.port = port
+        # Fallback error text, used only if getaddrinfo yields no addresses.
+        msg = "getaddrinfo returns an empty list"
+        self.sock = None
+        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            try:
+                self.sock = socket.socket(af, socktype, proto)
+                self.sock.connect(sa)
+            except socket.error, msg:
+                if self.sock:
+                    self.sock.close()
+                self.sock = None
+                continue
+            break
+        if not self.sock:
+            raise socket.error, msg
+        self.file = self.sock.makefile('rb')
+        self._debugging = 0
+        self.welcome = self._getresp()
+
+
+    # Internal: send one line to the server, appending CRLF
+
+    def _putline(self, line):
+        if self._debugging > 1: print '*put*', `line`
+        self.sock.sendall('%s%s' % (line, CRLF))
+
+
+    # Internal: send one command to the server (through _putline())
+
+    def _putcmd(self, line):
+        if self._debugging: print '*cmd*', `line`
+        self._putline(line)
+
+
+    # Internal: return one line from the server, stripping CRLF.
+    # This is where all the CPU time of this module is consumed.
+    # Raise error_proto('-ERR EOF') if the connection is closed.
+
+    def _getline(self):
+        line = self.file.readline()
+        if self._debugging > 1: print '*get*', `line`
+        if not line: raise error_proto('-ERR EOF')
+        octets = len(line)
+        # server can send any combination of CR & LF
+        # however, 'readline()' returns lines ending in LF
+        # so only possibilities are ...LF, ...CRLF, CR...LF
+        if line[-2:] == CRLF:
+            return line[:-2], octets
+        if line[0] == CR:
+            return line[1:-1], octets
+        return line[:-1], octets
+
+
+    # Internal: get a response from the server.
+    # Raise 'error_proto' if the response doesn't start with '+'.
+
+    def _getresp(self):
+        resp, o = self._getline()
+        if self._debugging > 1: print '*resp*', `resp`
+        c = resp[:1]
+        if c != '+':
+            raise error_proto(resp)
+        return resp
+
+
+    # Internal: get a response plus following text from the server.
+
+    def _getlongresp(self):
+        resp = self._getresp()
+        # NOTE(review): 'list' shadows the builtin name; kept verbatim.
+        list = []; octets = 0
+        line, o = self._getline()
+        while line != '.':
+            if line[:2] == '..':
+                # undo the extra leading dot the server adds for transparency
+                o = o-1
+                line = line[1:]
+            octets = octets + o
+            list.append(line)
+            line, o = self._getline()
+        return resp, list, octets
+
+
+    # Internal: send a command and get the response
+
+    def _shortcmd(self, line):
+        self._putcmd(line)
+        return self._getresp()
+
+
+    # Internal: send a command and get the response plus following text
+
+    def _longcmd(self, line):
+        self._putcmd(line)
+        return self._getlongresp()
+
+
+    # These can be useful:
+
+    def getwelcome(self):
+        return self.welcome
+
+
+    def set_debuglevel(self, level):
+        self._debugging = level
+
+
+    # Here are all the POP commands:
+
+    def user(self, user):
+        """Send user name, return response
+
+        (should indicate password required).
+        """
+        return self._shortcmd('USER %s' % user)
+
+
+    def pass_(self, pswd):
+        """Send password, return response
+
+        (response includes message count, mailbox size).
+
+        NB: mailbox is locked by server from here to 'quit()'
+        """
+        return self._shortcmd('PASS %s' % pswd)
+
+
+    def stat(self):
+        """Get mailbox status.
+
+        Result is tuple of 2 ints (message count, mailbox size)
+        """
+        retval = self._shortcmd('STAT')
+        rets = retval.split()
+        if self._debugging: print '*stat*', `rets`
+        numMessages = int(rets[1])
+        sizeMessages = int(rets[2])
+        return (numMessages, sizeMessages)
+
+
+    def list(self, which=None):
+        """Request listing, return result.
+
+        Result without a message number argument is in form
+        ['response', ['mesg_num octets', ...]].
+
+        Result when a message number argument is given is a
+        single response: the "scan listing" for that message.
+        """
+        if which:
+            return self._shortcmd('LIST %s' % which)
+        return self._longcmd('LIST')
+
+
+    def retr(self, which):
+        """Retrieve whole message number 'which'.
+
+        Result is in form ['response', ['line', ...], octets].
+        """
+        return self._longcmd('RETR %s' % which)
+
+
+    def dele(self, which):
+        """Delete message number 'which'.
+
+        Result is 'response'.
+        """
+        return self._shortcmd('DELE %s' % which)
+
+
+    def noop(self):
+        """Does nothing.
+
+        One supposes the response indicates the server is alive.
+        """
+        return self._shortcmd('NOOP')
+
+
+    def rset(self):
+        """Not sure what this does."""
+        return self._shortcmd('RSET')
+
+
+    def quit(self):
+        """Signoff: commit changes on server, unlock mailbox, close connection."""
+        try:
+            resp = self._shortcmd('QUIT')
+        except error_proto, val:
+            # if QUIT draws an error response, return its text instead of
+            # raising; the connection is torn down either way
+            resp = val
+        self.file.close()
+        self.sock.close()
+        del self.file, self.sock
+        return resp
+
+    #__del__ = quit
+
+
+    # optional commands:
+
+    def rpop(self, user):
+        """Not sure what this does."""
+        return self._shortcmd('RPOP %s' % user)
+
+
+    timestamp = re.compile(r'\+OK.*(<[^>]+>)')
+
+    def apop(self, user, secret):
+        """Authorisation
+
+        - only possible if server has supplied a timestamp in initial greeting.
+
+        Args:
+                user    - mailbox user;
+                secret  - secret shared between client and server.
+
+        NB: mailbox is locked by server from here to 'quit()'
+        """
+        m = self.timestamp.match(self.welcome)
+        if not m:
+            raise error_proto('-ERR APOP not supported by server')
+        import md5
+        digest = md5.new(m.group(1)+secret).digest()
+        digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
+        return self._shortcmd('APOP %s %s' % (user, digest))
+
+
+    def top(self, which, howmuch):
+        """Retrieve message header of message number 'which'
+        and first 'howmuch' lines of message body.
+
+        Result is in form ['response', ['line', ...], octets].
+        """
+        return self._longcmd('TOP %s %s' % (which, howmuch))
+
+
+    def uidl(self, which=None):
+        """Return message digest (unique id) list.
+
+        If 'which', result contains unique id for that message
+        in the form 'response mesgnum uid', otherwise result is
+        the list ['response', ['mesgnum uid', ...], octets]
+        """
+        if which:
+            return self._shortcmd('UIDL %s' % which)
+        return self._longcmd('UIDL')
+
+
+# Demo / manual test: connect, authenticate and dump every message.
+# Usage: python poplib.py <host> <user> <password>
+if __name__ == "__main__":
+    import sys
+    a = POP3(sys.argv[1])
+    print a.getwelcome()
+    a.user(sys.argv[2])
+    a.pass_(sys.argv[3])
+    a.list()
+    (numMsgs, totalSize) = a.stat()
+    for i in range(1, numMsgs + 1):
+        (header, msg, octets) = a.retr(i)
+        print "Message ", `i`, ':'
+        for line in msg:
+            print '   ' + line
+        print '-----------------------'
+    a.quit()
diff --git a/lib-python/2.2/posixfile.py b/lib-python/2.2/posixfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/posixfile.py
@@ -0,0 +1,240 @@
+"""Extended file operations available in POSIX.
+
+f = posixfile.open(filename, [mode, [bufsize]])
+      will create a new posixfile object
+
+f = posixfile.fileopen(fileobject)
+      will create a posixfile object from a builtin file object
+
+f.file()
+      will return the original builtin file object
+
+f.dup()
+      will return a new file object based on a new filedescriptor
+
+f.dup2(fd)
+      will return a new file object based on the given filedescriptor
+
+f.flags(mode)
+      will turn on the associated flag (merge)
+      mode can contain the following characters:
+
+  (character representing a flag)
+      a       append only flag
+      c       close on exec flag
+      n       no delay flag
+      s       synchronization flag
+  (modifiers)
+      !       turn flags 'off' instead of default 'on'
+      =       copy flags 'as is' instead of default 'merge'
+      ?       return a string in which the characters represent the flags
+              that are set
+
+      note: - the '!' and '=' modifiers are mutually exclusive.
+            - the '?' modifier will return the status of the flags after they
+              have been changed by other characters in the mode string
+
+f.lock(mode [, len [, start [, whence]]])
+      will (un)lock a region
+      mode can contain the following characters:
+
+  (character representing type of lock)
+      u       unlock
+      r       read lock
+      w       write lock
+  (modifiers)
+      |       wait until the lock can be granted
+      ?       return the first lock conflicting with the requested lock
+              or 'None' if there is no conflict. The lock returned is in the
+              format (mode, len, start, whence, pid) where mode is a
+              character representing the type of lock ('r' or 'w')
+
+      note: - the '?' modifier prevents a region from being locked; it is
+              query only
+"""
+
+import warnings
+warnings.warn(
+    "The posixfile module is obsolete and will disappear in the future",
+    DeprecationWarning)
+del warnings
+
+
+class _posixfile_:
+    """File wrapper class that provides extra POSIX file routines."""
+
+    states = ['open', 'closed']
+
+    #
+    # Internal routines
+    #
+    def __repr__(self):
+        file = self._file_
+        return "<%s posixfile '%s', mode '%s' at %s>" % \
+                (self.states[file.closed], file.name, file.mode, \
+                 hex(id(self))[2:])
+
+    #
+    # Initialization routines
+    #
+    def open(self, name, mode='r', bufsize=-1):
+        import __builtin__
+        return self.fileopen(__builtin__.open(name, mode, bufsize))
+
+    def fileopen(self, file):
+        import types
+        # only a genuine builtin file object passes this repr()-based check
+        if `type(file)` != "<type 'file'>":
+            raise TypeError, 'posixfile.fileopen() arg must be file object'
+        self._file_  = file
+        # Copy basic file methods
+        for maybemethod in dir(file):
+            if not maybemethod.startswith('_'):
+                attr = getattr(file, maybemethod)
+                if isinstance(attr, types.BuiltinMethodType):
+                    setattr(self, maybemethod, attr)
+        return self
+
+    #
+    # New methods
+    #
+    def file(self):
+        return self._file_
+
+    def dup(self):
+        import posix
+
+        if not hasattr(posix, 'fdopen'):
+            raise AttributeError, 'dup() method unavailable'
+
+        return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
+
+    def dup2(self, fd):
+        import posix
+
+        if not hasattr(posix, 'fdopen'):
+            # NOTE(review): message says 'dup()' though this is dup2(); kept verbatim
+            raise AttributeError, 'dup() method unavailable'
+
+        posix.dup2(self._file_.fileno(), fd)
+        return posix.fdopen(fd, self._file_.mode)
+
+    def flags(self, *which):
+        import fcntl, os
+
+        if which:
+            if len(which) > 1:
+                raise TypeError, 'Too many arguments'
+            which = which[0]
+        else: which = '?'
+
+        l_flags = 0
+        if 'n' in which: l_flags = l_flags | os.O_NDELAY
+        if 'a' in which: l_flags = l_flags | os.O_APPEND
+        if 's' in which: l_flags = l_flags | os.O_SYNC
+
+        file = self._file_
+
+        if '=' not in which:
+            # merge mode: combine requested flags with the current ones
+            cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
+            if '!' in which: l_flags = cur_fl & ~ l_flags
+            else: l_flags = cur_fl | l_flags
+
+        l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)
+
+        if 'c' in which:
+            arg = ('!' not in which)    # 0 is don't, 1 is do close on exec
+            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)
+
+        if '?' in which:
+            which = ''                  # Return current flags
+            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
+            if os.O_APPEND & l_flags: which = which + 'a'
+            if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
+                which = which + 'c'
+            if os.O_NDELAY & l_flags: which = which + 'n'
+            if os.O_SYNC & l_flags: which = which + 's'
+            return which
+
+    def lock(self, how, *args):
+        import struct, fcntl
+
+        if 'w' in how: l_type = fcntl.F_WRLCK
+        elif 'r' in how: l_type = fcntl.F_RDLCK
+        elif 'u' in how: l_type = fcntl.F_UNLCK
+        else: raise TypeError, 'no type of lock specified'
+
+        if '|' in how: cmd = fcntl.F_SETLKW
+        elif '?' in how: cmd = fcntl.F_GETLK
+        else: cmd = fcntl.F_SETLK
+
+        l_whence = 0
+        l_start = 0
+        l_len = 0
+
+        if len(args) == 1:
+            l_len = args[0]
+        elif len(args) == 2:
+            l_len, l_start = args
+        elif len(args) == 3:
+            l_len, l_start, l_whence = args
+        elif len(args) > 3:
+            raise TypeError, 'too many arguments'
+
+        # Hack by davem at magnet.com to get locking to go on freebsd;
+        # additions for AIX by Vladimir.Marangozov at imag.fr
+        import sys, os
+        if sys.platform in ('netbsd1',
+                            'openbsd2',
+                            'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
+                            'bsdos2', 'bsdos3', 'bsdos4'):
+            flock = struct.pack('lxxxxlxxxxlhh', \
+                  l_start, l_len, os.getpid(), l_type, l_whence)
+        elif sys.platform in ['aix3', 'aix4']:
+            flock = struct.pack('hhlllii', \
+                  l_type, l_whence, l_start, l_len, 0, 0, 0)
+        else:
+            flock = struct.pack('hhllhh', \
+                  l_type, l_whence, l_start, l_len, 0, 0)
+
+        flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
+
+        if '?' in how:
+            # query mode: unpack the conflicting lock (platform-specific layout)
+            if sys.platform in ('netbsd1',
+                                'openbsd2',
+                                'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
+                                'bsdos2', 'bsdos3', 'bsdos4'):
+                l_start, l_len, l_pid, l_type, l_whence = \
+                    struct.unpack('lxxxxlxxxxlhh', flock)
+            elif sys.platform in ['aix3', 'aix4']:
+                l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
+                    struct.unpack('hhlllii', flock)
+            elif sys.platform == "linux2":
+                l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
+                    struct.unpack('hhllhh', flock)
+            else:
+                l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
+                    struct.unpack('hhllhh', flock)
+
+            if l_type != fcntl.F_UNLCK:
+                if l_type == fcntl.F_RDLCK:
+                    return 'r', l_len, l_start, l_whence, l_pid
+                else:
+                    return 'w', l_len, l_start, l_whence, l_pid
+
+def open(name, mode='r', bufsize=-1):
+    """Public routine to open a file as a posixfile object."""
+    # delegates to _posixfile_.open, which wraps __builtin__.open
+    return _posixfile_().open(name, mode, bufsize)
+
+def fileopen(file):
+    """Public routine to get a posixfile object from a Python file object."""
+    # delegates to _posixfile_.fileopen, which copies the file's methods
+    return _posixfile_().fileopen(file)
+
+#
+# Constants
+#
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+
+#
+# End of posixfile.py
+#
diff --git a/lib-python/2.2/posixpath.py b/lib-python/2.2/posixpath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/posixpath.py
@@ -0,0 +1,414 @@
+"""Common operations on Posix pathnames.
+
+Instead of importing this module directly, import os and refer to
+this module as os.path.  The "os.path" name is an alias for this
+module on Posix systems; on other systems (e.g. Mac, Windows),
+os.path provides the same operations in a manner specific to that
+platform, and is an alias to another module (e.g. macpath, ntpath).
+
+Some of this can actually be useful on non-Posix systems too, e.g.
+for manipulation of the pathname component of URLs.
+"""
+
+import os
+import stat
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+           "basename","dirname","commonprefix","getsize","getmtime",
+           "getatime","islink","exists","isdir","isfile","ismount",
+           "walk","expanduser","expandvars","normpath","abspath",
+           "samefile","sameopenfile","samestat","realpath"]
+
+# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
+# On MS-DOS this may also turn slashes into backslashes; however, other
+# normalizations (such as optimizing '../' away) are not allowed
+# (another function should be defined to do that).
+
+def normcase(s):
+    """Normalize case of pathname.  Has no effect under Posix"""
+    # POSIX filenames are case-sensitive, so the path is returned unchanged
+    return s
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on the Mac or MS-DOS.
+
+def isabs(s):
+    """Test whether a path is absolute"""
+    # on POSIX a path is absolute exactly when it starts with '/'
+    return s[:1] == '/'
+
+
+# Join pathnames.
+# Ignore the previous parts if a part is absolute.
+# Insert a '/' unless the first part is empty or already ends in '/'.
+
+def join(a, *p):
+    """Join two or more pathname components, inserting '/' as needed"""
+    path = a
+    for b in p:
+        if b[:1] == '/':
+            # an absolute component discards everything accumulated so far
+            path = b
+        elif path == '' or path[-1:] == '/':
+            path = path + b
+        else:
+            path = path + '/' + b
+    return path
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest).  If the path ends in '/', tail will be empty.  If there is no
+# '/' in the path, head  will be empty.
+# Trailing '/'es are stripped from head unless it is the root.
+
+def split(p):
+    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
+    everything after the final slash.  Either part may be empty."""
+    i = p.rfind('/') + 1
+    head, tail = p[:i], p[i:]
+    # strip trailing slashes from head unless it consists entirely of
+    # slashes (i.e. it is the root)
+    if head and head != '/'*len(head):
+        while head[-1] == '/':
+            head = head[:-1]
+    return head, tail
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+def splitext(p):
+    """Split the extension from a pathname.  Extension is everything from the
+    last dot to the end.  Returns "(root, ext)", either part may be empty."""
+    root, ext = '', ''
+    # single pass: '/' commits any pending extension to the root,
+    # '.' begins a (new) candidate extension
+    for c in p:
+        if c == '/':
+            root, ext = root + ext + c, ''
+        elif c == '.':
+            if ext:
+                root, ext = root + ext, c
+            else:
+                ext = c
+        elif ext:
+            ext = ext + c
+        else:
+            root = root + c
+    return root, ext
+
+
+# Split a pathname into a drive specification and the rest of the
+# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.
+
+def splitdrive(p):
+    """Split a pathname into drive and path. On Posix, drive is always
+    empty."""
+    # POSIX has no drive letters
+    return '', p
+
+
+# Return the tail (basename) part of a path.
+
+def basename(p):
+    """Returns the final component of a pathname"""
+    # the tail part of split()
+    return split(p)[1]
+
+
+# Return the head (dirname) part of a path.
+
+def dirname(p):
+    """Returns the directory component of a pathname"""
+    # the head part of split()
+    return split(p)[0]
+
+
+# Return the longest prefix of all list elements.
+
+def commonprefix(m):
+    "Given a list of pathnames, returns the longest common leading component"
+    if not m: return ''
+    prefix = m[0]
+    # NOTE: comparison is character-wise, so the result need not end on a
+    # path-component boundary
+    for item in m:
+        for i in range(len(prefix)):
+            if prefix[:i+1] != item[:i+1]:
+                prefix = prefix[:i]
+                if i == 0: return ''
+                break
+    return prefix
+
+
+# Get size, mtime, atime of files.
+
+def getsize(filename):
+    """Return the size of a file, reported by os.stat()."""
+    st = os.stat(filename)
+    # index the stat result via the stat-module constant
+    return st[stat.ST_SIZE]
+
+def getmtime(filename):
+    """Return the last modification time of a file, reported by os.stat()."""
+    st = os.stat(filename)
+    # index the stat result via the stat-module constant
+    return st[stat.ST_MTIME]
+
+def getatime(filename):
+    """Return the last access time of a file, reported by os.stat()."""
+    st = os.stat(filename)
+    # index the stat result via the stat-module constant
+    return st[stat.ST_ATIME]
+
+
+# Is a path a symbolic link?
+# This will always return false on systems where os.lstat doesn't exist.
+
+def islink(path):
+    """Test whether a path is a symbolic link"""
+    try:
+        st = os.lstat(path)
+    except (os.error, AttributeError):
+        # AttributeError: os.lstat is missing on this platform -> never a link
+        return 0
+    return stat.S_ISLNK(st[stat.ST_MODE])
+
+
+# Does a path exist?
+# This is false for dangling symbolic links.
+
+def exists(path):
+    """Test whether a path exists.  Returns false for broken symbolic links"""
+    try:
+        # result unused: os.stat is called only to see whether it raises
+        st = os.stat(path)
+    except os.error:
+        return 0
+    return 1
+
+
+# Is a path a directory?
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path.
+
+def isdir(path):
+    """Test whether a path is a directory"""
+    try:
+        # os.stat (not lstat): symlinks to directories count as directories
+        st = os.stat(path)
+    except os.error:
+        return 0
+    return stat.S_ISDIR(st[stat.ST_MODE])
+
+
+# Is a path a regular file?
+# This follows symbolic links, so both islink() and isfile() can be true
+# for the same path.
+
+def isfile(path):
+    """Test whether a path is a regular file"""
+    try:
+        # os.stat (not lstat): symlinks to regular files count as files
+        st = os.stat(path)
+    except os.error:
+        return 0
+    return stat.S_ISREG(st[stat.ST_MODE])
+
+
+# Are two filenames really pointing to the same file?
+
+def samefile(f1, f2):
+    """Test whether two pathnames reference the same actual file"""
+    s1 = os.stat(f1)
+    s2 = os.stat(f2)
+    # same file <=> same (device, inode) pair
+    return samestat(s1, s2)
+
+
+# Are two open files really referencing the same file?
+# (Not necessarily the same file descriptor!)
+
+def sameopenfile(fp1, fp2):
+    """Test whether two open file objects reference the same file"""
+    # arguments are file descriptors, as passed to os.fstat
+    s1 = os.fstat(fp1)
+    s2 = os.fstat(fp2)
+    return samestat(s1, s2)
+
+
+# Are two stat buffers (obtained from stat, fstat or lstat)
+# describing the same file?
+
+def samestat(s1, s2):
+    """Test whether two stat buffers reference the same file"""
+    # identical inode and device numbers identify the same file
+    return s1[stat.ST_INO] == s2[stat.ST_INO] and \
+           s1[stat.ST_DEV] == s2[stat.ST_DEV]
+
+
+# Is a path a mount point?
+# (Does this work for all UNIXes?  Is it even guaranteed to work by Posix?)
+
+def ismount(path):
+    """Test whether a path is a mount point"""
+    # compares the device and inode of 'path' against those of 'path/..'
+    try:
+        s1 = os.stat(path)
+        s2 = os.stat(join(path, '..'))
+    except os.error:
+        return 0 # It doesn't exist -- so not a mount point :-)
+    dev1 = s1[stat.ST_DEV]
+    dev2 = s2[stat.ST_DEV]
+    if dev1 != dev2:
+        return 1        # path/.. on a different device as path
+    ino1 = s1[stat.ST_INO]
+    ino2 = s2[stat.ST_INO]
+    if ino1 == ino2:
+        return 1        # path/.. is the same i-node as path
+    return 0
+
+
+# Directory tree walk.
+# For each directory under top (including top itself, but excluding
+# '.' and '..'), func(arg, dirname, filenames) is called, where
+# dirname is the name of the directory and filenames is the list
+# of files (and subdirectories etc.) in the directory.
+# The func may modify the filenames list, to implement a filter,
+# or to impose a different order of visiting.
+
+def walk(top, func, arg):
+    """Directory tree walk with callback function.
+
+    For each directory in the directory tree rooted at top (including top
+    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
+    dirname is the name of the directory, and fnames a list of the names of
+    the files and subdirectories in dirname (excluding '.' and '..').  func
+    may modify the fnames list in-place (e.g. via del or slice assignment),
+    and walk will only recurse into the subdirectories whose names remain in
+    fnames; this can be used to implement a filter, or to impose a specific
+    order of visiting.  No semantics are defined for, or required of, arg,
+    beyond that arg is always passed to func.  It can be used, e.g., to pass
+    a filename pattern, or a mutable object designed to accumulate
+    statistics.  Passing None for arg is common."""
+
+    try:
+        names = os.listdir(top)
+    except os.error:
+        # unreadable directory: silently skip it
+        return
+    func(arg, top, names)
+    for name in names:
+        name = join(top, name)
+        try:
+            st = os.lstat(name)
+        except os.error:
+            continue
+        # os.lstat means symbolic links to directories are NOT followed
+        if stat.S_ISDIR(st[stat.ST_MODE]):
+            walk(name, func, arg)
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
+def expanduser(path):
+    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
+    do nothing."""
+    if path[:1] != '~':
+        return path
+    i, n = 1, len(path)
+    while i < n and path[i] != '/':
+        i = i + 1
+    if i == 1:
+        # bare '~': prefer $HOME, fall back to the password database
+        if not os.environ.has_key('HOME'):
+            import pwd
+            userhome = pwd.getpwuid(os.getuid())[5]
+        else:
+            userhome = os.environ['HOME']
+    else:
+        # '~user': look the name up in the password database
+        import pwd
+        try:
+            pwent = pwd.getpwnam(path[1:i])
+        except KeyError:
+            return path
+        userhome = pwent[5]
+    # avoid a doubled slash when the home directory ends in '/'
+    if userhome[-1:] == '/': i = i + 1
+    return userhome + path[i:]
+
+
+# Expand paths containing shell variable substitutions.
+# This expands the forms $variable and ${variable} only.
+# Non-existent variables are left unchanged.
+
+# compiled lazily by expandvars() on first use
+_varprog = None
+
+def expandvars(path):
+    """Expand shell variables of form $var and ${var}.  Unknown variables
+    are left unchanged."""
+    global _varprog
+    if '$' not in path:
+        return path
+    if not _varprog:
+        import re
+        _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
+    i = 0
+    while 1:
+        m = _varprog.search(path, i)
+        if not m:
+            break
+        i, j = m.span(0)
+        name = m.group(1)
+        if name[:1] == '{' and name[-1:] == '}':
+            name = name[1:-1]
+        if os.environ.has_key(name):
+            tail = path[j:]
+            path = path[:i] + os.environ[name]
+            i = len(path)
+            path = path + tail
+        else:
+            # unknown variable: skip past the match, leaving it unexpanded
+            i = j
+    return path
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
+# It should be understood that this may change the meaning of the path
+# if it contains symbolic links!
+
+def normpath(path):
+    """Normalize path, eliminating double slashes, etc."""
+    if path == '':
+        return '.'
+    initial_slashes = path.startswith('/')
+    # POSIX allows one or two initial slashes, but treats three or more
+    # as single slash.
+    if (initial_slashes and
+        path.startswith('//') and not path.startswith('///')):
+        initial_slashes = 2
+    comps = path.split('/')
+    new_comps = []
+    # drop '' and '.' components; resolve '..' against the stack where legal
+    for comp in comps:
+        if comp in ('', '.'):
+            continue
+        if (comp != '..' or (not initial_slashes and not new_comps) or
+             (new_comps and new_comps[-1] == '..')):
+            new_comps.append(comp)
+        elif new_comps:
+            new_comps.pop()
+    comps = new_comps
+    path = '/'.join(comps)
+    if initial_slashes:
+        path = '/'*initial_slashes + path
+    return path or '.'
+
+
+def abspath(path):
+    """Return an absolute path."""
+    if not isabs(path):
+        # anchor relative paths at the current working directory
+        path = join(os.getcwd(), path)
+    return normpath(path)
+
+
+# Return a canonical path (i.e. the absolute location of a file on the
+# filesystem).
+
+def realpath(filename):
+    """Return the canonical path of the specified filename, eliminating any
+symbolic links encountered in the path."""
+    filename = abspath(filename)
+
+    # examine each prefix of the path; on the first symlink found,
+    # substitute its target and restart resolution from scratch.
+    # NOTE(review): no guard against symlink loops, so cyclic links can
+    # recurse indefinitely.
+    bits = ['/'] + filename.split('/')[1:]
+    for i in range(2, len(bits)+1):
+        component = join(*bits[0:i])
+        if islink(component):
+            resolved = os.readlink(component)
+            (dir, file) = split(component)
+            resolved = normpath(join(dir, resolved))
+            newpath = join(*([resolved] + bits[i:]))
+            return realpath(newpath)
+
+    return filename
diff --git a/lib-python/2.2/pprint.py b/lib-python/2.2/pprint.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pprint.py
@@ -0,0 +1,310 @@
+#  Author:      Fred L. Drake, Jr.
+#               fdrake at acm.org
+#
+#  This is a simple little module I wrote to make life easier.  I didn't
+#  see anything quite like it in the library, though I may have overlooked
+#  something.  I wrote this when I was trying to read some heavily nested
+#  tuples with fairly non-descriptive content.  This is modeled very much
+#  after Lisp/Scheme - style pretty-printing of lists.  If you find it
+#  useful, thank small children who sleep at night.
+
+"""Support to pretty-print lists, tuples, & dictionaries recursively.
+
+Very simple, but useful, especially in debugging data structures.
+
+Classes
+-------
+
+PrettyPrinter()
+    Handle pretty-printing operations onto a stream using a configured
+    set of formatting parameters.
+
+Functions
+---------
+
+pformat()
+    Format a Python object into a pretty-printed representation.
+
+pprint()
+    Pretty-print a Python object to a stream [default is sys.stdout].
+
+saferepr()
+    Generate a 'standard' repr()-like value, but protect against recursive
+    data structures.
+
+"""
+
+from types import DictType, ListType, TupleType, StringType
+import sys
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
+           "PrettyPrinter"]
+
+# cache these for faster access:
+# (module-level names are cheaper to look up than builtins/attributes,
+# and _safe_repr / PrettyPrinter.__format call these in tight loops)
+_commajoin = ", ".join
+_sys_modules = sys.modules
+_id = id
+_len = len
+_type = type
+
+
+def pprint(object, stream=None):
+    """Pretty-print a Python object to a stream [default is sys.stdout]."""
+    printer = PrettyPrinter(stream=stream)
+    printer.pprint(object)
+
+def pformat(object):
+    """Format a Python object into a pretty-printed representation."""
+    # Convenience wrapper around a default-configured PrettyPrinter.
+    return PrettyPrinter().pformat(object)
+
+def saferepr(object):
+    """Version of repr() which can handle recursive data structures."""
+    # _safe_repr returns (repr_string, isreadable, isrecursive).
+    return _safe_repr(object, {}, None, 0)[0]
+
+def isreadable(object):
+    """Determine if saferepr(object) is readable by eval()."""
+    # Second element of the _safe_repr triple is the 'readable' flag.
+    return _safe_repr(object, {}, None, 0)[1]
+
+def isrecursive(object):
+    """Determine if object requires a recursive representation."""
+    # Third element of the _safe_repr triple is the 'recursive' flag.
+    return _safe_repr(object, {}, None, 0)[2]
+
+class PrettyPrinter:
+    def __init__(self, indent=1, width=80, depth=None, stream=None):
+        """Handle pretty printing operations onto a stream using a set of
+        configured parameters.
+
+        indent
+            Number of spaces to indent for each level of nesting.
+
+        width
+            Attempted maximum number of columns in the output.
+
+        depth
+            The maximum depth to print out nested structures.
+
+        stream
+            The desired output stream.  If omitted (or false), the standard
+            output stream available at construction will be used.
+
+        """
+        indent = int(indent)
+        width = int(width)
+        assert indent >= 0
+        assert depth is None or depth > 0, "depth must be > 0"
+        assert width
+        self.__depth = depth
+        self.__indent_per_level = indent
+        self.__width = width
+        if stream:
+            self.__stream = stream
+        else:
+            self.__stream = sys.stdout
+
+    def pprint(self, object):
+        # Format the whole object, then write it with a trailing newline.
+        self.__stream.write(self.pformat(object) + "\n")
+
+    def pformat(self, object):
+        # Render into an in-memory buffer rather than the output stream.
+        sio = StringIO()
+        self.__format(object, sio, 0, 0, {}, 0)
+        return sio.getvalue()
+
+    def isrecursive(self, object):
+        # __repr sets self.__recursive as a side effect.
+        self.__recursive = 0
+        self.__repr(object, {}, 0)
+        return self.__recursive
+
+    def isreadable(self, object):
+        # __repr sets both flags as a side effect.
+        self.__recursive = 0
+        self.__readable = 1
+        self.__repr(object, {}, 0)
+        return self.__readable and not self.__recursive
+
+    def __format(self, object, stream, indent, allowance, context, level):
+        # context holds the id()s of objects currently being formatted;
+        # re-entering one of them means the structure is recursive.
+        level = level + 1
+        objid = _id(object)
+        if objid in context:
+            stream.write(_recursion(object))
+            self.__recursive = 1
+            self.__readable = 0
+            return
+        rep = self.__repr(object, context, level - 1)
+        typ = _type(object)
+        # Split across lines only when the one-line repr won't fit in
+        # the remaining width (allowance = columns reserved for closers).
+        sepLines = _len(rep) > (self.__width - 1 - indent - allowance)
+        write = stream.write
+
+        if sepLines:
+            if typ is DictType:
+                write('{')
+                if self.__indent_per_level > 1:
+                    write((self.__indent_per_level - 1) * ' ')
+                length = _len(object)
+                if length:
+                    context[objid] = 1
+                    indent = indent + self.__indent_per_level
+                    items  = object.items()
+                    items.sort()
+                    # First item is written inline after '{'; subsequent
+                    # items each start on their own indented line.
+                    key, ent = items[0]
+                    rep = self.__repr(key, context, level)
+                    write(rep)
+                    write(': ')
+                    self.__format(ent, stream, indent + _len(rep) + 2,
+                                  allowance + 1, context, level)
+                    if length > 1:
+                        for key, ent in items[1:]:
+                            rep = self.__repr(key, context, level)
+                            write(',\n%s%s: ' % (' '*indent, rep))
+                            self.__format(ent, stream, indent + _len(rep) + 2,
+                                          allowance + 1, context, level)
+                    indent = indent - self.__indent_per_level
+                    del context[objid]
+                write('}')
+                return
+
+            if typ is ListType or typ is TupleType:
+                if typ is ListType:
+                    write('[')
+                    endchar = ']'
+                else:
+                    write('(')
+                    endchar = ')'
+                if self.__indent_per_level > 1:
+                    write((self.__indent_per_level - 1) * ' ')
+                length = _len(object)
+                if length:
+                    context[objid] = 1
+                    indent = indent + self.__indent_per_level
+                    self.__format(object[0], stream, indent, allowance + 1,
+                                  context, level)
+                    if length > 1:
+                        for ent in object[1:]:
+                            write(',\n' + ' '*indent)
+                            self.__format(ent, stream, indent,
+                                          allowance + 1, context, level)
+                    indent = indent - self.__indent_per_level
+                    del context[objid]
+                if typ is TupleType and length == 1:
+                    # Singleton tuples need the trailing comma.
+                    write(',')
+                write(endchar)
+                return
+
+        # Fits on one line (or is not a container we lay out specially).
+        write(rep)
+
+    def __repr(self, object, context, level):
+        # Wrapper around _safe_repr that folds the readable/recursive
+        # flags into the printer's state.
+        repr, readable, recursive = _safe_repr(object, context,
+                                               self.__depth, level)
+        if not readable:
+            self.__readable = 0
+        if recursive:
+            self.__recursive = 1
+        return repr
+
+# Return triple (repr_string, isreadable, isrecursive).
+
+def _safe_repr(object, context, maxlevels, level):
+    typ = _type(object)
+    if typ is StringType:
+        # When the locale module has not been loaded, plain repr() is
+        # used; otherwise the quoted form is built by hand (presumably
+        # to keep the output locale-independent -- TODO confirm).
+        if 'locale' not in _sys_modules:
+            return `object`, 1, 0
+        # Pick whichever quote character needs the least escaping.
+        if "'" in object and '"' not in object:
+            closure = '"'
+            quotes = {'"': '\\"'}
+        else:
+            closure = "'"
+            quotes = {"'": "\\'"}
+        qget = quotes.get
+        sio = StringIO()
+        write = sio.write
+        for char in object:
+            if char.isalpha():
+                write(char)
+            else:
+                # Reuse repr()'s escaping for the single character,
+                # stripping its surrounding quotes.
+                write(qget(char, `char`[1:-1]))
+        return ("%s%s%s" % (closure, sio.getvalue(), closure)), 1, 0
+
+    if typ is DictType:
+        if not object:
+            return "{}", 1, 0
+        objid = _id(object)
+        # Depth cut-off: summarize as {...} (not readable by eval).
+        if maxlevels and level > maxlevels:
+            return "{...}", 0, objid in context
+        if objid in context:
+            return _recursion(object), 0, 1
+        context[objid] = 1
+        readable = 1
+        recursive = 0
+        components = []
+        append = components.append
+        level += 1
+        saferepr = _safe_repr
+        for k, v in object.iteritems():
+            krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
+            vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
+            append("%s: %s" % (krepr, vrepr))
+            readable = readable and kreadable and vreadable
+            if krecur or vrecur:
+                recursive = 1
+        del context[objid]
+        return "{%s}" % _commajoin(components), readable, recursive
+
+    if typ is ListType or typ is TupleType:
+        if typ is ListType:
+            if not object:
+                return "[]", 1, 0
+            format = "[%s]"
+        elif _len(object) == 1:
+            # One-element tuples need the trailing comma.
+            format = "(%s,)"
+        else:
+            if not object:
+                return "()", 1, 0
+            format = "(%s)"
+        objid = _id(object)
+        if maxlevels and level > maxlevels:
+            return format % "...", 0, objid in context
+        if objid in context:
+            return _recursion(object), 0, 1
+        context[objid] = 1
+        readable = 1
+        recursive = 0
+        components = []
+        append = components.append
+        level += 1
+        for o in object:
+            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
+            append(orepr)
+            if not oreadable:
+                readable = 0
+            if orecur:
+                recursive = 1
+        del context[objid]
+        return format % _commajoin(components), readable, recursive
+
+    # Fall-through for everything else: trust repr(); values whose repr
+    # starts with '<' are (by convention) not eval()-readable.
+    rep = `object`
+    return rep, (rep and not rep.startswith('<')), 0
+
+
+def _recursion(object):
+    # Marker emitted in place of an object that is already being
+    # formatted further up the call stack.
+    return ("<Recursion on %s with id=%s>"
+            % (_type(object).__name__, _id(object)))
+
+
+def _perfcheck(object=None):
+    """Crude timing comparison of raw _safe_repr() vs. full pformat()."""
+    import time
+    if object is None:
+        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
+    p = PrettyPrinter()
+    t1 = time.time()
+    _safe_repr(object, {}, None, 0)
+    t2 = time.time()
+    p.pformat(object)
+    t3 = time.time()
+    print "_safe_repr:", t2 - t1
+    print "pformat:", t3 - t2
+
+if __name__ == "__main__":
+    _perfcheck()
diff --git a/lib-python/2.2/pre.py b/lib-python/2.2/pre.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pre.py
@@ -0,0 +1,656 @@
+# module 're' -- A collection of regular expression operations
+
+r"""Support for regular expressions (RE).
+
+This module provides regular expression matching operations similar to
+those found in Perl. It's 8-bit clean: the strings being processed may
+contain both null bytes and characters whose high bit is set. Regular
+expression pattern strings may not contain null bytes, but can specify
+the null byte using the \\number notation. Characters with the high
+bit set may be included.
+
+Regular expressions can contain both special and ordinary
+characters. Most ordinary characters, like "A", "a", or "0", are the
+simplest regular expressions; they simply match themselves. You can
+concatenate ordinary characters, so last matches the string 'last'.
+
+The special characters are:
+    "."      Matches any character except a newline.
+    "^"      Matches the start of the string.
+    "$"      Matches the end of the string.
+    "*"      Matches 0 or more (greedy) repetitions of the preceding RE.
+             Greedy means that it will match as many repetitions as possible.
+    "+"      Matches 1 or more (greedy) repetitions of the preceding RE.
+    "?"      Matches 0 or 1 (greedy) of the preceding RE.
+    *?,+?,?? Non-greedy versions of the previous three special characters.
+    {m,n}    Matches from m to n repetitions of the preceding RE.
+    {m,n}?   Non-greedy version of the above.
+    "\\"      Either escapes special characters or signals a special sequence.
+    []       Indicates a set of characters.
+             A "^" as the first character indicates a complementing set.
+    "|"      A|B, creates an RE that will match either A or B.
+    (...)    Matches the RE inside the parentheses.
+             The contents can be retrieved or matched later in the string.
+    (?iLmsx) Set the I, L, M, S, or X flag for the RE.
+    (?:...)  Non-grouping version of regular parentheses.
+    (?P<name>...) The substring matched by the group is accessible by name.
+    (?P=name)     Matches the text matched earlier by the group named name.
+    (?#...)  A comment; ignored.
+    (?=...)  Matches if ... matches next, but doesn't consume the string.
+    (?!...)  Matches if ... doesn't match next.
+
+The special sequences consist of "\\" and a character from the list
+below. If the ordinary character is not on the list, then the
+resulting RE will match the second character.
+    \\number  Matches the contents of the group of the same number.
+    \\A       Matches only at the start of the string.
+    \\Z       Matches only at the end of the string.
+    \\b       Matches the empty string, but only at the start or end of a word.
+    \\B       Matches the empty string, but not at the start or end of a word.
+    \\d       Matches any decimal digit; equivalent to the set [0-9].
+    \\D       Matches any non-digit character; equivalent to the set [^0-9].
+    \\s       Matches any whitespace character; equivalent to [ \\t\\n\\r\\f\\v].
+    \\S       Matches any non-whitespace character; equiv. to [^ \\t\\n\\r\\f\\v].
+    \\w       Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
+             With LOCALE, it will match the set [0-9_] plus characters defined
+             as letters for the current locale.
+    \\W       Matches the complement of \\w.
+    \\\\       Matches a literal backslash.
+
+This module exports the following functions:
+    match    Match a regular expression pattern to the beginning of a string.
+    search   Search a string for the presence of a pattern.
+    sub      Substitute occurrences of a pattern found in a string.
+    subn     Same as sub, but also return the number of substitutions made.
+    split    Split a string by the occurrences of a pattern.
+    findall  Find all occurrences of a pattern in a string.
+    compile  Compile a pattern into a RegexObject.
+    escape   Backslash all non-alphanumerics in a string.
+
+This module exports the following classes:
+    RegexObject    Holds a compiled regular expression pattern.
+    MatchObject    Contains information about pattern matches.
+
+Some of the functions in this module take flags as optional parameters:
+    I  IGNORECASE  Perform case-insensitive matching.
+    L  LOCALE      Make \w, \W, \b, \B, dependent on the current locale.
+    M  MULTILINE   "^" matches the beginning of lines as well as the string.
+                   "$" matches the end of lines as well as the string.
+    S  DOTALL      "." matches any character at all, including the newline.
+    X  VERBOSE     Ignore whitespace and comments for nicer looking RE's.
+
+This module also defines an exception 'error'.
+
+"""
+
+
+import sys
+from pcre import *
+
+__all__ = ["match","search","sub","subn","split","findall","escape","compile",
+           "I","L","M","S","X","IGNORECASE","LOCALE","MULTILINE","DOTALL",
+           "VERBOSE","error"]
+
+#
+# First, the public part of the interface:
+#
+
+# pcre.error and re.error should be the same, since exceptions can be
+# raised from either module.
+
+# compilation flags
+
+# Single-letter aliases for the pcre compilation flags, re-exported
+# for convenience (I, L, M, S, X).
+I = IGNORECASE
+L = LOCALE
+M = MULTILINE
+S = DOTALL
+X = VERBOSE
+
+
+#
+#
+#
+
+# Cache of compiled patterns keyed by (pattern, flags); cleared
+# wholesale once it holds _MAXCACHE entries (see _cachecompile).
+_cache = {}
+_MAXCACHE = 20
+
+def _cachecompile(pattern, flags=0):
+    """Return a compiled pattern, reusing a cached one when possible."""
+    key = (pattern, flags)
+    try:
+        return _cache[key]
+    except KeyError:
+        pass
+    value = compile(pattern, flags)
+    # Crude bound: dump the whole cache rather than evicting one entry.
+    if len(_cache) >= _MAXCACHE:
+        _cache.clear()
+    _cache[key] = value
+    return value
+
+def match(pattern, string, flags=0):
+    """match (pattern, string[, flags]) -> MatchObject or None
+
+    If zero or more characters at the beginning of string match the
+    regular expression pattern, return a corresponding MatchObject
+    instance. Return None if the string does not match the pattern;
+    note that this is different from a zero-length match.
+
+    Note: If you want to locate a match anywhere in string, use
+    search() instead.
+
+    """
+
+    # Compilation is cached, so repeated calls with the same pattern
+    # and flags are cheap.
+    return _cachecompile(pattern, flags).match(string)
+
+def search(pattern, string, flags=0):
+    """search (pattern, string[, flags]) -> MatchObject or None
+
+    Scan through string looking for a location where the regular
+    expression pattern produces a match, and return a corresponding
+    MatchObject instance. Return None if no position in the string
+    matches the pattern; note that this is different from finding a
+    zero-length match at some point in the string.
+
+    """
+    # Compilation is cached, so repeated calls with the same pattern
+    # and flags are cheap.
+    return _cachecompile(pattern, flags).search(string)
+
+def sub(pattern, repl, string, count=0):
+    """sub(pattern, repl, string[, count=0]) -> string
+
+    Return the string obtained by replacing the leftmost
+    non-overlapping occurrences of pattern in string by the
+    replacement repl. If the pattern isn't found, string is returned
+    unchanged. repl can be a string or a function; if a function, it
+    is called for every non-overlapping occurrence of pattern. The
+    function takes a single match object argument, and returns the
+    replacement string.
+
+    The pattern may be a string or a regex object; if you need to
+    specify regular expression flags, you must use a regex object, or
+    use embedded modifiers in a pattern; e.g.
+    sub("(?i)b+", "x", "bbbb BBBB") returns 'x x'.
+
+    The optional argument count is the maximum number of pattern
+    occurrences to be replaced; count must be a non-negative integer,
+    and the default value of 0 means to replace all occurrences.
+
+    """
+    # Accept either a pattern string or an already-compiled RegexObject.
+    if type(pattern) == type(''):
+        pattern = _cachecompile(pattern)
+    return pattern.sub(repl, string, count)
+
+def subn(pattern, repl, string, count=0):
+    """subn(pattern, repl, string[, count=0]) -> (string, num substitutions)
+
+    Perform the same operation as sub(), but return a tuple
+    (new_string, number_of_subs_made).
+
+    """
+    # Accept either a pattern string or an already-compiled RegexObject.
+    if type(pattern) == type(''):
+        pattern = _cachecompile(pattern)
+    return pattern.subn(repl, string, count)
+
+def split(pattern, string, maxsplit=0):
+    """split(pattern, string[, maxsplit=0]) -> list of strings
+
+    Split string by the occurrences of pattern. If capturing
+    parentheses are used in pattern, then the text of all groups in
+    the pattern are also returned as part of the resulting list. If
+    maxsplit is nonzero, at most maxsplit splits occur, and the
+    remainder of the string is returned as the final element of the
+    list.
+
+    """
+    # Accept either a pattern string or an already-compiled RegexObject.
+    if type(pattern) == type(''):
+        pattern = _cachecompile(pattern)
+    return pattern.split(string, maxsplit)
+
+def findall(pattern, string):
+    """findall(pattern, string) -> list
+
+    Return a list of all non-overlapping matches of pattern in
+    string. If one or more groups are present in the pattern, return a
+    list of groups; this will be a list of tuples if the pattern has
+    more than one group. Empty matches are included in the result.
+
+    """
+    # Accept either a pattern string or an already-compiled RegexObject.
+    if type(pattern) == type(''):
+        pattern = _cachecompile(pattern)
+    return pattern.findall(string)
+
+def escape(pattern):
+    """escape(string) -> string
+
+    Return string with all non-alphanumerics backslashed; this is
+    useful if you want to match an arbitrary literal string that may
+    have regular expression metacharacters in it.
+
+    """
+    result = list(pattern)
+    for i in range(len(pattern)):
+        char = pattern[i]
+        if not char.isalnum():
+            # A NUL byte is written as its octal escape rather than a
+            # backslashed literal (pattern strings may not contain NUL).
+            if char=='\000': result[i] = '\\000'
+            else: result[i] = '\\'+char
+    return ''.join(result)
+
+def compile(pattern, flags=0):
+    """compile(pattern[, flags]) -> RegexObject
+
+    Compile a regular expression pattern into a regular expression
+    object, which can be used for matching using its match() and
+    search() methods.
+
+    """
+    # pcre_compile fills groupindex with name -> group number entries
+    # for any (?P<name>...) groups in the pattern.
+    groupindex={}
+    code=pcre_compile(pattern, flags, groupindex)
+    return RegexObject(pattern, flags, code, groupindex)
+
+
+#
+#   Class definitions
+#
+
+class RegexObject:
+    """Holds a compiled regular expression pattern.
+
+    Methods:
+    match    Match the pattern to the beginning of a string.
+    search   Search a string for the presence of the pattern.
+    sub      Substitute occurrences of the pattern found in a string.
+    subn     Same as sub, but also return the number of substitutions made.
+    split    Split a string by the occurrences of the pattern.
+    findall  Find all occurrences of the pattern in a string.
+
+    """
+
+    def __init__(self, pattern, flags, code, groupindex):
+        # code is the compiled pcre code object; groupindex maps group
+        # names (from (?P<name>...)) to group numbers.
+        self.code = code
+        self.flags = flags
+        self.pattern = pattern
+        self.groupindex = groupindex
+
+    def search(self, string, pos=0, endpos=None):
+        """search(string[, pos][, endpos]) -> MatchObject or None
+
+        Scan through string looking for a location where this regular
+        expression produces a match, and return a corresponding
+        MatchObject instance. Return None if no position in the string
+        matches the pattern; note that this is different from finding
+        a zero-length match at some point in the string. The optional
+        pos and endpos parameters have the same meaning as for the
+        match() method.
+
+        """
+        # Clamp endpos into [pos, len(string)].
+        if endpos is None or endpos>len(string):
+            endpos=len(string)
+        if endpos<pos: endpos=pos
+        regs = self.code.match(string, pos, endpos, 0)
+        if regs is None:
+            return None
+        self._num_regs=len(regs)
+
+        return MatchObject(self,
+                           string,
+                           pos, endpos,
+                           regs)
+
+    def match(self, string, pos=0, endpos=None):
+        """match(string[, pos][, endpos]) -> MatchObject or None
+
+        If zero or more characters at the beginning of string match
+        this regular expression, return a corresponding MatchObject
+        instance. Return None if the string does not match the
+        pattern; note that this is different from a zero-length match.
+
+        Note: If you want to locate a match anywhere in string, use
+        search() instead.
+
+        The optional second parameter pos gives an index in the string
+        where the search is to start; it defaults to 0.  This is not
+        completely equivalent to slicing the string; the '' pattern
+        character matches at the real beginning of the string and at
+        positions just after a newline, but not necessarily at the
+        index where the search is to start.
+
+        The optional parameter endpos limits how far the string will
+        be searched; it will be as if the string is endpos characters
+        long, so only the characters from pos to endpos will be
+        searched for a match.
+
+        """
+        if endpos is None or endpos>len(string):
+            endpos=len(string)
+        if endpos<pos: endpos=pos
+        # ANCHORED makes pcre match only at pos itself (the difference
+        # from search()).
+        regs = self.code.match(string, pos, endpos, ANCHORED)
+        if regs is None:
+            return None
+        self._num_regs=len(regs)
+        return MatchObject(self,
+                           string,
+                           pos, endpos,
+                           regs)
+
+    def sub(self, repl, string, count=0):
+        """sub(repl, string[, count=0]) -> string
+
+        Return the string obtained by replacing the leftmost
+        non-overlapping occurrences of the compiled pattern in string
+        by the replacement repl. If the pattern isn't found, string is
+        returned unchanged.
+
+        Identical to the sub() function, using the compiled pattern.
+
+        """
+        return self.subn(repl, string, count)[0]
+
+    def subn(self, repl, source, count=0):
+        """subn(repl, string[, count=0]) -> tuple
+
+        Perform the same operation as sub(), but return a tuple
+        (new_string, number_of_subs_made).
+
+        """
+        if count < 0:
+            raise error, "negative substitution count"
+        if count == 0:
+            count = sys.maxint
+        n = 0           # Number of matches
+        pos = 0         # Where to start searching
+        lastmatch = -1  # End of last match
+        results = []    # Substrings making up the result
+        end = len(source)
+
+        if type(repl) is type(''):
+            # See if repl contains group references (if it does,
+            # pcre_expand will attempt to call _Dummy.group, which
+            # results in a TypeError)
+            try:
+                repl = pcre_expand(_Dummy, repl)
+            except (error, TypeError):
+                m = MatchObject(self, source, 0, end, [])
+                repl = lambda m, repl=repl, expand=pcre_expand: expand(m, repl)
+            else:
+                m = None
+        else:
+            m = MatchObject(self, source, 0, end, [])
+
+        match = self.code.match
+        append = results.append
+        while n < count and pos <= end:
+            regs = match(source, pos, end, 0)
+            if not regs:
+                break
+            self._num_regs = len(regs)
+            i, j = regs[0]
+            if i == j == lastmatch:
+                # Empty match adjacent to previous match
+                pos = pos + 1
+                append(source[lastmatch:pos])
+                continue
+            if pos < i:
+                append(source[pos:i])
+            if m:
+                # repl is (or was wrapped into) a callable; refresh the
+                # shared MatchObject for this match before calling it.
+                m.pos = pos
+                m.regs = regs
+                append(repl(m))
+            else:
+                append(repl)
+            pos = lastmatch = j
+            if i == j:
+                # Last match was empty; don't try here again
+                pos = pos + 1
+                append(source[lastmatch:pos])
+            n = n + 1
+        append(source[pos:])
+        return (''.join(results), n)
+
+    def split(self, source, maxsplit=0):
+        """split(source[, maxsplit=0]) -> list of strings
+
+        Split string by the occurrences of the compiled pattern. If
+        capturing parentheses are used in the pattern, then the text
+        of all groups in the pattern are also returned as part of the
+        resulting list. If maxsplit is nonzero, at most maxsplit
+        splits occur, and the remainder of the string is returned as
+        the final element of the list.
+
+        """
+        if maxsplit < 0:
+            raise error, "negative split count"
+        if maxsplit == 0:
+            maxsplit = sys.maxint
+        n = 0
+        pos = 0
+        lastmatch = 0
+        results = []
+        end = len(source)
+        match = self.code.match
+        append = results.append
+        while n < maxsplit:
+            regs = match(source, pos, end, 0)
+            if not regs:
+                break
+            i, j = regs[0]
+            if i == j:
+                # Empty match
+                if pos >= end:
+                    break
+                pos = pos+1
+                continue
+            append(source[lastmatch:i])
+            rest = regs[1:]
+            if rest:
+                # Also emit the text of each capturing group (None for
+                # groups that did not participate in the match).
+                for a, b in rest:
+                    if a == -1 or b == -1:
+                        group = None
+                    else:
+                        group = source[a:b]
+                    append(group)
+            pos = lastmatch = j
+            n = n + 1
+        append(source[lastmatch:])
+        return results
+
+    def findall(self, source):
+        """findall(source) -> list
+
+        Return a list of all non-overlapping matches of the compiled
+        pattern in string. If one or more groups are present in the
+        pattern, return a list of groups; this will be a list of
+        tuples if the pattern has more than one group. Empty matches
+        are included in the result.
+
+        """
+        pos = 0
+        end = len(source)
+        results = []
+        match = self.code.match
+        append = results.append
+        while pos <= end:
+            regs = match(source, pos, end, 0)
+            if not regs:
+                break
+            i, j = regs[0]
+            rest = regs[1:]
+            if not rest:
+                # No groups: collect the whole match.
+                gr = source[i:j]
+            elif len(rest) == 1:
+                a, b = rest[0]
+                gr = source[a:b]
+            else:
+                gr = []
+                for (a, b) in rest:
+                    gr.append(source[a:b])
+                gr = tuple(gr)
+            append(gr)
+            # Advance past the match; +1 guarantees progress on an
+            # empty match.
+            pos = max(j, pos+1)
+        return results
+
+    # The following 3 functions were contributed by Mike Fletcher, and
+    # allow pickling and unpickling of RegexObject instances.
+    def __getinitargs__(self):
+        return (None,None,None,None) # any 4 elements, to work around
+                                     # problems with the
+                                     # pickle/cPickle modules not yet
+                                     # ignoring the __init__ function
+    def __getstate__(self):
+        return self.pattern, self.flags, self.groupindex
+    def __setstate__(self, statetuple):
+        self.pattern = statetuple[0]
+        self.flags = statetuple[1]
+        self.groupindex = statetuple[2]
+        # Recompile from the pickled (pattern, flags, groupindex).
+        self.code = apply(pcre_compile, statetuple)
+
+class _Dummy:
+    # Dummy class used by _subn_string().  Has 'group' to avoid core dump.
+    # subn() calls pcre_expand(_Dummy, repl) to probe whether repl
+    # contains group references (invoking _Dummy.group raises TypeError).
+    group = None
+
+class MatchObject:
+    """Contains information about a pattern match.
+
+    Methods:
+    start      Return the index of the start of a matched substring.
+    end        Return the index of the end of a matched substring.
+    span       Return a tuple of (start, end) of a matched substring.
+    groups     Return a tuple of all the subgroups of the match.
+    group      Return one or more subgroups of the match.
+    groupdict  Return a dictionary of all the named subgroups of the match.
+
+    """
+
+    def __init__(self, re, string, pos, endpos, regs):
+        # regs is a sequence of (start, end) index pairs, one per group;
+        # regs[0] spans the whole match, and (-1, -1) marks a group
+        # that did not participate in the match.
+        self.re = re
+        self.string = string
+        self.pos = pos
+        self.endpos = endpos
+        self.regs = regs
+
+    def start(self, g = 0):
+        """start([group=0]) -> int or None
+
+        Return the index of the start of the substring matched by
+        group; group defaults to zero (meaning the whole matched
+        substring). Return -1 if group exists but did not contribute
+        to the match.
+
+        """
+        # Group names are translated to numbers via the pattern's
+        # groupindex mapping.
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, 'group %s is undefined' % `g`
+        return self.regs[g][0]
+
+    def end(self, g = 0):
+        """end([group=0]) -> int or None
+
+        Return the indices of the end of the substring matched by
+        group; group defaults to zero (meaning the whole matched
+        substring). Return -1 if group exists but did not contribute
+        to the match.
+
+        """
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, 'group %s is undefined' % `g`
+        return self.regs[g][1]
+
+    def span(self, g = 0):
+        """span([group=0]) -> tuple
+
+        Return the 2-tuple (m.start(group), m.end(group)). Note that
+        if group did not contribute to the match, this is (-1,
+        -1). Group defaults to zero (meaning the whole matched
+        substring).
+
+        """
+        if type(g) == type(''):
+            try:
+                g = self.re.groupindex[g]
+            except (KeyError, TypeError):
+                raise IndexError, 'group %s is undefined' % `g`
+        return self.regs[g]
+
+    def groups(self, default=None):
+        """groups([default=None]) -> tuple
+
+        Return a tuple containing all the subgroups of the match, from
+        1 up to however many groups are in the pattern. The default
+        argument is used for groups that did not participate in the
+        match.
+
+        """
+        result = []
+        # _num_regs was stashed on the RegexObject by match()/search().
+        for g in range(1, self.re._num_regs):
+            a, b = self.regs[g]
+            if a == -1 or b == -1:
+                result.append(default)
+            else:
+                result.append(self.string[a:b])
+        return tuple(result)
+
+    def group(self, *groups):
+        """group([group1, group2, ...]) -> string or tuple
+
+        Return one or more subgroups of the match. If there is a
+        single argument, the result is a single string; if there are
+        multiple arguments, the result is a tuple with one item per
+        argument. Without arguments, group1 defaults to zero (i.e. the
+        whole match is returned). If a groupN argument is zero, the
+        corresponding return value is the entire matching string; if
+        it is in the inclusive range [1..99], it is the string
+        matching the corresponding parenthesized group. If a group
+        number is negative or larger than the number of groups defined
+        in the pattern, an IndexError exception is raised. If a group
+        is contained in a part of the pattern that did not match, the
+        corresponding result is None. If a group is contained in a
+        part of the pattern that matched multiple times, the last
+        match is returned.
+
+        If the regular expression uses the (?P<name>...) syntax, the
+        groupN arguments may also be strings identifying groups by
+        their group name. If a string argument is not used as a group
+        name in the pattern, an IndexError exception is raised.
+
+        """
+        if len(groups) == 0:
+            groups = (0,)
+        result = []
+        for g in groups:
+            if type(g) == type(''):
+                try:
+                    g = self.re.groupindex[g]
+                except (KeyError, TypeError):
+                    raise IndexError, 'group %s is undefined' % `g`
+            if g >= len(self.regs):
+                raise IndexError, 'group %s is undefined' % `g`
+            a, b = self.regs[g]
+            if a == -1 or b == -1:
+                result.append(None)
+            else:
+                result.append(self.string[a:b])
+        if len(result) > 1:
+            return tuple(result)
+        elif len(result) == 1:
+            return result[0]
+        else:
+            return ()
+
+    def groupdict(self, default=None):
+        """groupdict([default=None]) -> dictionary
+
+        Return a dictionary containing all the named subgroups of the
+        match, keyed by the subgroup name. The default argument is
+        used for groups that did not participate in the match.
+
+        """
+        dict = {}
+        for name, index in self.re.groupindex.items():
+            a, b = self.regs[index]
+            if a == -1 or b == -1:
+                dict[name] = default
+            else:
+                dict[name] = self.string[a:b]
+        return dict
diff --git a/lib-python/2.2/profile.doc b/lib-python/2.2/profile.doc
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/profile.doc
@@ -0,0 +1,702 @@
+profile.doc                     last updated 6/23/94 [by Guido]
+
+ PROFILER DOCUMENTATION and (mini) USER'S MANUAL
+
+Copyright 1994, by InfoSeek Corporation, all rights reserved.
+Written by James Roskind
+
+Permission to use, copy, modify, and distribute this Python software
+and its associated documentation for any purpose (subject to the
+restriction in the following sentence) without fee is hereby granted,
+provided that the above copyright notice appears in all copies, and
+that both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of InfoSeek not be used in
+advertising or publicity pertaining to distribution of the software
+without specific, written prior permission.  This permission is
+explicitly restricted to the copying and modification of the software
+to remain in Python, compiled Python, or other languages (such as C)
+wherein the modified or derived code is exclusively imported into a
+Python module.
+
+INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
+SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+The profiler was written after only programming in Python for 3 weeks.
+As a result, it is probably clumsy code, but I don't know for sure yet
+'cause I'm a beginner :-).  I did work hard to make the code run fast,
+so that profiling would be a reasonable thing to do.  I tried not to
+repeat code fragments, but I'm sure I did some stuff in really awkward
+ways at times.  Please send suggestions for improvements to:
+jar at infoseek.com.  I won't promise *any* support.  ...but I'd
+appreciate the feedback.
+
+
+SECTION HEADING LIST:
+  INTRODUCTION
+  HOW IS THIS profile DIFFERENT FROM THE OLD profile MODULE?
+  INSTANT USERS MANUAL
+  WHAT IS DETERMINISTIC PROFILING?
+  REFERENCE MANUAL			  
+    FUNCTION	profile.run(string, filename_opt)
+    CLASS	Stats(filename, ...)
+    METHOD	strip_dirs()
+    METHOD	add(filename, ...)
+    METHOD	sort_stats(key, ...)
+    METHOD	reverse_order()
+    METHOD	print_stats(restriction, ...)
+    METHOD	print_callers(restrictions, ...)
+    METHOD	print_callees(restrictions, ...)
+    METHOD	ignore()
+  LIMITATIONS
+  CALIBRATION
+  EXTENSIONS: Deriving Better Profilers
+
+
+
+INTRODUCTION
+
+A "profiler" is a program that describes the run time performance of a
+program, providing a variety of statistics.  This documentation
+describes the profiler functionality provided in the modules
+"profile" and "pstats."  This profiler provides "deterministic
+profiling" of any Python programs.  It also provides a series of
+report generation tools to allow users to rapidly examine the results
+of a profile operation.
+
+
+HOW IS THIS profile DIFFERENT FROM THE OLD profile MODULE?
+
+The big changes from standard profiling module are that you get more
+information, and you pay less CPU time.  It's not a trade-off, it's a
+trade-up.
+
+To be specific:
+
+ bugs removed: local stack frame is no longer molested, execution time
+      is now charged to correct functions, ....
+
+ accuracy increased: profiler execution time is no longer charged to
+      user's code, calibration for platform is supported, file reads
+      are not done *by* profiler *during* profiling (and charged to
+      user's code!), ...
+
+ speed increased: Overhead CPU cost was reduced by more than a factor of
+      two (perhaps a factor of five), lightweight profiler module is
+      all that must be loaded, and the report generating module
+      (pstats) is not needed during profiling. 
+
+ recursive functions support: cumulative times in recursive functions
+      are correctly calculated; recursive entries are counted; ...
+
+ large growth in report generating UI: distinct profiles runs can be added
+       together forming a comprehensive report; functions that import
+       statistics take arbitrary lists of files; sorting criteria is now
+       based on keywords (instead of 4 integer options); reports shows
+       what functions were profiled as well as what profile file was
+       referenced; output format has been improved, ...
+
+
+INSTANT USERS MANUAL
+
+This section is provided for users that "don't want to read the
+manual." It provides a very brief overview, and allows a user to
+rapidly perform profiling on an existing application.
+
+To profile an application with a main entry point of "foo()", you
+would add the following to your module:
+
+	import profile
+	profile.run("foo()")
+
+The above action would cause "foo()" to be run, and a series of
+informative lines (the profile) to be printed.  The above approach is
+most useful when working with the interpreter.  If you would like to
+save the results of a profile into a file for later examination, you
+can supply a file name as the second argument to the run() function:
+
+	import profile
+	profile.run("foo()", 'fooprof')
+
+When you wish to review the profile, you should use the methods in the
+pstats module.  Typically you would load the statistics data as
+follows:
+
+	import pstats
+	p = pstats.Stats('fooprof')
+
+The class "Stats" (the above code just created an instance of this
+class) has a variety of methods for manipulating and printing the data
+that was just read into "p".  When you ran profile.run() above, what
+was printed was the result of three method calls:
+
+	p.strip_dirs().sort_stats(-1).print_stats()
+
+The first method removed the extraneous path from all the module
+names. The second method sorted all the entries according to the
+standard module/line/name string that is printed (this is to comply
+with the semantics of the old profiler).  The third method printed out
+all the statistics.  You might try the following sort calls:
+
+	p.sort_stats('name')
+	p.print_stats()
+
+The first call will actually sort the list by function name, and the
+second call will print out the statistics.  The following are some
+interesting calls to experiment with:
+
+	p.sort_stats('cumulative').print_stats(10)
+
+This sorts the profile by cumulative time in a function, and then only
+prints the ten most significant lines.  If you want to understand what
+algorithms are taking time, the above line is what you would use.
+
+If you were looking to see what functions were looping a lot, and
+taking a lot of time, you would do:
+
+	p.sort_stats('time').print_stats(10)
+
+to sort according to time spent within each function, and then print
+the statistics for the top ten functions.
+
+You might also try:
+
+	p.sort_stats('file').print_stats('__init__')
+
+This will sort all the statistics by file name, and then print out
+statistics for only the class init methods ('cause they are spelled
+with "__init__" in them).  As one final example, you could try:
+
+	p.sort_stats('time', 'cum').print_stats(.5, 'init')
+
+This line sorts stats with a primary key of time, and a secondary key
+of cumulative time, and then prints out some of the statistics.  To be
+specific, the list is first culled down to 50% (re: .5) of its
+original size, then only lines containing "init" are maintained, and
+that sub-sub-list is printed.
+
+If you wondered what functions called the above functions, you could
+now (p is still sorted according to the last criteria) do:
+
+	p.print_callers(.5, 'init')
+
+and you would get a list of callers for each of the listed functions. 
+
+If you want more functionality, you're going to have to read the
+manual (or guess) what the following functions do:
+
+	p.print_callees()
+	p.add('fooprof')
+
+
+WHAT IS DETERMINISTIC PROFILING?
+
+"Deterministic profiling" is meant to reflect the fact that all
+"function call", "function return", and "exception" events are
+monitored, and precise timings are made for the intervals between
+these events (during which time the user's code is executing).  In
+contrast, "statistical profiling" (which is not done by this module)
+randomly samples the effective instruction pointer, and deduces where
+time is being spent.  The latter technique traditionally involves less
+overhead (as the code does not need to be instrumented), but provides
+only relative indications of where time is being spent.
+
+In Python, since there is an interpreter active during execution, the
+presence of instrumented code is not required to do deterministic
+profiling. Python automatically provides a hook (optional callback)
+for each event.  In addition, the interpreted nature of Python tends
+to add so much overhead to execution, that deterministic profiling
+tends to only add small processing overhead, in typical applications.
+The result is that deterministic profiling is not that expensive, but
+yet provides extensive run time statistics about the execution of a
+Python program.  
+
+Call count statistics can be used to identify bugs in code (surprising
+counts), and to identify possible inline-expansion points (high call
+counts).  Internal time statistics can be used to identify hot loops
+that should be carefully optimized.  Cumulative time statistics should
+be used to identify high level errors in the selection of algorithms.
+Note that the unusual handling of cumulative times in this profiler
+allows statistics for recursive implementations of algorithms to be
+directly compared to iterative implementations.
+
+
+REFERENCE MANUAL			  
+
+The primary entry point for the profiler is the global function
+profile.run().  It is typically used to create any profile
+information.  The reports are formatted and printed using methods for
+the class pstats.Stats.  The following is a description of all of
+these standard entry points and functions.  For a more in-depth view
+of some of the code, consider reading the later section on "Profiler
+Extensions," which includes discussion of how to derive "better"
+profilers from the classes presented, or reading the source code for
+these modules.
+
+
+FUNCTION	profile.run(string, filename_opt)
+
+This function takes a single argument that can be passed to the
+"exec" statement, and an optional file name.  In all cases this
+routine attempts to "exec" its first argument, and gather profiling
+statistics from the execution. If no file name is present, then this
+function automatically prints a simple profiling report, sorted by the
+standard name string (file/line/function-name) that is presented in
+each line.  The following is a typical output from such a call:
+
+cut here----
+
+         main()
+         2706 function calls (2004 primitive calls) in 4.504 CPU seconds
+
+   Ordered by: standard name
+
+   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
+        2    0.006    0.003    0.953    0.477 pobject.py:75(save_objects)
+     43/3    0.533    0.012    0.749    0.250 pobject.py:99(evaluate)
+	...
+
+cut here----
+
+The first line indicates that this profile was generated by the call:
+profile.run('main()'), and hence the exec'ed string is 'main()'.  The
+second line indicates that 2706 calls were monitored.  Of those calls,
+2004 were "primitive."  We define "primitive" to mean that the call
+was not induced via recursion.  The next line: "Ordered by: standard
+name", indicates that the text string in the far right column was used
+to sort the output.  The column headings include:
+
+	"ncalls" for the number of calls, 
+	"tottime" for the total time spent in the given function
+		(and excluding time made in calls to sub-functions), 
+	"percall" is the quotient of "tottime" divided by "ncalls"
+	"cumtime" is the total time spent in this and all subfunctions
+		(i.e., from invocation till exit). This figure is
+		accurate *even* for recursive functions.
+	"percall" is the quotient of "cumtime" divided by primitive
+		calls
+	"filename:lineno(function)" provides the respective data of
+		each function
+
+When there are two numbers in the first column (e.g.: 43/3), then the
+latter is the number of primitive calls, and the former is the actual
+number of calls.  Note that when the function does not recurse, these
+two values are the same, and only the single figure is printed.
+
+
+CLASS	Stats(filename, ...)
+
+This class constructor creates an instance of a statistics object from
+a filename (or set of filenames).  Stats objects are manipulated by
+methods, in order to print useful reports.  
+
+The file selected by the above constructor must have been created by
+the corresponding version of profile.  To be specific, there is *NO*
+file compatibility guaranteed with future versions of this profiler,
+and there is no compatibility with files produced by other profilers
+(e.g., the standard system profiler).
+
+If several files are provided, all the statistics for identical
+functions will be coalesced, so that an overall view of several
+processes can be considered in a single report.  If additional files
+need to be combined with data in an existing Stats object, the add()
+method can be used.
+
+
+METHOD	strip_dirs()
+
+This method for the Stats class removes all leading path information
+from file names.  It is very useful in reducing the size of the
+printout to fit within (close to) 80 columns.  This method modifies
+the object, and the stripped information is lost.  After performing a
+strip operation, the object is considered to have its entries in a
+"random" order, as it was just after object initialization and
+loading.  If strip_dir() causes two function names to be
+indistinguishable (i.e., they are on the same line of the same
+filename, and have the same function name), then the statistics for
+these two entries are accumulated into a single entry.
+
+
+METHOD	add(filename, ...)
+
+This method of the Stats class accumulates additional profiling
+information into the current profiling object.  Its arguments should
+refer to filenames created by the corresponding version of
+profile.run().  Statistics for identically named (re: file, line,
+name) functions are automatically accumulated into single function
+statistics.
+
+
+METHOD	sort_stats(key, ...)
+
+This method modifies the Stats object by sorting it according to the
+supplied criteria.  The argument is typically a string identifying the
+basis of a sort (example: "time" or "name").
+
+When more than one key is provided, then additional keys are used as
+secondary criteria when there is equality in all keys selected
+before them.  For example, sort_stats('name', 'file') will sort all
+the entries according to their function name, and resolve all ties
+(identical function names) by sorting by file name.
+
+Abbreviations can be used for any key names, as long as the
+abbreviation is unambiguous.  The following are the keys currently
+defined: 
+
+		Valid Arg       Meaning
+		  "calls"      call count
+		  "cumulative" cumulative time
+		  "file"       file name
+		  "module"     file name
+		  "pcalls"     primitive call count
+		  "line"       line number
+		  "name"       function name
+		  "nfl"        name/file/line
+		  "stdname"    standard name
+		  "time"       internal time
+
+Note that all sorts on statistics are in descending order (placing most
+time consuming items first), whereas name, file, and line number
+searches are in ascending order (i.e., alphabetical). The subtle
+distinction between "nfl" and "stdname" is that the standard name is a
+sort of the name as printed, which means that the embedded line
+numbers get compared in an odd way.  For example, lines 3, 20, and 40
+would (if the file names were the same) appear in the string order
+"20" "3" and "40".  In contrast, "nfl" does a numeric compare of the
+line numbers.  In fact, sort_stats("nfl") is the same as
+sort_stats("name", "file", "line").
+
+For compatibility with the standard profiler, the numeric argument -1,
+0, 1, and 2 are permitted.  They are interpreted as "stdname",
+"calls", "time", and "cumulative" respectively.  If this old style
+format (numeric) is used, only one sort key (the numeric key) will be
+used, and additional arguments will be silently ignored.
+
+
+METHOD	reverse_order()
+
+This method for the Stats class reverses the ordering of the basic
+list within the object.  This method is provided primarily for
+compatibility with the standard profiler.  Its utility is questionable
+now that ascending vs descending order is properly selected based on
+the sort key of choice.
+
+
+METHOD	print_stats(restriction, ...)
+
+This method for the Stats class prints out a report as described in
+the profile.run() definition.  
+
+The order of the printing is based on the last sort_stats() operation
+done on the object (subject to caveats in add() and strip_dirs()).
+
+The arguments provided (if any) can be used to limit the list down to
+the significant entries.  Initially, the list is taken to be the
+complete set of profiled functions.  Each restriction is either an
+integer (to select a count of lines), or a decimal fraction between
+0.0 and 1.0 inclusive (to select a percentage of lines), or a regular
+expression (to pattern match the standard name that is printed).  If
+several restrictions are provided, then they are applied sequentially.
+For example:
+
+	print_stats(.1, "foo:")
+
+would first limit the printing to first 10% of list, and then only 
+print functions that were part of filename ".*foo:".  In contrast, the
+command: 
+
+	print_stats("foo:", .1)
+
+would limit the list to all functions having file names ".*foo:", and
+then proceed to only print the first 10% of them.
+
+
+METHOD	print_callers(restrictions, ...)
+
+This method for the Stats class prints a list of all functions that
+called each function in the profiled database.  The ordering is
+identical to that provided by print_stats(), and the definition of the
+restricting argument is also identical.  For convenience, a number is
+shown in parentheses after each caller to show how many times this
+specific call was made.  A second non-parenthesized number is the
+cumulative time spent in the function at the right.
+
+
+METHOD	print_callees(restrictions, ...)
+
+This method for the Stats class prints a list of all function that
+were called by the indicated function.  Aside from this reversal of
+direction of calls (re: called vs was called by), the arguments and
+ordering are identical to the print_callers() method.
+
+
+METHOD	ignore()
+
+This method of the Stats class is used to dispose of the value
+returned by earlier methods.  All standard methods in this class
+return the instance that is being processed, so that the commands can
+be strung together.  For example:
+
+pstats.Stats('foofile').strip_dirs().sort_stats('cum').print_stats().ignore()
+
+would perform all the indicated functions, but it would not return
+the final reference to the Stats instance.
+
+
+
+	
+LIMITATIONS
+
+There are two fundamental limitations on this profiler.  The first is
+that it relies on the Python interpreter to dispatch "call", "return",
+and "exception" events.  Compiled C code does not get interpreted,
+and hence is "invisible" to the profiler.  All time spent in C code
+(including builtin functions) will be charged to the Python function
+that was invoked the C code.  IF the C code calls out to some native
+Python code, then those calls will be profiled properly.
+
+The second limitation has to do with accuracy of timing information.
+There is a fundamental problem with deterministic profilers involving
+accuracy.  The most obvious restriction is that the underlying "clock"
+is only ticking at a rate (typically) of about .001 seconds.  Hence no
+measurements will be more accurate than that underlying clock.  If
+enough measurements are taken, then the "error" will tend to average
+out. Unfortunately, removing this first error induces a second source
+of error...
+
+The second problem is that it "takes a while" from when an event is
+dispatched until the profiler's call to get the time actually *gets*
+the state of the clock.  Similarly, there is a certain lag when
+exiting the profiler event handler from the time that the clock's
+value was obtained (and then squirreled away), until the user's code
+is once again executing.  As a result, functions that are called many
+times, or call many functions, will typically accumulate this error.
+The error that accumulates in this fashion is typically less than the
+accuracy of the clock (i.e., less than one clock tick), but it *can*
+accumulate and become very significant.  This profiler provides a
+means of calibrating itself for a given platform so that this error can
+be probabilistically (i.e., on the average) removed.  After the
+profiler is calibrated, it will be more accurate (in a least square
+sense), but it will sometimes produce negative numbers (when call
+counts are exceptionally low, and the gods of probability work against
+you :-). )  Do *NOT* be alarmed by negative numbers in the profile.
+They should *only* appear if you have calibrated your profiler, and
+the results are actually better than without calibration.
+
+
+CALIBRATION
+
+The profiler class has a hard coded constant that is added to each
+event handling time to compensate for the overhead of calling the time
+function, and socking away the results.  The following procedure can
+be used to obtain this constant for a given platform (see discussion
+in LIMITATIONS above). 
+
+	import profile
+	pr = profile.Profile()
+	pr.calibrate(100)
+	pr.calibrate(100)
+	pr.calibrate(100)
+
+The argument to calibrate() is the number of times to try to do the
+sample calls to get the CPU times.  If your computer is *very* fast,
+you might have to do:
+
+	pr.calibrate(1000)
+
+or even:
+
+	pr.calibrate(10000)
+
+The object of this exercise is to get a fairly consistent result.
+When you have a consistent answer, you are ready to use that number in
+the source code.  For a Sun Sparcstation 1000 running Solaris 2.3, the
+magical number is about .00053.  If you have a choice, you are better
+off with a smaller constant, and your results will "less often" show
+up as negative in profile statistics.
+
+The following shows how the trace_dispatch() method in the Profile
+class should be modified to install the calibration constant on a Sun
+Sparcstation 1000:
+
+	def trace_dispatch(self, frame, event, arg):
+		t = self.timer()
+		t = t[0] + t[1] - self.t - .00053 # Calibration constant
+
+		if self.dispatch[event](frame,t):
+			t = self.timer()
+			self.t = t[0] + t[1]
+		else:
+			r = self.timer()
+			self.t = r[0] + r[1] - t # put back unrecorded delta
+		return
+
+Note that if there is no calibration constant, then the line
+containing the calibration constant should simply say:
+
+		t = t[0] + t[1] - self.t  # no calibration constant
+
+You can also achieve the same results using a derived class (and the
+profiler will actually run equally fast!!), but the above method is
+the simplest to use.  I could have made the profiler "self
+calibrating", but it would have made the initialization of the
+profiler class slower, and would have required some *very* fancy
+coding, or else the use of a variable where the constant .00053 was
+placed in the code shown.  This is a ****VERY**** critical performance
+section, and there is no reason to use a variable lookup at this
+point, when a constant can be used.
+
+
+EXTENSIONS: Deriving Better Profilers
+
+The Profile class of profile was written so that derived classes
+could be developed to extend the profiler.  Rather than describing all
+the details of such an effort, I'll just present the following two
+examples of derived classes that can be used to do profiling.  If the
+reader is an avid Python programmer, then it should be possible to use
+these as a model and create similar (and perchance better) profile
+classes. 
+
+If all you want to do is change how the timer is called, or which
+timer function is used, then the basic class has an option for that in
+the constructor for the class.  Consider passing the name of a
+function to call into the constructor:
+
+	pr = profile.Profile(your_time_func)
+
+The resulting profiler will call your time function instead of
+os.times().  The function should return either a single number, or a
+list of numbers (like what os.times() returns).  If the function
+returns a single time number, or the list of returned numbers has
+length 2, then you will get an especially fast version of the dispatch
+routine.  
+
+Be warned that you *should* calibrate the profiler class for the
+timer function that you choose.  For most machines, a timer that
+returns a lone integer value will provide the best results in terms of
+low overhead during profiling.  (os.times is *pretty* bad, 'cause it
+returns a tuple of floating point values, so all arithmetic is
+floating point in the profiler!).  If you want to substitute a
+better timer in the cleanest fashion, you should derive a class, and
+simply put in the replacement dispatch method that better handles your timer
+call, along with the appropriate calibration constant :-).
+
+
+cut here------------------------------------------------------------------
+#****************************************************************************
+# OldProfile class documentation
+#****************************************************************************
+#
+# The following derived profiler simulates the old style profile, providing
+# errant results on recursive functions. The reason for the usefulness of this
+# profiler is that it runs faster (i.e., less overhead) than the old
+# profiler.  It still creates all the caller stats, and is quite
+# useful when there is *no* recursion in the user's code.  It is also
+# a lot more accurate than the old profiler, as it does not charge all
+# its overhead time to the user's code. 
+#****************************************************************************
+class OldProfile(Profile):
+	# Example derived profiler that mimics the old-style profile module:
+	# faster and more accurate than the original, but gives errant
+	# results for recursive functions (see the comment block above).
+
+	def trace_dispatch_exception(self, frame, t):
+		# An exception propagating out of rframe is treated as a
+		# return from that frame.
+		rt, rtt, rct, rfn, rframe, rcur = self.cur
+		if rcur and not rframe is frame:
+			return self.trace_dispatch_return(rframe, t)
+		return 0
+
+	def trace_dispatch_call(self, frame, t):
+		# Key timings by the repr of the code object (file/line/name).
+		fn = `frame.f_code`
+		
+		self.cur = (t, 0, 0, fn, frame, self.cur)
+		if self.timings.has_key(fn):
+			# NOTE(review): this branch reassigns the existing entry
+			# to itself — effectively a no-op; presumably intentional
+			# in this example, but worth confirming.
+			tt, ct, callers = self.timings[fn]
+			self.timings[fn] = tt, ct, callers
+		else:
+			self.timings[fn] = 0, 0, {}
+		return 1
+
+	def trace_dispatch_return(self, frame, t):
+		# NOTE(review): the unpack below rebinds the 'frame' parameter
+		# with the frame stored in self.cur — confirm this shadowing
+		# is deliberate.
+		rt, rtt, rct, rfn, frame, rcur = self.cur
+		rtt = rtt + t
+		sft = rtt + rct
+
+		# Charge the returning frame's total (rt) and subtree (sft)
+		# times to the parent record.
+		pt, ptt, pct, pfn, pframe, pcur = rcur
+		self.cur = pt, ptt+rt, pct+sft, pfn, pframe, pcur
+
+		tt, ct, callers = self.timings[rfn]
+		if callers.has_key(pfn):
+			callers[pfn] = callers[pfn] + 1
+		else:
+			callers[pfn] = 1
+		self.timings[rfn] = tt+rtt, ct + sft, callers
+
+		return 1
+
+
+	def snapshot_stats(self):
+		# Convert raw timings into the stats dict consumed by pstats:
+		# (call count, primitive calls, total time, cumulative time,
+		# callers) keyed by normalized function name.
+		self.stats = {}
+		for func in self.timings.keys():
+			tt, ct, callers = self.timings[func]
+			nor_func = self.func_normalize(func)
+			nor_callers = {}
+			nc = 0
+			for func_caller in callers.keys():
+				nor_callers[self.func_normalize(func_caller)]=\
+					  callers[func_caller]
+				nc = nc + callers[func_caller]
+			self.stats[nor_func] = nc, nc, tt, ct, nor_callers
+
+		
+
+#****************************************************************************
+# HotProfile class documentation
+#****************************************************************************
+#
+# This profiler is the fastest derived profile example.  It does not
+# calculate caller-callee relationships, and does not calculate cumulative
+# time under a function.  It only calculates time spent in a function, so
+# it runs very quickly (re: very low overhead).  In truth, the basic
+# profiler is so fast, that is probably not worth the savings to give
+# up the data, but this class still provides a nice example.
+#****************************************************************************
+class HotProfile(Profile):
+	# Fastest derived profiler example: records only per-function time,
+	# no caller/callee relationships and no cumulative times (see the
+	# comment block above).
+
+	def trace_dispatch_exception(self, frame, t):
+		# NOTE(review): trace_dispatch_call stores a 4-tuple in
+		# self.cur, but 5 names are unpacked here — this example
+		# appears buggy; confirm before reusing.
+		rt, rtt, rfn, rframe, rcur = self.cur
+		if rcur and not rframe is frame:
+			return self.trace_dispatch_return(rframe, t)
+		return 0
+
+	def trace_dispatch_call(self, frame, t):
+		# (entry time, accumulated internal time, frame, parent record)
+		self.cur = (t, 0, frame, self.cur)
+		return 1
+
+	def trace_dispatch_return(self, frame, t):
+		# NOTE(review): the unpack rebinds the 'frame' parameter with
+		# the frame saved in self.cur — confirm this is deliberate.
+		rt, rtt, frame, rcur = self.cur
+
+		rfn = `frame.f_code`
+
+		# Charge elapsed time in this frame to the parent record.
+		pt, ptt, pframe, pcur = rcur
+		self.cur = pt, ptt+rt, pframe, pcur
+
+		if self.timings.has_key(rfn):
+			nc, tt = self.timings[rfn]
+			self.timings[rfn] = nc + 1, rt + rtt + tt
+		else:
+			self.timings[rfn] =      1, rt + rtt
+
+		return 1
+
+
+	def snapshot_stats(self):
+		# Emit pstats-style entries; cumulative time is reported as 0
+		# and callers as {} since this profiler does not track them.
+		self.stats = {}
+		for func in self.timings.keys():
+			nc, tt = self.timings[func]
+			nor_func = self.func_normalize(func)
+			self.stats[nor_func] = nc, nc, tt, 0, {}
+
+		
+
+cut here------------------------------------------------------------------
diff --git a/lib-python/2.2/profile.py b/lib-python/2.2/profile.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/profile.py
@@ -0,0 +1,556 @@
+#! /usr/bin/env python
+#
+# Class for profiling python code. rev 1.0  6/2/94
+#
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+#
+# See profile.doc for more information
+
+"""Class for profiling Python code."""
+
+# Copyright 1994, by InfoSeek Corporation, all rights reserved.
+# Written by James Roskind
+#
+# Permission to use, copy, modify, and distribute this Python software
+# and its associated documentation for any purpose (subject to the
+# restriction in the following sentence) without fee is hereby granted,
+# provided that the above copyright notice appears in all copies, and
+# that both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of InfoSeek not be used in
+# advertising or publicity pertaining to distribution of the software
+# without specific, written prior permission.  This permission is
+# explicitly restricted to the copying and modification of the software
+# to remain in Python, compiled Python, or other languages (such as C)
+# wherein the modified or derived code is exclusively imported into a
+# Python module.
+#
+# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
+# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+
+import sys
+import os
+import time
+import marshal
+
+__all__ = ["run","help","Profile"]
+
+# Sample timer for use with
+#i_count = 0
+#def integer_timer():
+#       global i_count
+#       i_count = i_count + 1
+#       return i_count
+#itimes = integer_timer # replace with C coded timer returning integers
+
+#**************************************************************************
+# The following are the static member functions for the profiler class
+# Note that an instance of Profile() is *not* needed to call them.
+#**************************************************************************
+
def run(statement, filename=None):
    """Profile *statement*, then dump stats to *filename* or print a report.

    *statement* is any string acceptable to the "exec" statement.  It is
    executed under a fresh Profile instance; a SystemExit raised by the
    statement is swallowed.  When *filename* is given, the raw statistics
    are saved there; otherwise a simple report sorted by the standard
    name string (file/line/function-name) is printed.
    """
    profiler = Profile()
    try:
        profiler = profiler.run(statement)
    except SystemExit:
        pass
    if filename is None:
        return profiler.print_stats()
    profiler.dump_stats(filename)
+
+# print help
def help():
    """Locate profile.doc along sys.path and page it to the terminal."""
    for dirname in sys.path:
        fullname = os.path.join(dirname, 'profile.doc')
        if os.path.exists(fullname):
            # Defer to the user's pager; $PAGER falls back to more(1).
            sts = os.system('${PAGER-more} ' + fullname)
            if sts: print '*** Pager exit status:', sts
            break
    else:
        # for/else: only reached when no sys.path entry had the file.
        print 'Sorry, can\'t find the help file "profile.doc"',
        print 'along the Python search path.'
+
+
if os.name == "mac":
    import MacOS
    def _get_time_mac(timer=MacOS.GetTicks):
        # MacOS.GetTicks counts 60ths of a second; convert to seconds.
        return timer() / 60.0

if hasattr(os, "times"):
    def _get_time_times(timer=os.times):
        # os.times() -> (user, system, ...); charge user + system CPU time.
        t = timer()
        return t[0] + t[1]
+
+
class Profile:
    """Profiler class.

    self.cur is always a tuple.  Each such tuple corresponds to a stack
    frame that is currently active (self.cur[-2]).  The following are the
    definitions of its members.  We use this external "parallel stack" to
    avoid contaminating the program that we are profiling. (old profiler
    used to write into the frames local dictionary!!) Derived classes
    can change the definition of some entries, as long as they leave
    [-2:] intact (frame and previous tuple).  In case an internal error is
    detected, the -3 element is used as the function name.

    [ 0] = Time that needs to be charged to the parent frame's function.
           It is used so that a function call will not have to access the
           timing data for the parent frame.
    [ 1] = Total time spent in this frame's function, excluding time in
           subfunctions (this latter is tallied in cur[2]).
    [ 2] = Total time spent in subfunctions, excluding time executing the
           frame's function (this latter is tallied in cur[1]).
    [-3] = Name of the function that corresponds to this frame.
    [-2] = Actual frame that we correspond to (used to sync exception handling).
    [-1] = Our parent 6-tuple (corresponds to frame.f_back).

    Timing data for each function is stored as a 5-tuple in the dictionary
    self.timings[].  The index is always the name stored in self.cur[-3].
    The following are the definitions of the members:

    [0] = The number of times this function was called, not counting direct
          or indirect recursion,
    [1] = Number of times this function appears on the stack, minus one
    [2] = Total time spent internal to this function
    [3] = Cumulative time that this function was present on the stack.  In
          non-recursive functions, this is the total execution time from start
          to finish of each invocation of a function, including time spent in
          all subfunctions.
    [4] = A dictionary indicating for each function name, the number of times
          it was called by us.
    """

    bias = 0  # calibration constant

    def __init__(self, timer=None, bias=None):
        """Pick a timer and the matching dispatch routine, then start.

        timer: optional callable returning either a scalar or a sequence
        of numbers to be summed; when omitted, the best available
        platform timer is probed for.
        bias: per-event overhead to subtract; defaults to the class
        attribute (see calibrate()).
        """
        self.timings = {}
        self.cur = None
        self.cmd = ""

        if bias is None:
            bias = self.bias
        self.bias = bias     # Materialize in local dict for lookup speed.

        if not timer:
            # Probe platform capabilities, best timer first.
            if os.name == 'mac':
                self.timer = MacOS.GetTicks
                self.dispatcher = self.trace_dispatch_mac
                self.get_time = _get_time_mac
            elif hasattr(time, 'clock'):
                self.timer = self.get_time = time.clock
                self.dispatcher = self.trace_dispatch_i
            elif hasattr(os, 'times'):
                self.timer = os.times
                self.dispatcher = self.trace_dispatch
                self.get_time = _get_time_times
            else:
                self.timer = self.get_time = time.time
                self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer() # test out timer function
            try:
                # Sequence result -> choose a summing dispatcher.
                length = len(t)
            except TypeError:
                # Scalar result -> fast integer/float dispatcher.
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    self.dispatcher = self.trace_dispatch
                else:
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                import operator
                def get_time_timer(timer=timer,
                                   reduce=reduce, reducer=operator.add):
                    return reduce(reducer, timer(), 0)
                self.get_time = get_time_timer
        self.t = self.get_time()
        self.simulate_call('profiler')

    # Heavily optimized dispatch routine for os.times() timer

    def trace_dispatch(self, frame, event, arg):
        timer = self.timer
        t = timer()
        t = t[0] + t[1] - self.t - self.bias

        if self.dispatch[event](self, frame,t):
            t = timer()
            self.t = t[0] + t[1]
        else:
            r = timer()
            self.t = r[0] + r[1] - t # put back unrecorded delta

    # Dispatch routine for best timer program (return = scalar, fastest if
    # an integer but float works too -- and time.clock() relies on that).

    def trace_dispatch_i(self, frame, event, arg):
        timer = self.timer
        t = timer() - self.t - self.bias
        if self.dispatch[event](self, frame,t):
            self.t = timer()
        else:
            self.t = timer() - t  # put back unrecorded delta

    # Dispatch routine for macintosh (timer returns time in ticks of
    # 1/60th second)

    def trace_dispatch_mac(self, frame, event, arg):
        timer = self.timer
        t = timer()/60.0 - self.t - self.bias
        if self.dispatch[event](self, frame, t):
            self.t = timer()/60.0
        else:
            self.t = timer()/60.0 - t  # put back unrecorded delta

    # SLOW generic dispatch routine for timer returning lists of numbers

    def trace_dispatch_l(self, frame, event, arg):
        get_time = self.get_time
        t = get_time() - self.t - self.bias

        if self.dispatch[event](self, frame, t):
            self.t = get_time()
        else:
            self.t = get_time() - t # put back unrecorded delta

    # In the event handlers, the first 3 elements of self.cur are unpacked
    # into vrbls w/ 3-letter names.  The last two characters are meant to be
    # mnemonic:
    #     _pt  self.cur[0] "parent time"   time to be charged to parent frame
    #     _it  self.cur[1] "internal time" time spent directly in the function
    #     _et  self.cur[2] "external time" time spent in subfunctions

    def trace_dispatch_exception(self, frame, t):
        rpt, rit, ret, rfn, rframe, rcur = self.cur
        if (rframe is not frame) and rcur:
            # Exception propagated out of rframe: account for it as a return.
            return self.trace_dispatch_return(rframe, t)
        self.cur = rpt, rit+t, ret, rfn, rframe, rcur
        return 1


    def trace_dispatch_call(self, frame, t):
        if self.cur and frame.f_back is not self.cur[-2]:
            # Our parallel stack is out of sync with the interpreter's:
            # close the stale record before pushing the new one.
            rpt, rit, ret, rfn, rframe, rcur = self.cur
            if not isinstance(rframe, Profile.fake_frame):
                assert rframe.f_back is frame.f_back, ("Bad call", rfn,
                                                       rframe, rframe.f_back,
                                                       frame, frame.f_back)
                self.trace_dispatch_return(rframe, 0)
                assert (self.cur is None or \
                        frame.f_back is self.cur[-2]), ("Bad call",
                                                        self.cur[-3])
        fcode = frame.f_code
        fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
        self.cur = (t, 0, 0, fn, frame, self.cur)
        timings = self.timings
        if timings.has_key(fn):
            cc, ns, tt, ct, callers = timings[fn]
            timings[fn] = cc, ns + 1, tt, ct, callers
        else:
            timings[fn] = 0, 0, 0, 0, {}
        return 1

    def trace_dispatch_return(self, frame, t):
        if frame is not self.cur[-2]:
            assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
            self.trace_dispatch_return(self.cur[-2], 0)

        # Prefix "r" means part of the Returning or exiting frame.
        # Prefix "p" means part of the Previous or Parent or older frame.

        rpt, rit, ret, rfn, frame, rcur = self.cur
        rit = rit + t
        frame_total = rit + ret

        ppt, pit, pet, pfn, pframe, pcur = rcur
        self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur

        timings = self.timings
        cc, ns, tt, ct, callers = timings[rfn]
        if not ns:
            # This is the only occurrence of the function on the stack.
            # Else this is a (directly or indirectly) recursive call, and
            # its cumulative time will get updated when the topmost call to
            # it returns.
            ct = ct + frame_total
            cc = cc + 1

        if callers.has_key(pfn):
            callers[pfn] = callers[pfn] + 1  # hack: gather more
            # stats such as the amount of time added to ct courtesy
            # of this specific call, and the contribution to cc
            # courtesy of this call.
        else:
            callers[pfn] = 1

        timings[rfn] = cc, ns - 1, tt + rit, ct, callers

        return 1


    # Map sys.setprofile event names to the handlers above.  The entries
    # are plain functions (not bound methods), hence the explicit "self"
    # argument at the call sites in the trace_dispatch_* routines.
    dispatch = {
        "call": trace_dispatch_call,
        "exception": trace_dispatch_exception,
        "return": trace_dispatch_return,
        }


    # The next few functions play with self.cmd. By carefully preloading
    # our parallel stack, we can force the profiled result to include
    # an arbitrary string as the name of the calling function.
    # We use self.cmd as that string, and the resulting stats look
    # very nice :-).

    def set_cmd(self, cmd):
        if self.cur[-1]: return   # already set
        self.cmd = cmd
        self.simulate_call(cmd)

    class fake_code:
        # Minimal stand-in for a code object: just the attributes the
        # profiler reads when building function-name tuples.
        def __init__(self, filename, line, name):
            self.co_filename = filename
            self.co_line = line
            self.co_name = name
            self.co_firstlineno = 0

        def __repr__(self):
            return repr((self.co_filename, self.co_line, self.co_name))

    class fake_frame:
        # Minimal stand-in for a frame object (f_code and f_back only).
        def __init__(self, code, prior):
            self.f_code = code
            self.f_back = prior

    def simulate_call(self, name):
        # Push a fabricated frame so that "name" appears as the caller of
        # everything profiled afterwards.
        code = self.fake_code('profile', 0, name)
        if self.cur:
            pframe = self.cur[-2]
        else:
            pframe = None
        frame = self.fake_frame(code, pframe)
        self.dispatch['call'](self, frame, 0)

    # collect stats from pending stack, including getting final
    # timings for self.cmd frame.

    def simulate_cmd_complete(self):
        get_time = self.get_time
        t = get_time() - self.t
        while self.cur[-1]:
            # We *can* cause assertion errors here if
            # dispatch_trace_return checks for a frame match!
            self.dispatch['return'](self, self.cur[-2], t)
            t = 0
        self.t = get_time() - t

    def print_stats(self):
        """Print a report via pstats, sorted by standard name."""
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(-1). \
                  print_stats()

    def dump_stats(self, file):
        """Marshal the snapshot stats dictionary to the named file."""
        f = open(file, 'wb')
        self.create_stats()
        marshal.dump(self.stats, f)
        f.close()

    def create_stats(self):
        # Close out pending frames, then convert timings to stats.
        self.simulate_cmd_complete()
        self.snapshot_stats()

    def snapshot_stats(self):
        """Convert self.timings into the pstats-compatible self.stats."""
        self.stats = {}
        for func in self.timings.keys():
            cc, ns, tt, ct, callers = self.timings[func]
            callers = callers.copy()
            nc = 0
            # nc = total call count (sum over all callers).
            for func_caller in callers.keys():
                nc = nc + callers[func_caller]
            self.stats[func] = cc, nc, tt, ct, callers


    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.

    def run(self, cmd):
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            # Always detach the profiler, even if cmd raised.
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call.
    def runcall(self, func, *args, **kw):
        # Backquotes == repr(): label the profile with the function's repr.
        self.set_cmd(`func`)
        sys.setprofile(self.dispatcher)
        try:
            return apply(func, args, kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each functions.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #**************************************************************

    def calibrate(self, m, verbose=0):
        """Measure per-event stopwatch overhead using m warm-up calls.

        Runs with bias temporarily forced to 0 so the measurement is
        not itself corrected.  Subclasses must supply their own version.
        """
        if self.__class__ is not Profile:
            raise TypeError("Subclasses must override .calibrate().")

        saved_bias = self.bias
        self.bias = 0
        try:
            return self._calibrate_inner(m, verbose)
        finally:
            self.bias = saved_bias

    def _calibrate_inner(self, m, verbose):
        get_time = self.get_time

        # Set up a test case to be run with and without profiling.  Include
        # lots of calls, because we're trying to quantify stopwatch overhead.
        # Do not raise any exceptions, though, because we want to know
        # exactly how many profile events are generated (one call event, +
        # one return event, per Python-level call).

        def f1(n):
            for i in range(n):
                x = 1

        def f(m, f1=f1):
            for i in range(m):
                f1(100)

        f(m)    # warm up the cache

        # elapsed_noprofile <- time f(m) takes without profiling.
        t0 = get_time()
        f(m)
        t1 = get_time()
        elapsed_noprofile = t1 - t0
        if verbose:
            print "elapsed time without profiling =", elapsed_noprofile

        # elapsed_profile <- time f(m) takes with profiling.  The difference
        # is profiling overhead, only some of which the profiler subtracts
        # out on its own.
        p = Profile()
        t0 = get_time()
        p.runctx('f(m)', globals(), locals())
        t1 = get_time()
        elapsed_profile = t1 - t0
        if verbose:
            print "elapsed time with profiling =", elapsed_profile

        # reported_time <- "CPU seconds" the profiler charged to f and f1.
        total_calls = 0.0
        reported_time = 0.0
        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
                p.timings.items():
            if funcname in ("f", "f1"):
                total_calls += cc
                reported_time += tt

        if verbose:
            print "'CPU seconds' profiler reported =", reported_time
            print "total # calls =", total_calls
        if total_calls != m + 1:
            raise ValueError("internal error: total calls = %d" % total_calls)

        # reported_time - elapsed_noprofile = overhead the profiler wasn't
        # able to measure.  Divide by twice the number of calls (since there
        # are two profiler events per call in this test) to get the hidden
        # overhead per event.
        mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
        if verbose:
            print "mean stopwatch overhead per profile event =", mean
        return mean
+
+#****************************************************************************
def Stats(*args):
    # Backward-compat stub: report generation moved to the pstats module.
    # The trailing \a rings the terminal bell to get the user's attention.
    print 'Report generating functions are in the "pstats" module\a'
+
+
+# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
    # Command-line use: profile the given script with its own argv.
    if not sys.argv[1:]:
        print "usage: profile.py scriptfile [arg] ..."
        sys.exit(2)

    filename = sys.argv[1]  # Get script filename

    del sys.argv[0]         # Hide "profile.py" from argument list

    # Insert script directory in front of module search path
    sys.path.insert(0, os.path.dirname(filename))

    # Backquotes == repr(): quote the filename into the exec'd expression.
    run('execfile(' + `filename` + ')')
diff --git a/lib-python/2.2/pstats.py b/lib-python/2.2/pstats.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pstats.py
@@ -0,0 +1,641 @@
+"""Class for printing reports on profiled python code."""
+
+# Class for printing reports on profiled python code. rev 1.0  4/1/94
+#
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+#
+# see profile.doc and profile.py for more info.
+
+# Copyright 1994, by InfoSeek Corporation, all rights reserved.
+# Written by James Roskind
+#
+# Permission to use, copy, modify, and distribute this Python software
+# and its associated documentation for any purpose (subject to the
+# restriction in the following sentence) without fee is hereby granted,
+# provided that the above copyright notice appears in all copies, and
+# that both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of InfoSeek not be used in
+# advertising or publicity pertaining to distribution of the software
+# without specific, written prior permission.  This permission is
+# explicitly restricted to the copying and modification of the software
+# to remain in Python, compiled Python, or other languages (such as C)
+# wherein the modified or derived code is exclusively imported into a
+# Python module.
+#
+# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
+# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+import os
+import time
+import marshal
+import re
+
+__all__ = ["Stats"]
+
+class Stats:
+    """This class is used for creating reports from data generated by the
+    Profile class.  It is a "friend" of that class, and imports data either
+    by direct access to members of Profile class, or by reading in a dictionary
+    that was emitted (via marshal) from the Profile class.
+
+    The big change from the previous Profiler (in terms of raw functionality)
+    is that an "add()" method has been provided to combine Stats from
+    several distinct profile runs.  Both the constructor and the add()
+    method now take arbitrarily many file names as arguments.
+
+    All the print methods now take an argument that indicates how many lines
+    to print.  If the arg is a floating point number between 0 and 1.0, then
+    it is taken as a decimal percentage of the available lines to be printed
+    (e.g., .1 means print 10% of all available lines).  If it is an integer,
+    it is taken to mean the number of lines of data that you wish to have
+    printed.
+
+    The sort_stats() method now processes some additional options (i.e., in
+    addition to the old -1, 0, 1, or 2).  It takes an arbitrary number of quoted
+    strings to select the sort order.  For example sort_stats('time', 'name')
+    sorts on the major key of "internal function time", and on the minor
+    key of 'the name of the function'.  Look at the two tables in sort_stats()
+    and get_sort_arg_defs(self) for more examples.
+
+    All methods now return "self",  so you can string together commands like:
+        Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
+                            print_stats(5).print_callers(5)
+    """
+
+    def __init__(self, *args):
+        if not len(args):
+            arg = None
+        else:
+            arg = args[0]
+            args = args[1:]
+        self.init(arg)
+        apply(self.add, args)
+
    def init(self, arg):
        """Reset all accumulators, then load stats from *arg*.

        *arg* is anything load_stats() accepts: None, a stats file name,
        or an object with a create_stats() method (e.g. a Profile).
        """
        self.all_callees = None  # calc only if needed
        self.files = []
        self.fcn_list = None
        self.total_tt = 0
        self.total_calls = 0
        self.prim_calls = 0
        self.max_name_len = 0
        self.top_level = {}
        self.stats = {}
        self.sort_arg_dict = {}
        self.load_stats(arg)
        trouble = 1
        try:
            self.get_top_level_stats()
            trouble = 0
        finally:
            if trouble:
                # Totals could not be computed; warn (the exception from
                # get_top_level_stats() still propagates past the finally).
                print "Invalid timing data",
                if self.files: print self.files[-1],
                print
+
    def load_stats(self, arg):
        """Populate self.stats from *arg*.

        None -> empty stats; a string -> marshalled stats file to read;
        an object with create_stats() -> adopt (and clear) its .stats.
        Raises TypeError when nothing usable was loaded.
        """
        if not arg:  self.stats = {}
        elif type(arg) == type(""):
            f = open(arg, 'rb')
            self.stats = marshal.load(f)
            f.close()
            try:
                # Label the source file with its modification time.
                file_stats = os.stat(arg)
                arg = time.ctime(file_stats[8]) + "    " + arg
            except:  # in case this is not unix
                pass
            self.files = [ arg ]
        elif hasattr(arg, 'create_stats'):
            arg.create_stats()
            self.stats = arg.stats
            arg.stats = {}
        if not self.stats:
            raise TypeError,  "Cannot create or construct a " \
                      + `self.__class__` \
                      + " object from '" + `arg` + "'"
        return
+
+    def get_top_level_stats(self):
+        for func, (cc, nc, tt, ct, callers) in self.stats.items():
+            self.total_calls += nc
+            self.prim_calls  += cc
+            self.total_tt    += tt
+            if callers.has_key(("jprofile", 0, "profiler")):
+                self.top_level[func] = None
+            if len(func_std_string(func)) > self.max_name_len:
+                self.max_name_len = len(func_std_string(func))
+
    def add(self, *arg_list):
        """Merge one or more other stats sources into this object.

        Each argument may be anything the constructor accepts; non-Stats
        arguments are converted first.  Arguments beyond the first are
        handled by a recursive call.  Returns self.
        """
        if not arg_list: return self
        if len(arg_list) > 1: apply(self.add, arg_list[1:])
        other = arg_list[0]
        if type(self) != type(other) or self.__class__ != other.__class__:
            other = Stats(other)
        self.files += other.files
        self.total_calls += other.total_calls
        self.prim_calls += other.prim_calls
        self.total_tt += other.total_tt
        for func in other.top_level.keys():
            self.top_level[func] = None

        if self.max_name_len < other.max_name_len:
            self.max_name_len = other.max_name_len

        # Any cached sort order is invalidated by the merge.
        self.fcn_list = None

        for func in other.stats.keys():
            if self.stats.has_key(func):
                old_func_stat = self.stats[func]
            else:
                old_func_stat = (0, 0, 0, 0, {},)
            self.stats[func] = add_func_stats(old_func_stat, other.stats[func])
        return self
+
    # list the tuple indices and directions for sorting,
    # along with some printable description
    # Each value is (((index, direction), ...), description), where index
    # selects a field of the decorated tuple built in sort_stats() and
    # direction -1 means descending, 1 means ascending.
    sort_arg_dict_default = {
              "calls"     : (((1,-1),              ), "call count"),
              "cumulative": (((3,-1),              ), "cumulative time"),
              "file"      : (((4, 1),              ), "file name"),
              "line"      : (((5, 1),              ), "line number"),
              "module"    : (((4, 1),              ), "file name"),
              "name"      : (((6, 1),              ), "function name"),
              "nfl"       : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
              "pcalls"    : (((0,-1),              ), "call count"),
              "stdname"   : (((7, 1),              ), "standard name"),
              "time"      : (((2,-1),              ), "internal time"),
              }
+
+    def get_sort_arg_defs(self):
+        """Expand all abbreviations that are unique."""
+        if not self.sort_arg_dict:
+            self.sort_arg_dict = dict = {}
+            bad_list = {}
+            for word in self.sort_arg_dict_default.keys():
+                fragment = word
+                while fragment:
+                    if not fragment:
+                        break
+                    if dict.has_key(fragment):
+                        bad_list[fragment] = 0
+                        break
+                    dict[fragment] = self.sort_arg_dict_default[word]
+                    fragment = fragment[:-1]
+            for word in bad_list.keys():
+                del dict[word]
+        return self.sort_arg_dict
+
    def sort_stats(self, *field):
        """Set the function ordering used by the print methods.

        Accepts sort-key names (see sort_arg_dict_default), possibly
        abbreviated, or a single old-style integer (-1, 0, 1, 2).
        Returns self.
        """
        if not field:
            self.fcn_list = 0
            return self
        if len(field) == 1 and type(field[0]) == type(1):
            # Be compatible with old profiler
            field = [ {-1: "stdname",
                      0:"calls",
                      1:"time",
                      2: "cumulative" }  [ field[0] ] ]

        sort_arg_defs = self.get_sort_arg_defs()
        sort_tuple = ()
        self.sort_type = ""
        connector = ""
        for word in field:
            sort_tuple = sort_tuple + sort_arg_defs[word][0]
            self.sort_type += connector + sort_arg_defs[word][1]
            connector = ", "

        # Decorate: (cc, nc, tt, ct, file, line, name, std-name, func-key);
        # the indices in sort_arg_dict_default address this tuple.
        stats_list = []
        for func in self.stats.keys():
            cc, nc, tt, ct, callers = self.stats[func]
            stats_list.append((cc, nc, tt, ct) + func +
                              (func_std_string(func), func))

        stats_list.sort(TupleComp(sort_tuple).compare)

        # Keep only the function keys, in sorted order.
        self.fcn_list = fcn_list = []
        for tuple in stats_list:
            fcn_list.append(tuple[-1])
        return self
+
+    def reverse_order(self):
+        if self.fcn_list:
+            self.fcn_list.reverse()
+        return self
+
    def strip_dirs(self):
        """Rebuild stats with path-stripped function keys; returns self.

        Entries that collide after stripping are merged.  Cached sort
        order and callee tables are invalidated.
        """
        oldstats = self.stats
        self.stats = newstats = {}
        max_name_len = 0
        for func in oldstats.keys():
            cc, nc, tt, ct, callers = oldstats[func]
            newfunc = func_strip_path(func)
            if len(func_std_string(newfunc)) > max_name_len:
                max_name_len = len(func_std_string(newfunc))
            # Strip paths in the caller keys too.
            newcallers = {}
            for func2 in callers.keys():
                newcallers[func_strip_path(func2)] = callers[func2]

            if newstats.has_key(newfunc):
                # Stripping made two distinct functions collide: merge them.
                newstats[newfunc] = add_func_stats(
                                        newstats[newfunc],
                                        (cc, nc, tt, ct, newcallers))
            else:
                newstats[newfunc] = (cc, nc, tt, ct, newcallers)
        old_top = self.top_level
        self.top_level = new_top = {}
        for func in old_top.keys():
            new_top[func_strip_path(func)] = None

        self.max_name_len = max_name_len

        self.fcn_list = None
        self.all_callees = None
        return self
+
+    def calc_callees(self):
+        """Build self.all_callees: func -> {callee: call count}, by
+        inverting the per-entry caller dictionaries.  Does nothing if
+        the map has already been computed."""
+        if self.all_callees: return
+        self.all_callees = all_callees = {}
+        for func in self.stats.keys():
+            if not all_callees.has_key(func):
+                all_callees[func] = {}
+            cc, nc, tt, ct, callers = self.stats[func]
+            for func2 in callers.keys():
+                if not all_callees.has_key(func2):
+                    all_callees[func2] = {}
+                all_callees[func2][func]  = callers[func2]
+        return
+
+    #******************************************************************
+    # The following functions support actual printing of reports
+    #******************************************************************
+
+    # Optional "amount" is either a line count, or a percentage of lines.
+
+    def eval_print_amount(self, sel, list, msg):
+        """Narrow *list* according to one selector *sel*.
+
+        sel may be a regexp string (keep entries whose standard name
+        matches), a float in [0, 1) (keep that fraction, rounded), or
+        an int (keep that many entries).  Returns (new_list, msg) with
+        a note appended to msg whenever the list was reduced.
+        """
+        new_list = list
+        if type(sel) == type(""):
+            new_list = []
+            for func in list:
+                if re.search(sel, func_std_string(func)):
+                    new_list.append(func)
+        else:
+            count = len(list)
+            if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
+                count = int(count * sel + .5)
+                new_list = list[:count]
+            elif type(sel) == type(1) and 0 <= sel < count:
+                count = sel
+                new_list = list[:count]
+        if len(list) != len(new_list):
+            msg = msg + "   List reduced from " + `len(list)` \
+                      + " to " + `len(new_list)` + \
+                      " due to restriction <" + `sel` + ">\n"
+
+        return new_list, msg
+
+    def get_print_list(self, sel_list):
+        """Apply all selectors in sel_list and return (width, list):
+        the column width for function names and the functions to print,
+        in the current sort order (arbitrary order if no sort was
+        requested).  Also prints the accumulated selection message."""
+        width = self.max_name_len
+        if self.fcn_list:
+            list = self.fcn_list[:]
+            msg = "   Ordered by: " + self.sort_type + '\n'
+        else:
+            list = self.stats.keys()
+            msg = "   Random listing order was used\n"
+
+        for selection in sel_list:
+            list, msg = self.eval_print_amount(selection, list, msg)
+
+        count = len(list)
+
+        if not list:
+            return 0, list
+        print msg
+        if count < len(self.stats):
+            # Subset selected: shrink the column to the widest survivor.
+            width = 0
+            for func in list:
+                if  len(func_std_string(func)) > width:
+                    width = len(func_std_string(func))
+        return width+2, list
+
+    def print_stats(self, *amount):
+        """Print the main statistics report, optionally restricted by
+        the selectors in *amount* (counts, fractions or regexps).
+        Returns self for chaining."""
+        for filename in self.files:
+            print filename
+        if self.files: print
+        indent = ' ' * 8
+        for func in self.top_level.keys():
+            print indent, func_get_function_name(func)
+
+        print indent, self.total_calls, "function calls",
+        if self.total_calls != self.prim_calls:
+            print "(%d primitive calls)" % self.prim_calls,
+        print "in %.3f CPU seconds" % self.total_tt
+        print
+        width, list = self.get_print_list(amount)
+        if list:
+            self.print_title()
+            for func in list:
+                self.print_line(func)
+            print
+            print
+        return self
+
+    def print_callees(self, *amount):
+        """For each selected function, list the functions it called.
+        Selectors in *amount* work as in print_stats()."""
+        width, list = self.get_print_list(amount)
+        if list:
+            self.calc_callees()
+
+            self.print_call_heading(width, "called...")
+            for func in list:
+                if self.all_callees.has_key(func):
+                    self.print_call_line(width, func, self.all_callees[func])
+                else:
+                    self.print_call_line(width, func, {})
+            print
+            print
+        return self
+
+    def print_callers(self, *amount):
+        """For each selected function, list the functions that called
+        it.  Selectors in *amount* work as in print_stats()."""
+        width, list = self.get_print_list(amount)
+        if list:
+            self.print_call_heading(width, "was called by...")
+            for func in list:
+                cc, nc, tt, ct, callers = self.stats[func]
+                self.print_call_line(width, func, callers)
+            print
+            print
+        return self
+
+    def print_call_heading(self, name_size, column_title):
+        """Print the header row for a caller/callee listing."""
+        print "Function ".ljust(name_size) + column_title
+
+    def print_call_line(self, name_size, source, call_dict):
+        """Print one function name followed by its call_dict entries,
+        one related function (with call count and cumulative time)
+        per line; '--' if call_dict is empty."""
+        print func_std_string(source).ljust(name_size),
+        if not call_dict:
+            print "--"
+            return
+        clist = call_dict.keys()
+        clist.sort()
+        name_size = name_size + 1
+        indent = ""
+        for func in clist:
+            name = func_std_string(func)
+            print indent*name_size + name + '(' \
+                      + `call_dict[func]`+')', \
+                      f8(self.stats[func][3])
+            # First entry shares the source's line ("" * name_size);
+            # subsequent entries are indented underneath it.
+            indent = " "
+
+    def print_title(self):
+        """Print the column headings for the main statistics listing."""
+        print '   ncalls  tottime  percall  cumtime  percall', \
+              'filename:lineno(function)'
+
+    def print_line(self, func):  # hack : should print percentages
+        """Print one statistics row: ncalls, tottime, percall, cumtime,
+        percall and the standard function name."""
+        cc, nc, tt, ct, callers = self.stats[func]
+        c = str(nc)
+        if nc != cc:
+            # Recursion: show "total/primitive" call counts.
+            c = c + '/' + str(cc)
+        print c.rjust(9),
+        print f8(tt),
+        if nc == 0:
+            # Avoid division by zero; leave the per-call column blank.
+            print ' '*8,
+        else:
+            print f8(tt/nc),
+        print f8(ct),
+        if cc == 0:
+            print ' '*8,
+        else:
+            print f8(ct/cc),
+        print func_std_string(func)
+
+    def ignore(self):
+        # Deprecated since 1.5.1 -- see the docs.
+        # Retained so old code that chains .ignore() keeps working.
+        pass # has no return value, so use at end of line :-)
+
+class TupleComp:
+    """This class provides a generic function for comparing any two tuples.
+    Each instance records a list of tuple-indices (from most significant
+    to least significant), and sort direction (ascending or descending) for
+    each tuple-index.  The compare functions can then be used as the function
+    argument to the system sort() function when a list of tuples need to be
+    sorted in the instances order."""
+
+    def __init__(self, comp_select_list):
+        # comp_select_list: sequence of (tuple_index, direction) pairs;
+        # direction is 1 for ascending, -1 for descending.
+        self.comp_select_list = comp_select_list
+
+    def compare (self, left, right):
+        """cmp()-style comparison: return -1, 0 or 1."""
+        for index, direction in self.comp_select_list:
+            l = left[index]
+            r = right[index]
+            if l < r:
+                return -direction
+            if l > r:
+                return direction
+        return 0
+
+#**************************************************************************
+# func_name is a triple (file:string, line:int, name:string)
+
+def func_strip_path(func_name):
+    """Return the func triple with the file part reduced to a basename."""
+    file, line, name = func_name
+    return os.path.basename(file), line, name
+
+def func_get_function_name(func):
+    """Return the name component of a (file, line, name) triple."""
+    return func[2]
+
+def func_std_string(func_name): # match what old profile produced
+    """Format a (file, line, name) triple as 'file:lineno(name)'."""
+    return "%s:%d(%s)" % func_name
+
+#**************************************************************************
+# The following functions combine statistics for pairs of functions.
+# The bulk of the processing involves correctly handling "call" lists,
+# such as callers and callees.
+#**************************************************************************
+
+def add_func_stats(target, source):
+    """Add together all the stats for two profile entries."""
+    # An entry is (primitive calls, total calls, total time,
+    # cumulative time, callers dict); components are summed and the
+    # caller dicts merged.
+    cc, nc, tt, ct, callers = source
+    t_cc, t_nc, t_tt, t_ct, t_callers = target
+    return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
+              add_callers(t_callers, callers))
+
+def add_callers(target, source):
+    """Combine two caller lists in a single list."""
+    new_callers = {}
+    for func in target.keys():
+        new_callers[func] = target[func]
+    for func in source.keys():
+        if new_callers.has_key(func):
+            # Caller appears in both: sum the call counts.
+            new_callers[func] = source[func] + new_callers[func]
+        else:
+            new_callers[func] = source[func]
+    return new_callers
+
+def count_calls(callers):
+    """Sum the caller statistics to get total number of calls received."""
+    nc = 0
+    for func in callers.keys():
+        nc += callers[func]
+    return nc
+
+#**************************************************************************
+# The following functions support printing of reports
+#**************************************************************************
+
+def f8(x):
+    """Format a time value as a fixed-width 8-character field."""
+    return "%8.3f" % x
+
+#**************************************************************************
+# Statistics browser added by ESR, April 2001
+#**************************************************************************
+
+if __name__ == '__main__':
+    import cmd
+    try:
+        import readline
+    except ImportError:
+        pass
+
+    class ProfileBrowser(cmd.Cmd):
+        def __init__(self, profile=None):
+            cmd.Cmd.__init__(self)
+            self.prompt = "% "
+            if profile:
+                self.stats = Stats(profile)
+            else:
+                self.stats = None
+
+        def generic(self, fn, line):
+            args = line.split()
+            processed = []
+            for term in args:
+                try:
+                    processed.append(int(term))
+                    continue
+                except ValueError:
+                    pass
+                try:
+                    frac = float(term)
+                    if frac > 1 or frac < 0:
+                        print "Fraction argument mus be in [0, 1]"
+                        continue
+                    processed.append(frac)
+                    continue
+                except ValueError:
+                    pass
+                processed.append(term)
+            if self.stats:
+                apply(getattr(self.stats, fn), processed)
+            else:
+                print "No statistics object is loaded."
+            return 0
+        def generic_help(self):
+            print "Arguments may be:"
+            print "* An integer maximum number of entries to print."
+            print "* A decimal fractional number between 0 and 1, controlling"
+            print "  what fraction of selected entries to print."
+            print "* A regular expression; only entries with function names"
+            print "  that match it are printed."
+
+        def do_add(self, line):
+            self.stats.add(line)
+            return 0
+        def help_add(self):
+            print "Add profile info from given file to current statistics object."
+
+        def do_callees(self, line):
+            return self.generic('print_callees', line)
+        def help_callees(self):
+            print "Print callees statistics from the current stat object."
+            self.generic_help()
+
+        def do_callers(self, line):
+            return self.generic('print_callers', line)
+        def help_callers(self):
+            print "Print callers statistics from the current stat object."
+            self.generic_help()
+
+        def do_EOF(self, line):
+            print ""
+            return 1
+        def help_EOF(self):
+            print "Leave the profile brower."
+
+        def do_quit(self, line):
+            return 1
+        def help_quit(self):
+            print "Leave the profile brower."
+
+        def do_read(self, line):
+            if line:
+                try:
+                    self.stats = Stats(line)
+                except IOError, args:
+                    print args[1]
+                    return
+                self.prompt = line + "% "
+            elif len(self.prompt) > 2:
+                line = self.prompt[-2:]
+            else:
+                print "No statistics object is current -- cannot reload."
+            return 0
+        def help_read(self):
+            print "Read in profile data from a specified file."
+
+        def do_reverse(self, line):
+            self.stats.reverse_order()
+            return 0
+        def help_reverse(self):
+            print "Reverse the sort order of the profiling report."
+
+        def do_sort(self, line):
+            abbrevs = self.stats.get_sort_arg_defs().keys()
+            if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
+                apply(self.stats.sort_stats, line.split())
+            else:
+                print "Valid sort keys (unique prefixes are accepted):"
+                for (key, value) in Stats.sort_arg_dict_default.items():
+                    print "%s -- %s" % (key, value[1])
+            return 0
+        def help_sort(self):
+            print "Sort profile data according to specified keys."
+            print "(Typing `sort' without arguments lists valid keys.)"
+        def complete_sort(self, text, *args):
+            return [a for a in Stats.sort_arg_dict_default.keys() if a.startswith(text)]
+
+        def do_stats(self, line):
+            return self.generic('print_stats', line)
+        def help_stats(self):
+            print "Print statistics from the current stat object."
+            self.generic_help()
+
+        def do_strip(self, line):
+            self.stats.strip_dirs()
+            return 0
+        def help_strip(self):
+            print "Strip leading path information from filenames in the report."
+
+        def postcmd(self, stop, line):
+            if stop:
+                return stop
+            return None
+
+    import sys
+    print "Welcome to the profile statistics browser."
+    if len(sys.argv) > 1:
+        initprofile = sys.argv[1]
+    else:
+        initprofile = None
+    try:
+        ProfileBrowser(initprofile).cmdloop()
+        print "Goodbye."
+    except KeyboardInterrupt:
+        pass
+
+# That's all, folks.
diff --git a/lib-python/2.2/pty.py b/lib-python/2.2/pty.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pty.py
@@ -0,0 +1,167 @@
+"""Pseudo terminal utilities."""
+
+# Bugs: No signal handling.  Doesn't set slave termios and window size.
+#       Only tested on Linux.
+# See:  W. Richard Stevens. 1992.  Advanced Programming in the
+#       UNIX Environment.  Chapter 19.
+# Author: Steen Lumholt -- with additions by Guido.
+
+from select import select
+import os
+
+# Absurd:  import termios and then delete it.  This is to force an attempt
+# to import pty to raise an ImportError on platforms that lack termios.
+# Without this explicit import of termios here, some other module may
+# import tty first, which in turn imports termios and dies with an
+# ImportError then.  But since tty *does* exist across platforms, that
+# leaves a damaged module object for tty in sys.modules, and the import
+# of tty here then appears to work despite that the tty imported is junk.
+import termios
+del termios
+
+import tty
+
+__all__ = ["openpty","fork","spawn"]
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+CHILD = 0
+
+def openpty():
+    """openpty() -> (master_fd, slave_fd)
+    Open a pty master/slave pair, using os.openpty() if possible."""
+
+    try:
+        return os.openpty()
+    except (AttributeError, OSError):
+        # os.openpty() missing or failed; fall back to the manual
+        # SGI/BSD implementation below.
+        pass
+    master_fd, slave_name = _open_terminal()
+    slave_fd = slave_open(slave_name)
+    return master_fd, slave_fd
+
+def master_open():
+    """master_open() -> (master_fd, slave_name)
+    Open a pty master and return the fd, and the filename of the slave end.
+    Deprecated, use openpty() instead."""
+
+    try:
+        master_fd, slave_fd = os.openpty()
+    except (AttributeError, OSError):
+        # No usable os.openpty(); fall through to _open_terminal().
+        pass
+    else:
+        # Only the slave's name is wanted here, not an open fd.
+        slave_name = os.ttyname(slave_fd)
+        os.close(slave_fd)
+        return master_fd, slave_name
+
+    return _open_terminal()
+
+def _open_terminal():
+    """Open pty master and return (master_fd, tty_name).
+    SGI and generic BSD version, for when openpty() fails."""
+    try:
+        import sgi
+    except ImportError:
+        pass
+    else:
+        try:
+            tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
+        except IOError, msg:
+            raise os.error, msg
+        return master_fd, tty_name
+    # BSD-style scan: try /dev/ptyXY names until one can be opened.
+    for x in 'pqrstuvwxyzPQRST':
+        for y in '0123456789abcdef':
+            pty_name = '/dev/pty' + x + y
+            try:
+                fd = os.open(pty_name, os.O_RDWR)
+            except os.error:
+                continue
+            # The matching slave is the same suffix under /dev/tty.
+            return (fd, '/dev/tty' + x + y)
+    raise os.error, 'out of pty devices'
+
+def slave_open(tty_name):
+    """slave_open(tty_name) -> slave_fd
+    Open the pty slave and acquire the controlling terminal, returning
+    opened filedescriptor.
+    Deprecated, use openpty() instead."""
+
+    # NOTE(review): acquiring the controlling terminal is assumed to be
+    # a side effect of open() here -- confirm for non-BSD platforms.
+    return os.open(tty_name, os.O_RDWR)
+
+def fork():
+    """fork() -> (pid, master_fd)
+    Fork and make the child a session leader with a controlling terminal."""
+
+    try:
+        pid, fd = os.forkpty()
+    except (AttributeError, OSError):
+        # os.forkpty() unavailable or failed; emulate it below.
+        pass
+    else:
+        if pid == CHILD:
+            try:
+                os.setsid()
+            except OSError:
+                # os.forkpty() already set us session leader
+                pass
+        return pid, fd
+
+    master_fd, slave_fd = openpty()
+    pid = os.fork()
+    if pid == CHILD:
+        # Establish a new session.
+        os.setsid()
+        os.close(master_fd)
+
+        # Slave becomes stdin/stdout/stderr of child.
+        os.dup2(slave_fd, STDIN_FILENO)
+        os.dup2(slave_fd, STDOUT_FILENO)
+        os.dup2(slave_fd, STDERR_FILENO)
+        if (slave_fd > STDERR_FILENO):
+            os.close (slave_fd)
+
+    # Parent and child process.
+    # NOTE(review): in the child the returned master_fd was closed above,
+    # so only pid (== 0) is meaningful there.
+    return pid, master_fd
+
+def _writen(fd, data):
+    """Write all the data to a descriptor."""
+    # os.write may write fewer bytes than requested; loop on the rest.
+    while data != '':
+        n = os.write(fd, data)
+        data = data[n:]
+
+def _read(fd):
+    """Default read function: one chunk of up to 1024 bytes."""
+    return os.read(fd, 1024)
+
+def _copy(master_fd, master_read=_read, stdin_read=_read):
+    """Parent copy loop.
+    Copies
+            pty master -> standard output   (master_read)
+            standard input -> pty master    (stdin_read)
+
+    Loops forever; callers rely on an IOError/OSError from the pty
+    to terminate it -- see spawn()."""
+    while 1:
+        rfds, wfds, xfds = select(
+                [master_fd, STDIN_FILENO], [], [])
+        if master_fd in rfds:
+            data = master_read(master_fd)
+            os.write(STDOUT_FILENO, data)
+        if STDIN_FILENO in rfds:
+            data = stdin_read(STDIN_FILENO)
+            _writen(master_fd, data)
+
+def spawn(argv, master_read=_read, stdin_read=_read):
+    """Create a spawned process.
+
+    Execs argv[0] with arguments argv in a child attached to a new
+    pty, then copies data between the real terminal and the pty until
+    an I/O error occurs, restoring the terminal mode on the way out."""
+    if type(argv) == type(''):
+        argv = (argv,)
+    pid, master_fd = fork()
+    if pid == CHILD:
+        # Child: replace ourselves with the requested program.
+        apply(os.execlp, (argv[0],) + argv)
+    try:
+        # Put the real stdin in raw mode so keystrokes pass through
+        # unmodified; remember the old mode to restore afterwards.
+        mode = tty.tcgetattr(STDIN_FILENO)
+        tty.setraw(STDIN_FILENO)
+        restore = 1
+    except tty.error:    # This is the same as termios.error
+        restore = 0
+    try:
+        _copy(master_fd, master_read, stdin_read)
+    except (IOError, OSError):
+        if restore:
+            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
diff --git a/lib-python/2.2/py_compile.py b/lib-python/2.2/py_compile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/py_compile.py
@@ -0,0 +1,82 @@
+"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
+
+This module has intimate knowledge of the format of .pyc files.
+"""
+
+import imp
+MAGIC = imp.get_magic()
+
+__all__ = ["compile"]
+
+def wr_long(f, x):
+    """Internal; write a 32-bit int to a file in little-endian order."""
+    # Least-significant byte first, one byte per write.
+    f.write(chr( x        & 0xff))
+    f.write(chr((x >> 8)  & 0xff))
+    f.write(chr((x >> 16) & 0xff))
+    f.write(chr((x >> 24) & 0xff))
+
+def compile(file, cfile=None, dfile=None):
+    """Byte-compile one Python source file to Python bytecode.
+
+    Arguments:
+
+    file:  source filename
+    cfile: target filename; defaults to source with 'c' or 'o' appended
+           ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
+    dfile: purported filename; defaults to source (this is the filename
+           that will show up in error messages)
+
+    Note that it isn't necessary to byte-compile Python modules for
+    execution efficiency -- Python itself byte-compiles a module when
+    it is loaded, and if it can, writes out the bytecode to the
+    corresponding .pyc (or .pyo) file.
+
+    However, if a Python installation is shared between users, it is a
+    good idea to byte-compile all modules upon installation, since
+    other users may not be able to write in the source directories,
+    and thus they won't be able to write the .pyc/.pyo file, and then
+    they would be byte-compiling every module each time it is loaded.
+    This can slow down program start-up considerably.
+
+    See compileall.py for a script/module that uses this module to
+    byte-compile all installed files (or all files in selected
+    directories).
+
+    """
+    import os, marshal, __builtin__
+    f = open(file)
+    try:
+        # stat/fstat field 8 is st_mtime.
+        timestamp = long(os.fstat(f.fileno())[8])
+    except AttributeError:
+        timestamp = long(os.stat(file)[8])
+    codestring = f.read()
+    # If parsing from a string, line breaks are \n (see parsetok.c:tok_nextc)
+    # Replace will return original string if pattern is not found, so
+    # we don't need to check whether it is found first.
+    codestring = codestring.replace("\r\n","\n")
+    codestring = codestring.replace("\r","\n")
+    f.close()
+    if codestring and codestring[-1] != '\n':
+        codestring = codestring + '\n'
+    try:
+        codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
+    except SyntaxError, detail:
+        # Report the error against dfile (the purported name) if given.
+        import traceback, sys
+        lines = traceback.format_exception_only(SyntaxError, detail)
+        for line in lines:
+            sys.stderr.write(line.replace('File "<string>"',
+                                            'File "%s"' % (dfile or file)))
+        return
+    if not cfile:
+        cfile = file + (__debug__ and 'c' or 'o')
+    fc = open(cfile, 'wb')
+    # Write a dummy magic number first; the real MAGIC goes in only
+    # after marshal.dump() succeeds, so a truncated/interrupted file
+    # is never mistaken for a valid .pyc.
+    fc.write('\0\0\0\0')
+    wr_long(fc, timestamp)
+    marshal.dump(codeobject, fc)
+    fc.flush()
+    fc.seek(0, 0)
+    fc.write(MAGIC)
+    fc.close()
+    if os.name == 'mac':
+        import macfs
+        macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
diff --git a/lib-python/2.2/pyclbr.py b/lib-python/2.2/pyclbr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/pyclbr.py
@@ -0,0 +1,337 @@
+"""Parse a Python file and retrieve classes and methods.
+
+Parse enough of a Python file to recognize class and method
+definitions and to find out the superclasses of a class.
+
+The interface consists of a single function:
+        readmodule(module, path)
+module is the name of a Python module, path is an optional list of
+directories where the module is to be searched.  If present, path is
+prepended to the system search path sys.path.
+The return value is a dictionary.  The keys of the dictionary are
+the names of the classes defined in the module (including classes
+that are defined via the from XXX import YYY construct).  The values
+are class instances of the class Class defined here.
+
+A class is described by the class Class in this module.  Instances
+of this class have the following instance variables:
+        name -- the name of the class
+        super -- a list of super classes (Class instances)
+        methods -- a dictionary of methods
+        file -- the file in which the class was defined
+        lineno -- the line in the file on which the class statement occurred
+The dictionary of methods uses the method names as keys and the line
+numbers on which the method was defined as values.
+If the name of a super class is not recognized, the corresponding
+entry in the list of super classes is not a class instance but a
+string giving the name of the super class.  Since import statements
+are recognized and imported modules are scanned as well, this
+shouldn't happen often.
+
+BUGS
+- Continuation lines are not dealt with at all, except inside strings.
+- Nested classes and functions can confuse it.
+- Code that doesn't pass tabnanny or python -t will confuse it, unless
+  you set the module TABWIDTH vrbl (default 8) to the correct tab width
+  for the file.
+
+PACKAGE RELATED BUGS
+- If you have a package and a module inside that or another package
+  with the same name, module caching doesn't work properly since the
+  key is the base name of the module/package.
+- The only entry that is returned when you readmodule a package is a
+  __path__ whose value is a list which confuses certain class browsers.
+- When code does:
+  from package import subpackage
+  class MyClass(subpackage.SuperClass):
+    ...
+  It can't locate the parent.  It probably needs to have the same
+  hairy logic that the import locator already does.  (This logic
+  exists coded in Python in the freeze package.)
+"""
+
+import sys
+import imp
+import re
+import string
+
+__all__ = ["readmodule"]
+
+TABWIDTH = 8
+
+# Master regexp, used via .search: one named alternative per construct of
+# interest.  The String alternative exists so string literals are matched
+# (and then skipped by the caller) instead of being scanned for
+# class/def/import keywords.
+_getnext = re.compile(r"""
+    (?P<String>
+       \""" [^"\\]* (?:
+                        (?: \\. | "(?!"") )
+                        [^"\\]*
+                    )*
+       \"""
+
+    |   ''' [^'\\]* (?:
+                        (?: \\. | '(?!'') )
+                        [^'\\]*
+                    )*
+        '''
+
+    |   " [^"\\\n]* (?: \\. [^"\\\n]*)* "
+
+    |   ' [^'\\\n]* (?: \\. [^'\\\n]*)* '
+    )
+
+|   (?P<Method>
+        ^
+        (?P<MethodIndent> [ \t]* )
+        def [ \t]+
+        (?P<MethodName> [a-zA-Z_] \w* )
+        [ \t]* \(
+    )
+
+|   (?P<Class>
+        ^
+        (?P<ClassIndent> [ \t]* )
+        class [ \t]+
+        (?P<ClassName> [a-zA-Z_] \w* )
+        [ \t]*
+        (?P<ClassSupers> \( [^)\n]* \) )?
+        [ \t]* :
+    )
+
+|   (?P<Import>
+        ^ import [ \t]+
+        (?P<ImportList> [^#;\n]+ )
+    )
+
+|   (?P<ImportFrom>
+        ^ from [ \t]+
+        (?P<ImportFromPath>
+            [a-zA-Z_] \w*
+            (?:
+                [ \t]* \. [ \t]* [a-zA-Z_] \w*
+            )*
+        )
+        [ \t]+
+        import [ \t]+
+        (?P<ImportFromList> [^#;\n]+ )
+    )
+""", re.VERBOSE | re.DOTALL | re.MULTILINE).search
+
+_modules = {}                           # cache of modules we've seen
+
+# each Python class is represented by an instance of this class
+class Class:
+    '''Class to represent a Python class.'''
+    def __init__(self, module, name, super, file, lineno):
+        # module: name of the defining module
+        # name:   class name
+        # super:  list of base classes (Class objects or plain name strings)
+        # file:   file in which the class was defined
+        # lineno: line of the class statement
+        self.module = module
+        self.name = name
+        if super is None:
+            super = []
+        self.super = super
+        self.methods = {}
+        self.file = file
+        self.lineno = lineno
+
+    def _addmethod(self, name, lineno):
+        # Record one method name with the line where it is defined.
+        self.methods[name] = lineno
+
+class Function(Class):
+    '''Class to represent a top-level Python function'''
+    def __init__(self, module, name, file, lineno):
+        # Reuse Class's bookkeeping; a function has no superclasses.
+        Class.__init__(self, module, name, None, file, lineno)
+    def _addmethod(self, name, lineno):
+        # Functions cannot contain methods.
+        assert 0, "Function._addmethod() shouldn't be called"
+
+def readmodule(module, path=[], inpackage=0):
+    '''Backwards compatible interface.
+
+    Like readmodule_ex() but strips Function objects from the
+    resulting dictionary.'''
+
+    dict = readmodule_ex(module, path, inpackage)
+    res = {}
+    for key, value in dict.items():
+        # Keep classes only; top-level functions are an _ex() extra.
+        if not isinstance(value, Function):
+            res[key] = value
+    return res
+
+def readmodule_ex(module, path=[], inpackage=0):
+    '''Read a module file and return a dictionary of classes.
+
+    Search for MODULE in PATH and sys.path, read and parse the
+    module and return a dictionary with one entry for each class
+    found in the module.'''
+
+    dict = {}
+
+    # Dotted names: read the package first, then the submodule inside
+    # the package's recorded __path__.
+    i = module.rfind('.')
+    if i >= 0:
+        # Dotted module name
+        package = module[:i].strip()
+        submodule = module[i+1:].strip()
+        parent = readmodule_ex(package, path, inpackage)
+        child = readmodule_ex(submodule, parent['__path__'], 1)
+        return child
+
+    if _modules.has_key(module):
+        # we've seen this module before...
+        return _modules[module]
+    if module in sys.builtin_module_names:
+        # this is a built-in module; nothing to parse
+        _modules[module] = dict
+        return dict
+
+    # search the path for the module
+    f = None
+    if inpackage:
+        try:
+            f, file, (suff, mode, type) = \
+                    imp.find_module(module, path)
+        except ImportError:
+            f = None
+    if f is None:
+        fullpath = list(path) + sys.path
+        f, file, (suff, mode, type) = imp.find_module(module, fullpath)
+    if type == imp.PKG_DIRECTORY:
+        # Package: record its __path__ and parse its __init__ module.
+        dict['__path__'] = [file]
+        _modules[module] = dict
+        path = [file] + path
+        f, file, (suff, mode, type) = \
+                        imp.find_module('__init__', [file])
+    if type != imp.PY_SOURCE:
+        # not Python source, can't do anything with this module
+        f.close()
+        _modules[module] = dict
+        return dict
+
+    _modules[module] = dict
+    classstack = [] # stack of (class, indent) pairs
+    src = f.read()
+    f.close()
+
+    # To avoid having to stop the regexp at each newline, instead
+    # when we need a line number we simply string.count the number of
+    # newlines in the string since the last time we did this; i.e.,
+    #    lineno = lineno + \
+    #             string.count(src, '\n', last_lineno_pos, here)
+    #    last_lineno_pos = here
+    countnl = string.count
+    lineno, last_lineno_pos = 1, 0
+    i = 0
+    # Scan the source with the master regexp, dispatching on which
+    # named group matched.
+    while 1:
+        m = _getnext(src, i)
+        if not m:
+            break
+        start, i = m.span()
+
+        if m.start("Method") >= 0:
+            # found a method definition or function
+            thisindent = _indent(m.group("MethodIndent"))
+            meth_name = m.group("MethodName")
+            lineno = lineno + \
+                     countnl(src, '\n',
+                             last_lineno_pos, start)
+            last_lineno_pos = start
+            # close all classes indented at least as much
+            while classstack and \
+                  classstack[-1][1] >= thisindent:
+                del classstack[-1]
+            if classstack:
+                # it's a class method
+                cur_class = classstack[-1][0]
+                cur_class._addmethod(meth_name, lineno)
+            else:
+                # it's a function
+                f = Function(module, meth_name,
+                             file, lineno)
+                dict[meth_name] = f
+
+        elif m.start("String") >= 0:
+            # string literal: skip so keywords inside it are ignored
+            pass
+
+        elif m.start("Class") >= 0:
+            # we found a class definition
+            thisindent = _indent(m.group("ClassIndent"))
+            # close all classes indented at least as much
+            while classstack and \
+                  classstack[-1][1] >= thisindent:
+                del classstack[-1]
+            lineno = lineno + \
+                     countnl(src, '\n', last_lineno_pos, start)
+            last_lineno_pos = start
+            class_name = m.group("ClassName")
+            inherit = m.group("ClassSupers")
+            if inherit:
+                # the class inherits from other classes
+                inherit = inherit[1:-1].strip()
+                names = []
+                for n in inherit.split(','):
+                    n = n.strip()
+                    if dict.has_key(n):
+                        # we know this super class
+                        n = dict[n]
+                    else:
+                        c = n.split('.')
+                        if len(c) > 1:
+                            # super class
+                            # is of the
+                            # form module.class:
+                            # look in
+                            # module for class
+                            m = c[-2]
+                            c = c[-1]
+                            if _modules.has_key(m):
+                                d = _modules[m]
+                                if d.has_key(c):
+                                    n = d[c]
+                    names.append(n)
+                inherit = names
+            # remember this class
+            cur_class = Class(module, class_name, inherit,
+                              file, lineno)
+            dict[class_name] = cur_class
+            classstack.append((cur_class, thisindent))
+
+        elif m.start("Import") >= 0:
+            # import module
+            for n in m.group("ImportList").split(','):
+                n = n.strip()
+                try:
+                    # recursively read the imported module
+                    d = readmodule_ex(n, path, inpackage)
+                except:
+                    ##print 'module', n, 'not found'
+                    pass
+
+        elif m.start("ImportFrom") >= 0:
+            # from module import stuff
+            mod = m.group("ImportFromPath")
+            names = m.group("ImportFromList").split(',')
+            try:
+                # recursively read the imported module
+                d = readmodule_ex(mod, path, inpackage)
+            except:
+                ##print 'module', mod, 'not found'
+                continue
+            # add any classes that were defined in the
+            # imported module to our name space if they
+            # were mentioned in the list
+            for n in names:
+                n = n.strip()
+                if d.has_key(n):
+                    dict[n] = d[n]
+                elif n == '*':
+                    # only add a name if not
+                    # already there (to mimic what
+                    # Python does internally)
+                    # also don't add names that
+                    # start with _
+                    for n in d.keys():
+                        if n[0] != '_' and \
+                           not dict.has_key(n):
+                            dict[n] = d[n]
+        else:
+            assert 0, "regexp _getnext found something unexpected"
+
+    return dict
+
+def _indent(ws, _expandtabs=string.expandtabs):
+    """Return the display width of leading whitespace, expanding tabs."""
+    return len(_expandtabs(ws, TABWIDTH))
diff --git a/lib-python/2.2/pydoc.py b/lib-python/2.2/pydoc.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/pydoc.py
@@ -0,0 +1,2112 @@
+#!/usr/bin/env python
+"""Generate Python documentation in HTML or text for interactive use.
+
+In the Python interpreter, do "from pydoc import help" to provide online
+help.  Calling help(thing) on a Python object documents the object.
+
+Or, at the shell command line outside of Python:
+
+Run "pydoc <name>" to show documentation on something.  <name> may be
+the name of a function, module, package, or a dotted reference to a
+class or function within a module or module in a package.  If the
+argument contains a path segment delimiter (e.g. slash on Unix,
+backslash on Windows) it is treated as the path to a Python source file.
+
+Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
+of all available modules.
+
+Run "pydoc -p <port>" to start an HTTP server on a given port on the
+local machine to generate documentation web pages.
+
+For platforms without a command line, "pydoc -g" starts the HTTP server
+and also pops up a little window for controlling it.
+
+Run "pydoc -w <name>" to write out the HTML documentation for a module
+to a file named "<name>.html".
+"""
+
+__author__ = "Ka-Ping Yee <ping at lfw.org>"
+__date__ = "26 February 2001"
+__version__ = "$Revision$"
+__credits__ = """Guido van Rossum, for an excellent programming language.
+Tommy Burnette, the original creator of manpy.
+Paul Prescod, for all his work on onlinehelp.
+Richard Chamberlain, for the first implementation of textdoc.
+
+Mynd you, møøse bites Kan be pretty nasti..."""
+
+# Known bugs that can't be fixed here:
+#   - imp.load_module() cannot be prevented from clobbering existing
+#     loaded modules, so calling synopsis() on a binary module file
+#     changes the contents of any existing module with the same name.
+#   - If the __file__ attribute on a module is a relative path and
+#     the current directory is changed with os.chdir(), an incorrect
+#     path will be displayed.
+
+import sys, imp, os, stat, re, types, inspect
+from repr import Repr
+from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
+
+# --------------------------------------------------------- common routines
+
+def pathdirs():
+    """Convert sys.path into a list of absolute, existing, unique paths."""
+    dirs = []
+    normdirs = []  # case-normalized copies of dirs, used only for dedup
+    for dir in sys.path:
+        # '' on sys.path means the current directory.
+        dir = os.path.abspath(dir or '.')
+        # Compare case-insensitively where the filesystem does (e.g. Windows).
+        normdir = os.path.normcase(dir)
+        if normdir not in normdirs and os.path.isdir(dir):
+            dirs.append(dir)
+            normdirs.append(normdir)
+    return dirs
+
+def getdoc(object):
+    """Get the doc string or comments for an object."""
+    # Fall back to the comment block preceding the source definition when
+    # there is no docstring.
+    result = inspect.getdoc(object) or inspect.getcomments(object)
+    # Drop leading blank lines and trailing whitespace; return '' if empty.
+    return result and re.sub('^ *\n', '', rstrip(result)) or ''
+
+def splitdoc(doc):
+    """Split a doc string into a synopsis line (if any) and the rest."""
+    lines = split(strip(doc), '\n')
+    if len(lines) == 1:
+        # A one-line docstring is all synopsis.
+        return lines[0], ''
+    elif len(lines) >= 2 and not rstrip(lines[1]):
+        # PEP 257 style: summary line, blank line, then the body.
+        return lines[0], join(lines[2:], '\n')
+    # No blank separator: there is no synopsis, everything is body text.
+    return '', join(lines, '\n')
+
+def classname(object, modname):
+    """Get a class name and qualify it with a module name if necessary."""
+    name = object.__name__
+    # Only prefix the module when the class comes from somewhere other than
+    # the module currently being documented.
+    if object.__module__ != modname:
+        name = object.__module__ + '.' + name
+    return name
+
+def isdata(object):
+    """Check if an object is of a type that probably means it's data."""
+    # "Data" is defined negatively: anything that is not a module, class,
+    # routine, frame, traceback or code object.
+    return not (inspect.ismodule(object) or inspect.isclass(object) or
+                inspect.isroutine(object) or inspect.isframe(object) or
+                inspect.istraceback(object) or inspect.iscode(object))
+
+def replace(text, *pairs):
+    """Do a series of global replacements on a string."""
+    # pairs is a flat sequence: (old1, new1, old2, new2, ...).
+    while pairs:
+        # split-then-join is the pre-str.replace idiom for a global replace.
+        text = join(split(text, pairs[0]), pairs[1])
+        pairs = pairs[2:]
+    return text
+
+def cram(text, maxlen):
+    """Omit part of a string if needed to make it fit in a maximum length."""
+    if len(text) > maxlen:
+        # Reserve 3 characters for the '...' marker and split the remaining
+        # budget roughly evenly between head and tail (integer division --
+        # this is Python 2 code).
+        pre = max(0, (maxlen-3)/2)
+        post = max(0, maxlen-3-pre)
+        return text[:pre] + '...' + text[len(text)-post:]
+    return text
+
+def stripid(text):
+    """Remove the hexadecimal id from a Python object representation."""
+    # The behaviour of %p is implementation-dependent; we check two cases.
+    # repr(Exception) is used as a probe to learn which address format this
+    # interpreter produces before stripping it from `text`.
+    for pattern in [' at 0x[0-9a-f]{6,}(>+)$', ' at [0-9A-F]{8,}(>+)$']:
+        if re.search(pattern, repr(Exception)):
+            return re.sub(pattern, '\\1', text)
+    return text
+
+def _is_some_method(object):
+    # True for both ordinary methods and C-level method descriptors.
+    return inspect.ismethod(object) or inspect.ismethoddescriptor(object)
+
+def allmethods(cl):
+    # Return a dict mapping every method name visible on class `cl`
+    # (including inherited ones) to the attribute as seen from `cl`.
+    methods = {}
+    for key, value in inspect.getmembers(cl, _is_some_method):
+        methods[key] = 1
+    # Recurse over base classes to pick up inherited method names.
+    for base in cl.__bases__:
+        methods.update(allmethods(base)) # all your base are belong to us
+    # Replace the placeholder 1's with the actual attributes from cl, so
+    # overridden methods resolve to cl's version.
+    for key in methods.keys():
+        methods[key] = getattr(cl, key)
+    return methods
+
+def _split_list(s, predicate):
+    """Split sequence s via predicate, and return pair ([true], [false]).
+
+    The return value is a 2-tuple of lists,
+        ([x for x in s if predicate(x)],
+         [x for x in s if not predicate(x)])
+    """
+
+    # Single pass: the predicate is invoked exactly once per element.
+    yes = []
+    no = []
+    for x in s:
+        if predicate(x):
+            yes.append(x)
+        else:
+            no.append(x)
+    return yes, no
+
+# ----------------------------------------------------- module manipulation
+
+def ispackage(path):
+    """Guess whether a path refers to a package directory."""
+    # Returns 1 for a directory containing an __init__ module; otherwise
+    # falls off the end and returns None (falsy).
+    if os.path.isdir(path):
+        for ext in ['.py', '.pyc', '.pyo']:
+            if os.path.isfile(os.path.join(path, '__init__' + ext)):
+                return 1
+
+def synopsis(filename, cache={}):
+    """Get the one-line summary out of a module file."""
+    # `cache` is a deliberate mutable default: it memoizes results across
+    # calls, keyed by filename and invalidated by the file's mtime.
+    mtime = os.stat(filename)[stat.ST_MTIME]
+    lastupdate, result = cache.get(filename, (0, None))
+    if lastupdate < mtime:
+        info = inspect.getmoduleinfo(filename)
+        file = open(filename)
+        if info and 'b' in info[2]: # binary modules have to be imported
+            # NOTE(review): on this failure path the file is never closed
+            # and the cache is not updated -- confirm whether intentional.
+            try: module = imp.load_module('__temp__', file, filename, info[1:])
+            except: return None
+            # Synopsis is the first line of the imported module's docstring.
+            result = split(module.__doc__ or '', '\n')[0]
+            del sys.modules['__temp__']
+        else: # text modules can be directly examined
+            # Skip leading comment lines and blank lines.
+            line = file.readline()
+            while line[:1] == '#' or not strip(line):
+                line = file.readline()
+                if not line: break
+            line = strip(line)
+            # Treat a raw docstring (r""") the same as a plain one.
+            if line[:4] == 'r"""': line = line[1:]
+            if line[:3] == '"""':
+                line = line[3:]
+                # A trailing backslash continues the line.
+                if line[-1:] == '\\': line = line[:-1]
+                while not strip(line):
+                    line = file.readline()
+                    if not line: break
+                result = strip(split(line, '"""')[0])
+            else: result = None
+        file.close()
+        cache[filename] = (mtime, result)
+    return result
+
+class ErrorDuringImport(Exception):
+    """Errors that occurred while trying to import something to document it."""
+    # Python 2 tuple parameter: the second argument is the (type, value,
+    # traceback) triple as returned by sys.exc_info().
+    def __init__(self, filename, (exc, value, tb)):
+        self.filename = filename
+        self.exc = exc
+        self.value = value
+        self.tb = tb
+
+    def __str__(self):
+        exc = self.exc
+        # Old-style exception classes: show the class name, not its repr.
+        if type(exc) is types.ClassType:
+            exc = exc.__name__
+        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
+
+def importfile(path):
+    """Import a Python source file or compiled file given its path."""
+    # A file starting with the magic number is a compiled (.pyc/.pyo) file.
+    magic = imp.get_magic()
+    # NOTE(review): the file is opened in text mode ('r'); compiled files
+    # are binary -- confirm behaviour on platforms that distinguish modes.
+    file = open(path, 'r')
+    if file.read(len(magic)) == magic:
+        kind = imp.PY_COMPILED
+    else:
+        kind = imp.PY_SOURCE
+    file.close()
+    filename = os.path.basename(path)
+    name, ext = os.path.splitext(filename)
+    # Reopen from the start for the actual load.
+    file = open(path, 'r')
+    try:
+        # Wrap any failure (of any type) in ErrorDuringImport for callers.
+        module = imp.load_module(name, file, path, (ext, 'r', kind))
+    except:
+        raise ErrorDuringImport(path, sys.exc_info())
+    file.close()
+    return module
+
+def safeimport(path, forceload=0, cache={}):
+    """Import a module; handle errors; return None if the module isn't found.
+
+    If the module *is* found but an exception occurs, it's wrapped in an
+    ErrorDuringImport exception and reraised.  Unlike __import__, if a
+    package path is specified, the module at the end of the path is returned,
+    not the package at the beginning.  If the optional 'forceload' argument
+    is 1, we reload the module from disk (unless it's a dynamic extension)."""
+    if forceload and sys.modules.has_key(path):
+        # This is the only way to be sure.  Checking the mtime of the file
+        # isn't good enough (e.g. what if the module contains a class that
+        # inherits from another module that has changed?).
+        if path not in sys.builtin_module_names:
+            # Python never loads a dynamic extension a second time from the
+            # same path, even if the file is changed or missing.  Deleting
+            # the entry in sys.modules doesn't help for dynamic extensions,
+            # so we're not even going to try to keep them up to date.
+            info = inspect.getmoduleinfo(sys.modules[path].__file__)
+            if info[3] != imp.C_EXTENSION:
+                cache[path] = sys.modules[path] # prevent module from clearing
+                del sys.modules[path]
+    try:
+        module = __import__(path)
+    except:
+        # Did the error occur before or after the module was found?
+        (exc, value, tb) = info = sys.exc_info()
+        if sys.modules.has_key(path):
+            # An error occured while executing the imported module.
+            raise ErrorDuringImport(sys.modules[path].__file__, info)
+        elif exc is SyntaxError:
+            # A SyntaxError occurred before we could execute the module.
+            raise ErrorDuringImport(value.filename, info)
+        elif exc is ImportError and \
+             split(lower(str(value)))[:2] == ['no', 'module']:
+            # The module was not found.
+            return None
+        else:
+            # Some other error occurred during the importing process.
+            raise ErrorDuringImport(path, sys.exc_info())
+    # __import__ returns the top-level package; walk the dotted path down
+    # to the module actually requested.
+    for part in split(path, '.')[1:]:
+        try: module = getattr(module, part)
+        except AttributeError: return None
+    return module
+
+# ---------------------------------------------------- formatter base class
+
+class Doc:
+    # Abstract base for the HTML and text formatters: dispatches an object
+    # to the appropriate doc* method by its inspected type.  Subclasses
+    # override docmodule/docclass/docroutine/docother.
+    def document(self, object, name=None, *args):
+        """Generate documentation for an object."""
+        args = (object, name) + args
+        if inspect.ismodule(object): return apply(self.docmodule, args)
+        if inspect.isclass(object): return apply(self.docclass, args)
+        if inspect.isroutine(object): return apply(self.docroutine, args)
+        return apply(self.docother, args)
+
+    def fail(self, object, name=None, *args):
+        """Raise an exception for unimplemented types."""
+        message = "don't know how to document object%s of type %s" % (
+            name and ' ' + repr(name), type(object).__name__)
+        raise TypeError, message
+
+    # Every handler defaults to fail; subclasses replace what they support.
+    docmodule = docclass = docroutine = docother = fail
+
+# -------------------------------------------- HTML documentation generator
+
+class HTMLRepr(Repr):
+    """Class for safely making an HTML representation of a Python object."""
+    def __init__(self):
+        Repr.__init__(self)
+        # Widen the stdlib Repr truncation limits for documentation pages.
+        self.maxlist = self.maxtuple = 20
+        self.maxdict = 10
+        self.maxstring = self.maxother = 100
+
+    def escape(self, text):
+        # Escape the three HTML-special characters.
+        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
+
+    def repr(self, object):
+        return Repr.repr(self, object)
+
+    def repr1(self, x, level):
+        # Dispatch to repr_<typename>; type names with spaces become
+        # underscores so they form a valid method name.
+        methodname = 'repr_' + join(split(type(x).__name__), '_')
+        if hasattr(self, methodname):
+            return getattr(self, methodname)(x, level)
+        else:
+            return self.escape(cram(stripid(repr(x)), self.maxother))
+
+    def repr_string(self, x, level):
+        test = cram(x, self.maxstring)
+        testrepr = repr(test)
+        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
+            # Backslashes are only literal in the string and are never
+            # needed to make any special characters, so show a raw string.
+            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
+        # Highlight escape sequences inside the repr in purple.
+        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
+                      r'<font color="#c040c0">\1</font>',
+                      self.escape(testrepr))
+
+    repr_str = repr_string
+
+    def repr_instance(self, x, level):
+        try:
+            return self.escape(cram(stripid(repr(x)), self.maxstring))
+        except:
+            # repr() of the instance itself raised; fall back to class name.
+            return self.escape('<%s instance>' % x.__class__.__name__)
+
+    repr_unicode = repr_string
+
+class HTMLDoc(Doc):
+    """Formatter class for HTML documentation."""
+
+    # ------------------------------------------- HTML formatting utilities
+
+    # A single shared HTMLRepr instance; its bound methods are exposed as
+    # class attributes so self.repr/self.escape work on every instance.
+    _repr_instance = HTMLRepr()
+    repr = _repr_instance.repr
+    escape = _repr_instance.escape
+
+    def page(self, title, contents):
+        """Format an HTML page."""
+        # Wraps pre-rendered `contents` in the standard pydoc page skeleton.
+        return '''
+<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: %s</title>
+<style type="text/css"><!--
+TT { font-family: lucidatypewriter, lucida console, courier }
+--></style></head><body bgcolor="#f0f0f8">
+%s
+</body></html>''' % (title, contents)
+
+    def heading(self, title, fgcol, bgcol, extras=''):
+        """Format a page heading."""
+        # Two-cell banner table: title on the left, optional `extras`
+        # (e.g. index/file links) right-aligned.
+        return '''
+<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="%s">
+<td valign=bottom>&nbsp;<br>
+<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
+><td align=right valign=bottom
+><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
+    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
+
+    def section(self, title, fgcol, bgcol, contents, width=10,
+                prelude='', marginalia=None, gap='&nbsp;&nbsp;'):
+        """Format a section with a heading."""
+        # `marginalia` fills the left gutter; default is a `width`-space pad.
+        if marginalia is None:
+            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
+        result = '''
+<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="%s">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="%s" face="helvetica, arial">%s</font></td></tr>
+    ''' % (bgcol, fgcol, title)
+        # An optional `prelude` row sits between the heading and the body.
+        if prelude:
+            result = result + '''
+<tr bgcolor="%s"><td rowspan=2>%s</td>
+<td colspan=2>%s</td></tr>
+<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
+        else:
+            result = result + '''
+<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
+
+        return result + '\n<td width="100%%">%s</td></tr></table>' % contents
+
+    def bigsection(self, title, *args):
+        """Format a section with a big heading."""
+        # Same as section(), but with the title enlarged and bolded.
+        title = '<big><strong>%s</strong></big>' % title
+        return apply(self.section, (title,) + args)
+
+    def preformat(self, text):
+        """Format literal preformatted text."""
+        text = self.escape(expandtabs(text))
+        # Keep blank lines visible (a lone space survives as &nbsp;) and
+        # turn spaces/newlines into their HTML equivalents.
+        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
+                             ' ', '&nbsp;', '\n', '<br>\n')
+
+    def multicolumn(self, list, format, cols=4):
+        """Format a list of items into a multi-column list."""
+        result = ''
+        # Ceiling division (Python 2 integer '/'): rows per column.
+        rows = (len(list)+cols-1)/cols
+        for col in range(cols):
+            result = result + '<td width="%d%%" valign=top>' % (100/cols)
+            # Items are laid out column-major: column `col` gets the slice
+            # [rows*col, rows*col+rows).
+            for i in range(rows*col, rows*col+rows):
+                if i < len(list):
+                    result = result + format(list[i]) + '<br>\n'
+            result = result + '</td>'
+        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
+
+    # Render `text` in grey, used for de-emphasized annotations.
+    def grey(self, text): return '<font color="#909090">%s</font>' % text
+
+    def namelink(self, name, *dicts):
+        """Make a link for an identifier, given name-to-URL mappings."""
+        # The first mapping that knows `name` wins; otherwise plain text.
+        for dict in dicts:
+            if dict.has_key(name):
+                return '<a href="%s">%s</a>' % (dict[name], name)
+        return name
+
+    def classlink(self, object, modname):
+        """Make a link for a class."""
+        name, module = object.__name__, sys.modules.get(object.__module__)
+        # Only link when the class is genuinely reachable as an attribute
+        # of its declared module (guards against stale __module__ values).
+        if hasattr(module, name) and getattr(module, name) is object:
+            return '<a href="%s.html#%s">%s</a>' % (
+                module.__name__, name, classname(object, modname))
+        return classname(object, modname)
+
+    def modulelink(self, object):
+        """Make a link for a module."""
+        # Module pages are named <modulename>.html.
+        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
+
+    # Python 2 tuple parameter: takes a (name, path, ispackage, shadowed)
+    # 4-tuple describing one index entry.
+    def modpkglink(self, (name, path, ispackage, shadowed)):
+        """Make a link for a module or package to display in an index."""
+        if shadowed:
+            # Shadowed by an earlier entry on the path: show greyed, no link.
+            return self.grey(name)
+        if path:
+            url = '%s.%s.html' % (path, name)
+        else:
+            url = '%s.html' % name
+        if ispackage:
+            text = '<strong>%s</strong>&nbsp;(package)' % name
+        else:
+            text = name
+        return '<a href="%s">%s</a>' % (url, text)
+
+    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+        """Mark up some plain text, given a context of symbols to look for.
+        Each context dictionary maps object names to anchor names."""
+        escape = escape or self.escape
+        results = []
+        here = 0
+        # One alternation matching, in order: URLs, RFC references, PEP
+        # references, and (possibly self.-qualified) identifiers.
+        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
+                                r'RFC[- ]?(\d+)|'
+                                r'PEP[- ]?(\d+)|'
+                                r'(self\.)?(\w+))')
+        while 1:
+            match = pattern.search(text, here)
+            if not match: break
+            start, end = match.span()
+            # Emit the escaped plain text between matches.
+            results.append(escape(text[here:start]))
+
+            all, scheme, rfc, pep, selfdot, name = match.groups()
+            if scheme:
+                url = escape(all).replace('"', '&quot;')
+                results.append('<a href="%s">%s</a>' % (url, url))
+            elif rfc:
+                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif pep:
+                url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif text[end:end+1] == '(':
+                # Followed by '(' -- looks like a call; try all callables.
+                results.append(self.namelink(name, methods, funcs, classes))
+            elif selfdot:
+                results.append('self.<strong>%s</strong>' % name)
+            else:
+                results.append(self.namelink(name, classes))
+            here = end
+        # Trailing text after the last match.
+        results.append(escape(text[here:]))
+        return join(results, '')
+
+    # ---------------------------------------------- type-specific routines
+
+    def formattree(self, tree, modname, parent=None):
+        """Produce HTML for a class tree as given by inspect.getclasstree()."""
+        result = ''
+        # `tree` alternates (class, bases) tuples with nested lists of the
+        # class's children, as inspect.getclasstree() produces.
+        for entry in tree:
+            if type(entry) is type(()):
+                c, bases = entry
+                result = result + '<dt><font face="helvetica, arial">'
+                result = result + self.classlink(c, modname)
+                # Show explicit bases unless it is just the obvious parent.
+                if bases and bases != (parent,):
+                    parents = []
+                    for base in bases:
+                        parents.append(self.classlink(base, modname))
+                    result = result + '(' + join(parents, ', ') + ')'
+                result = result + '\n</font></dt>'
+            elif type(entry) is type([]):
+                # A list entry holds subclasses of the preceding tuple's
+                # class `c`, which is deliberately reused here as parent.
+                result = result + '<dd>\n%s</dd>\n' % self.formattree(
+                    entry, modname, c)
+        return '<dl>\n%s</dl>\n' % result
+
+    def docmodule(self, object, name=None, mod=None, *ignored):
+        """Produce HTML documentation for a module object."""
+        name = object.__name__ # ignore the passed-in name
+        # Build a breadcrumb: each dotted prefix of the name links to its
+        # package page; only the last component is plain text.
+        parts = split(name, '.')
+        links = []
+        for i in range(len(parts)-1):
+            links.append(
+                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
+                (join(parts[:i+1], '.'), parts[i]))
+        linkedname = join(links + parts[-1:], '.')
+        head = '<big><big><strong>%s</strong></big></big>' % linkedname
+        try:
+            path = inspect.getabsfile(object)
+            url = path
+            if sys.platform == 'win32':
+                import nturl2path
+                url = nturl2path.pathname2url(path)
+            filelink = '<a href="file:%s">%s</a>' % (url, path)
+        except TypeError:
+            # getabsfile raises TypeError for built-in modules.
+            filelink = '(built-in)'
+        info = []
+        if hasattr(object, '__version__'):
+            version = str(object.__version__)
+            # Strip CVS '$Revision: ... $' markers; the '$' is split so this
+            # source line itself isn't rewritten by CVS keyword expansion.
+            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
+                version = strip(version[11:-1])
+            info.append('version %s' % self.escape(version))
+        if hasattr(object, '__date__'):
+            info.append(self.escape(str(object.__date__)))
+        if info:
+            head = head + ' (%s)' % join(info, ', ')
+        result = self.heading(
+            head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink)
+
+        modules = inspect.getmembers(object, inspect.ismodule)
+
+        # Classes defined in (or attributed to) this module, plus anchor
+        # URLs for them keyed by both name and class object.
+        classes, cdict = [], {}
+        for key, value in inspect.getmembers(object, inspect.isclass):
+            if (inspect.getmodule(value) or object) is object:
+                classes.append((key, value))
+                cdict[key] = cdict[value] = '#' + key
+        # Also map base classes living in other modules to cross-page links.
+        for key, value in classes:
+            for base in value.__bases__:
+                key, modname = base.__name__, base.__module__
+                module = sys.modules.get(modname)
+                if modname != name and module and hasattr(module, key):
+                    if getattr(module, key) is base:
+                        if not cdict.has_key(key):
+                            cdict[key] = cdict[base] = modname + '.html#' + key
+        # Functions/builtins belonging to this module, with '#-' anchors.
+        funcs, fdict = [], {}
+        for key, value in inspect.getmembers(object, inspect.isroutine):
+            if inspect.isbuiltin(value) or inspect.getmodule(value) is object:
+                funcs.append((key, value))
+                fdict[key] = '#-' + key
+                if inspect.isfunction(value): fdict[value] = fdict[key]
+        data = []
+        for key, value in inspect.getmembers(object, isdata):
+            if key not in ['__builtins__', '__doc__']:
+                data.append((key, value))
+
+        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
+        doc = doc and '<tt>%s</tt>' % doc
+        result = result + '<p>%s</p>\n' % doc
+
+        if hasattr(object, '__path__'):
+            # This is a package: list its submodules and subpackages.
+            modpkgs = []
+            modnames = []
+            for file in os.listdir(object.__path__[0]):
+                path = os.path.join(object.__path__[0], file)
+                modname = inspect.getmodulename(file)
+                if modname and modname not in modnames:
+                    modpkgs.append((modname, name, 0, 0))
+                    modnames.append(modname)
+                elif ispackage(path):
+                    modpkgs.append((file, name, 1, 0))
+            modpkgs.sort()
+            contents = self.multicolumn(modpkgs, self.modpkglink)
+            result = result + self.bigsection(
+                'Package Contents', '#ffffff', '#aa55cc', contents)
+        elif modules:
+            contents = self.multicolumn(
+                modules, lambda (key, value), s=self: s.modulelink(value))
+            # NOTE(review): '#fffff' below has only five hex digits; looks
+            # like a typo for '#ffffff' (kept verbatim from CPython 2.2).
+            result = result + self.bigsection(
+                'Modules', '#fffff', '#aa55cc', contents)
+
+        if classes:
+            classlist = map(lambda (key, value): value, classes)
+            contents = [
+                self.formattree(inspect.getclasstree(classlist, 1), name)]
+            for key, value in classes:
+                contents.append(self.document(value, key, name, fdict, cdict))
+            result = result + self.bigsection(
+                'Classes', '#ffffff', '#ee77aa', join(contents))
+        if funcs:
+            contents = []
+            for key, value in funcs:
+                contents.append(self.document(value, key, name, fdict, cdict))
+            result = result + self.bigsection(
+                'Functions', '#ffffff', '#eeaa77', join(contents))
+        if data:
+            contents = []
+            for key, value in data:
+                contents.append(self.document(value, key))
+            result = result + self.bigsection(
+                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
+        if hasattr(object, '__author__'):
+            contents = self.markup(str(object.__author__), self.preformat)
+            result = result + self.bigsection(
+                'Author', '#ffffff', '#7799ee', contents)
+        if hasattr(object, '__credits__'):
+            contents = self.markup(str(object.__credits__), self.preformat)
+            result = result + self.bigsection(
+                'Credits', '#ffffff', '#7799ee', contents)
+
+        return result
+
+    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
+                 *ignored):
+        """Produce HTML documentation for a class object."""
+        realname = object.__name__
+        name = name or realname
+        bases = object.__bases__
+
+        contents = []
+        push = contents.append
+
+        # Cute little class to pump out a horizontal rule between sections.
+        class HorizontalRule:
+            def __init__(self):
+                self.needone = 0
+            def maybe(self):
+                if self.needone:
+                    push('<hr>\n')
+                self.needone = 1
+        hr = HorizontalRule()
+
+        # List the mro, if non-trivial.
+        mro = list(inspect.getmro(object))
+        if len(mro) > 2:
+            hr.maybe()
+            push('<dl><dt>Method resolution order:</dt>\n')
+            for base in mro:
+                push('<dd>%s</dd>\n' % self.classlink(base,
+                                                      object.__module__))
+            push('</dl>\n')
+
+        # Emit the attrs matching `predicate` under heading `msg`;
+        # return the attrs that did not match.
+        def spill(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    push(self.document(getattr(object, name), name, mod,
+                                       funcs, classes, mdict, object))
+                    push('\n')
+            return attrs
+
+        # Like spill(), but formats property objects: docstring plus the
+        # fget/fset/fdel accessors that are defined.
+        def spillproperties(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    push('<dl><dt><strong>%s</strong></dt>\n' % name)
+                    if value.__doc__ is not None:
+                        doc = self.markup(value.__doc__, self.preformat,
+                                          funcs, classes, mdict)
+                        push('<dd><tt>%s</tt></dd>\n' % doc)
+                    for attr, tag in [("fget", " getter"),
+                                      ("fset", " setter"),
+                                      ("fdel", " deleter")]:
+                        func = getattr(value, attr)
+                        if func is not None:
+                            base = self.document(func, name + tag, mod,
+                                                 funcs, classes, mdict, object)
+                            push('<dd>%s</dd>\n' % base)
+                    push('</dl>\n')
+            return attrs
+
+        # Like spill(), but for plain data attributes (with the value's
+        # docstring when it is callable and documented).
+        def spilldata(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    base = self.docother(getattr(object, name), name, mod)
+                    if callable(value):
+                        doc = getattr(value, "__doc__", None)
+                    else:
+                        doc = None
+                    if doc is None:
+                        push('<dl><dt>%s</dl>\n' % base)
+                    else:
+                        doc = self.markup(getdoc(value), self.preformat,
+                                          funcs, classes, mdict)
+                        doc = '<dd><tt>%s</tt>' % doc
+                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
+                    push('\n')
+            return attrs
+
+        # Anchor table: maps attribute names (and, when hashable, their
+        # values) to '#<class>-<attr>' anchors for cross-linking.
+        attrs = inspect.classify_class_attrs(object)
+        mdict = {}
+        for key, kind, homecls, value in attrs:
+            mdict[key] = anchor = '#' + name + '-' + key
+            value = getattr(object, key)
+            try:
+                # The value may not be hashable (e.g., a data attr with
+                # a dict or list value).
+                mdict[value] = anchor
+            except TypeError:
+                pass
+
+        # Walk the mro, documenting each class's own attrs segregated by
+        # kind, with "inherited from" tags past the class itself.
+        while attrs:
+            if mro:
+                thisclass = mro.pop(0)
+            else:
+                thisclass = attrs[0][2]
+            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
+
+            if thisclass is object:
+                tag = "defined here"
+            else:
+                tag = "inherited from %s" % self.classlink(thisclass,
+                                                          object.__module__)
+            tag += ':<br>\n'
+
+            # Sort attrs by name.
+            attrs.sort(lambda t1, t2: cmp(t1[0], t2[0]))
+
+            # Pump out the attrs, segregated by kind.
+            attrs = spill("Methods %s" % tag, attrs,
+                          lambda t: t[1] == 'method')
+            attrs = spill("Class methods %s" % tag, attrs,
+                          lambda t: t[1] == 'class method')
+            attrs = spill("Static methods %s" % tag, attrs,
+                          lambda t: t[1] == 'static method')
+            attrs = spillproperties("Properties %s" % tag, attrs,
+                                    lambda t: t[1] == 'property')
+            attrs = spilldata("Data and non-method functions %s" % tag, attrs,
+                              lambda t: t[1] == 'data')
+            assert attrs == []
+            attrs = inherited
+
+        contents = ''.join(contents)
+
+        if name == realname:
+            title = '<a name="%s">class <strong>%s</strong></a>' % (
+                name, realname)
+        else:
+            # Documented under an alias: show 'alias = class realname'.
+            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
+                name, name, realname)
+        if bases:
+            parents = []
+            for base in bases:
+                parents.append(self.classlink(base, object.__module__))
+            title = title + '(%s)' % join(parents, ', ')
+        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
+        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc or '&nbsp;'
+
+        return self.section(title, '#000000', '#ffc8d8', contents, 5, doc)
+
+    def formatvalue(self, object):
+        """Format an argument default value as text."""
+        # Rendered grey so defaults don't compete with the signature.
+        return self.grey('=' + self.repr(object))
+
+    def docroutine(self, object, name=None, mod=None,
+                   funcs={}, classes={}, methods={}, cl=None):
+        """Produce HTML documentation for a function or method object."""
+        realname = object.__name__
+        name = name or realname
+        # Anchor is '<class>-<name>' (or '-<name>' at module level).
+        anchor = (cl and cl.__name__ or '') + '-' + name
+        note = ''
+        skipdocs = 0
+        if inspect.ismethod(object):
+            imclass = object.im_class
+            if cl:
+                # Documenting within a class: note methods inherited from
+                # a different class.
+                if imclass is not cl:
+                    note = ' from ' + self.classlink(imclass, mod)
+            else:
+                if object.im_self:
+                    note = ' method of %s instance' % self.classlink(
+                        object.im_self.__class__, mod)
+                else:
+                    note = ' unbound %s method' % self.classlink(imclass,mod)
+            # Document the underlying function from here on.
+            object = object.im_func
+
+        if name == realname:
+            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
+        else:
+            # An alias: link back to the real definition if it is in the
+            # same class, and skip repeating its docs.
+            if (cl and cl.__dict__.has_key(realname) and
+                cl.__dict__[realname] is object):
+                reallink = '<a href="#%s">%s</a>' % (
+                    cl.__name__ + '-' + realname, realname)
+                skipdocs = 1
+            else:
+                reallink = realname
+            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
+                anchor, name, reallink)
+        if inspect.isfunction(object):
+            args, varargs, varkw, defaults = inspect.getargspec(object)
+            argspec = inspect.formatargspec(
+                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
+            if realname == '<lambda>':
+                title = '<strong>%s</strong> <em>lambda</em> ' % name
+                argspec = argspec[1:-1] # remove parentheses
+        else:
+            # Built-ins have no introspectable signature.
+            argspec = '(...)'
+
+        decl = title + argspec + (note and self.grey(
+               '<font face="helvetica, arial">%s</font>' % note))
+
+        if skipdocs:
+            return '<dl><dt>%s</dt></dl>\n' % decl
+        else:
+            doc = self.markup(
+                getdoc(object), self.preformat, funcs, classes, methods)
+            doc = doc and '<dd><tt>%s</tt></dd>' % doc
+            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
+
+    def docother(self, object, name=None, mod=None, *ignored):
+        """Produce HTML documentation for a data object."""
+        lhs = name and '<strong>%s</strong> = ' % name or ''
+        return lhs + self.repr(object)
+
+    def index(self, dir, shadowed=None):
+        """Generate an HTML index for a directory of modules."""
+        modpkgs = []
+        if shadowed is None: shadowed = {}
+        seen = {}
+        files = os.listdir(dir)
+
+        # Record a module/package once per index and mark it in `shadowed`,
+        # so callers indexing several directories can flag name clashes.
+        def found(name, ispackage,
+                  modpkgs=modpkgs, shadowed=shadowed, seen=seen):
+            if not seen.has_key(name):
+                modpkgs.append((name, '', ispackage, shadowed.has_key(name)))
+                seen[name] = 1
+                shadowed[name] = 1
+
+        # Package spam/__init__.py takes precedence over module spam.py.
+        for file in files:
+            path = os.path.join(dir, file)
+            if ispackage(path): found(file, 1)
+        for file in files:
+            path = os.path.join(dir, file)
+            if os.path.isfile(path):
+                modname = inspect.getmodulename(file)
+                if modname: found(modname, 0)
+
+        modpkgs.sort()
+        contents = self.multicolumn(modpkgs, self.modpkglink)
+        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
+
+# -------------------------------------------- text documentation generator
+
+class TextRepr(Repr):
+    """Class for safely making a text representation of a Python object."""
+    def __init__(self):
+        Repr.__init__(self)
+        # Cap container/string sizes so huge objects stay readable.
+        self.maxlist = self.maxtuple = 20
+        self.maxdict = 10
+        self.maxstring = self.maxother = 100
+
+    def repr1(self, x, level):
+        # Dispatch to a type-specific repr_<typename> method when one
+        # exists (spaces in type names become underscores).
+        methodname = 'repr_' + join(split(type(x).__name__), '_')
+        if hasattr(self, methodname):
+            return getattr(self, methodname)(x, level)
+        else:
+            return cram(stripid(repr(x)), self.maxother)
+
+    def repr_string(self, x, level):
+        test = cram(x, self.maxstring)
+        testrepr = repr(test)
+        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
+            # Backslashes are only literal in the string and are never
+            # needed to make any special characters, so show a raw string.
+            return 'r' + testrepr[0] + test + testrepr[0]
+        return testrepr
+
+    repr_str = repr_string
+
+    def repr_instance(self, x, level):
+        try:
+            return cram(stripid(repr(x)), self.maxstring)
+        except:
+            # repr() of a broken instance may raise anything; fall back to
+            # a minimal class-name placeholder rather than propagate.
+            return '<%s instance>' % x.__class__.__name__
+
+class TextDoc(Doc):
+    """Formatter class for text documentation."""
+
+    # ------------------------------------------- text formatting utilities
+
+    # Shared TextRepr instance; its bound repr method becomes the class's
+    # object-to-text renderer.
+    _repr_instance = TextRepr()
+    repr = _repr_instance.repr
+
+    def bold(self, text):
+        """Format a string in bold by overstriking."""
+        # char + backspace + char is the classic overstrike convention
+        # understood by pagers such as more/less.
+        return join(map(lambda ch: ch + '\b' + ch, text), '')
+
+    def indent(self, text, prefix='    '):
+        """Indent text by prepending a given prefix to each line."""
+        if not text: return ''
+        lines = split(text, '\n')
+        lines = map(lambda line, prefix=prefix: prefix + line, lines)
+        # Only the last line is rstripped; interior lines keep the prefix
+        # (docclass relies on this for its ' |  ' gutter).
+        if lines: lines[-1] = rstrip(lines[-1])
+        return join(lines, '\n')
+
+    def section(self, title, contents):
+        """Format a section with a given heading."""
+        # Bold heading, indented body, blank line separating sections.
+        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
+
+    # ---------------------------------------------- type-specific routines
+
+    def formattree(self, tree, modname, parent=None, prefix=''):
+        """Render in text a class tree as returned by inspect.getclasstree()."""
+        result = ''
+        for entry in tree:
+            if type(entry) is type(()):
+                # (class, bases) tuple: one output line for the class.
+                c, bases = entry
+                result = result + prefix + classname(c, modname)
+                if bases and bases != (parent,):
+                    parents = map(lambda c, m=modname: classname(c, m), bases)
+                    result = result + '(%s)' % join(parents, ', ')
+                result = result + '\n'
+            elif type(entry) is type([]):
+                # A list entry holds the subclasses of the class `c` from
+                # the preceding tuple.  NOTE(review): assumes a tuple always
+                # precedes a list in getclasstree() output, else `c` is
+                # unbound here -- confirm against inspect's contract.
+                result = result + self.formattree(
+                    entry, modname, c, prefix + '    ')
+        return result
+
+    def docmodule(self, object, name=None, mod=None):
+        """Produce text documentation for a given module object."""
+        name = object.__name__ # ignore the passed-in name
+        synop, desc = splitdoc(getdoc(object))
+        result = self.section('NAME', name + (synop and ' - ' + synop))
+
+        try:
+            file = inspect.getabsfile(object)
+        except TypeError:
+            file = '(built-in)'
+        result = result + self.section('FILE', file)
+        if desc:
+            result = result + self.section('DESCRIPTION', desc)
+
+        # Partition the module's members into classes, functions and data,
+        # keeping only those that actually belong to this module.
+        classes = []
+        for key, value in inspect.getmembers(object, inspect.isclass):
+            if (inspect.getmodule(value) or object) is object:
+                classes.append((key, value))
+        funcs = []
+        for key, value in inspect.getmembers(object, inspect.isroutine):
+            if inspect.isbuiltin(value) or inspect.getmodule(value) is object:
+                funcs.append((key, value))
+        data = []
+        for key, value in inspect.getmembers(object, isdata):
+            if key not in ['__builtins__', '__doc__']:
+                data.append((key, value))
+
+        if hasattr(object, '__path__'):
+            # Packages: list the modules and subpackages they contain.
+            modpkgs = []
+            for file in os.listdir(object.__path__[0]):
+                path = os.path.join(object.__path__[0], file)
+                modname = inspect.getmodulename(file)
+                if modname and modname not in modpkgs:
+                    modpkgs.append(modname)
+                elif ispackage(path):
+                    modpkgs.append(file + ' (package)')
+            modpkgs.sort()
+            result = result + self.section(
+                'PACKAGE CONTENTS', join(modpkgs, '\n'))
+
+        if classes:
+            classlist = map(lambda (key, value): value, classes)
+            contents = [self.formattree(
+                inspect.getclasstree(classlist, 1), name)]
+            for key, value in classes:
+                contents.append(self.document(value, key, name))
+            result = result + self.section('CLASSES', join(contents, '\n'))
+
+        if funcs:
+            contents = []
+            for key, value in funcs:
+                contents.append(self.document(value, key, name))
+            result = result + self.section('FUNCTIONS', join(contents, '\n'))
+
+        if data:
+            contents = []
+            for key, value in data:
+                contents.append(self.docother(value, key, name, 70))
+            result = result + self.section('DATA', join(contents, '\n'))
+
+        if hasattr(object, '__version__'):
+            version = str(object.__version__)
+            # Unwrap RCS '$Revision: ... $' markers into a bare number.
+            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
+                version = strip(version[11:-1])
+            result = result + self.section('VERSION', version)
+        if hasattr(object, '__date__'):
+            result = result + self.section('DATE', str(object.__date__))
+        if hasattr(object, '__author__'):
+            result = result + self.section('AUTHOR', str(object.__author__))
+        if hasattr(object, '__credits__'):
+            result = result + self.section('CREDITS', str(object.__credits__))
+        return result
+
+    def docclass(self, object, name=None, mod=None):
+        """Produce text documentation for a given class object."""
+        realname = object.__name__
+        name = name or realname
+        bases = object.__bases__
+
+        def makename(c, m=object.__module__):
+            return classname(c, m)
+
+        if name == realname:
+            title = 'class ' + self.bold(realname)
+        else:
+            title = self.bold(name) + ' = class ' + realname
+        if bases:
+            parents = map(makename, bases)
+            title = title + '(%s)' % join(parents, ', ')
+
+        doc = getdoc(object)
+        contents = doc and [doc + '\n'] or []
+        push = contents.append
+
+        # List the mro, if non-trivial.
+        mro = list(inspect.getmro(object))
+        if len(mro) > 2:
+            push("Method resolution order:")
+            for base in mro:
+                push('    ' + makename(base))
+            push('')
+
+        # Cute little class to pump out a horizontal rule between sections.
+        class HorizontalRule:
+            def __init__(self):
+                self.needone = 0
+            def maybe(self):
+                if self.needone:
+                    push('-' * 70)
+                self.needone = 1
+        hr = HorizontalRule()
+
+        # Emit the attributes matching `predicate`; return the rest.
+        def spill(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    push(self.document(getattr(object, name),
+                                       name, mod, object))
+            return attrs
+
+        def spillproperties(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    push(name)
+                    need_blank_after_doc = 0
+                    doc = getdoc(value) or ''
+                    if doc:
+                        push(self.indent(doc))
+                        need_blank_after_doc = 1
+                    # Document each of the property's accessor functions.
+                    for attr, tag in [("fget", " getter"),
+                                      ("fset", " setter"),
+                                      ("fdel", " deleter")]:
+                        func = getattr(value, attr)
+                        if func is not None:
+                            if need_blank_after_doc:
+                                push('')
+                                need_blank_after_doc = 0
+                            base = self.docother(func, name + tag, mod, 70)
+                            push(self.indent(base))
+                    push('')
+            return attrs
+
+        def spilldata(msg, attrs, predicate):
+            ok, attrs = _split_list(attrs, predicate)
+            if ok:
+                hr.maybe()
+                push(msg)
+                for name, kind, homecls, value in ok:
+                    if callable(value):
+                        doc = getattr(value, "__doc__", None)
+                    else:
+                        doc = None
+                    push(self.docother(getattr(object, name),
+                                       name, mod, 70, doc) + '\n')
+            return attrs
+
+        # Walk the mro, documenting the attributes each class introduces.
+        attrs = inspect.classify_class_attrs(object)
+        while attrs:
+            if mro:
+                thisclass = mro.pop(0)
+            else:
+                thisclass = attrs[0][2]
+            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
+
+            if thisclass is object:
+                tag = "defined here"
+            else:
+                tag = "inherited from %s" % classname(thisclass,
+                                                      object.__module__)
+
+            # Sort attrs by name.
+            attrs.sort(lambda t1, t2: cmp(t1[0], t2[0]))
+
+            # Pump out the attrs, segregated by kind.
+            attrs = spill("Methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'method')
+            attrs = spill("Class methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'class method')
+            attrs = spill("Static methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'static method')
+            attrs = spillproperties("Properties %s:\n" % tag, attrs,
+                                    lambda t: t[1] == 'property')
+            attrs = spilldata("Data and non-method functions %s:\n" % tag,
+                              attrs, lambda t: t[1] == 'data')
+            assert attrs == []
+            attrs = inherited
+
+        contents = '\n'.join(contents)
+        if not contents:
+            return title + '\n'
+        # Frame the class body with a ' |  ' gutter.
+        return title + '\n' + self.indent(rstrip(contents), ' |  ') + '\n'
+
+    def formatvalue(self, object):
+        """Format an argument default value as text."""
+        return '=' + self.repr(object)
+
+    def docroutine(self, object, name=None, mod=None, cl=None):
+        """Produce text documentation for a function or method object."""
+        realname = object.__name__
+        name = name or realname
+        note = ''
+        skipdocs = 0
+        if inspect.ismethod(object):
+            # Note the defining class (or instance) of a method, then
+            # document the underlying function object.
+            imclass = object.im_class
+            if cl:
+                if imclass is not cl:
+                    note = ' from ' + classname(imclass, mod)
+            else:
+                if object.im_self:
+                    note = ' method of %s instance' % classname(
+                        object.im_self.__class__, mod)
+                else:
+                    note = ' unbound %s method' % classname(imclass,mod)
+            object = object.im_func
+
+        if name == realname:
+            title = self.bold(realname)
+        else:
+            # Aliased name: skip the docs when the original is documented
+            # in the same class.
+            if (cl and cl.__dict__.has_key(realname) and
+                cl.__dict__[realname] is object):
+                skipdocs = 1
+            title = self.bold(name) + ' = ' + realname
+        if inspect.isfunction(object):
+            args, varargs, varkw, defaults = inspect.getargspec(object)
+            argspec = inspect.formatargspec(
+                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
+            if realname == '<lambda>':
+                title = 'lambda'
+                argspec = argspec[1:-1] # remove parentheses
+        else:
+            # Builtins and other non-Python callables expose no argspec.
+            argspec = '(...)'
+        decl = title + argspec + note
+
+        if skipdocs:
+            return decl + '\n'
+        else:
+            doc = getdoc(object) or ''
+            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
+
+    def docother(self, object, name=None, mod=None, maxlen=None, doc=None):
+        """Produce text documentation for a data object."""
+        repr = self.repr(object)
+        if maxlen:
+            # Measure against the un-bolded line: bold() inserts backspace
+            # pairs that would throw off the length arithmetic.
+            line = (name and name + ' = ' or '') + repr
+            chop = maxlen - len(line)
+            if chop < 0: repr = repr[:chop] + '...'
+        line = (name and self.bold(name) + ' = ' or '') + repr
+        if doc is not None:
+            line += '\n' + self.indent(str(doc))
+        return line
+
+# --------------------------------------------------------- user interfaces
+
+def pager(text):
+    """The first time this is called, determine what kind of pager to use."""
+    # Replace this module-level function with the concrete pager chosen by
+    # getpager(), so the detection cost is paid only once per process.
+    global pager
+    pager = getpager()
+    pager(text)
+
+def getpager():
+    """Decide what method to use for paging through text."""
+    if type(sys.stdout) is not types.FileType:
+        return plainpager
+    if not sys.stdin.isatty() or not sys.stdout.isatty():
+        return plainpager
+    if os.environ.get('TERM') in ['dumb', 'emacs']:
+        return plainpager
+    if os.environ.has_key('PAGER'):
+        if sys.platform == 'win32': # pipes completely broken in Windows
+            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
+        elif os.environ.get('TERM') in ['dumb', 'emacs']:
+            # NOTE(review): unreachable -- the TERM check above has already
+            # returned plainpager for these terminals.
+            return lambda text: pipepager(plain(text), os.environ['PAGER'])
+        else:
+            return lambda text: pipepager(text, os.environ['PAGER'])
+    if sys.platform == 'win32':
+        return lambda text: tempfilepager(plain(text), 'more <')
+    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
+        return lambda text: pipepager(text, 'less')
+
+    # Probe whether 'more' exists by running it on an empty temp file.
+    # NOTE(review): tempfile.mktemp() is race-prone; acceptable for this
+    # probe but not for untrusted environments.
+    import tempfile
+    filename = tempfile.mktemp()
+    open(filename, 'w').close()
+    try:
+        if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
+            return lambda text: pipepager(text, 'more')
+        else:
+            return ttypager
+    finally:
+        os.unlink(filename)
+
+def plain(text):
+    """Remove boldface formatting from text."""
+    # bold() encodes emphasis as <char><backspace><char>; strip the leading
+    # pair to recover the plain character.
+    return re.sub('.\b', '', text)
+
+def pipepager(text, cmd):
+    """Page through text by feeding it to another program."""
+    pipe = os.popen(cmd, 'w')
+    try:
+        pipe.write(text)
+        pipe.close()
+    except IOError:
+        pass # Ignore broken pipes caused by quitting the pager program.
+
+def tempfilepager(text, cmd):
+    """Page through text by invoking a program on a temporary file."""
+    import tempfile
+    # NOTE(review): mktemp() has a create-after-name race; kept here for
+    # Python 2.2 compatibility (mkstemp arrived in 2.3).
+    filename = tempfile.mktemp()
+    file = open(filename, 'w')
+    file.write(text)
+    file.close()
+    try:
+        os.system(cmd + ' ' + filename)
+    finally:
+        # Always clean up the temp file, even if the pager fails.
+        os.unlink(filename)
+
+def ttypager(text):
+    """Page through text on a text terminal."""
+    lines = split(plain(text), '\n')
+    try:
+        import tty
+        fd = sys.stdin.fileno()
+        old = tty.tcgetattr(fd)
+        tty.setcbreak(fd)
+        getchar = lambda: sys.stdin.read(1)
+    except (ImportError, AttributeError):
+        tty = None
+        getchar = lambda: sys.stdin.readline()[:-1][:1]
+
+    try:
+        r = inc = os.environ.get('LINES', 25) - 1
+        sys.stdout.write(join(lines[:inc], '\n') + '\n')
+        while lines[r:]:
+            sys.stdout.write('-- more --')
+            sys.stdout.flush()
+            c = getchar()
+
+            if c in ['q', 'Q']:
+                sys.stdout.write('\r          \r')
+                break
+            elif c in ['\r', '\n']:
+                sys.stdout.write('\r          \r' + lines[r] + '\n')
+                r = r + 1
+                continue
+            if c in ['b', 'B', '\x1b']:
+                r = r - inc - inc
+                if r < 0: r = 0
+            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
+            r = r + inc
+
+    finally:
+        if tty:
+            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
+
+def plainpager(text):
+    """Simply print unformatted text.  This is the ultimate fallback."""
+    sys.stdout.write(plain(text))
+
+def describe(thing):
+    """Produce a short description of the given thing."""
+    if inspect.ismodule(thing):
+        if thing.__name__ in sys.builtin_module_names:
+            return 'built-in module ' + thing.__name__
+        if hasattr(thing, '__path__'):
+            return 'package ' + thing.__name__
+        else:
+            return 'module ' + thing.__name__
+    if inspect.isbuiltin(thing):
+        return 'built-in function ' + thing.__name__
+    if inspect.isclass(thing):
+        return 'class ' + thing.__name__
+    if inspect.isfunction(thing):
+        return 'function ' + thing.__name__
+    if inspect.ismethod(thing):
+        return 'method ' + thing.__name__
+    if type(thing) is types.InstanceType:
+        return 'instance of ' + thing.__class__.__name__
+    return type(thing).__name__
+
+def locate(path, forceload=0):
+    """Locate an object by name or dotted path, importing as necessary."""
+    parts = split(path, '.')
+    # Import the longest importable prefix of the dotted path...
+    module, n = None, 0
+    while n < len(parts):
+        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
+        if nextmodule: module, n = nextmodule, n + 1
+        else: break
+    if module:
+        # ...then walk the remaining parts as attribute lookups.
+        object = module
+        for part in parts[n:]:
+            try: object = getattr(object, part)
+            except AttributeError: return None
+        return object
+    else:
+        # Nothing importable: fall back to builtins (implicitly returns
+        # None when the name is absent there too).
+        import __builtin__
+        if hasattr(__builtin__, path):
+            return getattr(__builtin__, path)
+
+# --------------------------------------- interactive interpreter interface
+
+# Shared formatter singletons used by doc(), writedoc() and Helper.
+text = TextDoc()
+html = HTMLDoc()
+
+def resolve(thing, forceload=0):
+    """Given an object or a path to an object, get the object and its name."""
+    if isinstance(thing, str):
+        # NOTE(review): unicode dotted paths are not matched by this check
+        # and fall through to the object branch -- confirm intended.
+        object = locate(thing, forceload)
+        if not object:
+            raise ImportError, 'no Python documentation found for %r' % thing
+        return object, thing
+    else:
+        return thing, getattr(thing, '__name__', None)
+
+def doc(thing, title='Python Library Documentation: %s', forceload=0):
+    """Display text documentation, given an object or a path to an object."""
+    try:
+        object, name = resolve(thing, forceload)
+        desc = describe(object)
+        module = inspect.getmodule(object)
+        # Qualify the description with the enclosing package or module.
+        if name and '.' in name:
+            desc += ' in ' + name[:name.rfind('.')]
+        elif module and module is not object:
+            desc += ' in module ' + module.__name__
+        pager(title % desc + '\n\n' + text.document(object, name))
+    except (ImportError, ErrorDuringImport), value:
+        print value
+
+def writedoc(thing, forceload=0):
+    """Write HTML documentation to a file in the current directory."""
+    try:
+        object, name = resolve(thing, forceload)
+        page = html.page(describe(object), html.document(object, name))
+        file = open(name + '.html', 'w')
+        file.write(page)
+        file.close()
+        print 'wrote', name + '.html'
+    except (ImportError, ErrorDuringImport), value:
+        print value
+
+def writedocs(dir, pkgpath='', done=None):
+    """Write out HTML documentation for all modules in a directory tree."""
+    if done is None: done = {}
+    for file in os.listdir(dir):
+        path = os.path.join(dir, file)
+        if ispackage(path):
+            # Recurse into packages, extending the dotted module prefix.
+            writedocs(path, pkgpath + file + '.', done)
+        elif os.path.isfile(path):
+            modname = inspect.getmodulename(path)
+            if modname:
+                # `done` de-duplicates modules seen via multiple paths.
+                modname = pkgpath + modname
+                if not done.has_key(modname):
+                    done[modname] = 1
+                    writedoc(modname)
+
+class Helper:
+    # Maps each keyword either to the name of another help entry (a plain
+    # string alias) or to a (documentation-location, related-topics) pair.
+    keywords = {
+        'and': 'BOOLEAN',
+        'assert': ('ref/assert', ''),
+        'break': ('ref/break', 'while for'),
+        'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
+        'continue': ('ref/continue', 'while for'),
+        'def': ('ref/function', ''),
+        'del': ('ref/del', 'BASICMETHODS'),
+        'elif': 'if',
+        'else': ('ref/if', 'while for'),
+        'except': 'try',
+        'exec': ('ref/exec', ''),
+        'finally': 'try',
+        'for': ('ref/for', 'break continue while'),
+        'from': 'import',
+        'global': ('ref/global', 'NAMESPACES'),
+        'if': ('ref/if', 'TRUTHVALUE'),
+        'import': ('ref/import', 'MODULES'),
+        'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
+        'is': 'COMPARISON',
+        'lambda': ('ref/lambdas', 'FUNCTIONS'),
+        'not': 'BOOLEAN',
+        'or': 'BOOLEAN',
+        'pass': ('ref/pass', ''),
+        'print': ('ref/print', ''),
+        'raise': ('ref/raise', 'EXCEPTIONS'),
+        'return': ('ref/return', 'FUNCTIONS'),
+        'try': ('ref/try', 'EXCEPTIONS'),
+        'while': ('ref/while', 'break continue if TRUTHVALUE'),
+        'yield': ('ref/yield', ''),
+    }
+
+    # Help topics: same value convention as `keywords` above -- either an
+    # alias string or a (documentation-location, related-topics) pair.
+    topics = {
+        'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
+        'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
+        'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
+        'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
+        'UNICODE': ('ref/strings', 'encodings unicode TYPES STRING'),
+        'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
+        'INTEGER': ('ref/integers', 'int range'),
+        'FLOAT': ('ref/floating', 'float math'),
+        'COMPLEX': ('ref/imaginary', 'complex cmath'),
+        'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
+        'MAPPINGS': 'DICTIONARIES',
+        'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
+        'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
+        'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
+        'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
+        'FRAMEOBJECTS': 'TYPES',
+        'TRACEBACKS': 'TYPES',
+        'NONE': ('lib/bltin-null-object', ''),
+        'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
+        'FILES': ('lib/bltin-file-objects', ''),
+        'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
+        'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
+        'MODULES': ('lib/typesmodules', 'import'),
+        'PACKAGES': 'import',
+        'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
+        'OPERATORS': 'EXPRESSIONS',
+        'PRECEDENCE': 'EXPRESSIONS',
+        'OBJECTS': ('ref/objects', 'TYPES'),
+        'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
+        'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
+        'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
+        'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
+        'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
+        'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
+        'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
+        'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
+        'EXECUTION': ('ref/naming', ''),
+        'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION'),
+        'SCOPING': 'NAMESPACES',
+        'FRAMES': 'NAMESPACES',
+        'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
+        'COERCIONS': 'CONVERSIONS',
+        'CONVERSIONS': ('ref/conversions', ''),
+        'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
+        'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
+        'PRIVATENAMES': ('ref/atom-identifiers', ''),
+        'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
+        'TUPLES': 'SEQUENCES',
+        'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
+        'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
+        'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
+        'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
+        'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
+        'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
+        'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
+        'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
+        'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
+        'CALLS': ('ref/calls', 'EXPRESSIONS'),
+        'POWER': ('ref/power', 'EXPRESSIONS'),
+        'UNARY': ('ref/unary', 'EXPRESSIONS'),
+        'BINARY': ('ref/binary', 'EXPRESSIONS'),
+        'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
+        'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
+        'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
+        'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
+        'ASSERTION': 'assert',
+        'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
+        'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
+        'DELETION': 'del',
+        'PRINTING': 'print',
+        'RETURNING': 'return',
+        'IMPORTING': 'import',
+        'CONDITIONAL': 'if',
+        'LOOPING': ('ref/compound', 'for while break continue'),
+        'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
+        'DEBUGGING': ('lib/module-pdb', 'pdb'),
+    }
+
+    def __init__(self, input, output):
+        self.input = input
+        self.output = output
+        self.docdir = None
+        # Search a fixed list of candidate locations for the HTML docs.
+        # Note the *last* matching directory wins, not the first.
+        execdir = os.path.dirname(sys.executable)
+        homedir = os.environ.get('PYTHONHOME')
+        for dir in [os.environ.get('PYTHONDOCS'),
+                    homedir and os.path.join(homedir, 'doc'),
+                    os.path.join(execdir, 'doc'),
+                    '/usr/doc/python-docs-' + split(sys.version)[0],
+                    '/usr/doc/python-' + split(sys.version)[0],
+                    '/usr/doc/python-docs-' + sys.version[:3],
+                    '/usr/doc/python-' + sys.version[:3]]:
+            if dir and os.path.isdir(os.path.join(dir, 'lib')):
+                self.docdir = dir
+
+    def __repr__(self):
+        # Typing 'help' (no parens) at the interactive prompt triggers
+        # __repr__; the caller frame name '?' identifies the top-level
+        # interpreter, so start an interactive session instead.
+        if inspect.stack()[1][3] == '?':
+            self()
+            return ''
+        return '<pydoc.Helper instance>'
+
+    def __call__(self, request=None):
+        if request is not None:
+            self.help(request)
+        else:
+            # No argument: run a full interactive help session.
+            self.intro()
+            self.interact()
+            self.output.write('''
+You are now leaving help and returning to the Python interpreter.
+If you want to ask for help on a particular object directly from the
+interpreter, you can type "help(object)".  Executing "help('string')"
+has the same effect as typing a particular string at the help> prompt.
+''')
+
+    def interact(self):
+        self.output.write('\n')
+        while 1:
+            self.output.write('help> ')
+            self.output.flush()
+            try:
+                # EOF (empty read) or Ctrl-C ends the session.
+                request = self.input.readline()
+                if not request: break
+            except KeyboardInterrupt: break
+            # Strip whitespace and any surrounding quotes before dispatch.
+            request = strip(replace(request, '"', '', "'", ''))
+            if lower(request) in ['q', 'quit']: break
+            self.help(request)
+
+    def help(self, request):
+        if type(request) is type(''):
+            # String requests: special commands first, then keyword/topic
+            # lookup, finally treat the text as a dotted object path.
+            if request == 'help': self.intro()
+            elif request == 'keywords': self.listkeywords()
+            elif request == 'topics': self.listtopics()
+            elif request == 'modules': self.listmodules()
+            elif request[:8] == 'modules ':
+                self.listmodules(split(request)[1])
+            elif self.keywords.has_key(request): self.showtopic(request)
+            elif self.topics.has_key(request): self.showtopic(request)
+            elif request: doc(request, 'Help on %s:')
+        elif isinstance(request, Helper): self()
+        else: doc(request, 'Help on %s:')
+        self.output.write('\n')
+
+    def intro(self):
+        # Print the welcome banner for the interactive help utility.
+        self.output.write('''
+Welcome to Python %s!  This is the online help utility.
+
+If this is your first time using Python, you should definitely check out
+the tutorial on the Internet at http://www.python.org/doc/tut/.
+
+Enter the name of any module, keyword, or topic to get help on writing
+Python programs and using Python modules.  To quit this help utility and
+return to the interpreter, just type "quit".
+
+To get a list of available modules, keywords, or topics, type "modules",
+"keywords", or "topics".  Each module also comes with a one-line summary
+of what it does; to list the modules whose summaries contain a given word
+such as "spam", type "modules spam".
+''' % sys.version[:3])
+
+    def list(self, items, columns=4, width=80):
+        # Write 'items' sorted, laid out column-major across 'columns'
+        # columns within 'width' characters.
+        items = items[:]
+        items.sort()
+        colw = width / columns                  # Python 2: integer division
+        rows = (len(items) + columns - 1) / columns
+        for row in range(rows):
+            for col in range(columns):
+                i = col * rows + row            # column-major indexing
+                if i < len(items):
+                    self.output.write(items[i])
+                    if col < columns - 1:
+                        # Pad to the column width (one space minimum).
+                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
+            self.output.write('\n')
+
+    def listkeywords(self):
+        # Show all Python keywords in columns (see self.list).
+        self.output.write('''
+Here is a list of the Python keywords.  Enter any keyword to get more help.
+
+''')
+        self.list(self.keywords.keys())
+
<br/>
+    def listtopics(self):
+        # Show all documented help topics in columns (see self.list).
+        self.output.write('''
+Here is a list of available topics.  Enter any topic name to get more help.
+
+''')
+        self.list(self.topics.keys())
+
+    def showtopic(self, topic):
+        # Render the HTML documentation page for a keyword or topic as
+        # plain text through the pager.  Requires self.docdir (located in
+        # __init__); otherwise explain how to point PYTHONDOCS at the docs.
+        if not self.docdir:
+            self.output.write('''
+Sorry, topic and keyword documentation is not available because the Python
+HTML documentation files could not be found.  If you have installed them,
+please set the environment variable PYTHONDOCS to indicate their location.
+''')
+            return
+        target = self.topics.get(topic, self.keywords.get(topic))
+        if not target:
+            self.output.write('no documentation found for %s\n' % repr(topic))
+            return
+        # A string target is an alias for another topic; follow it.
+        if type(target) is type(''):
+            return self.showtopic(target)
+
+        # Otherwise target is (filename, xrefs): the HTML file to show and
+        # a space-separated list of related topics.
+        filename, xrefs = target
+        filename = self.docdir + '/' + filename + '.html'
+        try:
+            file = open(filename)
+        except:
+            self.output.write('could not read docs from %s\n' % filename)
+            return
+
+        # Strip navigation bars and the address footer from the page before
+        # converting it to text.
+        divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
+        addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
+        document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
+        file.close()
+
+        # Convert the HTML to plain text with htmllib's DumbWriter; tables
+        # are approximated with paragraph breaks and tabs.
+        import htmllib, formatter, StringIO
+        buffer = StringIO.StringIO()
+        parser = htmllib.HTMLParser(
+            formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
+        parser.start_table = parser.do_p
+        parser.end_table = lambda parser=parser: parser.do_p({})
+        parser.start_tr = parser.do_br
+        parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
+        parser.feed(document)
+        # Replace non-breaking spaces and indent every line by two spaces.
+        buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n  ')
+        pager('  ' + strip(buffer) + '\n')
+        if xrefs:
+            buffer = StringIO.StringIO()
+            formatter.DumbWriter(buffer).send_flowing_data(
+                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
+            self.output.write('\n%s\n' % buffer.getvalue())
+
+    def listmodules(self, key=''):
+        # With a key, search module synopses for it (apropos); without one,
+        # scan and list every top-level module found on the path.
+        if key:
+            self.output.write('''
+Here is a list of matching modules.  Enter any module name to get more help.
+
+''')
+            apropos(key)
+        else:
+            self.output.write('''
+Please wait a moment while I gather a list of all available modules...
+
+''')
+            modules = {}
+            def callback(path, modname, desc, modules=modules):
+                # Collect only top-level names; mark packages explicitly.
+                if modname and modname[-9:] == '.__init__':
+                    modname = modname[:-9] + ' (package)'
+                if find(modname, '.') < 0:
+                    modules[modname] = 1
+            ModuleScanner().run(callback)
+            self.list(modules.keys())
+            self.output.write('''
+Enter any module name to get more help.  Or, type "modules spam" to search
+for modules whose descriptions contain the word "spam".
+''')
+
+# Module-level convenience instance: use help(thing) or just help().
+help = Helper(sys.stdin, sys.stdout)
+
+class Scanner:
+    """A generic tree iterator.
+
+    roots     -- list of starting nodes
+    children  -- callable(node) -> list of child nodes
+    descendp  -- callable(node) -> whether to descend into the node
+
+    next() returns nodes in depth-first order, or None when exhausted.
+    """
+    def __init__(self, roots, children, descendp):
+        self.roots = roots[:]
+        self.state = []
+        self.children = children
+        self.descendp = descendp
+
+    def next(self):
+        # Depth-first traversal driven by an explicit stack; each stack
+        # entry is (node, remaining-children-of-node).
+        if not self.state:
+            if not self.roots:
+                return None
+            root = self.roots.pop(0)
+            self.state = [(root, self.children(root))]
+        node, children = self.state[-1]
+        if not children:
+            # Current node exhausted: pop and recurse to its parent level.
+            self.state.pop()
+            return self.next()
+        child = children.pop(0)
+        if self.descendp(child):
+            self.state.append((child, self.children(child)))
+        return child
+
+class ModuleScanner(Scanner):
+    """An interruptible scanner that searches module synopses."""
+    def __init__(self):
+        # Nodes are (directory, package-prefix) tuples; start from every
+        # directory on the module search path.
+        roots = map(lambda dir: (dir, ''), pathdirs())
+        Scanner.__init__(self, roots, self.submodules, self.isnewpackage)
+        # os.stat(...)[1] is st_ino; inodes are remembered to detect
+        # circular symbolic links (see isnewpackage).
+        self.inodes = map(lambda (dir, pkg): os.stat(dir)[1], roots)
+
+    def submodules(self, (dir, package)):
+        # List child nodes of a directory: subpackages extend the dotted
+        # package prefix, plain files keep it.  (Tuple parameters are
+        # Python 2 syntax.)
+        children = []
+        for file in os.listdir(dir):
+            path = os.path.join(dir, file)
+            if ispackage(path):
+                children.append((path, package + (package and '.') + file))
+            else:
+                children.append((path, package))
+        children.sort() # so that spam.py comes before spam.pyc or spam.pyo
+        return children
+
+    def isnewpackage(self, (dir, package)):
+        # Descend only into packages not already visited via a symlink.
+        inode = os.path.exists(dir) and os.stat(dir)[1]
+        if not (os.path.islink(dir) and inode in self.inodes):
+            self.inodes.append(inode) # detect circular symbolic links
+            return ispackage(dir)
+
+    def run(self, callback, key=None, completer=None):
+        # Walk all modules, invoking callback(path, modname, desc) for each.
+        # With a key, only modules whose 'name - synopsis' contains it are
+        # reported.  Setting self.quit = 1 (e.g. from another thread) stops
+        # the scan; completer() is called when the scan finishes.
+        if key: key = lower(key)
+        self.quit = 0
+        seen = {}
+
+        # Built-in modules first; they have no file path.
+        for modname in sys.builtin_module_names:
+            if modname != '__main__':
+                seen[modname] = 1
+                if key is None:
+                    callback(None, modname, '')
+                else:
+                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
+                    if find(lower(modname + ' - ' + desc), key) >= 0:
+                        callback(None, modname, desc)
+
+        while not self.quit:
+            node = self.next()
+            if not node: break
+            path, package = node
+            modname = inspect.getmodulename(path)
+            if os.path.isfile(path) and modname:
+                modname = package + (package and '.') + modname
+                if not seen.has_key(modname):
+                    seen[modname] = 1 # if we see spam.py, skip spam.pyc
+                    if key is None:
+                        callback(path, modname, '')
+                    else:
+                        desc = synopsis(path) or ''
+                        if find(lower(modname + ' - ' + desc), key) >= 0:
+                            callback(path, modname, desc)
+        if completer: completer()
+
+def apropos(key):
+    """Print all the one-line module summaries that contain a substring."""
+    def callback(path, modname, desc):
+        if modname[-9:] == '.__init__':
+            modname = modname[:-9] + ' (package)'
+        print modname, desc and '- ' + desc
+    # Silence import-time warnings while scanning, if warnings is available.
+    try: import warnings
+    except ImportError: pass
+    else: warnings.filterwarnings('ignore') # ignore problems during import
+    ModuleScanner().run(callback, key)
+
+# --------------------------------------------------- web browser interface
+
+def serve(port, callback=None, completer=None):
+    """Serve pydoc HTML pages over HTTP on the given local port.
+
+    callback(server) is invoked once the server is active (it exposes
+    server.url and server.quit); completer() is invoked when serving stops.
+    """
+    import BaseHTTPServer, mimetools, select
+
+    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
+    class Message(mimetools.Message):
+        def __init__(self, fp, seekable=1):
+            Message = self.__class__
+            # Call rfc822.Message.__init__ directly, bypassing the
+            # (possibly stale) mimetools.Message.__init__.
+            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
+            self.encodingheader = self.getheader('content-transfer-encoding')
+            self.typeheader = self.getheader('content-type')
+            self.parsetype()
+            self.parseplist()
+
+    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+        def send_document(self, title, contents):
+            # Emit one complete HTML page; broken pipes are ignored.
+            try:
+                self.send_response(200)
+                self.send_header('Content-Type', 'text/html')
+                self.end_headers()
+                self.wfile.write(html.page(title, contents))
+            except IOError: pass
+
+        def do_GET(self):
+            # '/name.html' -> documentation for 'name'; '/' -> module index.
+            path = self.path
+            if path[-5:] == '.html': path = path[:-5]
+            if path[:1] == '/': path = path[1:]
+            if path and path != '.':
+                try:
+                    obj = locate(path, forceload=1)
+                except ErrorDuringImport, value:
+                    self.send_document(path, html.escape(str(value)))
+                    return
+                if obj:
+                    self.send_document(describe(obj), html.document(obj, path))
+                else:
+                    self.send_document(path,
+'no Python documentation found for %s' % repr(path))
+            else:
+                heading = html.heading(
+'<big><big><strong>Python: Index of Modules</strong></big></big>',
+'#ffffff', '#7799ee')
+                def bltinlink(name):
+                    return '<a href="%s.html">%s</a>' % (name, name)
+                names = filter(lambda x: x != '__main__',
+                               sys.builtin_module_names)
+                contents = html.multicolumn(names, bltinlink)
+                indices = ['<p>' + html.bigsection(
+                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]
+
+                seen = {}
+                for dir in pathdirs():
+                    indices.append(html.index(dir, seen))
+                contents = heading + join(indices) + '''<p align=right>
+<font color="#909090" face="helvetica, arial"><strong>
+pydoc</strong> by Ka-Ping Yee &lt;ping at lfw.org&gt;</font>'''
+                self.send_document('Index of Modules', contents)
+
+        def log_message(self, *args): pass
+
+    class DocServer(BaseHTTPServer.HTTPServer):
+        def __init__(self, port, callback):
+            host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
+            self.address = ('', port)
+            self.url = 'http://%s:%d/' % (host, port)
+            self.callback = callback
+            self.base.__init__(self, self.address, self.handler)
+
+        def serve_until_quit(self):
+            # Poll with a 1-second select timeout so self.quit (set from
+            # another thread, e.g. the GUI) is noticed promptly.
+            import select
+            self.quit = 0
+            while not self.quit:
+                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
+                if rd: self.handle_request()
+
+        def server_activate(self):
+            self.base.server_activate(self)
+            if self.callback: self.callback(self)
+
+    DocServer.base = BaseHTTPServer.HTTPServer
+    DocServer.handler = DocHandler
+    DocHandler.MessageClass = Message
+    try:
+        try:
+            DocServer(port, callback).serve_until_quit()
+        except (KeyboardInterrupt, select.error):
+            pass
+    finally:
+        if completer: completer()
+
+# ----------------------------------------------------- graphical interface
+
+def gui():
+    """Graphical interface (starts web server and pops up a control window)."""
+    class GUI:
+        # Tk control window: server status row, search row, and a
+        # collapsible listbox of search results.
+        def __init__(self, window, port=7464):
+            self.window = window
+            self.server = None
+            self.scanner = None
+
+            import Tkinter
+            self.server_frm = Tkinter.Frame(window)
+            self.title_lbl = Tkinter.Label(self.server_frm,
+                text='Starting server...\n ')
+            self.open_btn = Tkinter.Button(self.server_frm,
+                text='open browser', command=self.open, state='disabled')
+            self.quit_btn = Tkinter.Button(self.server_frm,
+                text='quit serving', command=self.quit, state='disabled')
+
+            self.search_frm = Tkinter.Frame(window)
+            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
+            self.search_ent = Tkinter.Entry(self.search_frm)
+            self.search_ent.bind('<Return>', self.search)
+            self.stop_btn = Tkinter.Button(self.search_frm,
+                text='stop', pady=0, command=self.stop, state='disabled')
+            if sys.platform == 'win32':
+                # Trying to hide and show this button crashes under Windows.
+                self.stop_btn.pack(side='right')
+
+            self.window.title('pydoc')
+            self.window.protocol('WM_DELETE_WINDOW', self.quit)
+            self.title_lbl.pack(side='top', fill='x')
+            self.open_btn.pack(side='left', fill='x', expand=1)
+            self.quit_btn.pack(side='right', fill='x', expand=1)
+            self.server_frm.pack(side='top', fill='x')
+
+            self.search_lbl.pack(side='left')
+            self.search_ent.pack(side='right', fill='x', expand=1)
+            self.search_frm.pack(side='top', fill='x')
+            self.search_ent.focus_set()
+
+            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
+            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
+            self.result_lst.bind('<Button-1>', self.select)
+            self.result_lst.bind('<Double-Button-1>', self.goto)
+            self.result_scr = Tkinter.Scrollbar(window,
+                orient='vertical', command=self.result_lst.yview)
+            self.result_lst.config(yscrollcommand=self.result_scr.set)
+
+            self.result_frm = Tkinter.Frame(window)
+            self.goto_btn = Tkinter.Button(self.result_frm,
+                text='go to selected', command=self.goto)
+            self.hide_btn = Tkinter.Button(self.result_frm,
+                text='hide results', command=self.hide)
+            self.goto_btn.pack(side='left', fill='x', expand=1)
+            self.hide_btn.pack(side='right', fill='x', expand=1)
+
+            # Remember collapsed and expanded window geometries so the
+            # results pane can be toggled (see expand/collapse).
+            self.window.update()
+            self.minwidth = self.window.winfo_width()
+            self.minheight = self.window.winfo_height()
+            self.bigminheight = (self.server_frm.winfo_reqheight() +
+                                 self.search_frm.winfo_reqheight() +
+                                 self.result_lst.winfo_reqheight() +
+                                 self.result_frm.winfo_reqheight())
+            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
+            self.expanded = 0
+            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
+            self.window.wm_minsize(self.minwidth, self.minheight)
+
+            # Run the HTTP doc server in a background thread; ready() and
+            # quit() are its activation/completion callbacks.
+            import threading
+            threading.Thread(
+                target=serve, args=(port, self.ready, self.quit)).start()
+
+        def ready(self, server):
+            # Server is up: show its URL and enable the buttons.
+            self.server = server
+            self.title_lbl.config(
+                text='Python documentation server at\n' + server.url)
+            self.open_btn.config(state='normal')
+            self.quit_btn.config(state='normal')
+
+        def open(self, event=None, url=None):
+            url = url or self.server.url
+            try:
+                import webbrowser
+                webbrowser.open(url)
+            except ImportError: # pre-webbrowser.py compatibility
+                if sys.platform == 'win32':
+                    os.system('start "%s"' % url)
+                elif sys.platform == 'mac':
+                    try: import ic
+                    except ImportError: pass
+                    else: ic.launchurl(url)
+                else:
+                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
+                    if rc: os.system('netscape "%s" &' % url)
+
+        def quit(self, event=None):
+            # Stop the server's polling loop and tear down the window.
+            if self.server:
+                self.server.quit = 1
+            self.window.quit()
+
+        def search(self, event=None):
+            # Kick off a background module scan for the entered key; any
+            # scan already running is told to quit first.
+            key = self.search_ent.get()
+            self.stop_btn.pack(side='right')
+            self.stop_btn.config(state='normal')
+            self.search_lbl.config(text='Searching for "%s"...' % key)
+            self.search_ent.forget()
+            self.search_lbl.pack(side='left')
+            self.result_lst.delete(0, 'end')
+            self.goto_btn.config(state='disabled')
+            self.expand()
+
+            import threading
+            if self.scanner:
+                self.scanner.quit = 1
+            self.scanner = ModuleScanner()
+            threading.Thread(target=self.scanner.run,
+                             args=(self.update, key, self.done)).start()
+
+        def update(self, path, modname, desc):
+            # Scan callback: append one result line to the listbox.
+            if modname[-9:] == '.__init__':
+                modname = modname[:-9] + ' (package)'
+            self.result_lst.insert('end',
+                modname + ' - ' + (desc or '(no description)'))
+
+        def stop(self, event=None):
+            if self.scanner:
+                self.scanner.quit = 1
+                self.scanner = None
+
+        def done(self):
+            # Scan finished: restore the search entry row.
+            self.scanner = None
+            self.search_lbl.config(text='Search for')
+            self.search_lbl.pack(side='left')
+            self.search_ent.pack(side='right', fill='x', expand=1)
+            if sys.platform != 'win32': self.stop_btn.forget()
+            self.stop_btn.config(state='disabled')
+
+        def select(self, event=None):
+            self.goto_btn.config(state='normal')
+
+        def goto(self, event=None):
+            # Open the documentation page for the selected result.
+            selection = self.result_lst.curselection()
+            if selection:
+                modname = split(self.result_lst.get(selection[0]))[0]
+                self.open(url=self.server.url + modname + '.html')
+
+        def collapse(self):
+            if not self.expanded: return
+            self.result_frm.forget()
+            self.result_scr.forget()
+            self.result_lst.forget()
+            self.bigwidth = self.window.winfo_width()
+            self.bigheight = self.window.winfo_height()
+            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
+            self.window.wm_minsize(self.minwidth, self.minheight)
+            self.expanded = 0
+
+        def expand(self):
+            if self.expanded: return
+            self.result_frm.pack(side='bottom', fill='x')
+            self.result_scr.pack(side='right', fill='y')
+            self.result_lst.pack(side='top', fill='both', expand=1)
+            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
+            self.window.wm_minsize(self.minwidth, self.bigminheight)
+            self.expanded = 1
+
+        def hide(self, event=None):
+            self.stop()
+            self.collapse()
+
+    import Tkinter
+    try:
+        gui = GUI(Tkinter.Tk())
+        Tkinter.mainloop()
+    except KeyboardInterrupt:
+        pass
+
+# -------------------------------------------------- command-line interface
+
+def ispath(x):
+    # True if x is a string containing an os.sep, i.e. looks like a path.
+    return isinstance(x, str) and find(x, os.sep) >= 0
+
+def cli():
+    """Command-line interface (looks at sys.argv to decide what to do)."""
+    import getopt
+    # Raised (and caught below) to trigger the usage message.
+    class BadUsage: pass
+
+    # Scripts don't get the current directory in their path by default.
+    scriptdir = os.path.dirname(sys.argv[0])
+    if scriptdir in sys.path:
+        sys.path.remove(scriptdir)
+    sys.path.insert(0, '.')
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
+        writing = 0
+
+        # -g: GUI; -k: keyword search; -p: HTTP server; -w: write HTML.
+        for opt, val in opts:
+            if opt == '-g':
+                gui()
+                return
+            if opt == '-k':
+                apropos(val)
+                return
+            if opt == '-p':
+                try:
+                    port = int(val)
+                except ValueError:
+                    raise BadUsage
+                def ready(server):
+                    print 'pydoc server ready at %s' % server.url
+                def stopped():
+                    print 'pydoc server stopped'
+                serve(port, ready, stopped)
+                return
+            if opt == '-w':
+                writing = 1
+
+        if not args: raise BadUsage
+        for arg in args:
+            if ispath(arg) and not os.path.exists(arg):
+                print 'file %r does not exist' % arg
+                break
+            try:
+                # A path argument is imported as a file; otherwise the
+                # dotted name is resolved by doc()/writedoc().
+                if ispath(arg) and os.path.isfile(arg):
+                    arg = importfile(arg)
+                if writing:
+                    if ispath(arg) and os.path.isdir(arg):
+                        writedocs(arg)
+                    else:
+                        writedoc(arg)
+                else:
+                    doc(arg)
+            except ErrorDuringImport, value:
+                print value
+
+    except (getopt.error, BadUsage):
+        cmd = sys.argv[0]
+        print """pydoc - the Python documentation tool
+
+%s <name> ...
+    Show text documentation on something.  <name> may be the name of a
+    function, module, or package, or a dotted reference to a class or
+    function within a module or module in a package.  If <name> contains
+    a '%s', it is used as the path to a Python source file to document.
+
+%s -k <keyword>
+    Search for a keyword in the synopsis lines of all available modules.
+
+%s -p <port>
+    Start an HTTP server on the given port on the local machine.
+
+%s -g
+    Pop up a graphical interface for finding and serving documentation.
+
+%s -w <name> ...
+    Write out the HTML documentation for a module to a file in the current
+    directory.  If <name> contains a '%s', it is treated as a filename; if
+    it names a directory, documentation is written for all the contents.
+""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
diff --git a/lib-python/2.2/quopri.py b/lib-python/2.2/quopri.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/quopri.py
@@ -0,0 +1,237 @@
+#! /usr/bin/env python
+
+"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
+
+# (Dec 1991 version).
+
+__all__ = ["encode", "decode", "encodestring", "decodestring"]
+
+ESCAPE = '='                 # quoted-printable escape character
+MAXLINESIZE = 76             # maximum encoded line length per RFC 1521
+HEX = '0123456789ABCDEF'     # hex digits used by quote()
+EMPTYSTRING = ''
+
+# Prefer the C implementations from binascii when available; the pure
+# Python code paths below are the fallback when they are None.
+try:
+    from binascii import a2b_qp, b2a_qp
+except:
+    a2b_qp = None
+    b2a_qp = None
+
+
+def needsquoting(c, quotetabs, header):
+    """Decide whether a particular character needs to be quoted.
+
+    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
+    quoted.  Note that line-ending tabs and spaces are always encoded, as per
+    RFC 1521.
+    """
+    if c in ' \t':
+        return quotetabs
+    # if header, we have to escape _ because _ is used to escape space
+    if c == '_':
+        return header
+    # Quote the escape character itself and anything outside printable ASCII.
+    return c == ESCAPE or not (' ' <= c <= '~')
+
+def quote(c):
+    """Quote a single character."""
+    # Produce '=XX' where XX is the character's value in uppercase hex.
+    i = ord(c)
+    return ESCAPE + HEX[i//16] + HEX[i%16]
+
+
+
+def encode(input, output, quotetabs, header = 0):
+    """Read 'input', apply quoted-printable encoding, and write to 'output'.
+
+    'input' and 'output' are files with readline() and write() methods.
+    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
+    quoted.  Note that line-ending tabs and spaces are always encoded, as per
+    RFC 1521.
+    The 'header' flag indicates whether we are encoding spaces as _ as per
+    RFC 1522.
+    """
+
+    # Fast path: delegate to the C implementation when binascii has it.
+    if b2a_qp is not None:
+        data = input.read()
+        odata = b2a_qp(data, quotetabs = quotetabs, header = header)
+        output.write(odata)
+        return
+
+    def write(s, output=output, lineEnd='\n'):
+        # RFC 1521 requires that the line ending in a space or tab must have
+        # that trailing character encoded.
+        if s and s[-1:] in ' \t':
+            output.write(s[:-1] + quote(s[-1]) + lineEnd)
+        elif s == '.':
+            output.write(quote(s) + lineEnd)
+        else:
+            output.write(s + lineEnd)
+
+    # Lines are written one iteration late (prevline/thisline dance) so
+    # that the final line can be emitted without a trailing newline below.
+    prevline = None
+    while 1:
+        line = input.readline()
+        if not line:
+            break
+        outline = []
+        # Strip off any readline induced trailing newline
+        stripped = ''
+        if line[-1:] == '\n':
+            line = line[:-1]
+            stripped = '\n'
+        # Calculate the un-length-limited encoded line
+        for c in line:
+            if needsquoting(c, quotetabs, header):
+                c = quote(c)
+            if header and c == ' ':
+                outline.append('_')
+            else:
+                outline.append(c)
+        # First, write out the previous line
+        if prevline is not None:
+            write(prevline)
+        # Now see if we need any soft line breaks because of RFC-imposed
+        # length limitations.  Then do the thisline->prevline dance.
+        thisline = EMPTYSTRING.join(outline)
+        while len(thisline) > MAXLINESIZE:
+            # Don't forget to include the soft line break `=' sign in the
+            # length calculation!
+            write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
+            thisline = thisline[MAXLINESIZE-1:]
+        # Write out the current line
+        prevline = thisline
+    # Write out the last line, without a trailing newline
+    if prevline is not None:
+        write(prevline, lineEnd=stripped)
+
+def encodestring(s, quotetabs = 0, header = 0):
+    # String-to-string convenience wrapper around encode().
+    if b2a_qp is not None:
+        return b2a_qp(s, quotetabs = quotetabs, header = header)
+    from cStringIO import StringIO
+    infp = StringIO(s)
+    outfp = StringIO()
+    encode(infp, outfp, quotetabs, header)
+    return outfp.getvalue()
+
+
+
+def decode(input, output, header = 0):
+    """Read 'input', apply quoted-printable decoding, and write to 'output'.
+    'input' and 'output' are files with readline() and write() methods.
+    If 'header' is true, decode underscore as space (per RFC 1522)."""
+
+    # Fast path: delegate to the C implementation when binascii has it.
+    if a2b_qp is not None:
+        data = input.read()
+        odata = a2b_qp(data, header = header)
+        output.write(odata)
+        return
+
+    # 'new' accumulates the decoded logical line across soft line breaks
+    # ('partial' lines are continued instead of flushed).
+    new = ''
+    while 1:
+        line = input.readline()
+        if not line: break
+        i, n = 0, len(line)
+        if n > 0 and line[n-1] == '\n':
+            partial = 0; n = n-1
+            # Strip trailing whitespace
+            while n > 0 and line[n-1] in " \t\r":
+                n = n-1
+        else:
+            partial = 1
+        while i < n:
+            c = line[i]
+            if c == '_' and header:
+                new = new + ' '; i = i+1
+            elif c != ESCAPE:
+                new = new + c; i = i+1
+            elif i+1 == n and not partial:
+                # '=' at end of line is a soft line break: continue the line.
+                partial = 1; break
+            elif i+1 < n and line[i+1] == ESCAPE:
+                new = new + ESCAPE; i = i+2
+            elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
+                new = new + chr(unhex(line[i+1:i+3])); i = i+3
+            else: # Bad escape sequence -- leave it in
+                new = new + c; i = i+1
+        if not partial:
+            output.write(new + '\n')
+            new = ''
+    # Flush any remaining partial line without adding a newline.
+    if new:
+        output.write(new)
+
+def decodestring(s, header = 0):
+    # String-to-string convenience wrapper around decode().
+    if a2b_qp is not None:
+        return a2b_qp(s, header = header)
+    from cStringIO import StringIO
+    infp = StringIO(s)
+    outfp = StringIO()
+    decode(infp, outfp, header = header)
+    return outfp.getvalue()
+
+
+
+# Other helper functions
+def ishex(c):
+    """Return true if the character 'c' is a hexadecimal digit."""
+    return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F'
+
+def unhex(s):
+    """Get the integer value of a hexadecimal number."""
+    # Accumulate digits left to right; a non-hex character stops the scan.
+    bits = 0
+    for c in s:
+        if '0' <= c <= '9':
+            i = ord('0')
+        elif 'a' <= c <= 'f':
+            i = ord('a')-10
+        elif 'A' <= c <= 'F':
+            i = ord('A')-10
+        else:
+            break
+        bits = bits*16 + (ord(c) - i)
+    return bits
+
+
+
+def main():
+    # Command-line driver: quopri [-t | -d] [file] ...
+    #   -t quote tabs while encoding; -d decode instead of encode;
+    #   '-' or no files means stdin.
+    import sys
+    import getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'td')
+    except getopt.error, msg:
+        sys.stdout = sys.stderr
+        print msg
+        print "usage: quopri [-t | -d] [file] ..."
+        print "-t: quote tabs"
+        print "-d: decode; default encode"
+        sys.exit(2)
+    deco = 0
+    tabs = 0
+    for o, a in opts:
+        if o == '-t': tabs = 1
+        if o == '-d': deco = 1
+    if tabs and deco:
+        sys.stdout = sys.stderr
+        print "-t and -d are mutually exclusive"
+        sys.exit(2)
+    if not args: args = ['-']
+    # sts becomes 1 if any file could not be opened; used as exit status.
+    sts = 0
+    for file in args:
+        if file == '-':
+            fp = sys.stdin
+        else:
+            try:
+                fp = open(file)
+            except IOError, msg:
+                sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
+                sts = 1
+                continue
+        if deco:
+            decode(fp, sys.stdout)
+        else:
+            encode(fp, sys.stdout, tabs)
+        if fp is not sys.stdin:
+            fp.close()
+    if sts:
+        sys.exit(sts)
diff --git a/lib-python/2.2/random.py b/lib-python/2.2/random.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/random.py
@@ -0,0 +1,779 @@
+"""Random variable generators.
+
+    integers
+    --------
+           uniform within range
+
+    sequences
+    ---------
+           pick random element
+           generate random permutation
+
+    distributions on the real line:
+    ------------------------------
+           uniform
+           normal (Gaussian)
+           lognormal
+           negative exponential
+           gamma
+           beta
+
+    distributions on the circle (angles 0 to 2pi)
+    ---------------------------------------------
+           circular uniform
+           von Mises
+
+Translated from anonymously contributed C/C++ source.
+
+Multi-threading note:  the random number generator used here is not thread-
+safe; it is possible that two calls return the same random value.  However,
+you can instantiate a different instance of Random() in each thread to get
+generators that don't share state, then use .setstate() and .jumpahead() to
+move the generators to disjoint segments of the full period.  For example,
+
+def create_generators(num, delta, firstseed=None):
+    ""\"Return list of num distinct generators.
+    Each generator has its own unique segment of delta elements from
+    Random.random()'s full period.
+    Seed the first generator with optional arg firstseed (default is
+    None, to seed from current time).
+    ""\"
+
+    from random import Random
+    g = Random(firstseed)
+    result = [g]
+    for i in range(num - 1):
+        laststate = g.getstate()
+        g = Random()
+        g.setstate(laststate)
+        g.jumpahead(delta)
+        result.append(g)
+    return result
+
+gens = create_generators(10, 1000000)
+
+That creates 10 distinct generators, which can be passed out to 10 distinct
+threads.  The generators don't share state so can be called safely in
+parallel.  So long as no thread calls its g.random() more than a million
+times (the second argument to create_generators), the sequences seen by
+each thread will not overlap.
+
+The period of the underlying Wichmann-Hill generator is 6,953,607,871,644,
+and that limits how far this technique can be pushed.
+
+Just for fun, note that since we know the period, .jumpahead() can also be
+used to "move backward in time":
+
+>>> g = Random(42)  # arbitrary
+>>> g.random()
+0.25420336316883324
+>>> g.jumpahead(6953607871644L - 1) # move *back* one
+>>> g.random()
+0.25420336316883324
+"""
+# XXX The docstring sucks.
+
+from math import log as _log, exp as _exp, pi as _pi, e as _e
+from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
+from math import floor as _floor
+
+__all__ = ["Random","seed","random","uniform","randint","choice",
+           "randrange","shuffle","normalvariate","lognormvariate",
+           "cunifvariate","expovariate","vonmisesvariate","gammavariate",
+           "stdgamma","gauss","betavariate","paretovariate","weibullvariate",
+           "getstate","setstate","jumpahead","whseed"]
+
def _verify(name, computed, expected):
    # Import-time sanity check: ensure a derived module constant matches
    # its known-good value to within a tiny tolerance on this platform.
    deviation = abs(computed - expected)
    if deviation <= 1e-7:
        return
    raise ValueError(
        "computed value for %s deviates too much "
        "(computed %g, expected %g)" % (name, computed, expected))
+
# Magic constants used by the distribution methods below, precomputed once
# at import time.  Each is checked by _verify() so a platform with unusual
# floating-point behavior fails loudly here rather than producing subtly
# wrong random deviates later.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
_verify('NV_MAGICCONST', NV_MAGICCONST, 1.71552776992141)

TWOPI = 2.0*_pi
_verify('TWOPI', TWOPI, 6.28318530718)

LOG4 = _log(4.0)
_verify('LOG4', LOG4, 1.38629436111989)

SG_MAGICCONST = 1.0 + _log(4.5)
_verify('SG_MAGICCONST', SG_MAGICCONST, 2.50407739677627)

# The checker is only needed during module initialization.
del _verify
+
+# Translated by Guido van Rossum from C source provided by
+# Adrian Baddeley.
+
class Random:
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.  Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread, and using the jumpahead()
    method to ensure that the generated sequences seen by each thread don't
    overlap.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), setstate() and jumpahead().

    """

    VERSION = 1     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """

        self.seed(x)

## -------------------- core generator -------------------

    # Specific to Wichmann-Hill generator.  Subclasses wishing to use a
    # different core generator should override the seed(), random(),
    # getstate(), setstate() and jumpahead() methods.

    def seed(self, a=None):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time.

        If a is not an int or long, hash(a) is used instead.

        If a is an int or long, a is used directly.  Distinct values between
        0 and 27814431486575L inclusive are guaranteed to yield distinct
        internal states (this guarantee is specific to the default
        Wichmann-Hill generator).
        """

        if a is None:
            # Initialize from current time
            import time
            a = long(time.time() * 256)

        if type(a) not in (type(3), type(3L)):
            a = hash(a)

        # Split a across the three congruential streams; the +1 keeps each
        # component nonzero (a zero stream would remain zero forever).
        a, x = divmod(a, 30268)
        a, y = divmod(a, 30306)
        a, z = divmod(a, 30322)
        self._seed = int(x)+1, int(y)+1, int(z)+1

        self.gauss_next = None

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""

        # Wichmann-Hill random number generator.
        #
        # Wichmann, B. A. & Hill, I. D. (1982)
        # Algorithm AS 183:
        # An efficient and portable pseudo-random number generator
        # Applied Statistics 31 (1982) 188-190
        #
        # see also:
        #        Correction to Algorithm AS 183
        #        Applied Statistics 33 (1984) 123
        #
        #        McLeod, A. I. (1985)
        #        A remark on Algorithm AS 183
        #        Applied Statistics 34 (1985),198-200

        # This part is thread-unsafe:
        # BEGIN CRITICAL SECTION
        x, y, z = self._seed
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        self._seed = x, y, z
        # END CRITICAL SECTION

        # Note:  on a platform using IEEE-754 double arithmetic, this can
        # never return 0.0 (asserted by Tim; proof too long for a comment).
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, self._seed, self.gauss_next

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 1:
            version, self._seed, self.gauss_next = state
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))

    def jumpahead(self, n):
        """Act as if n calls to random() were made, but quickly.

        n is an int, greater than or equal to 0.

        Example use:  If you have 2 threads and know that each will
        consume no more than a million random numbers, create two Random
        objects r1 and r2, then do
            r2.setstate(r1.getstate())
            r2.jumpahead(1000000)
        Then r1 and r2 will use guaranteed-disjoint segments of the full
        period.
        """

        if not n >= 0:
            raise ValueError("n must be >= 0")
        # Advance each stream n steps at once: multiplier**n mod modulus is
        # computed efficiently by the 3-argument pow().
        x, y, z = self._seed
        x = int(x * pow(171, n, 30269)) % 30269
        y = int(y * pow(172, n, 30307)) % 30307
        z = int(z * pow(170, n, 30323)) % 30323
        self._seed = x, y, z

    def __whseed(self, x=0, y=0, z=0):
        """Set the Wichmann-Hill seed from (x, y, z).

        These must be integers in the range [0, 256).
        """

        if not type(x) == type(y) == type(z) == type(0):
            raise TypeError('seeds must be integers')
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError('seeds must be in range(0, 256)')
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)

        self.gauss_next = None

    def whseed(self, a=None):
        """Seed from hashable object's hash code.

        None or no argument seeds from current time.  It is not guaranteed
        that objects with distinct hash codes lead to distinct internal
        states.

        This is obsolete, provided for compatibility with the seed routine
        used prior to Python 2.1.  Use the .seed() method instead.
        """

        if a is None:
            self.__whseed()
            return
        a = hash(a)
        a, x = divmod(a, 256)
        a, y = divmod(a, 256)
        a, z = divmod(a, 256)
        x = (x + a) % 256 or 1
        y = (y + a) % 256 or 1
        z = (z + a) % 256 or 1
        self.__whseed(x, y, z)

## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.

## -------------------- pickle support  -------------------

    def __getstate__(self): # for pickle
        return self.getstate()

    def __setstate__(self, state):  # for pickle
        self.setstate(state)

## -------------------- integer methods  -------------------

    def randrange(self, start, stop=None, step=1, int=int, default=None):
        """Choose a random item from range(start, stop[, step]).

        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        Do not supply the 'int' and 'default' arguments.
        """

        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = int(start)
        if istart != start:
            raise ValueError, "non-integer arg 1 for randrange()"
        if stop is default:
            if istart > 0:
                return int(self.random() * istart)
            raise ValueError, "empty range for randrange()"

        # stop argument supplied.
        istop = int(stop)
        if istop != stop:
            raise ValueError, "non-integer stop for randrange()"
        if step == 1 and istart < istop:
            try:
                return istart + int(self.random()*(istop - istart))
            except OverflowError:
                # This can happen if istop-istart > sys.maxint + 1, and
                # multiplying by random() doesn't reduce it to something
                # <= sys.maxint.  We know that the overall result fits
                # in an int, and can still do it correctly via math.floor().
                # But that adds another function call, so for speed we
                # avoided that whenever possible.
                return int(istart + _floor(self.random()*(istop - istart)))
        if step == 1:
            raise ValueError, "empty range for randrange()"

        # Non-unit step argument supplied.
        istep = int(step)
        if istep != step:
            raise ValueError, "non-integer step for randrange()"
        if istep > 0:
            n = (istop - istart + istep - 1) / istep
        elif istep < 0:
            n = (istop - istart + istep + 1) / istep
        else:
            raise ValueError, "zero step for randrange()"

        if n <= 0:
            raise ValueError, "empty range for randrange()"
        return istart + istep*int(self.random() * n)

    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """

        return self.randrange(a, b+1)

## -------------------- sequence methods  -------------------

    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        return seq[int(self.random() * len(seq))]

    def shuffle(self, x, random=None, int=int):
        """x, random=random.random -> shuffle list x in place; return None.

        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.

        Note that for even rather small len(x), the total number of
        permutations of x is larger than the period of most random number
        generators; this implies that "most" permutations of a long
        sequence can never be generated.
        """

        if random is None:
            random = self.random
        for i in xrange(len(x)-1, 0, -1):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = int(random() * (i+1))
            x[i], x[j] = x[j], x[i]

## -------------------- real-valued distributions  -------------------

## -------------------- uniform distribution -------------------

    def uniform(self, a, b):
        """Get a random number in the range [a, b)."""
        return a + (b-a) * self.random()

## -------------------- normal distribution --------------------

    def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.

        """
        # mu = mean, sigma = standard deviation

        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.

        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma

## -------------------- lognormal distribution --------------------

    def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- circular uniform --------------------

    def cunifvariate(self, mean, arc):
        """Circular uniform distribution.

        mean is the mean angle, and arc is the range of the distribution,
        centered around the mean angle.  Both values must be expressed in
        radians.  Returned values range between mean - arc/2 and
        mean + arc/2 and are normalized to between 0 and pi.

        Deprecated in version 2.3.  Use:
            (mean + arc * (Random.random() - 0.5)) % math.pi

        """
        # mean: mean angle (in radians between 0 and pi)
        # arc:  range of distribution (in radians between 0 and pi)

        return (mean + arc * (self.random() - 0.5)) % _pi

## -------------------- exponential distribution --------------------

    def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean.  (The parameter would be
        called "lambda", but that is a reserved word in Python.)  Returned
        values range from 0 to positive infinity.

        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)

        random = self.random
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u)/lambd

## -------------------- von Mises distribution --------------------

    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
        b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
        r = (1.0 + b * b)/(2.0 * b)

        while 1:
            u1 = random()

            z = _cos(_pi * u1)
            f = (1.0 + r * z)/(r + z)
            c = kappa * (r - f)

            u2 = random()

            if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)):
                break

        u3 = random()
        if u3 > 0.5:
            theta = (mu % TWOPI) + _acos(f)
        else:
            theta = (mu % TWOPI) - _acos(f)

        return theta

## -------------------- gamma distribution --------------------

    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        """

        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError, 'gammavariate: alpha and beta must be > 0.0'

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:   # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = pow(p, 1.0/alpha)
                else:
                    # p > 1
                    x = -_log((b-p)/alpha)
                u1 = random()
                if not (((p <= 1.0) and (u1 > _exp(-x))) or
                          ((p > 1)  and  (u1 > pow(x, alpha - 1.0)))):
                    break
            return x * beta


    def stdgamma(self, alpha, ainv, bbb, ccc):
        # This method was (and shall remain) undocumented.
        # This method is deprecated
        # for the following reasons:
        # 1. Returns same as .gammavariate(alpha, 1.0)
        # 2. Requires caller to provide 3 extra arguments
        #    that are functions of alpha anyway
        # 3. Can't be used for alpha < 0.5
        #    ainv = sqrt(2 * alpha - 1)
        # bbb = alpha - log(4)
        # ccc = alpha + ainv
        import warnings
        warnings.warn("The stdgamma function is deprecated; "
                      "use gammavariate() instead",
                      DeprecationWarning)
        return self.gammavariate(alpha, 1.0)



## -------------------- Gauss (faster alternative) --------------------

    def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """

        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma

## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way.

    def betavariate(self, alpha, beta):
        """Beta distribution.

        Conditions on the parameters are alpha > 0 and beta > 0
        (enforced by the underlying gammavariate() calls).
        Returned values range between 0 and 1.

        """

        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))

## -------------------- Pareto --------------------

    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495

        u = 1.0 - self.random()
        return 1.0 / pow(u, 1.0/alpha)

## -------------------- Weibull --------------------

    def weibullvariate(self, alpha, beta):
        """Weibull distribution.

        alpha is the scale parameter and beta is the shape parameter.

        """
        # Jain, pg. 499; bug fix courtesy Bill Arms

        u = 1.0 - self.random()
        return alpha * pow(-_log(u), 1.0/beta)
+
+## -------------------- test program --------------------
+
def _test_generator(n, funccall):
    """Evaluate the expression string funccall n times and print timing
    plus summary statistics (mean, stddev, min, max)."""
    import time
    print n, 'times', funccall
    # Compile once so eval() in the loop does not re-parse the string.
    code = compile(funccall, funccall, 'eval')
    sum = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = eval(code)
        sum = sum + x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = sum/n
    # Population standard deviation: sqrt(E[x^2] - E[x]^2).
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
              (avg, stddev, smallest, largest)
+
def _test(N=20000):
    """Smoke-test every module-level distribution N times, then verify
    that jumpahead(N) is equivalent to N sequential random() calls."""
    print 'TWOPI         =', TWOPI
    print 'LOG4          =', LOG4
    print 'NV_MAGICCONST =', NV_MAGICCONST
    print 'SG_MAGICCONST =', SG_MAGICCONST
    _test_generator(N, 'random()')
    _test_generator(N, 'normalvariate(0.0, 1.0)')
    _test_generator(N, 'lognormvariate(0.0, 1.0)')
    _test_generator(N, 'cunifvariate(0.0, 1.0)')
    _test_generator(N, 'expovariate(1.0)')
    _test_generator(N, 'vonmisesvariate(0.0, 1.0)')
    _test_generator(N, 'gammavariate(0.01, 1.0)')
    _test_generator(N, 'gammavariate(0.1, 1.0)')
    _test_generator(N, 'gammavariate(0.1, 2.0)')
    _test_generator(N, 'gammavariate(0.5, 1.0)')
    _test_generator(N, 'gammavariate(0.9, 1.0)')
    _test_generator(N, 'gammavariate(1.0, 1.0)')
    _test_generator(N, 'gammavariate(2.0, 1.0)')
    _test_generator(N, 'gammavariate(20.0, 1.0)')
    _test_generator(N, 'gammavariate(200.0, 1.0)')
    _test_generator(N, 'gauss(0.0, 1.0)')
    _test_generator(N, 'betavariate(3.0, 3.0)')
    _test_generator(N, 'paretovariate(1.0)')
    _test_generator(N, 'weibullvariate(1.0, 1.0)')

    # Test jumpahead.
    s = getstate()
    jumpahead(N)
    r1 = random()
    # now do it the slow way
    setstate(s)
    for i in range(N):
        random()
    r2 = random()
    if r1 != r2:
        raise ValueError("jumpahead test failed " + `(N, r1, r2)`)
+
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions are not threadsafe, and state
# is shared across all uses (both in the user's code and in the Python
# libraries), but that's fine for most programs and is easier for the
# casual user than making them instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
cunifvariate = _inst.cunifvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
stdgamma = _inst.stdgamma
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
# State management (and the obsolete pre-2.1 whseed interface) for the
# shared instance is exported too, so the module-level generator can be
# checkpointed and restored.
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
whseed = _inst.whseed

if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/re.py b/lib-python/2.2/re.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/re.py
@@ -0,0 +1,33 @@
+"""Minimal "re" compatibility wrapper"""
+
+# If your regexps don't work well under 2.0b1, you can switch
+# to the old engine ("pre") down below.
+#
+# To help us fix any remaining bugs in the new engine, please
+# report what went wrong.  You can either use the following web
+# page:
+#
+#    http://sourceforge.net/bugs/?group_id=5470
+#
+# or send a mail to SRE's author:
+#
+#    Fredrik Lundh <effbot at telia.com>
+#
+# Make sure to include the pattern, the string SRE failed to
+# match, and what result you expected.
+#
+# thanks /F
+#
+
# Select which implementation backs the "re" API: "sre" is the new
# unicode-aware engine; flip the assignment below to "pre" to fall back
# to the old 1.5.2 engine.
engine = "sre"
# engine = "pre"

if engine == "sre":
    # New unicode-aware engine
    from sre import *
    from sre import __all__
else:
    # Old 1.5.2 engine.  This one supports 8-bit strings only,
    # and will be removed in 2.0 final.
    from pre import *
    from pre import __all__
diff --git a/lib-python/2.2/reconvert.py b/lib-python/2.2/reconvert.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/reconvert.py
@@ -0,0 +1,192 @@
+#! /usr/bin/env python1.5
+
+r"""Convert old ("regex") regular expressions to new syntax ("re").
+
+When imported as a module, there are two functions, with their own
+strings:
+
+  convert(s, syntax=None) -- convert a regex regular expression to re syntax
+
+  quote(s) -- return a quoted string literal
+
+When used as a script, read a Python string literal (or any other
+expression evaluating to a string) from stdin, and write the
+translated expression to stdout as a string literal.  Unless stdout is
+a tty, no trailing \n is written to stdout.  This is done so that it
+can be used with Emacs C-U M-| (shell-command-on-region with argument
+which filters the region through the shell command).
+
+No attempt has been made at coding for performance.
+
+Translation table...
+
+    \(    (     (unless RE_NO_BK_PARENS set)
+    \)    )     (unless RE_NO_BK_PARENS set)
+    \|    |     (unless RE_NO_BK_VBAR set)
+    \<    \b    (not quite the same, but alla...)
+    \>    \b    (not quite the same, but alla...)
+    \`    \A
+    \'    \Z
+
+Not translated...
+
+    .
+    ^
+    $
+    *
+    +           (unless RE_BK_PLUS_QM set, then to \+)
+    ?           (unless RE_BK_PLUS_QM set, then to \?)
+    \
+    \b
+    \B
+    \w
+    \W
+    \1 ... \9
+
+Special cases...
+
+    Non-printable characters are always replaced by their 3-digit
+    escape code (except \t, \n, \r, which use mnemonic escapes)
+
+    Newline is turned into | when RE_NEWLINE_OR is set
+
+XXX To be done...
+
+    [...]     (different treatment of backslashed items?)
+    [^...]    (different treatment of backslashed items?)
+    ^ $ * + ? (in some error contexts these are probably treated differently)
+    \vDD  \DD (in the regex docs but only works when RE_ANSI_HEX set)
+
+"""
+
+
import warnings
# Importing the obsolete "regex" module below triggers a DeprecationWarning;
# silence it for this module only.  append=1 adds the filter at the end of
# the list so existing filters keep precedence.
warnings.filterwarnings("ignore", ".* regex .*", DeprecationWarning, __name__,
                        append=1)
+
+import regex
+from regex_syntax import * # RE_*
+
+__all__ = ["convert","quote"]
+
# Default translation table: maps old "regex" tokens to their "re"
# equivalents (both directions for grouping/alternation, since which form
# is the operator depends on the syntax flags).  convert() copies this
# table and deletes/overrides entries according to the active flags.
mastertable = {
    r'\<': r'\b',
    r'\>': r'\b',
    r'\`': r'\A',
    r'\'': r'\Z',
    r'\(': '(',
    r'\)': ')',
    r'\|': '|',
    '(': r'\(',
    ')': r'\)',
    '|': r'\|',
    '\t': r'\t',
    '\n': r'\n',
    '\r': r'\r',
}
+
+
def convert(s, syntax=None):
    """Translate an old-style "regex" pattern s into "re" syntax.

    s is the pattern as an actual string object -- quotes already
    stripped and escape processing already done (e.g. by eval()), just
    as it would be passed to regex.compile().  The return value is
    likewise a plain pattern string suitable for re.compile(); no
    string quotes are added (use quote() below, or repr(), for that).

    The optional syntax argument is an integer mask of regex_syntax
    flag bits, as accepted by regex.set_syntax().  When omitted or
    None, the current mask from regex.get_syntax() is used -- which is
    0 by default.

    The translation is best-effort only: no deep syntactic analysis is
    performed, so borderline constructs (e.g. 'x*?') may come out
    wrong.

    """
    # Specialize a private copy of the default table for the syntax flags.
    table = mastertable.copy()
    if syntax is None:
        syntax = regex.get_syntax()
    if syntax & RE_NO_BK_PARENS:
        # Plain parens already group in both dialects; drop the rewrites.
        del table[r'\('], table[r'\)']
        del table['('], table[')']
    if syntax & RE_NO_BK_VBAR:
        del table[r'\|']
        del table['|']
    if syntax & RE_BK_PLUS_QM:
        # Backslashed + and ? are the operators; bare ones are literals.
        table['+'] = r'\+'
        table['?'] = r'\?'
        table[r'\+'] = '+'
        table[r'\?'] = '?'
    if syntax & RE_NEWLINE_OR:
        table['\n'] = '|'

    pieces = []
    pos = 0
    limit = len(s)
    while pos < limit:
        ch = s[pos]
        pos = pos + 1
        if ch == '\\':
            # A backslash escape is a two-character token; translate the
            # pair as a unit (a trailing lone backslash raises IndexError,
            # matching the original behavior).
            pair = '\\' + s[pos]
            pos = pos + 1
            pieces.append(table.get(pair, pair))
        else:
            pieces.append(table.get(ch, ch))
    return "".join(pieces)
+
+
def quote(s, quote=None):
    """Convert a string object to a quoted string literal.

    This is similar to repr() but will return a "raw" string (r'...'
    or r"...") when the string contains backslashes, instead of
    doubling all backslashes.  The resulting string does *not* always
    evaluate to the same string as the original; however it will do
    just the right thing when passed into re.compile().

    The optional second argument forces the string quote; it must be
    a single character which is a valid Python string quote.

    """
    if quote is None:
        q = "'"
        # Bug fix: the alternate quote must be the *double* quote.  The
        # original code assigned "'" here, so q == altq and the switch
        # below could never fire, contradicting the r"..." promise in
        # the docstring.
        altq = '"'
        if q in s and altq not in s:
            q = altq
    else:
        assert quote in ('"', "'")
        q = quote
    res = q
    for c in s:
        if c == q: c = '\\' + c
        # Non-printables become 3-digit octal escapes.
        elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
        res = res + c
    res = res + q
    # Any backslash in the result means a raw literal is the right form.
    if '\\' in res:
        res = 'r' + res
    return res
+
+
def main():
    """Main program -- called when run as a script."""
    import sys
    # NOTE(review): eval() on raw stdin executes arbitrary expressions;
    # acceptable for this developer filter tool, but never feed it
    # untrusted input.
    s = eval(sys.stdin.read())
    sys.stdout.write(quote(convert(s)))
    # Only append a newline when writing to a terminal, so the output can
    # be captured verbatim (e.g. by Emacs shell-command-on-region).
    if sys.stdout.isatty():
        sys.stdout.write("\n")
diff --git a/lib-python/2.2/regex_syntax.py b/lib-python/2.2/regex_syntax.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/regex_syntax.py
@@ -0,0 +1,53 @@
+"""Constants for selecting regexp syntaxes for the obsolete regex module.
+
+This module is only for backward compatibility.  "regex" has now
+been replaced by the new regular expression module, "re".
+
+These bits are passed to regex.set_syntax() to choose among
+alternative regexp syntaxes.
+"""
+
# 1 means plain parentheses serve as grouping, and backslash
#   parentheses are needed for literal searching.
# 0 means backslash-parentheses are grouping, and plain parentheses
#   are for literal searching.
RE_NO_BK_PARENS = 1

# 1 means plain | serves as the "or"-operator, and \| is a literal.
# 0 means \| serves as the "or"-operator, and | is a literal.
RE_NO_BK_VBAR = 2

# 0 means plain + or ? serves as an operator, and \+, \? are literals.
# 1 means \+, \? are operators and plain +, ? are literals.
RE_BK_PLUS_QM = 4

# 1 means | binds tighter than ^ or $.
# 0 means the contrary.
RE_TIGHT_VBAR = 8

# 1 means treat \n as an _OR operator
# 0 means treat it as a normal character
RE_NEWLINE_OR = 16

# 0 means that special characters (such as *, ^, and $) always have
#   their special meaning regardless of the surrounding context.
# 1 means that special characters may act as normal characters in some
#   contexts.  Specifically, this applies to:
#       ^ - only special at the beginning, or after ( or |
#       $ - only special at the end, or before ) or |
#       *, +, ? - only special when not after the beginning, (, or |
RE_CONTEXT_INDEP_OPS = 32

# ANSI sequences (\n etc) and \xhh
RE_ANSI_HEX = 64

# No GNU extensions
RE_NO_GNU_EXTENSIONS = 128

# Now define combinations of bits for the standard possibilities.
# These correspond to the syntaxes of the classic Unix tools.
RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
RE_SYNTAX_EMACS = 0

# (Python's obsolete "regexp" module used a syntax similar to awk.)
diff --git a/lib-python/2.2/regsub.py b/lib-python/2.2/regsub.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/regsub.py
@@ -0,0 +1,198 @@
+"""Regexp-based split and replace using the obsolete regex module.
+
+This module is only for backward compatibility.  These operations
+are now provided by the new regular expression module, "re".
+
+sub(pat, repl, str):        replace first occurrence of pattern in string
+gsub(pat, repl, str):       replace all occurrences of pattern in string
+split(str, pat, maxsplit):  split string using pattern as delimiter
+splitx(str, pat, maxsplit): split string using pattern as delimiter plus
+                            return delimiters
+"""
+
+import warnings
+warnings.warn("the regsub module is deprecated; please use re.sub()",
+              DeprecationWarning)
+
+# Ignore further deprecation warnings about this module
+warnings.filterwarnings("ignore", "", DeprecationWarning, __name__)
+
+import regex
+
+__all__ = ["sub","gsub","split","splitx","capwords"]
+
+# Replace first occurrence of pattern pat in string str by replacement
+# repl.  If the pattern isn't found, the string is returned unchanged.
+# The replacement may contain references \digit to subpatterns and
+# escaped backslashes.  The pattern may be a string or an already
+# compiled pattern.
+
def sub(pat, repl, str):
    """Replace the first occurrence of pattern pat in str by repl.

    If the pattern is not found, str is returned unchanged.  repl may
    contain \digit back-references and escaped backslashes; pat may be
    a string or an already compiled pattern.
    """
    prog = compile(pat)
    if prog.search(str) < 0:
        # No match: hand back the input untouched.
        return str
    regs = prog.regs
    start, stop = regs[0]
    return str[:start] + expand(repl, regs, str) + str[stop:]
+
+
+# Replace all (non-overlapping) occurrences of pattern pat in string
+# str by replacement repl.  The same rules as for sub() apply.
+# Empty matches for the pattern are replaced only when not adjacent to
+# a previous match, so e.g. gsub('', '-', 'abc') returns '-a-b-c-'.
+
def gsub(pat, repl, str):
    """Replace all non-overlapping occurrences of pat in str by repl.

    The same replacement rules as for sub() apply.  Empty matches are
    replaced only when not adjacent to a previous match, so e.g.
    gsub('', '-', 'abc') returns '-a-b-c-'.
    """
    prog = compile(pat)
    new = ''
    start = 0
    first = 1
    while prog.search(str, start) >= 0:
        regs = prog.regs
        a, b = regs[0]
        if a == b == start and not first:
            # Empty match immediately after the previous one: retry one
            # position further to avoid looping forever on the same spot.
            if start >= len(str) or prog.search(str, start+1) < 0:
                break
            regs = prog.regs
            a, b = regs[0]
        new = new + str[start:a] + expand(repl, regs, str)
        start = b
        first = 0
    new = new + str[start:]          # append the unmatched tail
    return new
+
+
+# Split string str in fields separated by delimiters matching pattern
+# pat.  Only non-empty matches for the pattern are considered, so e.g.
+# split('abc', '') returns ['abc'].
+# The optional 3rd argument sets the number of splits that are performed.
+
def split(str, pat, maxsplit = 0):
    """Split str into fields separated by delimiters matching pat.

    Only non-empty matches of the pattern are considered, so e.g.
    split('abc', '') returns ['abc'].  maxsplit (default 0 = no limit)
    caps the number of splits performed.
    """
    return intsplit(str, pat, maxsplit, 0)
+
+# Split string str in fields separated by delimiters matching pattern
+# pat.  Only non-empty matches for the pattern are considered, so e.g.
+# split('abc', '') returns ['abc']. The delimiters are also included
+# in the list.
+# The optional 3rd argument sets the number of splits that are performed.
+
+
def splitx(str, pat, maxsplit = 0):
    """Like split(), but the matched delimiters are kept in the result.

    Fields and delimiters alternate, with fields at even indices.
    maxsplit (default 0 = no limit) caps the number of splits performed.
    """
    return intsplit(str, pat, maxsplit, 1)
+
+# Internal function used to implement split() and splitx().
+
def intsplit(str, pat, maxsplit, retain):
    """Internal worker for split() and splitx().

    retain selects whether the delimiter matches themselves are kept in
    the returned list.
    """
    prog = compile(pat)
    res = []
    start = next = 0
    splitcount = 0
    while prog.search(str, next) >= 0:
        regs = prog.regs
        a, b = regs[0]
        if a == b:
            # Empty match: not a delimiter -- resume the scan one
            # position further (start is left alone, so the character
            # stays in the current field).
            next = next + 1
            if next >= len(str):
                break
        else:
            res.append(str[start:a])
            if retain:
                res.append(str[a:b])
            start = next = b
            splitcount = splitcount + 1
            if (maxsplit and (splitcount >= maxsplit)):
                break
    res.append(str[start:])          # final field (possibly empty)
    return res
+
+
+# Capitalize words split using a pattern
+
def capwords(str, pat='[^a-zA-Z0-9_]+'):
    """Capitalize each word of str; words are separated by matches of pat."""
    pieces = splitx(str, pat)
    # splitx alternates words (even indices) with delimiters (odd
    # indices); capitalize only the words, then reassemble.
    idx = 0
    while idx < len(pieces):
        pieces[idx] = pieces[idx].capitalize()
        idx = idx + 2
    return "".join(pieces)
+
+
+# Internal subroutines:
+# compile(pat): compile a pattern, caching already compiled patterns
+# expand(repl, regs, str): expand \digit escapes in replacement string
+
+
+# Manage a cache of compiled regular expressions.
+#
+# If the pattern is a string a compiled version of it is returned.  If
+# the pattern has been used before we return an already compiled
+# version from the cache; otherwise we compile it now and save the
+# compiled version in the cache, along with the syntax it was compiled
+# with.  Instead of a string, a compiled regular expression can also
+# be passed.
+
# Cache of compiled patterns.  Keys are (pattern, syntax) pairs because
# the meaning of a pattern depends on the syntax selected via
# regex.set_syntax().
cache = {}

def compile(pat):
    """Return a compiled regex for pat, using (and filling) the cache.

    If pat is not a string it is assumed to be an already compiled
    pattern and is returned unchanged.
    """
    if type(pat) != type(''):
        return pat              # Assume it is a compiled regex
    key = (pat, regex.get_syntax())
    if cache.has_key(key):
        prog = cache[key]       # Get it from the cache
    else:
        prog = cache[key] = regex.compile(pat)
    return prog
+
+
def clear_cache():
    """Forget all previously compiled patterns (rebinds the cache dict)."""
    global cache
    cache = {}
+
+
+# Expand \digit in the replacement.
+# Each occurrence of \digit is replaced by the substring of str
+# indicated by regs[digit].  To include a literal \ in the
+# replacement, double it; other \ escapes are left unchanged (i.e.
+# the \ and the following character are both copied).
+
def expand(repl, regs, str):
    """Expand \digit escapes in the replacement string repl.

    Each \digit is replaced by the substring of str delimited by
    regs[digit].  A doubled backslash yields a single backslash; any
    other escape (and a lone trailing backslash) is copied through
    unchanged.
    """
    if '\\' not in repl:
        return repl              # fast path: nothing to expand
    pieces = []
    pos = 0
    size = len(repl)
    while pos < size:
        ch = repl[pos]
        pos = pos + 1
        if ch != '\\' or pos >= size:
            # Ordinary character, or a lone backslash at the very end.
            pieces.append(ch)
        else:
            esc = repl[pos]
            pos = pos + 1
            if '0' <= esc <= '9':
                start, stop = regs[ord(esc) - ord('0')]
                pieces.append(str[start:stop])
            elif esc == '\\':
                pieces.append(esc)
            else:
                # Unknown escape: keep both the backslash and the char.
                pieces.append('\\' + esc)
    return ''.join(pieces)
+
+
+# Test program, reads sequences "pat repl str" from stdin.
+# Optional argument specifies pattern used to split lines.
+
def test():
    """Interactive test driver: read "pat repl str" triples from stdin.

    An optional command-line argument overrides the pattern used to
    split each input line into the three fields.
    """
    import sys
    if sys.argv[1:]:
        delpat = sys.argv[1]
    else:
        delpat = '[ \t\n]+'
    while 1:
        if sys.stdin.isatty(): sys.stderr.write('--> ')
        line = sys.stdin.readline()
        if not line: break                       # EOF
        if line[-1] == '\n': line = line[:-1]    # strip trailing newline
        fields = split(line, delpat)
        if len(fields) != 3:
            print 'Sorry, not three fields'
            print 'split:', `fields`
            continue
        # NOTE(review): the line is split a second time here; reusing
        # `fields` would avoid the duplicate work.
        [pat, repl, str] = split(line, delpat)
        print 'sub :', `sub(pat, repl, str)`
        print 'gsub:', `gsub(pat, repl, str)`
diff --git a/lib-python/2.2/repr.py b/lib-python/2.2/repr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/repr.py
@@ -0,0 +1,95 @@
+"""Redo the `...` (representation) but with limits on most sizes."""
+
+__all__ = ["Repr","repr"]
+
class Repr:
    """Produce `...`-style representations with limits on most sizes.

    Each repr_<typename> method handles one type; anything else falls
    back to a truncated built-in repr.  Adjust the max* attributes on
    an instance to customize the limits.
    """
    def __init__(self):
        # Per-kind truncation limits (element counts for containers,
        # character counts for strings/longs/other).
        self.maxlevel = 6
        self.maxtuple = 6
        self.maxlist = 6
        self.maxdict = 4
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 20
    def repr(self, x):
        # Public entry point: depth-limited representation of x.
        return self.repr1(x, self.maxlevel)
    def repr1(self, x, level):
        # Dispatch on the type name to repr_<typename> if such a method
        # exists (spaces in the name become underscores).
        typename = type(x).__name__
        if ' ' in typename:
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            # No handler: truncate the built-in repr to maxother chars,
            # keeping both ends around a '...' ellipsis.
            s = `x`
            if len(s) > self.maxother:
                i = max(0, (self.maxother-3)//2)
                j = max(0, self.maxother-3-i)
                s = s[:i] + '...' + s[len(s)-j:]
            return s
    def repr_tuple(self, x, level):
        n = len(x)
        if n == 0: return '()'
        if level <= 0: return '(...)'    # recursion depth exhausted
        s = ''
        for i in range(min(n, self.maxtuple)):
            if s: s = s + ', '
            s = s + self.repr1(x[i], level-1)
        if n > self.maxtuple: s = s + ', ...'
        elif n == 1: s = s + ','         # single-element tuple syntax
        return '(' + s + ')'
    def repr_list(self, x, level):
        n = len(x)
        if n == 0: return '[]'
        if level <= 0: return '[...]'
        s = ''
        for i in range(min(n, self.maxlist)):
            if s: s = s + ', '
            s = s + self.repr1(x[i], level-1)
        if n > self.maxlist: s = s + ', ...'
        return '[' + s + ']'
    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        s = ''
        # Sort the keys so the output is deterministic.
        keys = x.keys()
        keys.sort()
        for i in range(min(n, self.maxdict)):
            if s: s = s + ', '
            key = keys[i]
            s = s + self.repr1(key, level-1)
            s = s + ': ' + self.repr1(x[key], level-1)
        if n > self.maxdict: s = s + ', ...'
        return '{' + s + '}'
    def repr_str(self, x, level):
        s = `x[:self.maxstring]`
        if len(s) > self.maxstring:
            # The repr adds quotes/escapes, so it may still be too long;
            # re-repr a shortened copy and splice in an ellipsis.
            # NOTE(review): i and j are computed from maxstring but used
            # to slice the new repr, so the split can be slightly
            # off-balance, though within the length limit.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = `x[:i] + x[len(x)-j:]`
            s = s[:i] + '...' + s[len(s)-j:]
        return s
    def repr_long(self, x, level):
        s = `x` # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
    def repr_instance(self, x, level):
        try:
            s = `x`
            # Bugs in x.__repr__() can cause arbitrary
            # exceptions -- then make up something
        except:
            return '<' + x.__class__.__name__ + ' instance at ' + \
                      hex(id(x))[2:] + '>'
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
+
# Module-level convenience instance, and a drop-in replacement for the
# built-in repr() with default limits applied.
aRepr = Repr()
repr = aRepr.repr
diff --git a/lib-python/2.2/rexec.py b/lib-python/2.2/rexec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/rexec.py
@@ -0,0 +1,592 @@
+"""Restricted execution facilities.
+
+The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
+r_import(), which correspond roughly to the built-in operations
+exec, eval(), execfile() and import, but executing the code in an
+environment that only exposes those built-in operations that are
+deemed safe.  To this end, a modest collection of 'fake' modules is
+created which mimics the standard modules by the same names.  It is a
+policy decision which built-in modules and operations are made
+available; this module provides a reasonable default, but derived
+classes can change the policies e.g. by overriding or extending class
+variables like ok_builtin_modules or methods like make_sys().
+
+XXX To do:
+- r_open should allow writing tmp dir
+- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
+
+"""
+
+
+import sys
+import __builtin__
+import os
+import ihooks
+import imp
+
+__all__ = ["RExec"]
+
class FileBase:
    """Shared policy base: the file methods considered safe to expose
    to restricted code (note: close is deliberately absent)."""

    ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
            'readlines', 'seek', 'tell', 'write', 'writelines')
+
+
class FileWrapper(FileBase):
    """Wrap a real file object, exposing only the methods deemed safe.

    close() merely flushes, so restricted code cannot close the
    underlying file.
    """

    # XXX This is just like a Bastion -- should use that!

    def __init__(self, f):
        self.f = f
        # Copy over each allowed bound method that we don't already
        # define ourselves.
        for name in self.ok_file_methods:
            if hasattr(self, name):
                continue
            if hasattr(f, name):
                setattr(self, name, getattr(f, name))

    def close(self):
        # Flush pending output but leave the real file open.
        self.flush()
+
+
# Source template for FileDelegate's generated methods: each one
# forwards the call to getattr(self.mod, self.name).<method>(*args).
TEMPLATE = """
def %s(self, *args):
        return apply(getattr(self.mod, self.name).%s, args)
"""
+
class FileDelegate(FileBase):
    """File-like object that forwards every operation to the object
    currently bound as getattr(self.mod, self.name)."""

    def __init__(self, mod, name):
        self.mod = mod        # object (e.g. a sys module) holding the stream
        self.name = name      # attribute name of the stream ('stdin', ...)

    # Generate one forwarding method from TEMPLATE for each safe file
    # method, plus close().
    for m in FileBase.ok_file_methods + ('close',):
        exec TEMPLATE % (m, m)
+
+
class RHooks(ihooks.Hooks):
    """Import hooks that delegate all policy decisions to an RExec."""

    def __init__(self, *args):
        # Hacks to support both old and new interfaces:
        # old interface was RHooks(rexec[, verbose])
        # new interface is RHooks([verbose])
        verbose = 0
        rexec = None
        if args and type(args[-1]) == type(0):
            verbose = args[-1]
            args = args[:-1]
        if args and hasattr(args[0], '__class__'):
            rexec = args[0]
            args = args[1:]
        if args:
            raise TypeError, "too many arguments"
        ihooks.Hooks.__init__(self, verbose)
        self.rexec = rexec

    def set_rexec(self, rexec):
        # Called by RExec instance to complete initialization
        self.rexec = rexec

    def get_suffixes(self):
        return self.rexec.get_suffixes()

    def is_builtin(self, name):
        return self.rexec.is_builtin(name)

    def init_builtin(self, name):
        # Import the real built-in module, then hand back a copy made
        # by the RExec policy object.
        m = __import__(name)
        return self.rexec.copy_except(m, ())

    # These loaders must never be reached in restricted mode.
    def init_frozen(self, name): raise SystemError, "don't use this"
    def load_source(self, *args): raise SystemError, "don't use this"
    def load_compiled(self, *args): raise SystemError, "don't use this"
    def load_package(self, *args): raise SystemError, "don't use this"

    def load_dynamic(self, name, filename, file):
        return self.rexec.load_dynamic(name, filename, file)

    def add_module(self, name):
        return self.rexec.add_module(name)

    def modules_dict(self):
        return self.rexec.modules

    def default_path(self):
        return self.rexec.modules['sys'].path
+
+
# XXX Backwards compatibility -- older code imported these names from
# this module; they are plain aliases for the ihooks implementations.
RModuleLoader = ihooks.FancyModuleLoader
RModuleImporter = ihooks.ModuleImporter
+
+
class RExec(ihooks._Verbose):
    """Basic restricted execution framework.

    Code executed in this restricted environment will only have access to
    modules and functions that are deemed safe; you can subclass RExec to
    add or remove capabilities as desired.

    The RExec class can prevent code from performing unsafe operations like
    reading or writing disk files, or using TCP/IP sockets.  However, it does
    not protect against code using extremely large amounts of memory or
    processor time.

    """

    ok_path = tuple(sys.path)           # That's a policy decision

    # Built-in modules considered safe to expose to restricted code.
    ok_builtin_modules = ('audioop', 'array', 'binascii',
                          'cmath', 'errno', 'imageop',
                          'marshal', 'math', 'md5', 'operator',
                          'parser', 'regex', 'pcre', 'rotor', 'select',
                          'sha', '_sre', 'strop', 'struct', 'time')

    # Names from the platform os module considered safe to expose.
    ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
                      'stat', 'times', 'uname', 'getpid', 'getppid',
                      'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')

    # Names from the sys module considered safe to expose.
    ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
                    'getrefcount', 'hexversion', 'maxint', 'maxunicode',
                    'platform', 'ps1', 'ps2', 'version', 'version_info')

    # Built-in names removed from the restricted __builtin__.
    nok_builtin_names = ('open', 'file', 'reload', '__import__')

    # Module file types that may be imported (no byte-code files).
    ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)

    def __init__(self, hooks = None, verbose = 0):
        """Returns an instance of the RExec class.

        The hooks parameter is an instance of the RHooks class or a subclass
        of it.  If it is omitted or None, the default RHooks class is
        instantiated.

        Whenever the RExec module searches for a module (even a built-in one)
        or reads a module's code, it doesn't actually go out to the file
        system itself.  Rather, it calls methods of an RHooks instance that
        was passed to or created by its constructor.  (Actually, the RExec
        object doesn't make these calls --- they are made by a module loader
        object that's part of the RExec object.  This allows another level of
        flexibility, which can be useful when changing the mechanics of
        import within the restricted environment.)

        By providing an alternate RHooks object, we can control the file
        system accesses made to import a module, without changing the
        actual algorithm that controls the order in which those accesses are
        made.  For instance, we could substitute an RHooks object that
        passes all filesystem requests to a file server elsewhere, via some
        RPC mechanism such as ILU.  Grail's applet loader uses this to support
        importing applets from a URL for a directory.

        If the verbose parameter is true, additional debugging output may be
        sent to standard output.

        """

        # Unconditional: this framework is known to be insecure, so
        # instantiation is disabled; everything below is unreachable.
        raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"

        ihooks._Verbose.__init__(self, verbose)
        # XXX There's a circular reference here:
        self.hooks = hooks or RHooks(verbose)
        self.hooks.set_rexec(self)
        self.modules = {}
        self.ok_dynamic_modules = self.ok_builtin_modules
        list = []
        for mname in self.ok_builtin_modules:
            if mname in sys.builtin_module_names:
                list.append(mname)
        self.ok_builtin_modules = tuple(list)
        self.set_trusted_path()
        self.make_builtin()
        self.make_initial_modules()
        # make_sys must be last because it adds the already created
        # modules to its builtin_module_names
        self.make_sys()
        self.loader = RModuleLoader(self.hooks, verbose)
        self.importer = RModuleImporter(self.loader, verbose)

    def set_trusted_path(self):
        # Set the path from which dynamic modules may be loaded.
        # Those dynamic modules must also occur in ok_builtin_modules
        self.trusted_path = filter(os.path.isabs, sys.path)

    def load_dynamic(self, name, filename, file):
        """Load an allowed dynamic module, sharing any copy that is
        already present in the real sys.modules."""
        if name not in self.ok_dynamic_modules:
            raise ImportError, "untrusted dynamic module: %s" % name
        if sys.modules.has_key(name):
            src = sys.modules[name]
        else:
            src = imp.load_dynamic(name, filename, file)
        dst = self.copy_except(src, [])
        return dst

    def make_initial_modules(self):
        """Create the initial set of restricted modules."""
        self.make_main()
        self.make_osname()

    # Helpers for RHooks

    def get_suffixes(self):
        """Return the imp suffix entries whose file type is allowed."""
        return [item   # (suff, mode, type)
                for item in imp.get_suffixes()
                if item[2] in self.ok_file_types]

    def is_builtin(self, mname):
        """Return true if mname is an allowed built-in module."""
        return mname in self.ok_builtin_modules

    # The make_* methods create specific built-in modules

    def make_builtin(self):
        """Create the restricted __builtin__, replacing unsafe names
        with the restricted r_* equivalents."""
        m = self.copy_except(__builtin__, self.nok_builtin_names)
        m.__import__ = self.r_import
        m.reload = self.r_reload
        m.open = m.file = self.r_open

    def make_main(self):
        """Create the restricted __main__ module."""
        m = self.add_module('__main__')

    def make_osname(self):
        """Create a restricted copy of the platform os module (posix,
        nt, ...) exposing only ok_posix_names plus a snapshot of the
        environment."""
        osname = os.name
        src = __import__(osname)
        dst = self.copy_only(src, self.ok_posix_names)
        dst.environ = e = {}
        for key, value in os.environ.items():
            e[key] = value

    def make_sys(self):
        """Create the restricted sys module."""
        m = self.copy_only(sys, self.ok_sys_names)
        m.modules = self.modules
        m.argv = ['RESTRICTED']
        m.path = map(None, self.ok_path)    # mutable copy of ok_path
        m.exc_info = self.r_exc_info
        m = self.modules['sys']
        l = self.modules.keys() + list(self.ok_builtin_modules)
        l.sort()
        m.builtin_module_names = tuple(l)

    # The copy_* methods copy existing modules with some changes

    def copy_except(self, src, exceptions):
        """Copy module src, then delete the names listed in exceptions."""
        dst = self.copy_none(src)
        for name in dir(src):
            setattr(dst, name, getattr(src, name))
        for name in exceptions:
            try:
                delattr(dst, name)
            except AttributeError:
                pass
        return dst

    def copy_only(self, src, names):
        """Copy module src, keeping only the attributes listed in names."""
        dst = self.copy_none(src)
        for name in names:
            try:
                value = getattr(src, name)
            except AttributeError:
                continue
            setattr(dst, name, value)
        return dst

    def copy_none(self, src):
        """Create an empty restricted copy of src (docstring only)."""
        m = self.add_module(src.__name__)
        m.__doc__ = src.__doc__
        return m

    # Add a module -- return an existing module or create one

    def add_module(self, mname):
        """Return the restricted module mname, creating it on demand and
        wiring in the restricted __builtin__."""
        m = self.modules.get(mname)
        if m is None:
            self.modules[mname] = m = self.hooks.new_module(mname)
        m.__builtins__ = self.modules['__builtin__']
        return m

    # The r* methods are public interfaces

    def r_exec(self, code):
        """Execute code within a restricted environment.

        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.

        """
        m = self.add_module('__main__')
        exec code in m.__dict__

    def r_eval(self, code):
        """Evaluate code within a restricted environment.

        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module.  The value of the
        expression or code object will be returned.

        """
        m = self.add_module('__main__')
        return eval(code, m.__dict__)

    def r_execfile(self, file):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.

        """
        m = self.add_module('__main__')
        execfile(file, m.__dict__)

    def r_import(self, mname, globals={}, locals={}, fromlist=[]):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.import_module(mname, globals, locals, fromlist)

    def r_reload(self, m):
        """Reload the module object, re-parsing and re-initializing it.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.reload(m)

    def r_unload(self, m):
        """Unload the module.

        Removes it from the restricted environment's sys.modules dictionary.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.unload(m)

    # The s_* methods are similar but also swap std{in,out,err}

    def make_delegate_files(self):
        """Create the delegate/wrapper stand-ins for the std streams."""
        s = self.modules['sys']
        self.delegate_stdin = FileDelegate(s, 'stdin')
        self.delegate_stdout = FileDelegate(s, 'stdout')
        self.delegate_stderr = FileDelegate(s, 'stderr')
        self.restricted_stdin = FileWrapper(sys.stdin)
        self.restricted_stdout = FileWrapper(sys.stdout)
        self.restricted_stderr = FileWrapper(sys.stderr)

    def set_files(self):
        """Install the swapped streams: wrappers on the restricted sys,
        delegates on the real sys."""
        if not hasattr(self, 'save_stdin'):
            self.save_files()
        if not hasattr(self, 'delegate_stdin'):
            self.make_delegate_files()
        s = self.modules['sys']
        s.stdin = self.restricted_stdin
        s.stdout = self.restricted_stdout
        s.stderr = self.restricted_stderr
        sys.stdin = self.delegate_stdin
        sys.stdout = self.delegate_stdout
        sys.stderr = self.delegate_stderr

    def reset_files(self):
        """Restore the outer streams and re-capture whatever the
        restricted code currently has bound as its std streams."""
        self.restore_files()
        s = self.modules['sys']
        self.restricted_stdin = s.stdin
        self.restricted_stdout = s.stdout
        self.restricted_stderr = s.stderr


    def save_files(self):
        """Remember the real sys.std{in,out,err} for later restoration."""
        self.save_stdin = sys.stdin
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr

    def restore_files(self):
        """Put the previously saved streams back on the real sys."""
        sys.stdin = self.save_stdin
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr

    def s_apply(self, func, args=(), kw=None):
        """Call func(*args, **kw) with the swapped standard streams
        installed, restoring them afterwards."""
        self.save_files()
        try:
            self.set_files()
            if kw:
                r = apply(func, args, kw)
            else:
                r = apply(func, args)
        finally:
            self.restore_files()
        return r

    def s_exec(self, *args):
        """Execute code within a restricted environment.

        Similar to the r_exec() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.

        """
        return self.s_apply(self.r_exec, args)

    def s_eval(self, *args):
        """Evaluate code within a restricted environment.

        Similar to the r_eval() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module.  The value of the
        expression or code object will be returned.

        """
        return self.s_apply(self.r_eval, args)

    def s_execfile(self, *args):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.

        Similar to the r_execfile() method, but the code will be granted
        access to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        """
        return self.s_apply(self.r_execfile, args)

    def s_import(self, *args):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_import() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_import, args)

    def s_reload(self, *args):
        """Reload the module object, re-parsing and re-initializing it.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_reload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_reload, args)

    def s_unload(self, *args):
        """Unload the module.

        Removes it from the restricted environment's sys.modules dictionary.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_unload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_unload, args)

    # Restricted open(...)

    def r_open(self, file, mode='r', buf=-1):
        """Method called when open() is called in the restricted environment.

        The arguments are identical to those of the open() function, and a
        file object (or a class instance compatible with file objects)
        should be returned.  RExec's default behaviour is allow opening
        any file for reading, but forbidding any attempt to write a file.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        if mode not in ('r', 'rb'):
            raise IOError, "can't open files for writing in restricted mode"
        return open(file, mode, buf)

    # Restricted version of sys.exc_info()

    def r_exc_info(self):
        """Return sys.exc_info() with the traceback suppressed."""
        ty, va, tr = sys.exc_info()
        tr = None
        return ty, va, tr
+
+
def test():
    """Command-line test driver: run a script (or an interactive REPL)
    inside an RExec environment.

    Options: -v increases verbosity, -t NAME adds a trusted built-in
    module; remaining arguments are the script and its argv.
    """
    import getopt, traceback
    opts, args = getopt.getopt(sys.argv[1:], 'vt:')
    verbose = 0
    trusted = []
    for o, a in opts:
        if o == '-v':
            verbose = verbose+1
        if o == '-t':
            trusted.append(a)
    r = RExec(verbose=verbose)
    if trusted:
        r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
    if args:
        r.modules['sys'].argv = args
        r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
    else:
        r.modules['sys'].path.insert(0, "")
    fp = sys.stdin
    if args and args[0] != '-':
        try:
            fp = open(args[0])
        except IOError, msg:
            print "%s: can't open file %s" % (sys.argv[0], `args[0]`)
            return 1
    if fp.isatty():
        # Interactive: emulate the standard REPL, executing each
        # statement via s_exec.
        print "*** RESTRICTED *** Python", sys.version
        print 'Type "help", "copyright", "credits" or "license" ' \
              'for more information.'

        while 1:
            try:
                try:
                    s = raw_input('>>> ')
                except EOFError:
                    print
                    break
                if s and s[0] != '#':
                    s = s + '\n'
                    c = compile(s, '<stdin>', 'single')
                    r.s_exec(c)
            except SystemExit, n:
                return n
            except:
                traceback.print_exc()
    else:
        # Non-interactive: compile and run the whole file at once.
        text = fp.read()
        fp.close()
        c = compile(text, fp.name, 'exec')
        try:
            r.s_exec(c)
        except SystemExit, n:
            return n
        except:
            traceback.print_exc()
            return 1
+
+
+if __name__ == '__main__':
+    sys.exit(test())
diff --git a/lib-python/2.2/rfc822.py b/lib-python/2.2/rfc822.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/rfc822.py
@@ -0,0 +1,1010 @@
+"""RFC 2822 message manipulation.
+
+Note: This is only a very rough sketch of a full RFC-822 parser; in particular
+the tokenizing of addresses does not adhere to all the quoting rules.
+
+Note: RFC 2822 is a long awaited update to RFC 822.  This module should
+conform to RFC 2822, and is thus mis-named (it's not worth renaming it).  Some
+effort at RFC 2822 updates have been made, but a thorough audit has not been
+performed.  Consider any RFC 2822 non-conformance to be a bug.
+
+    RFC 2822: http://www.faqs.org/rfcs/rfc2822.html
+    RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete)
+
+Directions for use:
+
+To create a Message object: first open a file, e.g.:
+
+  fp = open(file, 'r')
+
+You can use any other legal way of getting an open file object, e.g. use
+sys.stdin or call os.popen().  Then pass the open file object to the Message()
+constructor:
+
+  m = Message(fp)
+
+This class can work with any input object that supports a readline method.  If
+the input object has seek and tell capability, the rewindbody method will
+work; also illegal lines will be pushed back onto the input stream.  If the
+input object lacks seek but has an `unread' method that can push back a line
+of input, Message will use that to push back illegal lines.  Thus this class
+can be used to parse messages coming from a buffered stream.
+
+The optional `seekable' argument is provided as a workaround for certain stdio
+libraries in which tell() discards buffered data before discovering that the
+lseek() system call doesn't work.  For maximum portability, you should set the
+seekable argument to zero to prevent that initial \code{tell} when passing in
+an unseekable object such as a file object created from a socket object.  If
+it is 1 on entry -- which it is by default -- the tell() method of the open
+file object is called once; if this raises an exception, seekable is reset to
+0.  For other nonzero values of seekable, this test is not made.
+
+To get the text of a particular header there are several methods:
+
+  str = m.getheader(name)
+  str = m.getrawheader(name)
+
+where name is the name of the header, e.g. 'Subject'.  The difference is that
+getheader() strips the leading and trailing whitespace, while getrawheader()
+doesn't.  Both functions retain embedded whitespace (including newlines)
+exactly as they are specified in the header, and leave the case of the text
+unchanged.
+
+For addresses and address lists there are functions
+
+  realname, mailaddress = m.getaddr(name)
+  list = m.getaddrlist(name)
+
+where the latter returns a list of (realname, mailaddr) tuples.
+
+There is also a method
+
+  time = m.getdate(name)
+
+which parses a Date-like field and returns a time-compatible tuple,
+i.e. a tuple such as returned by time.localtime() or accepted by
+time.mktime().
+
+See the class definition for lower level access methods.
+
+There are also some utility functions here.
+"""
+# Cleanup and extensions by Eric S. Raymond <esr at thyrsus.com>
+
+import time
+
+__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]
+
+_blanklines = ('\r\n', '\n')            # Optimization for islast()
+
+
+class Message:
+    """Represents a single RFC 2822-compliant message."""
+
+    def __init__(self, fp, seekable = 1):
+        """Initialize the class instance and read the headers."""
+        if seekable == 1:
+            # Exercise tell() to make sure it works
+            # (and then assume seek() works, too)
+            try:
+                fp.tell()
+            except (AttributeError, IOError):
+                seekable = 0
+            else:
+                seekable = 1
+        self.fp = fp
+        self.seekable = seekable
+        self.startofheaders = None
+        self.startofbody = None
+        #
+        if self.seekable:
+            try:
+                self.startofheaders = self.fp.tell()
+            except IOError:
+                self.seekable = 0
+        #
+        self.readheaders()
+        #
+        if self.seekable:
+            try:
+                self.startofbody = self.fp.tell()
+            except IOError:
+                self.seekable = 0
+
+    def rewindbody(self):
+        """Rewind the file to the start of the body (if seekable)."""
+        if not self.seekable:
+            raise IOError, "unseekable file"
+        self.fp.seek(self.startofbody)
+
+    def readheaders(self):
+        """Read header lines.
+
+        Read header lines up to the entirely blank line that terminates them.
+        The (normally blank) line that ends the headers is skipped, but not
+        included in the returned list.  If a non-header line ends the headers,
+        (which is an error), an attempt is made to backspace over it; it is
+        never included in the returned list.
+
+        The variable self.status is set to the empty string if all went well,
+        otherwise it is an error message.  The variable self.headers is a
+        completely uninterpreted list of lines contained in the header (so
+        printing them will reproduce the header exactly as it appears in the
+        file).
+        """
+        self.dict = {}
+        self.unixfrom = ''
+        self.headers = list = []   # NB: 'list' aliases self.headers (shadows the builtin)
+        self.status = ''
+        headerseen = ""
+        firstline = 1
+        startofline = unread = tell = None
+        # Prefer an unread() hook for pushback; fall back to seek/tell.
+        if hasattr(self.fp, 'unread'):
+            unread = self.fp.unread
+        elif self.seekable:
+            tell = self.fp.tell
+        while 1:
+            if tell:
+                try:
+                    startofline = tell()
+                except IOError:
+                    startofline = tell = None
+                    self.seekable = 0
+            line = self.fp.readline()
+            if not line:
+                self.status = 'EOF in headers'
+                break
+            # Skip unix From name time lines
+            if firstline and line.startswith('From '):
+                self.unixfrom = self.unixfrom + line
+                continue
+            firstline = 0
+            if headerseen and line[0] in ' \t':
+                # It's a continuation line.
+                list.append(line)
+                x = (self.dict[headerseen] + "\n " + line.strip())
+                self.dict[headerseen] = x.strip()
+                continue
+            elif self.iscomment(line):
+                # It's a comment.  Ignore it.
+                continue
+            elif self.islast(line):
+                # Note! No pushback here!  The delimiter line gets eaten.
+                break
+            headerseen = self.isheader(line)
+            if headerseen:
+                # It's a legal header line, save it.
+                list.append(line)
+                self.dict[headerseen] = line[len(headerseen)+1:].strip()
+                continue
+            else:
+                # It's not a header line; throw it back and stop here.
+                if not self.dict:
+                    self.status = 'No headers'
+                else:
+                    self.status = 'Non-header line where header expected'
+                # Try to undo the read.
+                if unread:
+                    unread(line)
+                elif tell:
+                    self.fp.seek(startofline)
+                else:
+                    self.status = self.status + '; bad seek'
+                break
+
+    def isheader(self, line):
+        """Determine whether a given line is a legal header.
+
+        This method should return the header name, suitably canonicalized.
+        You may override this method in order to use Message parsing on tagged
+        data in RFC 2822-like formats with special header formats.
+        """
+        i = line.find(':')
+        if i > 0:
+            return line[:i].lower()
+        else:
+            return None
+
+    def islast(self, line):
+        """Determine whether a line is a legal end of RFC 2822 headers.
+
+        You may override this method if your application wants to bend the
+        rules, e.g. to strip trailing whitespace, or to recognize MH template
+        separators ('--------').  For convenience (e.g. for code reading from
+        sockets) a line consisting of \r\n also matches.
+        """
+        return line in _blanklines
+
+    def iscomment(self, line):
+        """Determine whether a line should be skipped entirely.
+
+        You may override this method in order to use Message parsing on tagged
+        data in RFC 2822-like formats that support embedded comments or
+        free-text data.
+        """
+        return None
+
+    def getallmatchingheaders(self, name):
+        """Find all header lines matching a given header name.
+
+        Look through the list of headers and find all lines matching a given
+        header name (and their continuation lines).  A list of the lines is
+        returned, without interpretation.  If the header does not occur, an
+        empty list is returned.  If the header occurs multiple times, all
+        occurrences are returned.  Case is not important in the header name.
+        """
+        name = name.lower() + ':'
+        n = len(name)
+        list = []
+        hit = 0
+        for line in self.headers:
+            if line[:n].lower() == name:
+                hit = 1
+            elif not line[:1].isspace():
+                hit = 0   # a new non-matching header ends the match
+            if hit:
+                list.append(line)
+        return list
+
+    def getfirstmatchingheader(self, name):
+        """Get the first header line matching name.
+
+        This is similar to getallmatchingheaders, but it returns only the
+        first matching header (and its continuation lines).
+        """
+        name = name.lower() + ':'
+        n = len(name)
+        list = []
+        hit = 0
+        for line in self.headers:
+            if hit:
+                if not line[:1].isspace():
+                    break   # end of the continuation lines
+            elif line[:n].lower() == name:
+                hit = 1
+            if hit:
+                list.append(line)
+        return list
+
+    def getrawheader(self, name):
+        """A higher-level interface to getfirstmatchingheader().
+
+        Return a string containing the literal text of the header but with the
+        keyword stripped.  All leading, trailing and embedded whitespace is
+        kept in the string, however.  Return None if the header does not
+        occur.
+        """
+
+        list = self.getfirstmatchingheader(name)
+        if not list:
+            return None
+        list[0] = list[0][len(name) + 1:]
+        return ''.join(list)
+
+    def getheader(self, name, default=None):
+        """Get the header value for a name.
+
+        This is the normal interface: it returns a stripped version of the
+        header value for a given header name, or None if it doesn't exist.
+        This uses the dictionary version which finds the *last* such header.
+        """
+        try:
+            return self.dict[name.lower()]
+        except KeyError:
+            return default
+    get = getheader
+
+    def getheaders(self, name):
+        """Get all values for a header.
+
+        This returns a list of values for headers given more than once; each
+        value in the result list is stripped in the same way as the result of
+        getheader().  If the header is not given, return an empty list.
+        """
+        result = []
+        current = ''
+        have_header = 0
+        for s in self.getallmatchingheaders(name):
+            if s[0].isspace():
+                # Continuation line: fold into the current value.
+                if current:
+                    current = "%s\n %s" % (current, s.strip())
+                else:
+                    current = s.strip()
+            else:
+                if have_header:
+                    result.append(current)
+                current = s[s.find(":") + 1:].strip()
+                have_header = 1
+        if have_header:
+            result.append(current)
+        return result
+
+    def getaddr(self, name):
+        """Get a single address from a header, as a tuple.
+
+        An example return value:
+        ('Guido van Rossum', 'guido at cwi.nl')
+        """
+        # New, by Ben Escoto
+        alist = self.getaddrlist(name)
+        if alist:
+            return alist[0]
+        else:
+            return (None, None)
+
+    def getaddrlist(self, name):
+        """Get a list of addresses from a header.
+
+        Retrieves a list of addresses from a header, where each address is a
+        tuple as returned by getaddr().  Scans all named headers, so it works
+        properly with multiple To: or Cc: headers for example.
+        """
+        raw = []
+        for h in self.getallmatchingheaders(name):
+            if h[0] in ' \t':
+                raw.append(h)
+            else:
+                if raw:
+                    raw.append(', ')
+                i = h.find(':')
+                if i > 0:
+                    addr = h[i+1:]
+                raw.append(addr)  # NOTE(review): 'addr' is unbound if no ':' -- cannot happen for lines matched by getallmatchingheaders
+        alladdrs = ''.join(raw)
+        a = AddrlistClass(alladdrs)
+        return a.getaddrlist()
+
+    def getdate(self, name):
+        """Retrieve a date field from a header.
+
+        Retrieves a date field from the named header, returning a tuple
+        compatible with time.mktime().
+        """
+        try:
+            data = self[name]
+        except KeyError:
+            return None
+        return parsedate(data)
+
+    def getdate_tz(self, name):
+        """Retrieve a date field from a header as a 10-tuple.
+
+        The first 9 elements make up a tuple compatible with time.mktime(),
+        and the 10th is the offset of the poster's time zone from GMT/UTC.
+        """
+        try:
+            data = self[name]
+        except KeyError:
+            return None
+        return parsedate_tz(data)
+
+
+    # Access as a dictionary (only finds *last* header of each type):
+
+    def __len__(self):
+        """Get the number of headers in a message."""
+        return len(self.dict)
+
+    def __getitem__(self, name):
+        """Get a specific header, as from a dictionary."""
+        return self.dict[name.lower()]
+
+    def __setitem__(self, name, value):
+        """Set the value of a header.
+
+        Note: This is not a perfect inversion of __getitem__, because any
+        changed headers get stuck at the end of the raw-headers list rather
+        than where the altered header was.
+        """
+        del self[name] # Won't fail if it doesn't exist
+        self.dict[name.lower()] = value
+        text = name + ": " + value
+        lines = text.split("\n")
+        for line in lines:
+            self.headers.append(line + "\n")
+
+    def __delitem__(self, name):
+        """Delete all occurrences of a specific header, if it is present."""
+        name = name.lower()
+        if not self.dict.has_key(name):
+            return
+        del self.dict[name]
+        name = name + ':'
+        n = len(name)
+        list = []
+        hit = 0
+        for i in range(len(self.headers)):
+            line = self.headers[i]
+            if line[:n].lower() == name:
+                hit = 1
+            elif not line[:1].isspace():
+                hit = 0
+            if hit:
+                list.append(i)
+        list.reverse()   # delete from the end so earlier indices stay valid
+        for i in list:
+            del self.headers[i]
+
+    def setdefault(self, name, default=""):
+        """Return the header's value, setting it to default if absent."""
+        lowername = name.lower()
+        if self.dict.has_key(lowername):
+            return self.dict[lowername]
+        else:
+            text = name + ": " + default
+            lines = text.split("\n")
+            for line in lines:
+                self.headers.append(line + "\n")
+            self.dict[lowername] = default
+            return default
+
+    def has_key(self, name):
+        """Determine whether a message contains the named header."""
+        return self.dict.has_key(name.lower())
+
+    def keys(self):
+        """Get all of a message's header field names."""
+        return self.dict.keys()
+
+    def values(self):
+        """Get all of a message's header field values."""
+        return self.dict.values()
+
+    def items(self):
+        """Get all of a message's headers.
+
+        Returns a list of name, value tuples.
+        """
+        return self.dict.items()
+
+    def __str__(self):
+        """Return the raw header block as one string."""
+        str = ''
+        for hdr in self.headers:
+            str = str + hdr
+        return str
+
+
+# Utility functions
+# -----------------
+
+# XXX Should fix unquote() and quote() to be really conformant.
+# XXX The inverses of the parse functions may also be useful.
+
+
+def unquote(str):
+    """Strip one pair of surrounding double quotes or angle brackets.
+
+    Returns str unchanged if it is not so delimited.  Note: does not
+    undo backslash escapes (see the XXX above about conformance).
+    """
+    if len(str) > 1:
+        if str[0] == '"' and str[-1:] == '"':
+            return str[1:-1]
+        if str[0] == '<' and str[-1:] == '>':
+            return str[1:-1]
+    return str
+
+
+def quote(str):
+    """Backslash-escape backslashes and double quotes in a string.
+
+    Despite the name, no surrounding quotes are added -- the caller is
+    expected to supply them (see the XXX above about conformance).
+    """
+    return str.replace('\\', '\\\\').replace('"', '\\"')
+
+
+def parseaddr(address):
+    """Parse an address into a (realname, mailaddr) tuple.
+
+    Only the first address is returned; (None, None) if none parse.
+    """
+    a = AddressList(address)
+    list = a.addresslist
+    if not list:
+        return (None, None)
+    else:
+        return list[0]
+
+
+class AddrlistClass:
+    """Address parser class by Ben Escoto.
+
+    To understand what this class does, it helps to have a copy of
+    RFC 2822 in front of you.
+
+    http://www.faqs.org/rfcs/rfc2822.html
+
+    Note: this class interface is deprecated and may be removed in the future.
+    Use rfc822.AddressList instead.
+    """
+
+    def __init__(self, field):
+        """Initialize a new instance.
+
+        `field' is an unparsed address header field, containing one or more
+        addresses.
+        """
+        self.specials = '()<>@,:;.\"[]'
+        self.pos = 0            # current scan position in self.field
+        self.LWS = ' \t'        # linear whitespace
+        self.CR = '\r\n'
+        self.atomends = self.specials + self.LWS + self.CR
+        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
+        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
+        # syntax, so allow dots in phrases.
+        self.phraseends = self.atomends.replace('.', '')
+        self.field = field
+        self.commentlist = []
+
+    def gotonext(self):
+        """Parse up to the start of the next address."""
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS + '\n\r':
+                self.pos = self.pos + 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            else: break
+
+    def getaddrlist(self):
+        """Parse all addresses.
+
+        Returns a list containing all of the addresses.
+        """
+        result = []
+        while 1:
+            ad = self.getaddress()
+            if ad:
+                result += ad
+            else:
+                break
+        return result
+
+    def getaddress(self):
+        """Parse the next address.
+
+        Returns a (possibly empty) list of (comment-string, address)
+        tuples; groups expand to one tuple per member.
+        """
+        self.commentlist = []
+        self.gotonext()
+
+        # Remember where we are so the addrspec case can re-scan.
+        oldpos = self.pos
+        oldcl = self.commentlist
+        plist = self.getphraselist()
+
+        self.gotonext()
+        returnlist = []
+
+        if self.pos >= len(self.field):
+            # Bad email address technically, no domain.
+            if plist:
+                returnlist = [(' '.join(self.commentlist), plist[0])]
+
+        elif self.field[self.pos] in '.@':
+            # email address is just an addrspec
+            # this isn't very efficient since we start over
+            self.pos = oldpos
+            self.commentlist = oldcl
+            addrspec = self.getaddrspec()
+            returnlist = [(' '.join(self.commentlist), addrspec)]
+
+        elif self.field[self.pos] == ':':
+            # address is a group
+            returnlist = []
+
+            fieldlen = len(self.field)
+            self.pos = self.pos + 1
+            while self.pos < len(self.field):
+                self.gotonext()
+                if self.pos < fieldlen and self.field[self.pos] == ';':
+                    self.pos = self.pos + 1
+                    break
+                returnlist = returnlist + self.getaddress()
+
+        elif self.field[self.pos] == '<':
+            # Address is a phrase then a route addr
+            routeaddr = self.getrouteaddr()
+
+            if self.commentlist:
+                returnlist = [(' '.join(plist) + ' (' + \
+                         ' '.join(self.commentlist) + ')', routeaddr)]
+            else: returnlist = [(' '.join(plist), routeaddr)]
+
+        else:
+            if plist:
+                returnlist = [(' '.join(self.commentlist), plist[0])]
+            elif self.field[self.pos] in self.specials:
+                self.pos = self.pos + 1   # skip a stray special character
+
+        self.gotonext()
+        if self.pos < len(self.field) and self.field[self.pos] == ',':
+            self.pos = self.pos + 1   # consume the separating comma
+        return returnlist
+
+    def getrouteaddr(self):
+        """Parse a route address (Return-path value).
+
+        This method just skips all the route stuff and returns the addrspec.
+        """
+        if self.field[self.pos] != '<':
+            return
+
+        expectroute = 0
+        self.pos = self.pos + 1
+        self.gotonext()
+        adlist = ""
+        while self.pos < len(self.field):
+            if expectroute:
+                self.getdomain()   # route domains are parsed but discarded
+                expectroute = 0
+            elif self.field[self.pos] == '>':
+                self.pos = self.pos + 1
+                break
+            elif self.field[self.pos] == '@':
+                self.pos = self.pos + 1
+                expectroute = 1
+            elif self.field[self.pos] == ':':
+                self.pos = self.pos + 1
+            else:
+                adlist = self.getaddrspec()
+                self.pos = self.pos + 1
+                break
+            self.gotonext()
+
+        return adlist
+
+    def getaddrspec(self):
+        """Parse an RFC 2822 addr-spec."""
+        aslist = []
+
+        self.gotonext()
+        while self.pos < len(self.field):
+            if self.field[self.pos] == '.':
+                aslist.append('.')
+                self.pos = self.pos + 1
+            elif self.field[self.pos] == '"':
+                aslist.append('"%s"' % self.getquote())
+            elif self.field[self.pos] in self.atomends:
+                break
+            else: aslist.append(self.getatom())
+            self.gotonext()
+
+        if self.pos >= len(self.field) or self.field[self.pos] != '@':
+            return ''.join(aslist)   # local part only, no domain
+
+        aslist.append('@')
+        self.pos = self.pos + 1
+        self.gotonext()
+        return ''.join(aslist) + self.getdomain()
+
+    def getdomain(self):
+        """Get the complete domain name from an address."""
+        sdlist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos = self.pos + 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] == '[':
+                sdlist.append(self.getdomainliteral())
+            elif self.field[self.pos] == '.':
+                self.pos = self.pos + 1
+                sdlist.append('.')
+            elif self.field[self.pos] in self.atomends:
+                break
+            else: sdlist.append(self.getatom())
+        return ''.join(sdlist)
+
+    def getdelimited(self, beginchar, endchars, allowcomments = 1):
+        """Parse a header fragment delimited by special characters.
+
+        `beginchar' is the start character for the fragment.  If self is not
+        looking at an instance of `beginchar' then getdelimited returns the
+        empty string.
+
+        `endchars' is a sequence of allowable end-delimiting characters.
+        Parsing stops when one of these is encountered.
+
+        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
+        within the parsed fragment.
+        """
+        if self.field[self.pos] != beginchar:
+            return ''
+
+        slist = ['']
+        quote = 0   # set after a backslash: next char is taken literally
+        self.pos = self.pos + 1
+        while self.pos < len(self.field):
+            if quote == 1:
+                slist.append(self.field[self.pos])
+                quote = 0
+            elif self.field[self.pos] in endchars:
+                self.pos = self.pos + 1
+                break
+            elif allowcomments and self.field[self.pos] == '(':
+                slist.append(self.getcomment())
+            elif self.field[self.pos] == '\\':
+                quote = 1
+            else:
+                slist.append(self.field[self.pos])
+            self.pos = self.pos + 1
+
+        return ''.join(slist)
+
+    def getquote(self):
+        """Get a quote-delimited fragment from self's field."""
+        return self.getdelimited('"', '"\r', 0)
+
+    def getcomment(self):
+        """Get a parenthesis-delimited fragment from self's field."""
+        return self.getdelimited('(', ')\r', 1)
+
+    def getdomainliteral(self):
+        """Parse an RFC 2822 domain-literal."""
+        return '[%s]' % self.getdelimited('[', ']\r', 0)
+
+    def getatom(self, atomends=None):
+        """Parse an RFC 2822 atom.
+
+        Optional atomends specifies a different set of end token delimiters
+        (the default is to use self.atomends).  This is used e.g. in
+        getphraselist() since phrase endings must not include the `.' (which
+        is legal in phrases)."""
+        atomlist = ['']
+        if atomends is None:
+            atomends = self.atomends
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in atomends:
+                break
+            else: atomlist.append(self.field[self.pos])
+            self.pos = self.pos + 1
+
+        return ''.join(atomlist)
+
+    def getphraselist(self):
+        """Parse a sequence of RFC 2822 phrases.
+
+        A phrase is a sequence of words, which are in turn either RFC 2822
+        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
+        runs of continuous whitespace into one space.
+        """
+        plist = []
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos = self.pos + 1
+            elif self.field[self.pos] == '"':
+                plist.append(self.getquote())
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] in self.phraseends:
+                break
+            else:
+                plist.append(self.getatom(self.phraseends))
+
+        return plist
+
+class AddressList(AddrlistClass):
+    """An AddressList encapsulates a list of parsed RFC 2822 addresses.
+
+    Supports len(), str(), indexing/slicing, and set-like +, -, +=, -=
+    on the parsed (realname, mailaddr) tuples.
+    """
+    def __init__(self, field):
+        AddrlistClass.__init__(self, field)
+        if field:
+            self.addresslist = self.getaddrlist()
+        else:
+            self.addresslist = []
+
+    def __len__(self):
+        return len(self.addresslist)
+
+    def __str__(self):
+        return ", ".join(map(dump_address_pair, self.addresslist))
+
+    def __add__(self, other):
+        # Set union
+        newaddr = AddressList(None)
+        newaddr.addresslist = self.addresslist[:]
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __iadd__(self, other):
+        # Set union, in-place
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                self.addresslist.append(x)
+        return self
+
+    def __sub__(self, other):
+        # Set difference
+        newaddr = AddressList(None)
+        for x in self.addresslist:
+            if not x in other.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __isub__(self, other):
+        # Set difference, in-place
+        for x in other.addresslist:
+            if x in self.addresslist:
+                self.addresslist.remove(x)
+        return self
+
+    def __getitem__(self, index):
+        # Make indexing, slices, and 'in' work
+        return self.addresslist[index]
+
+def dump_address_pair(pair):
+    """Dump a (name, address) pair in a canonicalized form.
+
+    Produces '"name" <addr>' when a realname is present, else just addr.
+    """
+    if pair[0]:
+        return '"' + pair[0] + '" <' + pair[1] + '>'
+    else:
+        return pair[1]
+
+# Parse a date field
+
+# Month/day name tables; index+1 (mod 12) gives the month number.
+_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
+               'aug', 'sep', 'oct', 'nov', 'dec',
+               'january', 'february', 'march', 'april', 'may', 'june', 'july',
+               'august', 'september', 'october', 'november', 'december']
+_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+
+# The timezone table does not include the military time zones defined
+# in RFC822, other than Z.  According to RFC1123, the description in
+# RFC822 gets the signs wrong, so we can't rely on any such time
+# zones.  RFC1123 recommends that numeric timezone indicators be used
+# instead of timezone names.
+
+# Values are signed HHMM offsets (e.g. -400 means -04:00), converted to
+# seconds by parsedate_tz(), not minutes or seconds.
+_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
+              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
+              'EST': -500, 'EDT': -400,  # Eastern
+              'CST': -600, 'CDT': -500,  # Central
+              'MST': -700, 'MDT': -600,  # Mountain
+              'PST': -800, 'PDT': -700   # Pacific
+              }
+
+
+def parsedate_tz(data):
+    """Convert a date string to a time tuple.
+
+    Accounts for military timezones.
+
+    Returns a 10-tuple: 9 time.mktime()-compatible fields plus the UTC
+    offset in seconds (or None if the zone is unrecognized), or None if
+    the string cannot be parsed.
+    """
+    if not data:
+        return None
+    data = data.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
+        # There's a dayname here. Skip it
+        del data[0]
+    if len(data) == 3: # RFC 850 date, deprecated
+        stuff = data[0].split('-')
+        if len(stuff) == 3:
+            data = stuff + data[1:]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]   # split time+zone glued together
+        else:
+            data.append('') # Dummy tz
+    if len(data) < 5:
+        return None
+    data = data[:5]
+    [dd, mm, yy, tm, tz] = data
+    mm = mm.lower()
+    if not mm in _monthnames:
+        dd, mm = mm, dd.lower()   # maybe day and month were swapped
+        if not mm in _monthnames:
+            return None
+    mm = _monthnames.index(mm)+1
+    if mm > 12: mm = mm - 12   # full month names occupy indices 13-24
+    if dd[-1] == ',':
+        dd = dd[:-1]
+    i = yy.find(':')
+    if i > 0:
+        yy, tm = tm, yy   # year and time were swapped
+    if yy[-1] == ',':
+        yy = yy[:-1]
+    if not yy[0].isdigit():
+        yy, tz = tz, yy   # year and zone were swapped
+    if tm[-1] == ',':
+        tm = tm[:-1]
+    tm = tm.split(':')
+    if len(tm) == 2:
+        [thh, tmm] = tm
+        tss = '0'
+    elif len(tm) == 3:
+        [thh, tmm, tss] = tm
+    else:
+        return None
+    try:
+        yy = int(yy)
+        dd = int(dd)
+        thh = int(thh)
+        tmm = int(tmm)
+        tss = int(tss)
+    except ValueError:
+        return None
+    tzoffset = None
+    tz = tz.upper()
+    if _timezones.has_key(tz):
+        tzoffset = _timezones[tz]
+    else:
+        try:
+            tzoffset = int(tz)   # numeric zone, e.g. -0500
+        except ValueError:
+            pass
+    # Convert a timezone offset into seconds ; -0500 -> -18000
+    if tzoffset:
+        if tzoffset < 0:
+            tzsign = -1
+            tzoffset = -tzoffset
+        else:
+            tzsign = 1
+        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
+    tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
+    return tuple
+
+
+def parsedate(data):
+    """Convert a time string to a time tuple.
+
+    Like parsedate_tz() but drops the timezone element, returning a
+    plain 9-tuple (or None on failure).
+    """
+    t = parsedate_tz(data)
+    if type(t) == type( () ):
+        return t[:9]
+    else: return t
+
+
+def mktime_tz(data):
+    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
+    if data[9] is None:
+        # No zone info, so localtime is better assumption than GMT
+        return time.mktime(data[:8] + (-1,))
+    else:
+        # Interpret as local time, then shift by the stated zone offset
+        # relative to this host's zone.
+        t = time.mktime(data[:8] + (0,))
+        return t - data[9] - time.timezone
+
+def formatdate(timeval=None):
+    """Returns time format preferred for Internet standards.
+
+    Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
+
+    According to RFC 1123, day and month names must always be in
+    English.  If not for that, this code could use strftime().  It
+    can't because strftime() honors the locale and could generated
+    non-English names.
+
+    timeval is a seconds-since-epoch float as from time.time();
+    defaults to the current time.
+    """
+    if timeval is None:
+        timeval = time.time()
+    timeval = time.gmtime(timeval)
+    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
+            ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][timeval[6]],
+            timeval[2],
+            ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
+             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][timeval[1]-1],
+                                timeval[0], timeval[3], timeval[4], timeval[5])
+
+
+# When used as script, run a small test program.
+# The first command line argument must be a filename containing one
+# message in RFC-822 format.
+
+if __name__ == '__main__':
+    import sys, os
+    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
+    if sys.argv[1:]: file = sys.argv[1]
+    f = open(file, 'r')
+    m = Message(f)
+    print 'From:', m.getaddr('from')
+    print 'To:', m.getaddrlist('to')
+    print 'Subject:', m.getheader('subject')
+    print 'Date:', m.getheader('date')
+    date = m.getdate_tz('date')
+    tz = date[-1]  # NOTE(review): crashes if getdate_tz returned None -- the 'if date:' guard below comes too late
+    date = time.localtime(mktime_tz(date))
+    if date:
+        print 'ParsedDate:', time.asctime(date),
+        hhmmss = tz
+        hhmm, ss = divmod(hhmmss, 60)
+        hh, mm = divmod(hhmm, 60)
+        print "%+03d%02d" % (hh, mm),
+        if ss: print ".%02d" % ss,
+        print
+    else:
+        print 'ParsedDate:', None
+    m.rewindbody()
+    n = 0
+    while f.readline():
+        n = n + 1
+    print 'Lines:', n
+    print '-'*70
+    print 'len =', len(m)
+    if m.has_key('Date'): print 'Date =', m['Date']
+    if m.has_key('X-Nonsense'): pass
+    print 'keys =', m.keys()
+    print 'values =', m.values()
+    print 'items =', m.items()
diff --git a/lib-python/2.2/rlcompleter.py b/lib-python/2.2/rlcompleter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/rlcompleter.py
@@ -0,0 +1,122 @@
+"""Word completion for GNU readline 2.0.
+
+This requires the latest extension to the readline module (the
+completer function takes a different signature).
+
+It completes keywords, built-ins and globals in __main__; when completing
+NAME.NAME..., it evaluates (!) the expression up to the last dot and
+completes its attributes.
+
+It's very cool to do "import string" type "string.", hit the
+completion key (twice), and see the list of names defined by the
+string module!
+
+Tip: to use the tab key as the completion key, call
+
+    readline.parse_and_bind("tab: complete")
+
+Notes:
+
+- Exceptions raised by the completer function are *ignored* (and
+generally cause the completion to fail).  This is a feature -- since
+readline sets the tty device in raw (or cbreak) mode, printing a
+traceback wouldn't work well without some complicated hoopla to save,
+reset and restore the tty state.
+
+- The evaluation of the NAME.NAME... form may cause arbitrary
+application defined code to be executed if an object with a
+__getattr__ hook is found.  Since it is the responsibility of the
+application (or the user) to enable this feature, I consider this an
+acceptable risk.  More complicated expressions (e.g. function calls or
+indexing operations) are *not* evaluated.
+
+- GNU readline is also used by the built-in functions input() and
+raw_input(), and thus these also benefit/suffer from the completer
+features.  Clearly an interactive application can benefit by
+specifying its own completer function and using raw_input() for all
+its input.
+
+- When the original stdin is not a tty device, GNU readline is never
+used, and this module (and the readline module) are silently inactive.
+
+"""
+
+import readline
+import __builtin__
+import __main__
+
+__all__ = ["Completer"]
+
+class Completer:
+    """Readline completer for keywords, globals and object attributes.
+
+    An instance's complete() method is suitable for
+    readline.set_completer().
+    """
+
+    def complete(self, text, state):
+        """Return the next possible completion for 'text'.
+
+        This is called successively with state == 0, 1, 2, ... until it
+        returns None.  The completion should begin with 'text'.
+
+        """
+        # state == 0 is the first call for this text: compute and cache
+        # the full match list; later calls just index into it.
+        if state == 0:
+            if "." in text:
+                self.matches = self.attr_matches(text)
+            else:
+                self.matches = self.global_matches(text)
+        try:
+            return self.matches[state]
+        except IndexError:
+            return None
+
+    def global_matches(self, text):
+        """Compute matches when text is a simple name.
+
+        Return a list of all keywords, built-in functions and names
+        currently defined in __main__ that match.
+
+        """
+        import keyword
+        matches = []
+        n = len(text)
+        # Search keywords, builtins and __main__ globals, in that order;
+        # __builtins__ itself is deliberately suppressed as noise.
+        for list in [keyword.kwlist,
+                     __builtin__.__dict__.keys(),
+                     __main__.__dict__.keys()]:
+            for word in list:
+                if word[:n] == text and word != "__builtins__":
+                    matches.append(word)
+        return matches
+
+    def attr_matches(self, text):
+        """Compute matches when text contains a dot.
+
+        Assuming the text is of the form NAME.NAME....[NAME], and is
+        evaluatable in the globals of __main__, it will be evaluated
+        and its attributes (as revealed by dir()) are used as possible
+        completions.  (For class instances, class members are also
+        considered.)
+
+        WARNING: this can still invoke arbitrary C code, if an object
+        with a __getattr__ hook is evaluated.
+
+        """
+        import re
+        # expr is everything up to the last dot; attr is the (possibly
+        # empty) partial attribute name being completed.
+        m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
+        if not m:
+            return
+        expr, attr = m.group(1, 3)
+        object = eval(expr, __main__.__dict__)
+        words = dir(object)
+        if hasattr(object,'__class__'):
+            # dir() on a classic instance omits class attributes; pull
+            # them in from the class and its bases.
+            words.append('__class__')
+            words = words + get_class_members(object.__class__)
+        matches = []
+        n = len(attr)
+        for word in words:
+            if word[:n] == attr and word != "__builtins__":
+                matches.append("%s.%s" % (expr, word))
+        return matches
+
+def get_class_members(klass):
+    """Return attribute names of klass and, recursively, all its bases."""
+    ret = dir(klass)
+    if hasattr(klass,'__bases__'):
+        for base in klass.__bases__:
+            ret = ret + get_class_members(base)
+    return ret
+
+# Importing this module installs a Completer as the readline hook.
+readline.set_completer(Completer().complete)
diff --git a/lib-python/2.2/robotparser.py b/lib-python/2.2/robotparser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/robotparser.py
@@ -0,0 +1,262 @@
+""" robotparser.py
+
+    Copyright (C) 2000  Bastian Kleineidam
+
+    You can choose between two licenses when using this package:
+    1) GNU GPLv2
+    2) PSF license for Python 2.2
+
+    The robots.txt Exclusion Protocol is implemented as specified in
+    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+"""
+import re,urlparse,urllib
+
+__all__ = ["RobotFileParser"]
+
+# Module-level debug switch; _test() turns it on.
+debug = 0
+
+def _debug(msg):
+    # Print msg only when the module-level debug flag is set.
+    if debug: print msg
+
+
+class RobotFileParser:
+    """Fetch, parse and answer questions about a single robots.txt file."""
+
+    def __init__(self, url=''):
+        # Parsed Entry objects, in file order (first match wins).
+        self.entries = []
+        # Blanket answers set by read() from the HTTP status code.
+        self.disallow_all = 0
+        self.allow_all = 0
+        self.set_url(url)
+        self.last_checked = 0
+
+    def mtime(self):
+        """Return the time (per modified()) the file was last fetched."""
+        return self.last_checked
+
+    def modified(self):
+        """Record that the robots.txt file has just been (re)fetched."""
+        import time
+        self.last_checked = time.time()
+
+    def set_url(self, url):
+        """Set the URL of the robots.txt file to operate on."""
+        self.url = url
+        self.host, self.path = urlparse.urlparse(url)[1:3]
+
+    def read(self):
+        """Fetch self.url and parse it; sets self.errcode as a side effect."""
+        opener = URLopener()
+        f = opener.open(self.url)
+        lines = []
+        line = f.readline()
+        while line:
+            lines.append(line.strip())
+            line = f.readline()
+        self.errcode = opener.errcode
+        # 401/403 means the whole site is off limits; any other 4xx/5xx
+        # means no usable robots.txt, so everything is allowed.
+        if self.errcode == 401 or self.errcode == 403:
+            self.disallow_all = 1
+            _debug("disallow all")
+        elif self.errcode >= 400:
+            self.allow_all = 1
+            _debug("allow all")
+        elif self.errcode == 200 and lines:
+            _debug("parse lines")
+            self.parse(lines)
+
+    def parse(self, lines):
+        """parse the input lines from a robot.txt file.
+           We allow that a user-agent: line is not preceded by
+           one or more blank lines."""
+        # state: 0 = expecting a user-agent line, 1 = collecting
+        # user-agents, 2 = collecting allow/disallow rule lines.
+        state = 0
+        linenumber = 0
+        entry = Entry()
+
+        for line in lines:
+            linenumber = linenumber + 1
+            if not line:
+                # A blank line terminates the current record.
+                if state==1:
+                    _debug("line %d: warning: you should insert"
+                           " allow: or disallow: directives below any"
+                           " user-agent: line" % linenumber)
+                    entry = Entry()
+                    state = 0
+                elif state==2:
+                    self.entries.append(entry)
+                    entry = Entry()
+                    state = 0
+            # remove optional comment and strip line
+            i = line.find('#')
+            if i>=0:
+                line = line[:i]
+            line = line.strip()
+            if not line:
+                continue
+            line = line.split(':', 1)
+            if len(line) == 2:
+                line[0] = line[0].strip().lower()
+                line[1] = line[1].strip()
+                if line[0] == "user-agent":
+                    if state==2:
+                        _debug("line %d: warning: you should insert a blank"
+                               " line before any user-agent"
+                               " directive" % linenumber)
+                        self.entries.append(entry)
+                        entry = Entry()
+                    entry.useragents.append(line[1])
+                    state = 1
+                elif line[0] == "disallow":
+                    if state==0:
+                        _debug("line %d: error: you must insert a user-agent:"
+                               " directive before this line" % linenumber)
+                    else:
+                        entry.rulelines.append(RuleLine(line[1], 0))
+                        state = 2
+                elif line[0] == "allow":
+                    if state==0:
+                        _debug("line %d: error: you must insert a user-agent:"
+                               " directive before this line" % linenumber)
+                    else:
+                        entry.rulelines.append(RuleLine(line[1], 1))
+                else:
+                    _debug("line %d: warning: unknown key %s" % (linenumber,
+                               line[0]))
+            else:
+                _debug("line %d: error: malformed line %s"%(linenumber, line))
+        # Flush a trailing record that was not followed by a blank line.
+        if state==2:
+            self.entries.append(entry)
+        _debug("Parsed rules:\n%s" % str(self))
+
+
+    def can_fetch(self, useragent, url):
+        """using the parsed robots.txt decide if useragent can fetch url"""
+        _debug("Checking robot.txt allowance for:\n  user agent: %s\n  url: %s" %
+               (useragent, url))
+        if self.disallow_all:
+            return 0
+        if self.allow_all:
+            return 1
+        # search for given user agent matches
+        # the first match counts
+        url = urllib.quote(urlparse.urlparse(url)[2]) or "/"
+        for entry in self.entries:
+            if entry.applies_to(useragent):
+                return entry.allowance(url)
+        # agent not found ==> access granted
+        return 1
+
+
+    def __str__(self):
+        """Render all parsed entries back in robots.txt-like form."""
+        ret = ""
+        for entry in self.entries:
+            ret = ret + str(entry) + "\n"
+        return ret
+
+
+class RuleLine:
+    """A rule line is a single "Allow:" (allowance==1) or "Disallow:"
+       (allowance==0) followed by a path."""
+    def __init__(self, path, allowance):
+        # Path is stored URL-quoted, matching the quoting applied to
+        # candidate URLs in RobotFileParser.can_fetch().
+        self.path = urllib.quote(path)
+        self.allowance = allowance
+
+    def applies_to(self, filename):
+        # re.match anchors at the start, giving prefix-match semantics;
+        # "*" is the explicit match-everything wildcard.
+        return self.path=="*" or re.match(self.path, filename)
+
+    def __str__(self):
+        return (self.allowance and "Allow" or "Disallow")+": "+self.path
+
+
+class Entry:
+    """An entry has one or more user-agents and zero or more rulelines"""
+    def __init__(self):
+        self.useragents = []
+        self.rulelines = []
+
+    def __str__(self):
+        ret = ""
+        for agent in self.useragents:
+            ret = ret + "User-agent: "+agent+"\n"
+        for line in self.rulelines:
+            ret = ret + str(line) + "\n"
+        return ret
+
+    def applies_to(self, useragent):
+        """check if this entry applies to the specified agent"""
+        # split the name token and make it lower case
+        useragent = useragent.split("/")[0].lower()
+        for agent in self.useragents:
+            if agent=='*':
+                # we have the catch-all agent
+                return 1
+            agent = agent.lower()
+            # don't forget to re.escape
+            # NOTE(review): this searches for the client's name token
+            # inside the robots.txt agent string; later CPython versions
+            # test the opposite direction (robots.txt token as substring
+            # of the client name) -- confirm which is intended here.
+            if re.search(re.escape(useragent), agent):
+                return 1
+        return 0
+
+    def allowance(self, filename):
+        """Preconditions:
+        - our agent applies to this entry
+        - filename is URL decoded"""
+        # First matching rule line wins; no match means allowed.
+        for line in self.rulelines:
+            _debug((filename, str(line), line.allowance))
+            if line.applies_to(filename):
+                return line.allowance
+        return 1
+
+class URLopener(urllib.FancyURLopener):
+    """FancyURLopener that records the last HTTP error code in .errcode."""
+
+    def __init__(self, *args):
+        apply(urllib.FancyURLopener.__init__, (self,) + args)
+        # Assume success until an error handler fires.
+        self.errcode = 200
+
+    def http_error_default(self, url, fp, errcode, errmsg, headers):
+        # Remember the status so RobotFileParser.read() can inspect it,
+        # then fall back to the normal FancyURLopener behaviour.
+        self.errcode = errcode
+        return urllib.FancyURLopener.http_error_default(self, url, fp, errcode,
+                                                        errmsg, headers)
+
+def _check(a,b):
+    # Test helper: a is the actual can_fetch() result, b the expected one.
+    # Prints "ok (...)" on agreement, "failed" otherwise.
+    if not b:
+        ac = "access denied"
+    else:
+        ac = "access allowed"
+    if a!=b:
+        print "failed"
+    else:
+        print "ok (%s)" % ac
+    print
+
+def _test():
+    # Exercise the parser against live sites -- requires network access.
+    global debug
+    rp = RobotFileParser()
+    debug = 1
+
+    # robots.txt that exists, gotten to by redirection
+    rp.set_url('http://www.musi-cal.com/robots.txt')
+    rp.read()
+
+    # test for re.escape
+    _check(rp.can_fetch('*', 'http://www.musi-cal.com/'), 1)
+    # this should match the first rule, which is a disallow
+    _check(rp.can_fetch('', 'http://www.musi-cal.com/'), 0)
+    # various cherry pickers
+    _check(rp.can_fetch('CherryPickerSE',
+                       'http://www.musi-cal.com/cgi-bin/event-search'
+                       '?city=San+Francisco'), 0)
+    _check(rp.can_fetch('CherryPickerSE/1.0',
+                       'http://www.musi-cal.com/cgi-bin/event-search'
+                       '?city=San+Francisco'), 0)
+    _check(rp.can_fetch('CherryPickerSE/1.5',
+                       'http://www.musi-cal.com/cgi-bin/event-search'
+                       '?city=San+Francisco'), 0)
+    # case sensitivity
+    _check(rp.can_fetch('ExtractorPro', 'http://www.musi-cal.com/blubba'), 0)
+    _check(rp.can_fetch('extractorpro', 'http://www.musi-cal.com/blubba'), 0)
+    # substring test
+    _check(rp.can_fetch('toolpak/1.1', 'http://www.musi-cal.com/blubba'), 0)
+    # tests for catch-all * agent
+    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/search'), 0)
+    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/Musician/me'), 1)
+    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)
+    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)
+
+    # robots.txt that does not exist
+    rp.set_url('http://www.lycos.com/robots.txt')
+    rp.read()
+    _check(rp.can_fetch('Mozilla', 'http://www.lycos.com/search'), 1)
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/sched.py b/lib-python/2.2/sched.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sched.py
@@ -0,0 +1,106 @@
+"""A generally useful event scheduler class.
+
+Each instance of this class manages its own queue.
+No multi-threading is implied; you are supposed to hack that
+yourself, or use a single instance per application.
+
+Each instance is parametrized with two functions, one that is
+supposed to return the current time, one that is supposed to
+implement a delay.  You can implement real-time scheduling by
+substituting time and sleep from built-in module time, or you can
+implement simulated time by writing your own functions.  This can
+also be used to integrate scheduling with STDWIN events; the delay
+function is allowed to modify the queue.  Time can be expressed as
+integers or floating point numbers, as long as it is consistent.
+
+Events are specified by tuples (time, priority, action, argument).
+As in UNIX, lower priority numbers mean higher priority; in this
+way the queue can be maintained fully sorted.  Execution of the
+event means calling the action function, passing it the argument.
+Remember that in Python, multiple function arguments can be packed
+in a tuple.   The action function may be an instance method so it
+has another way to reference private data (besides global variables).
+Parameterless functions or methods cannot be used, however.
+"""
+
+# XXX The timefunc and delayfunc should have been defined as methods
+# XXX so you can define new kinds of schedulers using subclassing
+# XXX instead of having to define a module or class just to hold
+# XXX the global state of your particular time and delay functions.
+
+import bisect
+
+__all__ = ["scheduler"]
+
+class scheduler:
+    def __init__(self, timefunc, delayfunc):
+        """Initialize a new instance, passing the time and delay
+        functions"""
+        # The queue is a list of (time, priority, action, argument)
+        # tuples kept sorted by bisect.insort, so tuple comparison
+        # orders first by time, then by priority.
+        self.queue = []
+        self.timefunc = timefunc
+        self.delayfunc = delayfunc
+
+    def enterabs(self, time, priority, action, argument):
+        """Enter a new event in the queue at an absolute time.
+
+        Returns an ID for the event which can be used to remove it,
+        if necessary.
+
+        """
+        event = time, priority, action, argument
+        bisect.insort(self.queue, event)
+        return event # The ID
+
+    def enter(self, delay, priority, action, argument):
+        """A variant that specifies the time as a relative time.
+
+        This is actually the more commonly used interface.
+
+        """
+        time = self.timefunc() + delay
+        return self.enterabs(time, priority, action, argument)
+
+    def cancel(self, event):
+        """Remove an event from the queue.
+
+        This must be presented the ID as returned by enter().
+        If the event is not in the queue, this raises ValueError
+        (the exception raised by list.remove).
+
+        """
+        self.queue.remove(event)
+
+    def empty(self):
+        """Check whether the queue is empty."""
+        return len(self.queue) == 0
+
+    def run(self):
+        """Execute events until the queue is empty.
+
+        When there is a positive delay until the first event, the
+        delay function is called and the event is left in the queue;
+        otherwise, the event is removed from the queue and executed
+        (its action function is called, passing it the argument).  If
+        the delay function returns prematurely, it is simply
+        restarted.
+
+        It is legal for both the delay function and the action
+        function to modify the queue or to raise an exception;
+        exceptions are not caught but the scheduler's state remains
+        well-defined so run() may be called again.
+
+        A questionable hack is added to allow other threads to run:
+        just after an event is executed, a delay of 0 is executed, to
+        avoid monopolizing the CPU when other threads are also
+        runnable.
+
+        """
+        q = self.queue
+        while q:
+            # Peek at the earliest event; only pop it once it is due,
+            # so a premature delayfunc return just re-checks the clock.
+            time, priority, action, argument = q[0]
+            now = self.timefunc()
+            if now < time:
+                self.delayfunc(time - now)
+            else:
+                del q[0]
+                void = apply(action, argument)
+                self.delayfunc(0)   # Let other threads run
diff --git a/lib-python/2.2/sgmllib.py b/lib-python/2.2/sgmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sgmllib.py
@@ -0,0 +1,516 @@
+"""A parser for SGML, using the derived class as a static DTD."""
+
+# XXX This only supports those SGML features used by HTML.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).  RCDATA is
+# not supported at all.
+
+
+import markupbase
+import re
+
+__all__ = ["SGMLParser"]
+
+# Regular expressions used for parsing
+
+# Next character that could start markup: an entity reference or a tag.
+interesting = re.compile('[&<]')
+# A possibly-truncated reference or tag at the end of the buffer; used
+# by goahead() to decide whether to wait for more data.
+incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
+                           '<([a-zA-Z][^<>]*|'
+                              '/([a-zA-Z][^<>]*)?|'
+                              '![^<>]*)?')
+
+# Complete references: the trailing non-name character delimits the name.
+entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
+charref = re.compile('&#([0-9]+)[^0-9]')
+
+# Tag openers, including the SGML short-tag forms <tag/data/ and <>.
+starttagopen = re.compile('<[>a-zA-Z]')
+shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
+shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
+piclose = re.compile('>')
+endbracket = re.compile('[<>]')
+commentclose = re.compile(r'--\s*>')
+# Tag name, and name=value attribute (value optionally quoted).
+tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
+attrfind = re.compile(
+    r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
+    r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./:;+*%?!&$\(\)_#=~\'"]*))?')
+
+
+# Raised (via SGMLParser.error) whenever the parser hits invalid input.
+class SGMLParseError(RuntimeError):
+    """Exception raised for all parse errors."""
+    pass
+
+
+# SGML parser base class -- find tags and call handler functions.
+# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
+# The dtd is defined by deriving a class which defines methods
+# with special names to handle tags: start_foo and end_foo to handle
+# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
+# (Tags are converted to lower case for this purpose.)  The data
+# between tags is passed to the parser by calling self.handle_data()
+# with some data as argument (the data may be split up in arbitrary
+# chunks).  Entity references are passed by calling
+# self.handle_entityref() with the entity reference as argument.
+
+class SGMLParser(markupbase.ParserBase):
+
+    def __init__(self, verbose=0):
+        """Initialize and reset this instance."""
+        self.verbose = verbose
+        self.reset()
+
+    def reset(self):
+        """Reset this instance. Loses all unprocessed data."""
+        # rawdata buffers input not yet consumed; stack holds the
+        # currently-open tags; lasttag supports the SGML <> shorthand.
+        self.rawdata = ''
+        self.stack = []
+        self.lasttag = '???'
+        self.nomoretags = 0
+        self.literal = 0
+        markupbase.ParserBase.reset(self)
+
+    def setnomoretags(self):
+        """Enter literal mode (CDATA) till EOF.
+
+        Intended for derived classes only.
+        """
+        self.nomoretags = self.literal = 1
+
+    def setliteral(self, *args):
+        """Enter literal mode (CDATA).
+
+        Intended for derived classes only.
+        """
+        self.literal = 1
+
+    def feed(self, data):
+        """Feed some data to the parser.
+
+        Call this as often as you want, with as little or as much text
+        as you want (may include '\n').  (This just saves the text,
+        all the processing is done by goahead().)
+        """
+
+        self.rawdata = self.rawdata + data
+        self.goahead(0)
+
+    def close(self):
+        """Handle the remaining data."""
+        self.goahead(1)
+
+    def error(self, message):
+        raise SGMLParseError(message)
+
+    # Internal -- handle data as far as reasonable.  May leave state
+    # and data to be processed by a subsequent call.  If 'end' is
+    # true, force handling all data as if followed by EOF marker.
+    def goahead(self, end):
+        rawdata = self.rawdata
+        i = 0
+        n = len(rawdata)
+        while i < n:
+            if self.nomoretags:
+                # CDATA-to-EOF mode: everything is plain data.
+                self.handle_data(rawdata[i:n])
+                i = n
+                break
+            # Everything up to the next '&' or '<' is plain data.
+            match = interesting.search(rawdata, i)
+            if match: j = match.start()
+            else: j = n
+            if i < j:
+                self.handle_data(rawdata[i:j])
+            i = j
+            if i == n: break
+            if rawdata[i] == '<':
+                if starttagopen.match(rawdata, i):
+                    if self.literal:
+                        self.handle_data(rawdata[i])
+                        i = i+1
+                        continue
+                    k = self.parse_starttag(i)
+                    if k < 0: break
+                    i = k
+                    continue
+                if rawdata.startswith("</", i):
+                    k = self.parse_endtag(i)
+                    if k < 0: break
+                    i = k
+                    self.literal = 0
+                    continue
+                if self.literal:
+                    if n > (i + 1):
+                        self.handle_data("<")
+                        i = i+1
+                    else:
+                        # incomplete
+                        break
+                    continue
+                if rawdata.startswith("<!--", i):
+                    k = self.parse_comment(i)
+                    if k < 0: break
+                    i = k
+                    continue
+                if rawdata.startswith("<?", i):
+                    k = self.parse_pi(i)
+                    if k < 0: break
+                    i = i+k
+                    continue
+                if rawdata.startswith("<!", i):
+                    # This is some sort of declaration; in "HTML as
+                    # deployed," this should only be the document type
+                    # declaration ("<!DOCTYPE html...>").
+                    k = self.parse_declaration(i)
+                    if k < 0: break
+                    i = k
+                    continue
+            elif rawdata[i] == '&':
+                if self.literal:
+                    self.handle_data(rawdata[i])
+                    i = i+1
+                    continue
+                match = charref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_charref(name)
+                    i = match.end(0)
+                    # The terminating ';' is optional; if the delimiter
+                    # was some other character, don't swallow it.
+                    if rawdata[i-1] != ';': i = i-1
+                    continue
+                match = entityref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_entityref(name)
+                    i = match.end(0)
+                    if rawdata[i-1] != ';': i = i-1
+                    continue
+            else:
+                self.error('neither < nor & ??')
+            # We get here only if incomplete matches but
+            # nothing else
+            match = incomplete.match(rawdata, i)
+            if not match:
+                self.handle_data(rawdata[i])
+                i = i+1
+                continue
+            j = match.end(0)
+            if j == n:
+                break # Really incomplete
+            self.handle_data(rawdata[i:j])
+            i = j
+        # end while
+        if end and i < n:
+            self.handle_data(rawdata[i:n])
+            i = n
+        # Keep whatever could not be processed for the next feed().
+        self.rawdata = rawdata[i:]
+        # XXX if end: check for empty stack
+
+    # Internal -- parse comment, return length or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            self.error('unexpected call to parse_comment()')
+        match = commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.handle_comment(rawdata[i+4: j])
+        return match.end(0)
+
+    # Extensions for the DOCTYPE scanner:
+    _decl_otherchars = '='
+
+    # Internal -- parse processing instr, return length or -1 if not terminated
+    def parse_pi(self, i):
+        rawdata = self.rawdata
+        if rawdata[i:i+2] != '<?':
+            self.error('unexpected call to parse_pi()')
+        match = piclose.search(rawdata, i+2)
+        if not match:
+            return -1
+        j = match.start(0)
+        self.handle_pi(rawdata[i+2: j])
+        j = match.end(0)
+        return j-i
+
+    # Raw text of the most recent complete start tag, for callers that
+    # want to reproduce input exactly; None while none has been seen.
+    __starttag_text = None
+    def get_starttag_text(self):
+        return self.__starttag_text
+
+    # Internal -- handle starttag, return length or -1 if not terminated
+    def parse_starttag(self, i):
+        self.__starttag_text = None
+        start_pos = i
+        rawdata = self.rawdata
+        if shorttagopen.match(rawdata, i):
+            # SGML shorthand: <tag/data/ == <tag>data</tag>
+            # XXX Can data contain &... (entity or char refs)?
+            # XXX Can data contain < or > (tag characters)?
+            # XXX Can there be whitespace before the first /?
+            match = shorttag.match(rawdata, i)
+            if not match:
+                return -1
+            tag, data = match.group(1, 2)
+            self.__starttag_text = '<%s/' % tag
+            tag = tag.lower()
+            k = match.end(0)
+            self.finish_shorttag(tag, data)
+            self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
+            return k
+        # XXX The following should skip matching quotes (' or ")
+        match = endbracket.search(rawdata, i+1)
+        if not match:
+            return -1
+        j = match.start(0)
+        # Now parse the data between i+1 and j into a tag and attrs
+        attrs = []
+        if rawdata[i:i+2] == '<>':
+            # SGML shorthand: <> == <last open tag seen>
+            k = j
+            tag = self.lasttag
+        else:
+            match = tagfind.match(rawdata, i+1)
+            if not match:
+                self.error('unexpected call to parse_starttag')
+            k = match.end(0)
+            tag = rawdata[i+1:k].lower()
+            self.lasttag = tag
+        while k < j:
+            match = attrfind.match(rawdata, k)
+            if not match: break
+            attrname, rest, attrvalue = match.group(1, 2, 3)
+            if not rest:
+                # Attribute with no '=value': value defaults to its name.
+                attrvalue = attrname
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+            attrs.append((attrname.lower(), attrvalue))
+            k = match.end(0)
+        if rawdata[j] == '>':
+            j = j+1
+        self.__starttag_text = rawdata[start_pos:j]
+        self.finish_starttag(tag, attrs)
+        return j
+
+    # Internal -- parse endtag
+    def parse_endtag(self, i):
+        rawdata = self.rawdata
+        match = endbracket.search(rawdata, i+1)
+        if not match:
+            return -1
+        j = match.start(0)
+        tag = rawdata[i+2:j].strip().lower()
+        if rawdata[j] == '>':
+            j = j+1
+        self.finish_endtag(tag)
+        return j
+
+    # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
+    def finish_shorttag(self, tag, data):
+        self.finish_starttag(tag, [])
+        self.handle_data(data)
+        self.finish_endtag(tag)
+
+    # Internal -- finish processing of start tag
+    # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
+    def finish_starttag(self, tag, attrs):
+        # start_<tag> implies a matching end tag is expected (push on
+        # stack); do_<tag> is an open-only tag; otherwise unknown.
+        try:
+            method = getattr(self, 'start_' + tag)
+        except AttributeError:
+            try:
+                method = getattr(self, 'do_' + tag)
+            except AttributeError:
+                self.unknown_starttag(tag, attrs)
+                return -1
+            else:
+                self.handle_starttag(tag, method, attrs)
+                return 0
+        else:
+            self.stack.append(tag)
+            self.handle_starttag(tag, method, attrs)
+            return 1
+
+    # Internal -- finish processing of end tag
+    def finish_endtag(self, tag):
+        if not tag:
+            # Empty end tag </> closes the most recently opened tag.
+            found = len(self.stack) - 1
+            if found < 0:
+                self.unknown_endtag(tag)
+                return
+        else:
+            if tag not in self.stack:
+                # The getattr is only an existence probe: a known
+                # end_<tag> with no matching open tag is "unbalanced";
+                # an unrecognized tag goes to unknown_endtag.
+                try:
+                    method = getattr(self, 'end_' + tag)
+                except AttributeError:
+                    self.unknown_endtag(tag)
+                else:
+                    self.report_unbalanced(tag)
+                return
+            # Find the outermost occurrence of tag; everything opened
+            # after it is implicitly closed below.
+            found = len(self.stack)
+            for i in range(found):
+                if self.stack[i] == tag: found = i
+        while len(self.stack) > found:
+            tag = self.stack[-1]
+            try:
+                method = getattr(self, 'end_' + tag)
+            except AttributeError:
+                method = None
+            if method:
+                self.handle_endtag(tag, method)
+            else:
+                self.unknown_endtag(tag)
+            del self.stack[-1]
+
+    # Overridable -- handle start tag
+    def handle_starttag(self, tag, method, attrs):
+        method(attrs)
+
+    # Overridable -- handle end tag
+    def handle_endtag(self, tag, method):
+        method()
+
+    # Example -- report an unbalanced </...> tag.
+    def report_unbalanced(self, tag):
+        if self.verbose:
+            print '*** Unbalanced </' + tag + '>'
+            print '*** Stack:', self.stack
+
+    def handle_charref(self, name):
+        """Handle character reference, no need to override."""
+        try:
+            n = int(name)
+        except ValueError:
+            self.unknown_charref(name)
+            return
+        # Only 8-bit code points are representable with chr() here.
+        if not 0 <= n <= 255:
+            self.unknown_charref(name)
+            return
+        self.handle_data(chr(n))
+
+    # Definition of entities -- derived classes may override
+    entitydefs = \
+            {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
+
+    def handle_entityref(self, name):
+        """Handle entity references.
+
+        There should be no need to override this method; it can be
+        tailored by setting up the self.entitydefs mapping appropriately.
+        """
+        table = self.entitydefs
+        if table.has_key(name):
+            self.handle_data(table[name])
+        else:
+            self.unknown_entityref(name)
+            return
+
+    # Example -- handle data, should be overridden
+    def handle_data(self, data):
+        pass
+
+    # Example -- handle comment, could be overridden
+    def handle_comment(self, data):
+        pass
+
+    # Example -- handle declaration, could be overridden
+    def handle_decl(self, decl):
+        pass
+
+    # Example -- handle processing instruction, could be overridden
+    def handle_pi(self, data):
+        pass
+
+    # To be overridden -- handlers for unknown objects
+    def unknown_starttag(self, tag, attrs): pass
+    def unknown_endtag(self, tag): pass
+    def unknown_charref(self, ref): pass
+    def unknown_entityref(self, ref): pass
+
+
+class TestSGMLParser(SGMLParser):
+    """Demo subclass that prints every parser event it receives."""
+
+    def __init__(self, verbose=0):
+        # Accumulates data events until flushed (so runs of character
+        # data are reported as one chunk).
+        self.testdata = ""
+        SGMLParser.__init__(self, verbose)
+
+    def handle_data(self, data):
+        self.testdata = self.testdata + data
+        # Flush once the repr would overflow a typical terminal line.
+        if len(`self.testdata`) >= 70:
+            self.flush()
+
+    def flush(self):
+        # Print and clear any buffered character data.
+        data = self.testdata
+        if data:
+            self.testdata = ""
+            print 'data:', `data`
+
+    def handle_comment(self, data):
+        self.flush()
+        r = `data`
+        if len(r) > 68:
+            # Abbreviate long comments: keep both ends.
+            r = r[:32] + '...' + r[-32:]
+        print 'comment:', r
+
+    def unknown_starttag(self, tag, attrs):
+        self.flush()
+        if not attrs:
+            print 'start tag: <' + tag + '>'
+        else:
+            print 'start tag: <' + tag,
+            for name, value in attrs:
+                print name + '=' + '"' + value + '"',
+            print '>'
+
+    def unknown_endtag(self, tag):
+        self.flush()
+        print 'end tag: </' + tag + '>'
+
+    def unknown_entityref(self, ref):
+        self.flush()
+        print '*** unknown entity ref: &' + ref + ';'
+
+    def unknown_charref(self, ref):
+        self.flush()
+        print '*** unknown char ref: &#' + ref + ';'
+
+    def close(self):
+        SGMLParser.close(self)
+        self.flush()
+
+
+def test(args = None):
+    # Command-line driver: parse a file (default test.html, '-' for
+    # stdin); -s uses the silent base SGMLParser instead of the
+    # event-printing TestSGMLParser.
+    import sys
+
+    if not args:
+        args = sys.argv[1:]
+
+    if args and args[0] == '-s':
+        args = args[1:]
+        klass = SGMLParser
+    else:
+        klass = TestSGMLParser
+
+    if args:
+        file = args[0]
+    else:
+        file = 'test.html'
+
+    if file == '-':
+        f = sys.stdin
+    else:
+        try:
+            f = open(file, 'r')
+        except IOError, msg:
+            print file, ":", msg
+            sys.exit(1)
+
+    data = f.read()
+    if f is not sys.stdin:
+        f.close()
+
+    x = klass()
+    # Feed one character at a time to exercise the parser's handling
+    # of arbitrarily-split incremental input.
+    for c in data:
+        x.feed(c)
+    x.close()
+
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/shelve.py b/lib-python/2.2/shelve.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/shelve.py
@@ -0,0 +1,158 @@
+"""Manage shelves of pickled objects.
+
+A "shelf" is a persistent, dictionary-like object.  The difference
+with dbm databases is that the values (not the keys!) in a shelf can
+be essentially arbitrary Python objects -- anything that the "pickle"
+module can handle.  This includes most class instances, recursive data
+types, and objects containing lots of shared sub-objects.  The keys
+are ordinary strings.
+
+To summarize the interface (key is a string, data is an arbitrary
+object):
+
+        import shelve
+        d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
+
+        d[key] = data   # store data at key (overwrites old data if
+                        # using an existing key)
+        data = d[key]   # retrieve data at key (raise KeyError if no
+                        # such key)
+        del d[key]      # delete data stored at key (raises KeyError
+                        # if no such key)
+        flag = d.has_key(key)   # true if the key exists
+        list = d.keys() # a list of all existing keys (slow!)
+
+        d.close()       # close it
+
+Dependent on the implementation, closing a persistent dictionary may
+or may not be necessary to flush changes to disk.
+"""
+
+# Try using cPickle and cStringIO if available.
+
+try:
+    from cPickle import Pickler, Unpickler
+except ImportError:
+    from pickle import Pickler, Unpickler
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+__all__ = ["Shelf","BsdDbShelf","DbfilenameShelf","open"]
+
class Shelf:
    """Base class for shelf implementations.

    Wraps a dictionary-like backing object whose values are pickled
    strings.  See the module's __doc__ string for an overview of the
    interface.
    """

    def __init__(self, dict):
        # The backing store; any object with a dict-like interface works.
        self.dict = dict

    def keys(self):
        return self.dict.keys()

    def __len__(self):
        return len(self.dict)

    def has_key(self, key):
        return self.dict.has_key(key)

    def get(self, key, default=None):
        # Return the default rather than raising KeyError.
        if not self.dict.has_key(key):
            return default
        return self[key]

    def __getitem__(self, key):
        # Unpickle the stored string back into a Python object.
        return Unpickler(StringIO(self.dict[key])).load()

    def __setitem__(self, key, value):
        # Pickle the value and store the resulting string.
        buf = StringIO()
        Pickler(buf).dump(value)
        self.dict[key] = buf.getvalue()

    def __delitem__(self, key):
        del self.dict[key]

    def close(self):
        # Best effort: the backing object may not support close().
        try:
            self.dict.close()
        except:
            pass
        self.dict = 0

    def __del__(self):
        self.close()

    def sync(self):
        # Only meaningful for backends that buffer their writes.
        if hasattr(self.dict, 'sync'):
            self.dict.sync()
+
+
class BsdDbShelf(Shelf):
    """Shelf implementation using the "BSD" db interface.

    Adds methods first(), next(), previous(), last() and
    set_location() that have no counterpart in [g]dbm databases.

    The actual database must be opened using one of the "bsddb"
    modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
    bsddb.rnopen) and passed to the constructor.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict):
        Shelf.__init__(self, dict)

    def _unpickle_pair(self, pair):
        # Cursor methods return (key, pickled-string); decode the value.
        (key, value) = pair
        return (key, Unpickler(StringIO(value)).load())

    def set_location(self, key):
        return self._unpickle_pair(self.dict.set_location(key))

    def next(self):
        return self._unpickle_pair(self.dict.next())

    def previous(self):
        return self._unpickle_pair(self.dict.previous())

    def first(self):
        return self._unpickle_pair(self.dict.first())

    def last(self):
        return self._unpickle_pair(self.dict.last())
+
+
class DbfilenameShelf(Shelf):
    """Shelf implementation using the "anydbm" generic dbm interface.

    Constructed from the filename of the dbm database rather than an
    already-open dictionary.  See the module's __doc__ string for an
    overview of the interface.
    """

    def __init__(self, filename, flag='c'):
        # Import lazily so anydbm is only required when actually used.
        import anydbm
        Shelf.__init__(self, anydbm.open(filename, flag))
+
+
def open(filename, flag='c'):
    """Open a persistent dictionary for reading and writing.

    The argument is the filename for the dbm database.  See the
    module's __doc__ string for an overview of the interface.
    """
    # Delegate to the filename-based shelf implementation.
    shelf = DbfilenameShelf(filename, flag)
    return shelf
diff --git a/lib-python/2.2/shlex.py b/lib-python/2.2/shlex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/shlex.py
@@ -0,0 +1,209 @@
+"""A lexical analyzer class for simple shell-like syntaxes."""
+
+# Module and documentation by Eric S. Raymond, 21 Dec 1998
+# Input stacking and error message cleanup added by ESR, March 2000
+# push_source() and pop_source() made explicit by ESR, January 2001.
+
+import os.path
+import sys
+
+__all__ = ["shlex"]
+
class shlex:
    """A lexical analyzer class for simple shell-like syntaxes.

    self.state tracks the tokenizer mode: ' ' (between tokens),
    'a' (inside a word), a quote character (inside a string), or
    None (end of input).
    """
    def __init__(self, instream=None, infile=None):
        if instream:
            self.instream = instream
            self.infile = infile
        else:
            self.instream = sys.stdin
            self.infile = None
        self.commenters = '#'
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        self.whitespace = ' \t\r\n'
        self.quotes = '\'"'
        self.state = ' '
        self.pushback = []      # tokens pushed back by push_token()
        self.lineno = 1
        self.debug = 0
        self.token = ''
        self.filestack = []     # saved (infile, instream, lineno) frames
        self.source = None      # set to a keyword to enable inclusions
        if self.debug:
            print 'shlex: reading from %s, line %d' \
                  % (self.instream, self.lineno)

    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print "shlex: pushing token " + `tok`
        self.pushback = [tok] + self.pushback

    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        # Save the current source so pop_source() can resume it.
        self.filestack.insert(0, (self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile:
                print 'shlex: pushing to file %s' % (self.infile,)
            else:
                print 'shlex: pushing to stream %s' % (self.instream,)

    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack[0]
        self.filestack = self.filestack[1:]
        if self.debug:
            print 'shlex: popping to %s, line %d' \
                  % (self.instream, self.lineno)
        self.state = ' '

    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            tok = self.pushback[0]
            self.pushback = self.pushback[1:]
            if self.debug >= 1:
                print "shlex: popping token " + `tok`
            return tok
        # No pushback.  Get a token.
        raw = self.read_token()
        # Handle inclusions
        while raw == self.source:
            # The next token names the file to source.
            spec = self.sourcehook(self.read_token())
            if spec:
                (newfile, newstream) = spec
                self.push_source(newstream, newfile)
            raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == "":
            if len(self.filestack) == 0:
                return ""
            else:
                # EOF of an included source: resume the outer one.
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw:
                print "shlex: token=" + `raw`
            else:
                print "shlex: token=EOF"
        return raw

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"
        while 1:
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state", repr(self.state), \
                      "I see character:", repr(nextchar)
            if self.state is None:
                self.token = ''        # past end of file
                break
            elif self.state == ' ':
                # Between tokens.
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # Discard the rest of the comment line.
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar
                else:
                    # Punctuation becomes a one-character token.
                    self.token = nextchar
                    if self.token:
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                # Inside a quoted string; the quotes stay in the token.
                self.token = self.token + nextchar
                if nextchar == self.state:
                    self.state = ' '
                    break
                elif not nextchar:      # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # XXX what error should be raised here?
                    raise ValueError, "No closing quotation"
            elif self.state == 'a':
                # Inside a word token.
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                else:
                    # Punctuation terminates the word; push it back so it
                    # is returned as the next token.
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result

    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]
        # This implements cpp-like semantics for relative-path inclusion.
        if type(self.infile) == type("") and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))

    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if not infile:
            infile = self.infile
        if not lineno:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
+
+
if __name__ == '__main__':
    # Smoke test: tokenize the file named on the command line (or
    # stdin) and print each token until EOF.
    if len(sys.argv) == 1:
        lexer = shlex()
    else:
        file = sys.argv[1]
        lexer = shlex(open(file), file)
    while 1:
        tt = lexer.get_token()
        if tt:
            print "Token: " + repr(tt)
        else:
            break
diff --git a/lib-python/2.2/shutil.py b/lib-python/2.2/shutil.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/shutil.py
@@ -0,0 +1,138 @@
+"""Utility functions for copying files and directory trees.
+
+XXX The functions here don't copy the resource fork or other metadata on Mac.
+
+"""
+
+import os
+import sys
+import stat
+
+__all__ = ["copyfileobj","copyfile","copymode","copystat","copy","copy2",
+           "copytree","rmtree"]
+
def copyfileobj(fsrc, fdst, length=16*1024):
    """copy data from file-like object fsrc to file-like object fdst"""
    # Stream in fixed-size chunks so arbitrarily large files never have
    # to fit in memory at once.
    chunk = fsrc.read(length)
    while chunk:
        fdst.write(chunk)
        chunk = fsrc.read(length)
+
+
def copyfile(src, dst):
    """Copy data from src to dst"""
    # Open both ends; the finally clause guarantees that whatever was
    # successfully opened gets closed again, even on error.
    fsrc = fdst = None
    try:
        fsrc = open(src, 'rb')
        fdst = open(dst, 'wb')
        copyfileobj(fsrc, fdst)
    finally:
        for f in (fdst, fsrc):
            if f:
                f.close()
+
def copymode(src, dst):
    """Copy mode bits from src to dst"""
    # Platforms without os.chmod make this a silent no-op.
    if not hasattr(os, 'chmod'):
        return
    mode = stat.S_IMODE(os.stat(src)[stat.ST_MODE])
    os.chmod(dst, mode)
+
def copystat(src, dst):
    """Copy all stat info (mode bits, atime and mtime) from src to dst"""
    st = os.stat(src)
    # Transfer timestamps first, then permissions; each step is skipped
    # on platforms lacking the corresponding os primitive.
    if hasattr(os, 'utime'):
        os.utime(dst, (st[stat.ST_ATIME], st[stat.ST_MTIME]))
    if hasattr(os, 'chmod'):
        os.chmod(dst, stat.S_IMODE(st[stat.ST_MODE]))
+
+
def copy(src, dst):
    """Copy data and mode bits ("cp src dst").

    The destination may be a directory.

    """
    # When dst is a directory, copy into it under the source's basename.
    real_dst = dst
    if os.path.isdir(real_dst):
        real_dst = os.path.join(real_dst, os.path.basename(src))
    copyfile(src, real_dst)
    copymode(src, real_dst)
+
def copy2(src, dst):
    """Copy data and all stat info ("cp -p src dst").

    The destination may be a directory.

    """
    # When dst is a directory, copy into it under the source's basename.
    real_dst = dst
    if os.path.isdir(real_dst):
        real_dst = os.path.join(real_dst, os.path.basename(src))
    copyfile(src, real_dst)
    copystat(src, real_dst)
+
+
def copytree(src, dst, symlinks=0):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    Errors are reported to standard output.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    XXX Consider this example code rather than the ultimate tool.

    """
    names = os.listdir(src)
    os.mkdir(dst)
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                # Recreate the symlink itself rather than copy its target.
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks)
            else:
                copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            # Report the failure and continue with the remaining entries.
            print "Can't copy %s to %s: %s" % (`srcname`, `dstname`, str(why))
+
def rmtree(path, ignore_errors=0, onerror=None):
    """Recursively delete a directory tree.

    If ignore_errors is set, errors are ignored; otherwise, if
    onerror is set, it is called to handle the error; otherwise, an
    exception is raised.

    """
    # Collect (function, argument) pairs depth-first, then execute them,
    # so files are removed before their containing directories.
    cmdtuples = []
    _build_cmdtuple(path, cmdtuples)
    for cmd in cmdtuples:
        try:
            apply(cmd[0], (cmd[1],))
        except:
            exc = sys.exc_info()
            if ignore_errors:
                pass
            elif onerror:
                # Delegate: onerror(function, failing_path, exc_info).
                onerror(cmd[0], cmd[1], exc)
            else:
                # Re-raise with the failing path appended to the message.
                # NOTE(review): assumes the exception value has at least
                # two args (errno, strerror) -- true for os.error.
                raise exc[0], (exc[1][0], exc[1][1] + ' removing '+cmd[1])
+
+# Helper for rmtree()
# Helper for rmtree()
def _build_cmdtuple(path, cmdtuples):
    # Post-order walk: schedule removal of a directory's contents
    # before the directory itself.  Symlinks to directories are
    # removed, not descended into.
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isdir(full) and not os.path.islink(full):
            _build_cmdtuple(full, cmdtuples)
        else:
            cmdtuples.append((os.remove, full))
    cmdtuples.append((os.rmdir, path))
diff --git a/lib-python/2.2/site-packages/README b/lib-python/2.2/site-packages/README
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/site-packages/README
@@ -0,0 +1,2 @@
+This directory exists so that 3rd party packages can be installed
+here.  Read the source for site.py for more details.
diff --git a/lib-python/2.2/site.py b/lib-python/2.2/site.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/site.py
@@ -0,0 +1,330 @@
+"""Append module search paths for third-party packages to sys.path.
+
+****************************************************************
+* This module is automatically imported during initialization. *
+****************************************************************
+
+In earlier versions of Python (up to 1.5a3), scripts or modules that
+needed to use site-specific modules would place ``import site''
+somewhere near the top of their code.  Because of the automatic
+import, this is no longer necessary (but code that does it still
+works).
+
+This will append site-specific paths to the module search path.  On
+Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
+appends lib/python<version>/site-packages as well as lib/site-python.
+On other platforms (mainly Mac and Windows), it uses just sys.prefix
+(and sys.exec_prefix, if different, but this is unlikely).  The
+resulting directories, if they exist, are appended to sys.path, and
+also inspected for path configuration files.
+
+A path configuration file is a file whose name has the form
+<package>.pth; its contents are additional directories (one per line)
+to be added to sys.path.  Non-existing directories (or
+non-directories) are never added to sys.path; no directory is added to
+sys.path more than once.  Blank lines and lines beginning with
+'#' are skipped. Lines starting with 'import' are executed.
+
+For example, suppose sys.prefix and sys.exec_prefix are set to
+/usr/local and there is a directory /usr/local/lib/python1.5/site-packages
+with three subdirectories, foo, bar and spam, and two path
+configuration files, foo.pth and bar.pth.  Assume foo.pth contains the
+following:
+
+  # foo package configuration
+  foo
+  bar
+  bletch
+
+and bar.pth contains:
+
+  # bar package configuration
+  bar
+
+Then the following directories are added to sys.path, in this order:
+
+  /usr/local/lib/python1.5/site-packages/bar
+  /usr/local/lib/python1.5/site-packages/foo
+
+Note that bletch is omitted because it doesn't exist; bar precedes foo
+because bar.pth comes alphabetically before foo.pth; and spam is
+omitted because it is not mentioned in either path configuration file.
+
+After these path manipulations, an attempt is made to import a module
+named sitecustomize, which can perform arbitrary additional
+site-specific customizations.  If this import fails with an
+ImportError exception, it is silently ignored.
+
+"""
+
+import sys, os
+
+
def makepath(*paths):
    """Join *paths*, absolutize the result, and return it together with
    its case-normalized form (used as a duplicate-detection key)."""
    joined = os.path.abspath(os.path.join(*paths))
    return joined, os.path.normcase(joined)
+
# Make __file__ of already-imported modules absolute so they remain
# resolvable after a later os.chdir().
for m in sys.modules.values():
    if hasattr(m, "__file__") and m.__file__:
        m.__file__ = os.path.abspath(m.__file__)
del m

# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
_dirs_in_sys_path = {}
for dir in sys.path:
    # Filter out paths that don't exist, but leave in the empty string
    # since it's a special case. We also need to special-case the Mac,
    # as file names are allowed on sys.path there.
    if sys.platform != 'mac':
        if dir and not os.path.isdir(dir):
            continue
    else:
        if dir and not os.path.exists(dir):
            continue
    dir, dircase = makepath(dir)
    # Deduplicate on the case-normalized form.
    if not _dirs_in_sys_path.has_key(dircase):
        L.append(dir)
        _dirs_in_sys_path[dircase] = 1
sys.path[:] = L
del dir, L

# Append ./build/lib.<platform> in case we're running in the build dir
# (especially for Guido :-)
if (os.name == "posix" and sys.path and
    os.path.basename(sys.path[-1]) == "Modules"):
    from distutils.util import get_platform
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
    del get_platform, s
+
def _init_pathinfo():
    """(Re)build the table of directories already present on sys.path."""
    global _dirs_in_sys_path
    _dirs_in_sys_path = d = {}
    for entry in sys.path:
        # Skip nonexistent directories but keep the empty string.
        if entry and not os.path.isdir(entry):
            continue
        entry, entrycase = makepath(entry)
        d[entrycase] = 1
+
def addsitedir(sitedir):
    """Add a site directory to sys.path and process its .pth files."""
    global _dirs_in_sys_path
    # Build the duplicate table on demand; remember whether we made it
    # so we can tear it down again afterwards.
    reset = _dirs_in_sys_path is None
    if reset:
        _init_pathinfo()
    sitedir, sitedircase = makepath(sitedir)
    if not _dirs_in_sys_path.has_key(sitedircase):
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    names.sort()
    for name in names:
        if name[-4:] == os.extsep + "pth":
            addpackage(sitedir, name)
    if reset:
        _dirs_in_sys_path = None
+
def addpackage(sitedir, name):
    """Process a .pth file: each line names a directory to append to
    sys.path; '#' lines are comments and 'import' lines are executed."""
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname)
    except IOError:
        return
    while 1:
        dir = f.readline()
        if not dir:
            break
        if dir[0] == '#':
            continue
        if dir.startswith("import"):
            # Executable lines let packages run setup code at startup.
            exec dir
            continue
        if dir[-1] == '\n':
            dir = dir[:-1]
        dir, dircase = makepath(sitedir, dir)
        # Only existing, not-yet-seen directories are added.
        if not _dirs_in_sys_path.has_key(dircase) and os.path.exists(dir):
            sys.path.append(dir)
            _dirs_in_sys_path[dircase] = 1
    if reset:
        # We built the duplicate table ourselves; drop it again.
        _dirs_in_sys_path = None
+
# Add site-packages (and, on Unix, lib/site-python) for sys.prefix and,
# when different, sys.exec_prefix.
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
    prefixes.append(sys.exec_prefix)
for prefix in prefixes:
    if prefix:
        if os.sep == '/':
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python")]
        else:
            # Non-POSIX platforms use a flatter layout under the prefix.
            sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
        for sitedir in sitedirs:
            if os.path.isdir(sitedir):
                addsitedir(sitedir)

# Reset so later addsitedir() calls rebuild the duplicate table.
_dirs_in_sys_path = None


# Define new built-ins 'quit' and 'exit'.
# These are simply strings that display a hint on how to exit.
if os.sep == ':':
    # os.sep ':' means classic Mac OS.
    exit = 'Use Cmd-Q to quit.'
elif os.sep == '\\':
    # os.sep '\\' means Windows/DOS.
    exit = 'Use Ctrl-Z plus Return to exit.'
else:
    exit = 'Use Ctrl-D (i.e. EOF) to exit.'
import __builtin__
__builtin__.quit = __builtin__.exit = exit
del exit
+
+# interactive prompt objects for printing the license text, a list of
+# contributors and the copyright notice.
class _Printer:
    """Interactive prompt object: repr() shows the text (or a hint when
    it is long), and calling the object pages through the full text."""

    # Page size used by the __call__ pager.
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        self.__name = name
        self.__data = data      # fallback text if no file is found
        self.__files = files    # candidate file names to read instead
        self.__dirs = dirs      # directories searched for those files
        self.__lines = None     # lazily filled by __setup()

    def __setup(self):
        # Resolve the text to display: the first readable file from
        # dirs x files wins, otherwise the built-in data string.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for file in self.__files:
                file = os.path.join(dir, file)
                try:
                    fp = open(file)
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # Short texts are shown directly; long ones print a hint.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Page through the text MAXLINES at a time; 'q' quits early.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print self.__lines[i]
            except IndexError:
                # Ran past the last line: done.
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = raw_input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
+
# Install the interactive 'copyright', 'credits' and 'license' objects.
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
    # Jython carries its own credits text.
    __builtin__.credits = _Printer(
        "credits",
        "Jython is maintained by the Jython developers (www.jython.org).")
else:
    __builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Digital Creations and a cast of thousands
for supporting Python development.  See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
    "license", "See http://www.python.org/%.3s/license.html" % sys.version,
    ["LICENSE.txt", "LICENSE"],
    [os.path.join(here, os.pardir), here, os.curdir])
+
+
+# Define new built-in 'help'.
+# This is a wrapper around pydoc.help (with a twist).
+
class _Helper:
    """Interactive 'help' built-in: repr() prints a usage hint, and
    calling the object forwards to pydoc.help."""

    def __repr__(self):
        return ("Type help() for interactive help, "
                "or help(object) for help about object.")

    def __call__(self, *args, **kwds):
        # Import lazily so pydoc is only loaded when help is used.
        import pydoc
        return pydoc.help(*args, **kwds)
+
# Install the interactive 'help' built-in.
__builtin__.help = _Helper()


# Set the string encoding used by the Unicode implementation.  The
# default is 'ascii', but if you're willing to experiment, you can
# change this.

encoding = "ascii" # Default value set by _PyUnicode_Init()

if 0:
    # Enable to support locale aware default string encodings.
    import locale
    loc = locale.getdefaultlocale()
    if loc[1]:
        encoding = loc[1]

if 0:
    # Enable to switch off string to Unicode coercion and implicit
    # Unicode to string conversion.
    encoding = "undefined"

if encoding != "ascii":
    # On Non-Unicode builds this will raise an AttributeError...
    sys.setdefaultencoding(encoding) # Needs Python Unicode build !

#
# Run custom site specific code, if available.
#
try:
    import sitecustomize
except ImportError:
    # No site customization installed -- that's fine.
    pass

#
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization.  The test for presence is needed when
# this module is run as a script, because this code is executed twice.
#
if hasattr(sys, "setdefaultencoding"):
    del sys.setdefaultencoding
+
def _test():
    # Debug helper: print the resulting sys.path, one entry per line.
    print "sys.path = ["
    for dir in sys.path:
        print "    %s," % `dir`
    print "]"

if __name__ == '__main__':
    _test()
diff --git a/lib-python/2.2/smtpd.py b/lib-python/2.2/smtpd.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/smtpd.py
@@ -0,0 +1,543 @@
+#! /usr/bin/env python
+"""An RFC 2821 smtp proxy.
+
+Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
+
+Options:
+
+    --nosetuid
+    -n
+        This program generally tries to setuid `nobody', unless this flag is
+        set.  The setuid call will fail if this program is not run as root (in
+        which case, use this flag).
+
+    --version
+    -V
+        Print the version number and exit.
+
+    --class classname
+    -c classname
+        Use `classname' as the concrete SMTP proxy class.  Uses `SMTPProxy' by
+        default.
+
+    --debug
+    -d
+        Turn on debugging prints.
+
+    --help
+    -h
+        Print this message and exit.
+
+Version: %(__version__)s
+
+If localhost is not given then `localhost' is used, and if localport is not
+given then 8025 is used.  If remotehost is not given then `localhost' is used,
+and if remoteport is not given, then 25 is used.
+"""
+
+
+# Overview:
+#
+# This file implements the minimal SMTP protocol as defined in RFC 821.  It
+# has a hierarchy of classes which implement the backend functionality for the
+# smtpd.  A number of classes are provided:
+#
+#   SMTPServer - the base class for the backend.  Raises NotImplementedError
+#   if you try to use it.
+#
+#   DebuggingServer - simply prints each message it receives on stdout.
+#
+#   PureProxy - Proxies all messages to a real smtpd which does final
+#   delivery.  One known problem with this class is that it doesn't handle
+#   SMTP errors from the backend server at all.  This should be fixed
+#   (contributions are welcome!).
+#
+#   MailmanProxy - An experimental hack to work with GNU Mailman
+#   <www.list.org>.  Using this server as your real incoming smtpd, your
+#   mailhost will automatically recognize and accept mail destined to Mailman
+#   lists when those lists are created.  Every message not destined for a list
+#   gets forwarded to a real backend smtpd, as with PureProxy.  Again, errors
+#   are not handled correctly yet.
+#
+# Please note that this script requires Python 2.0
+#
+# Author: Barry Warsaw <barry at digicool.com>
+#
+# TODO:
+#
+# - support mailbox delivery
+# - alias files
+# - ESMTP
+# - handle error codes from the backend smtpd
+
+import sys
+import os
+import errno
+import getopt
+import time
+import socket
+import asyncore
+import asynchat
+
+__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
+
+program = sys.argv[0]
+__version__ = 'Python SMTP proxy version 0.2'
+
+
+class Devnull:
+    def write(self, msg): pass
+    def flush(self): pass
+
+
+# Debug output is discarded by default; -d/--debug rebinds this to stderr.
+DEBUGSTREAM = Devnull()
+NEWLINE = '\n'
+EMPTYSTRING = ''
+COMMASPACE = ', '
+
+
+
+def usage(code, msg=''):
+    """Print the module usage text (and optional `msg') to stderr, then
+    exit the process with status `code'."""
+    print >> sys.stderr, __doc__ % globals()
+    if msg:
+        print >> sys.stderr, msg
+    sys.exit(code)
+
+
+
+class SMTPChannel(asynchat.async_chat):
+    COMMAND = 0
+    DATA = 1
+
+    def __init__(self, server, conn, addr):
+        asynchat.async_chat.__init__(self, conn)
+        self.__server = server
+        self.__conn = conn
+        self.__addr = addr
+        self.__line = []
+        self.__state = self.COMMAND
+        self.__greeting = 0
+        self.__mailfrom = None
+        self.__rcpttos = []
+        self.__data = ''
+        self.__fqdn = socket.getfqdn()
+        self.__peer = conn.getpeername()
+        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
+        self.push('220 %s %s' % (self.__fqdn, __version__))
+        self.set_terminator('\r\n')
+
+    # Overrides base class for convenience
+    def push(self, msg):
+        asynchat.async_chat.push(self, msg + '\r\n')
+
+    # Implementation of base class abstract method
+    def collect_incoming_data(self, data):
+        self.__line.append(data)
+
+    # Implementation of base class abstract method
+    def found_terminator(self):
+        line = EMPTYSTRING.join(self.__line)
+        print >> DEBUGSTREAM, 'Data:', repr(line)
+        self.__line = []
+        if self.__state == self.COMMAND:
+            if not line:
+                self.push('500 Error: bad syntax')
+                return
+            method = None
+            i = line.find(' ')
+            if i < 0:
+                command = line.upper()
+                arg = None
+            else:
+                command = line[:i].upper()
+                arg = line[i+1:].strip()
+            method = getattr(self, 'smtp_' + command, None)
+            if not method:
+                self.push('502 Error: command "%s" not implemented' % command)
+                return
+            method(arg)
+            return
+        else:
+            if self.__state != self.DATA:
+                self.push('451 Internal confusion')
+                return
+            # Remove extraneous carriage returns and de-transparency according
+            # to RFC 821, Section 4.5.2.
+            data = []
+            for text in line.split('\r\n'):
+                if text and text[0] == '.':
+                    data.append(text[1:])
+                else:
+                    data.append(text)
+            self.__data = NEWLINE.join(data)
+            status = self.__server.process_message(self.__peer,
+                                                   self.__mailfrom,
+                                                   self.__rcpttos,
+                                                   self.__data)
+            self.__rcpttos = []
+            self.__mailfrom = None
+            self.__state = self.COMMAND
+            self.set_terminator('\r\n')
+            if not status:
+                self.push('250 Ok')
+            else:
+                self.push(status)
+
+    # SMTP and ESMTP commands
+    def smtp_HELO(self, arg):
+        if not arg:
+            self.push('501 Syntax: HELO hostname')
+            return
+        if self.__greeting:
+            self.push('503 Duplicate HELO/EHLO')
+        else:
+            self.__greeting = arg
+            self.push('250 %s' % self.__fqdn)
+
+    def smtp_NOOP(self, arg):
+        if arg:
+            self.push('501 Syntax: NOOP')
+        else:
+            self.push('250 Ok')
+
+    def smtp_QUIT(self, arg):
+        # args is ignored
+        self.push('221 Bye')
+        self.close_when_done()
+
+    # factored
+    def __getaddr(self, keyword, arg):
+        address = None
+        keylen = len(keyword)
+        if arg[:keylen].upper() == keyword:
+            address = arg[keylen:].strip()
+            if not address:
+                pass
+            elif address[0] == '<' and address[-1] == '>' and address != '<>':
+                # Addresses can be in the form <person at dom.com> but watch out
+                # for null address, e.g. <>
+                address = address[1:-1]
+        return address
+
+    def smtp_MAIL(self, arg):
+        print >> DEBUGSTREAM, '===> MAIL', arg
+        address = self.__getaddr('FROM:', arg)
+        if not address:
+            self.push('501 Syntax: MAIL FROM:<address>')
+            return
+        if self.__mailfrom:
+            self.push('503 Error: nested MAIL command')
+            return
+        self.__mailfrom = address
+        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
+        self.push('250 Ok')
+
+    def smtp_RCPT(self, arg):
+        print >> DEBUGSTREAM, '===> RCPT', arg
+        if not self.__mailfrom:
+            self.push('503 Error: need MAIL command')
+            return
+        address = self.__getaddr('TO:', arg)
+        if not address:
+            self.push('501 Syntax: RCPT TO: <address>')
+            return
+        self.__rcpttos.append(address)
+        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
+        self.push('250 Ok')
+
+    def smtp_RSET(self, arg):
+        if arg:
+            self.push('501 Syntax: RSET')
+            return
+        # Resets the sender, recipients, and data, but not the greeting
+        self.__mailfrom = None
+        self.__rcpttos = []
+        self.__data = ''
+        self.__state = self.COMMAND
+        self.push('250 Ok')
+
+    def smtp_DATA(self, arg):
+        if not self.__rcpttos:
+            self.push('503 Error: need RCPT command')
+            return
+        if arg:
+            self.push('501 Syntax: DATA')
+            return
+        self.__state = self.DATA
+        self.set_terminator('\r\n.\r\n')
+        self.push('354 End data with <CR><LF>.<CR><LF>')
+
+
+
+class SMTPServer(asyncore.dispatcher):
+    """Async listening socket that spawns an SMTPChannel per inbound
+    connection.  Subclasses must override process_message()."""
+    def __init__(self, localaddr, remoteaddr):
+        self._localaddr = localaddr
+        self._remoteaddr = remoteaddr
+        asyncore.dispatcher.__init__(self)
+        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+        # try to re-use a server port if possible
+        self.set_reuse_addr()
+        self.bind(localaddr)
+        self.listen(5)
+        print >> DEBUGSTREAM, \
+              '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
+            self.__class__.__name__, time.ctime(time.time()),
+            localaddr, remoteaddr)
+
+    def handle_accept(self):
+        conn, addr = self.accept()
+        print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
+        # NOTE(review): the channel registers itself with asyncore's
+        # socket map via async_chat.__init__, so dropping the local
+        # reference here looks intentional -- confirm.
+        channel = SMTPChannel(self, conn, addr)
+
+    # API for "doing something useful with the message"
+    def process_message(self, peer, mailfrom, rcpttos, data):
+        """Override this abstract method to handle messages from the client.
+
+        peer is a tuple containing (ipaddr, port) of the client that made the
+        socket connection to our smtp port.
+
+        mailfrom is the raw address the client claims the message is coming
+        from.
+
+        rcpttos is a list of raw addresses the client wishes to deliver the
+        message to.
+
+        data is a string containing the entire full text of the message,
+        headers (if supplied) and all.  It has been `de-transparencied'
+        according to RFC 821, Section 4.5.2.  In other words, a line
+        containing a `.' followed by other text has had the leading dot
+        removed.
+
+        This function should return None, for a normal `250 Ok' response;
+        otherwise it returns the desired response string in RFC 821 format.
+
+        """
+        raise NotImplementedError
+
+
+
+class DebuggingServer(SMTPServer):
+    """SMTPServer that just prints each received message to stdout."""
+    # Do something with the gathered message
+    def process_message(self, peer, mailfrom, rcpttos, data):
+        inheaders = 1
+        lines = data.split('\n')
+        print '---------- MESSAGE FOLLOWS ----------'
+        for line in lines:
+            # headers first: insert an X-Peer header at the end of the
+            # header block (first empty line ends the headers)
+            if inheaders and not line:
+                print 'X-Peer:', peer[0]
+                inheaders = 0
+            print line
+        print '------------ END MESSAGE ------------'
+
+
+
+class PureProxy(SMTPServer):
+    """Proxy every message to the real smtpd at self._remoteaddr."""
+    def process_message(self, peer, mailfrom, rcpttos, data):
+        lines = data.split('\n')
+        # Look for the last header (the first empty line ends the header
+        # block); insert an X-Peer header just before it
+        i = 0
+        for line in lines:
+            if not line:
+                break
+            i += 1
+        lines.insert(i, 'X-Peer: %s' % peer[0])
+        data = NEWLINE.join(lines)
+        refused = self._deliver(mailfrom, rcpttos, data)
+        # TBD: what to do with refused addresses?
+        print >> DEBUGSTREAM, 'we got some refusals:', refused
+
+    def _deliver(self, mailfrom, rcpttos, data):
+        """Relay the message via smtplib; return a dict mapping each
+        refused recipient to its (code, message) error pair."""
+        import smtplib
+        refused = {}
+        try:
+            s = smtplib.SMTP()
+            s.connect(self._remoteaddr[0], self._remoteaddr[1])
+            try:
+                refused = s.sendmail(mailfrom, rcpttos, data)
+            finally:
+                s.quit()
+        except smtplib.SMTPRecipientsRefused, e:
+            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
+            refused = e.recipients
+        except (socket.error, smtplib.SMTPException), e:
+            print >> DEBUGSTREAM, 'got', e.__class__
+            # All recipients were refused.  If the exception had an associated
+            # error code, use it.  Otherwise, fake it with a non-triggering
+            # exception code.
+            errcode = getattr(e, 'smtp_code', -1)
+            errmsg = getattr(e, 'smtp_error', 'ignore')
+            for r in rcpttos:
+                refused[r] = (errcode, errmsg)
+        return refused
+
+
+
+class MailmanProxy(PureProxy):
+    """Deliver list-bound mail directly to GNU Mailman; proxy the rest."""
+    def process_message(self, peer, mailfrom, rcpttos, data):
+        from cStringIO import StringIO
+        from Mailman import Utils
+        from Mailman import Message
+        from Mailman import MailList
+        # If the message is to a Mailman mailing list, then we'll invoke the
+        # Mailman script directly, without going through the real smtpd.
+        # Otherwise we'll forward it to the local proxy for disposition.
+        listnames = []
+        for rcpt in rcpttos:
+            local = rcpt.lower().split('@')[0]
+            # We allow the following variations on the theme
+            #   listname
+            #   listname-admin
+            #   listname-owner
+            #   listname-request
+            #   listname-join
+            #   listname-leave
+            parts = local.split('-')
+            if len(parts) > 2:
+                continue
+            listname = parts[0]
+            if len(parts) == 2:
+                command = parts[1]
+            else:
+                command = ''
+            if not Utils.list_exists(listname) or command not in (
+                    '', 'admin', 'owner', 'request', 'join', 'leave'):
+                continue
+            listnames.append((rcpt, listname, command))
+        # Remove all list recipients from rcpttos and forward what we're not
+        # going to take care of ourselves.  Linear removal should be fine
+        # since we don't expect a large number of recipients.
+        for rcpt, listname, command in listnames:
+            rcpttos.remove(rcpt)
+        # If there's any non-list destined recipients left, forward to the
+        # real smtpd as PureProxy would
+        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
+        if rcpttos:
+            refused = self._deliver(mailfrom, rcpttos, data)
+            # TBD: what to do with refused addresses?
+            print >> DEBUGSTREAM, 'we got refusals:', refused
+        # Now deliver directly to the list commands
+        mlists = {}
+        s = StringIO(data)
+        msg = Message.Message(s)
+        # These headers are required for the proper execution of Mailman.  All
+        # MTAs in existence seem to add these if the original message doesn't
+        # have them.
+        if not msg.getheader('from'):
+            msg['From'] = mailfrom
+        if not msg.getheader('date'):
+            msg['Date'] = time.ctime(time.time())
+        for rcpt, listname, command in listnames:
+            print >> DEBUGSTREAM, 'sending message to', rcpt
+            # Cache MailList objects so each list is opened only once
+            mlist = mlists.get(listname)
+            if not mlist:
+                mlist = MailList.MailList(listname, lock=0)
+                mlists[listname] = mlist
+            # dispatch on the type of command
+            if command == '':
+                # post
+                msg.Enqueue(mlist, tolist=1)
+            elif command == 'admin':
+                msg.Enqueue(mlist, toadmin=1)
+            elif command == 'owner':
+                msg.Enqueue(mlist, toowner=1)
+            elif command == 'request':
+                msg.Enqueue(mlist, torequest=1)
+            elif command in ('join', 'leave'):
+                # TBD: this is a hack!
+                if command == 'join':
+                    msg['Subject'] = 'subscribe'
+                else:
+                    msg['Subject'] = 'unsubscribe'
+                msg.Enqueue(mlist, torequest=1)
+
+
+
+class Options:
+    # Runtime options filled in by parseargs(); class-level defaults.
+    setuid = 1                # drop privileges to `nobody' after startup
+    classname = 'PureProxy'   # name of the SMTPServer subclass to run
+
+
+
+def parseargs():
+    """Parse sys.argv into an Options instance.
+
+    Exits via usage() on any error.  Fills in localhost/localport and
+    remotehost/remoteport from the positional arguments, applying the
+    documented defaults (localhost:8025 and localhost:25).
+    """
+    global DEBUGSTREAM
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:], 'nVhc:d',
+            ['class=', 'nosetuid', 'version', 'help', 'debug'])
+    except getopt.error, e:
+        usage(1, e)
+
+    options = Options()
+    for opt, arg in opts:
+        if opt in ('-h', '--help'):
+            usage(0)
+        elif opt in ('-V', '--version'):
+            print >> sys.stderr, __version__
+            sys.exit(0)
+        elif opt in ('-n', '--nosetuid'):
+            options.setuid = 0
+        elif opt in ('-c', '--class'):
+            options.classname = arg
+        elif opt in ('-d', '--debug'):
+            DEBUGSTREAM = sys.stderr
+
+    # parse the rest of the arguments
+    if len(args) < 1:
+        localspec = 'localhost:8025'
+        remotespec = 'localhost:25'
+    elif len(args) < 2:
+        localspec = args[0]
+        remotespec = 'localhost:25'
+    elif len(args) < 3:
+        localspec = args[0]
+        remotespec = args[1]
+    else:
+        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
+
+    # split into host/port pairs
+    i = localspec.find(':')
+    if i < 0:
+        usage(1, 'Bad local spec: %s' % localspec)
+    options.localhost = localspec[:i]
+    try:
+        options.localport = int(localspec[i+1:])
+    except ValueError:
+        usage(1, 'Bad local port: %s' % localspec)
+    i = remotespec.find(':')
+    if i < 0:
+        usage(1, 'Bad remote spec: %s' % remotespec)
+    options.remotehost = remotespec[:i]
+    try:
+        options.remoteport = int(remotespec[i+1:])
+    except ValueError:
+        usage(1, 'Bad remote port: %s' % remotespec)
+    return options
+
+
+
+if __name__ == '__main__':
+    options = parseargs()
+    # Become nobody (unless -n/--nosetuid was given)
+    if options.setuid:
+        try:
+            import pwd
+        except ImportError:
+            print >> sys.stderr, \
+                  'Cannot import module "pwd"; try running with -n option.'
+            sys.exit(1)
+        nobody = pwd.getpwnam('nobody')[2]
+        try:
+            os.setuid(nobody)
+        except OSError, e:
+            if e.errno != errno.EPERM: raise
+            print >> sys.stderr, \
+                  'Cannot setuid "nobody"; try running with -n option.'
+            sys.exit(1)
+    # Look the proxy class up by name in this module (-c/--class)
+    import __main__
+    class_ = getattr(__main__, options.classname)
+    proxy = class_((options.localhost, options.localport),
+                   (options.remotehost, options.remoteport))
+    try:
+        asyncore.loop()
+    except KeyboardInterrupt:
+        pass
diff --git a/lib-python/2.2/smtplib.py b/lib-python/2.2/smtplib.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/smtplib.py
@@ -0,0 +1,729 @@
+#! /usr/bin/env python
+
+'''SMTP/ESMTP client class.
+
+This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
+Authentication) and RFC 2487 (Secure SMTP over TLS).
+
+Notes:
+
+Please remember, when doing ESMTP, that the names of the SMTP service
+extensions are NOT the same thing as the option keywords for the RCPT
+and MAIL commands!
+
+Example:
+
+  >>> import smtplib
+  >>> s=smtplib.SMTP("localhost")
+  >>> print s.help()
+  This is Sendmail version 8.8.4
+  Topics:
+      HELO    EHLO    MAIL    RCPT    DATA
+      RSET    NOOP    QUIT    HELP    VRFY
+      EXPN    VERB    ETRN    DSN
+  For more info use "HELP <topic>".
+  To report bugs in the implementation send email to
+      sendmail-bugs at sendmail.org.
+  For local information send email to Postmaster at your site.
+  End of HELP info
+  >>> s.putcmd("vrfy","someone at here")
+  >>> s.getreply()
+  (250, "Somebody OverHere <somebody at here.my.org>")
+  >>> s.quit()
+'''
+
+# Author: The Dragon De Monsyne <dragondm at integral.org>
+# ESMTP support, test code and doc fixes added by
+#     Eric S. Raymond <esr at thyrsus.com>
+# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
+#     by Carey Evans <c.evans at clear.net.nz>, for picky mail servers.
+# RFC 2554 (authentication) support by Gerhard Haering <gerhard at bigfoot.de>.
+#
+# This was modified from the Python 1.5 library HTTP lib.
+
+import socket
+import re
+import rfc822
+import types
+import base64
+import hmac
+
+__all__ = ["SMTPException","SMTPServerDisconnected","SMTPResponseException",
+           "SMTPSenderRefused","SMTPRecipientsRefused","SMTPDataError",
+           "SMTPConnectError","SMTPHeloError","SMTPAuthenticationError",
+           "quoteaddr","quotedata","SMTP"]
+
+SMTP_PORT = 25
+CRLF="\r\n"
+
+# Matches old-style "AUTH=..." EHLO advertisements (case-insensitive).
+OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
+
+def encode_base64(s, eol=None):
+    """Return `s' base64-encoded as a single line (newlines stripped).
+
+    Note: the `eol' parameter is accepted for signature compatibility
+    but ignored by this implementation.
+    """
+    return "".join(base64.encodestring(s).split("\n"))
+
+# Exception classes used by this module.  All are rooted at
+# SMTPException; those that carry a server reply code derive from
+# SMTPResponseException.
+class SMTPException(Exception):
+    """Base class for all exceptions raised by this module."""
+
+class SMTPServerDisconnected(SMTPException):
+    """Not connected to any SMTP server.
+
+    This exception is raised when the server unexpectedly disconnects,
+    or when an attempt is made to use the SMTP instance before
+    connecting it to a server.
+    """
+
+class SMTPResponseException(SMTPException):
+    """Base class for all exceptions that include an SMTP error code.
+
+    These exceptions are generated in some instances when the SMTP
+    server returns an error code.  The error code is stored in the
+    `smtp_code' attribute of the error, and the `smtp_error' attribute
+    is set to the error message.
+    """
+
+    def __init__(self, code, msg):
+        self.smtp_code = code
+        self.smtp_error = msg
+        self.args = (code, msg)
+
+class SMTPSenderRefused(SMTPResponseException):
+    """Sender address refused.
+
+    In addition to the attributes set by on all SMTPResponseException
+    exceptions, this sets `sender' to the string that the SMTP refused.
+    """
+
+    def __init__(self, code, msg, sender):
+        self.smtp_code = code
+        self.smtp_error = msg
+        self.sender = sender
+        self.args = (code, msg, sender)
+
+# Note: derives from SMTPException, not SMTPResponseException -- there
+# is no single server code for a multi-recipient failure.
+class SMTPRecipientsRefused(SMTPException):
+    """All recipient addresses refused.
+
+    The errors for each recipient are accessible through the attribute
+    'recipients', which is a dictionary of exactly the same sort as
+    SMTP.sendmail() returns.
+    """
+
+    def __init__(self, recipients):
+        self.recipients = recipients
+        self.args = ( recipients,)
+
+
+class SMTPDataError(SMTPResponseException):
+    """The SMTP server didn't accept the data."""
+
+class SMTPConnectError(SMTPResponseException):
+    """Error during connection establishment."""
+
+class SMTPHeloError(SMTPResponseException):
+    """The server refused our HELO reply."""
+
+class SMTPAuthenticationError(SMTPResponseException):
+    """Authentication error.
+
+    Most probably the server didn't accept the username/password
+    combination provided.
+    """
+
+class SSLFakeSocket:
+    """A fake socket object that really wraps a SSLObject.
+
+    It only supports what is needed in smtplib.
+    """
+    def __init__(self, realsock, sslobj):
+        self.realsock = realsock
+        self.sslobj = sslobj
+
+    def send(self, str):
+        self.sslobj.write(str)
+        # NOTE(review): assumes sslobj.write() consumes the whole buffer;
+        # the full length is reported as sent -- confirm partial-write
+        # behavior of the SSL object.
+        return len(str)
+
+    sendall = send
+
+    def close(self):
+        # Close the plain socket underneath the SSL layer.
+        self.realsock.close()
+
+class SSLFakeFile:
+    """A fake file like object that really wraps a SSLObject.
+
+    It only supports what is needed in smtplib.
+    """
+    def __init__( self, sslobj):
+        self.sslobj = sslobj
+
+    def readline(self):
+        str = ""
+        chr = None
+        while chr != "\n":
+            chr = self.sslobj.read(1)
+            str += chr
+        return str
+
+    def close(self):
+        pass
+
+def quoteaddr(addr):
+    """Quote a subset of the email addresses defined by RFC 821.
+
+    Should be able to handle anything rfc822.parseaddr can handle.
+    Always returns the result wrapped in angle brackets.
+    """
+    m = (None, None)
+    try:
+        m=rfc822.parseaddr(addr)[1]
+    except AttributeError:
+        # addr was not string-like; leave m as the sentinel and punt below.
+        pass
+    if m == (None, None): # Indicates parse failure or AttributeError
+        #something weird here.. punt -ddm
+        return "<%s>" % addr
+    else:
+        return "<%s>" % m
+
+def quotedata(data):
+    """Quote data for email.
+
+    Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
+    Internet CRLF end-of-line.
+    """
+    return re.sub(r'(?m)^\.', '..',
+        re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
+
+
+class SMTP:
+    """This class manages a connection to an SMTP or ESMTP server.
+    SMTP Objects:
+        SMTP objects have the following attributes:
+            helo_resp
+                This is the message given by the server in response to the
+                most recent HELO command.
+
+            ehlo_resp
+                This is the message given by the server in response to the
+                most recent EHLO command. This is usually multiline.
+
+            does_esmtp
+                This is a True value _after you do an EHLO command_, if the
+                server supports ESMTP.
+
+            esmtp_features
+                This is a dictionary, which, if the server supports ESMTP,
+                will _after you do an EHLO command_, contain the names of the
+                SMTP service extensions this server supports, and their
+                parameters (if any).
+
+                Note, all extension names are mapped to lower case in the
+                dictionary.
+
+        See each method's docstrings for details.  In general, there is a
+        method of the same name to perform each SMTP command.  There is also a
+        method called 'sendmail' that will do an entire mail transaction.
+        """
+    debuglevel = 0
+    file = None
+    helo_resp = None
+    ehlo_resp = None
+    does_esmtp = 0
+
+    def __init__(self, host = '', port = 0):
+        """Initialize a new instance.
+
+        If specified, `host' is the name of the remote host to which to
+        connect.  If specified, `port' specifies the port to which to connect.
+        By default, smtplib.SMTP_PORT is used.  An SMTPConnectError is raised
+        if the specified `host' doesn't respond correctly.
+
+        """
+        self.esmtp_features = {}
+        if host:
+            (code, msg) = self.connect(host, port)
+            if code != 220:
+                raise SMTPConnectError(code, msg)
+
+    def set_debuglevel(self, debuglevel):
+        """Set the debug output level.
+
+        A non-false value results in debug messages for connection and for all
+        messages sent to and received from the server.
+
+        """
+        self.debuglevel = debuglevel
+
+    def connect(self, host='localhost', port = 0):
+        """Connect to a host on a given port.
+
+        If the hostname ends with a colon (`:') followed by a number, and
+        there is no port specified, that suffix will be stripped off and the
+        number interpreted as the port number to use.
+
+        Note: This method is automatically invoked by __init__, if a host is
+        specified during instantiation.
+
+        """
+        if not port and (host.find(':') == host.rfind(':')):
+            i = host.rfind(':')
+            if i >= 0:
+                host, port = host[:i], host[i+1:]
+                try: port = int(port)
+                except ValueError:
+                    raise socket.error, "nonnumeric port"
+        if not port: port = SMTP_PORT
+        if self.debuglevel > 0: print 'connect:', (host, port)
+        msg = "getaddrinfo returns an empty list"
+        self.sock = None
+        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            try:
+                self.sock = socket.socket(af, socktype, proto)
+                if self.debuglevel > 0: print 'connect:', (host, port)
+                self.sock.connect(sa)
+            except socket.error, msg:
+                if self.debuglevel > 0: print 'connect fail:', (host, port)
+                if self.sock:
+                    self.sock.close()
+                self.sock = None
+                continue
+            break
+        if not self.sock:
+            raise socket.error, msg
+        (code, msg) = self.getreply()
+        if self.debuglevel > 0: print "connect:", msg
+        return (code, msg)
+
+    def send(self, str):
+        """Send `str' to the server."""
+        if self.debuglevel > 0: print 'send:', `str`
+        if self.sock:
+            try:
+                self.sock.sendall(str)
+            except socket.error:
+                self.close()
+                raise SMTPServerDisconnected('Server not connected')
+        else:
+            raise SMTPServerDisconnected('please run connect() first')
+
+    def putcmd(self, cmd, args=""):
+        """Send a command to the server."""
+        if args == "":
+            str = '%s%s' % (cmd, CRLF)
+        else:
+            str = '%s %s%s' % (cmd, args, CRLF)
+        self.send(str)
+
+    def getreply(self):
+        """Get a reply from the server.
+
+        Returns a tuple consisting of:
+
+          - server response code (e.g. '250', or such, if all goes well)
+            Note: returns -1 if it can't read response code.
+
+          - server response string corresponding to response code (multiline
+            responses are converted to a single, multiline string).
+
+        Raises SMTPServerDisconnected if end-of-file is reached.
+        """
+        resp=[]
+        if self.file is None:
+            # Lazily wrap the socket in a buffered file object.
+            self.file = self.sock.makefile('rb')
+        while 1:
+            line = self.file.readline()
+            if line == '':
+                self.close()
+                raise SMTPServerDisconnected("Connection unexpectedly closed")
+            if self.debuglevel > 0: print 'reply:', `line`
+            # Reply text starts after the "NNN " / "NNN-" prefix.
+            resp.append(line[4:].strip())
+            code=line[:3]
+            # Check that the error code is syntactically correct.
+            # Don't attempt to read a continuation line if it is broken.
+            try:
+                errcode = int(code)
+            except ValueError:
+                errcode = -1
+                break
+            # Check if multiline response: "NNN-" means more lines follow.
+            if line[3:4]!="-":
+                break
+
+        errmsg = "\n".join(resp)
+        if self.debuglevel > 0:
+            print 'reply: retcode (%s); Msg: %s' % (errcode,errmsg)
+        return errcode, errmsg
+
+    def docmd(self, cmd, args=""):
+        """Send a command, and return its response code."""
+        self.putcmd(cmd,args)
+        return self.getreply()
+
+    # std smtp commands
+    def helo(self, name=''):
+        """SMTP 'helo' command.
+        Hostname to send for this command defaults to the FQDN of the local
+        host.
+        """
+        if name:
+            self.putcmd("helo", name)
+        else:
+            self.putcmd("helo", socket.getfqdn())
+        (code,msg)=self.getreply()
+        self.helo_resp=msg
+        return (code,msg)
+
+    def ehlo(self, name=''):
+        """ SMTP 'ehlo' command.
+        Hostname to send for this command defaults to the FQDN of the local
+        host.
+        """
+        # Reset any features learned from a previous EHLO on this session.
+        self.esmtp_features = {}
+        if name:
+            self.putcmd("ehlo", name)
+        else:
+            self.putcmd("ehlo", socket.getfqdn())
+        (code,msg)=self.getreply()
+        # According to RFC1869 some (badly written)
+        # MTA's will disconnect on an ehlo. Toss an exception if
+        # that happens -ddm
+        if code == -1 and len(msg) == 0:
+            self.close()
+            raise SMTPServerDisconnected("Server not connected")
+        self.ehlo_resp=msg
+        if code != 250:
+            return (code,msg)
+        self.does_esmtp=1
+        #parse the ehlo response -ddm
+        resp=self.ehlo_resp.split('\n')
+        # The first line is the server greeting, not a feature; skip it.
+        del resp[0]
+        for each in resp:
+            # To be able to communicate with as many SMTP servers as possible,
+            # we have to take the old-style auth advertisement into account,
+            # because:
+            # 1) Else our SMTP feature parser gets confused.
+            # 2) There are some servers that only advertise the auth methods we
+            #    support using the old style.
+            auth_match = OLDSTYLE_AUTH.match(each)
+            if auth_match:
+                # This doesn't remove duplicates, but that's no problem
+                self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+                        + " " + auth_match.groups(0)[0]
+                continue
+
+            # RFC 1869 requires a space between ehlo keyword and parameters.
+            # It's actually stricter, in that only spaces are allowed between
+            # parameters, but were not going to check for that here.  Note
+            # that the space isn't present if there are no parameters.
+            m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*)',each)
+            if m:
+                # Feature keywords are stored lower-cased (see has_extn()).
+                feature=m.group("feature").lower()
+                params=m.string[m.end("feature"):].strip()
+                if feature == "auth":
+                    # AUTH may appear both old- and new-style; append params.
+                    self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+                            + " " + params
+                else:
+                    self.esmtp_features[feature]=params
+        return (code,msg)
+
+    def has_extn(self, opt):
+        """Does the server support a given SMTP service extension?"""
+        # Feature names were stored lower-cased by ehlo().
+        return self.esmtp_features.has_key(opt.lower())
+
+    def help(self, args=''):
+        """SMTP 'help' command.
+        Returns help text from server."""
+        self.putcmd("help", args)
+        return self.getreply()
+
+    def rset(self):
+        """SMTP 'rset' command -- resets session."""
+        return self.docmd("rset")
+
+    def noop(self):
+        """SMTP 'noop' command -- doesn't do anything :>"""
+        return self.docmd("noop")
+
+    def mail(self,sender,options=[]):
+        """SMTP 'mail' command -- begins mail xfer session."""
+        # NOTE(review): mutable default argument; harmless here because the
+        # list is only read, never mutated.
+        optionlist = ''
+        # ESMTP options are only sent if the server advertised ESMTP support.
+        if options and self.does_esmtp:
+            optionlist = ' ' + ' '.join(options)
+        self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender) ,optionlist))
+        return self.getreply()
+
+    def rcpt(self,recip,options=[]):
+        """SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
+        # NOTE(review): mutable default argument; harmless, list is read-only.
+        optionlist = ''
+        if options and self.does_esmtp:
+            optionlist = ' ' + ' '.join(options)
+        self.putcmd("rcpt","TO:%s%s" % (quoteaddr(recip),optionlist))
+        return self.getreply()
+
+    def data(self,msg):
+        """SMTP 'DATA' command -- sends message data to server.
+
+        Automatically quotes lines beginning with a period per rfc821.
+        Raises SMTPDataError if there is an unexpected reply to the
+        DATA command; the return value from this method is the final
+        response code received when the all data is sent.
+        """
+        self.putcmd("data")
+        (code,repl)=self.getreply()
+        if self.debuglevel >0 : print "data:", (code,repl)
+        # 354 == 'Start mail input'; anything else means DATA was refused.
+        if code != 354:
+            raise SMTPDataError(code,repl)
+        else:
+            q = quotedata(msg)
+            # Terminate the data with CRLF '.' CRLF as required by the protocol.
+            if q[-2:] != CRLF:
+                q = q + CRLF
+            q = q + "." + CRLF
+            self.send(q)
+            (code,msg)=self.getreply()
+            if self.debuglevel >0 : print "data:", (code,msg)
+            return (code,msg)
+
+    def verify(self, address):
+        """SMTP 'verify' command -- checks for address validity."""
+        self.putcmd("vrfy", quoteaddr(address))
+        return self.getreply()
+    # a.k.a.
+    vrfy=verify
+
+    def expn(self, address):
+        """SMTP 'expn' command -- expands a mailing-list address."""
+        self.putcmd("expn", quoteaddr(address))
+        return self.getreply()
+
+    # some useful methods
+
+    def login(self, user, password):
+        """Log in on an SMTP server that requires authentication.
+
+        The arguments are:
+            - user:     The user name to authenticate with.
+            - password: The password for the authentication.
+
+        If there has been no previous EHLO or HELO command this session, this
+        method tries ESMTP EHLO first.
+
+        This method will return normally if the authentication was successful.
+
+        This method may raise the following exceptions:
+
+         SMTPHeloError            The server didn't reply properly to
+                                  the helo greeting.
+         SMTPAuthenticationError  The server didn't accept the username/
+                                  password combination.
+         SMTPException            No suitable authentication method was
+                                  found.
+        """
+
+        def encode_cram_md5(challenge, user, password):
+            # CRAM-MD5: answer = user + " " + HMAC-MD5(password, challenge).
+            challenge = base64.decodestring(challenge)
+            response = user + " " + hmac.HMAC(password, challenge).hexdigest()
+            return encode_base64(response, eol="")
+
+        def encode_plain(user, password):
+            # AUTH PLAIN payload: authzid NUL authcid NUL password.
+            return encode_base64("%s\0%s\0%s" % (user, user, password), eol="")
+
+
+        AUTH_PLAIN = "PLAIN"
+        AUTH_CRAM_MD5 = "CRAM-MD5"
+        AUTH_LOGIN = "LOGIN"
+
+        # Make sure we have greeted the server so esmtp_features is populated.
+        if self.helo_resp is None and self.ehlo_resp is None:
+            if not (200 <= self.ehlo()[0] <= 299):
+                (code, resp) = self.helo()
+                if not (200 <= code <= 299):
+                    raise SMTPHeloError(code, resp)
+
+        if not self.has_extn("auth"):
+            raise SMTPException("SMTP AUTH extension not supported by server.")
+
+        # Authentication methods the server supports:
+        authlist = self.esmtp_features["auth"].split()
+
+        # List of authentication methods we support: from preferred to
+        # less preferred methods. Except for the purpose of testing the weaker
+        # ones, we prefer stronger methods like CRAM-MD5:
+        preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
+
+        # Determine the authentication method we'll use
+        authmethod = None
+        for method in preferred_auths:
+            if method in authlist:
+                authmethod = method
+                break
+
+        if authmethod == AUTH_CRAM_MD5:
+            (code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
+            if code == 503:
+                # 503 == 'Error: already authenticated'
+                return (code, resp)
+            # resp is the base64-encoded server challenge.
+            (code, resp) = self.docmd(encode_cram_md5(resp, user, password))
+        elif authmethod == AUTH_PLAIN:
+            (code, resp) = self.docmd("AUTH",
+                AUTH_PLAIN + " " + encode_plain(user, password))
+        elif authmethod == AUTH_LOGIN:
+            # LOGIN is a two-step exchange: username first, then password.
+            (code, resp) = self.docmd("AUTH",
+                "%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
+            if code != 334:
+                raise SMTPAuthenticationError(code, resp)
+            (code, resp) = self.docmd(encode_base64(password, eol=""))
+        elif authmethod == None:
+            raise SMTPException("No suitable authentication method found.")
+        if code not in [235, 503]:
+            # 235 == 'Authentication successful'
+            # 503 == 'Error: already authenticated'
+            raise SMTPAuthenticationError(code, resp)
+        return (code, resp)
+
+    def starttls(self, keyfile = None, certfile = None):
+        """Puts the connection to the SMTP server into TLS mode.
+
+        If the server supports TLS, this will encrypt the rest of the SMTP
+        session. If you provide the keyfile and certfile parameters,
+        the identity of the SMTP server and client can be checked. This,
+        however, depends on whether the socket module really checks the
+        certificates.
+        """
+        (resp, reply) = self.docmd("STARTTLS")
+        # 220 == server is ready to start TLS negotiation.
+        if resp == 220:
+            sslobj = socket.ssl(self.sock, keyfile, certfile)
+            # Wrap the raw socket/file so the rest of the session uses TLS.
+            self.sock = SSLFakeSocket(self.sock, sslobj)
+            self.file = SSLFakeFile(sslobj)
+        return (resp, reply)
+
+    def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
+                 rcpt_options=[]):
+        """This command performs an entire mail transaction.
+
+        The arguments are:
+            - from_addr    : The address sending this mail.
+            - to_addrs     : A list of addresses to send this mail to.  A bare
+                             string will be treated as a list with 1 address.
+            - msg          : The message to send.
+            - mail_options : List of ESMTP options (such as 8bitmime) for the
+                             mail command.
+            - rcpt_options : List of ESMTP options (such as DSN commands) for
+                             all the rcpt commands.
+
+        If there has been no previous EHLO or HELO command this session, this
+        method tries ESMTP EHLO first.  If the server does ESMTP, message size
+        and each of the specified options will be passed to it.  If EHLO
+        fails, HELO will be tried and ESMTP options suppressed.
+
+        This method will return normally if the mail is accepted for at least
+        one recipient.  It returns a dictionary, with one entry for each
+        recipient that was refused.  Each entry contains a tuple of the SMTP
+        error code and the accompanying error message sent by the server.
+
+        This method may raise the following exceptions:
+
+         SMTPHeloError          The server didn't reply properly to
+                                the helo greeting.
+         SMTPRecipientsRefused  The server rejected ALL recipients
+                                (no mail was sent).
+         SMTPSenderRefused      The server didn't accept the from_addr.
+         SMTPDataError          The server replied with an unexpected
+                                error code (other than a refusal of
+                                a recipient).
+
+        Note: the connection will be open even after an exception is raised.
+
+        Example:
+
+         >>> import smtplib
+         >>> s=smtplib.SMTP("localhost")
+         >>> tolist=["one at one.org","two at two.org","three at three.org","four at four.org"]
+         >>> msg = '''\\
+         ... From: Me at my.org
+         ... Subject: testin'...
+         ...
+         ... This is a test '''
+         >>> s.sendmail("me at my.org",tolist,msg)
+         { "three at three.org" : ( 550 ,"User unknown" ) }
+         >>> s.quit()
+
+        In the above example, the message was accepted for delivery to three
+        of the four addresses, and one was rejected, with the error code
+        550.  If all addresses are accepted, then the method will return an
+        empty dictionary.
+
+        """
+        # Greet the server if this session hasn't done so yet (EHLO preferred).
+        if self.helo_resp is None and self.ehlo_resp is None:
+            if not (200 <= self.ehlo()[0] <= 299):
+                (code,resp) = self.helo()
+                if not (200 <= code <= 299):
+                    raise SMTPHeloError(code, resp)
+        esmtp_opts = []
+        if self.does_esmtp:
+            # Hmmm? what's this? -ddm
+            # self.esmtp_features['7bit']=""
+            if self.has_extn('size'):
+                esmtp_opts.append("size=" + `len(msg)`)
+            for option in mail_options:
+                esmtp_opts.append(option)
+
+        (code,resp) = self.mail(from_addr, esmtp_opts)
+        if code != 250:
+            # Reset the transaction before reporting the refused sender.
+            self.rset()
+            raise SMTPSenderRefused(code, resp, from_addr)
+        senderrs={}
+        if isinstance(to_addrs, types.StringTypes):
+            to_addrs = [to_addrs]
+        for each in to_addrs:
+            (code,resp)=self.rcpt(each, rcpt_options)
+            # 250 == accepted, 251 == will forward; anything else is a refusal.
+            if (code != 250) and (code != 251):
+                senderrs[each]=(code,resp)
+        if len(senderrs)==len(to_addrs):
+            # the server refused all our recipients
+            self.rset()
+            raise SMTPRecipientsRefused(senderrs)
+        (code,resp) = self.data(msg)
+        if code != 250:
+            self.rset()
+            raise SMTPDataError(code, resp)
+        #if we got here then somebody got our mail
+        return senderrs
+
+
+    def close(self):
+        """Close the connection to the SMTP server."""
+        # Close the buffered file wrapper before the underlying socket.
+        if self.file:
+            self.file.close()
+        self.file = None
+        if self.sock:
+            self.sock.close()
+        self.sock = None
+
+
+    def quit(self):
+        """Terminate the SMTP session."""
+        # Politely say QUIT, then tear down the connection.
+        self.docmd("quit")
+        self.close()
+
+
+# Test the sendmail method, which tests most of the others.
+# Note: This always sends to localhost.
+if __name__ == '__main__':
+    import sys
+
+    def prompt(prompt):
+        # Ask on stdout, read one stripped line from stdin.
+        sys.stdout.write(prompt + ": ")
+        return sys.stdin.readline().strip()
+
+    fromaddr = prompt("From")
+    toaddrs  = prompt("To").split(',')
+    print "Enter message, end with ^D:"
+    msg = ''
+    # Read message body until EOF (^D) on stdin.
+    while 1:
+        line = sys.stdin.readline()
+        if not line:
+            break
+        msg = msg + line
+    print "Message length is " + `len(msg)`
+
+    server = SMTP('localhost')
+    server.set_debuglevel(1)
+    server.sendmail(fromaddr, toaddrs, msg)
+    server.quit()
diff --git a/lib-python/2.2/sndhdr.py b/lib-python/2.2/sndhdr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sndhdr.py
@@ -0,0 +1,228 @@
+"""Routines to help recognizing sound files.
+
+Function whathdr() recognizes various types of sound file headers.
+It understands almost all headers that SOX can decode.
+
+The return tuple contains the following items, in this order:
+- file type (as SOX understands it)
+- sampling rate (0 if unknown or hard to decode)
+- number of channels (0 if unknown or hard to decode)
+- number of frames in the file (-1 if unknown or hard to decode)
+- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
+
+If the file doesn't have a recognizable type, it returns None.
+If the file can't be opened, IOError is raised.
+
+To compute the total time, divide the number of frames by the
+sampling rate (a frame contains a sample for each channel).
+
+Function what() calls whathdr().  (It used to also use some
+heuristics for raw data, but this doesn't work very well.)
+
+Finally, the function test() is a simple main program that calls
+what() for all files mentioned on the argument list.  For directory
+arguments it calls what() for all files in that directory.  Default
+argument is "." (testing all files in the current directory).  The
+option -r tells it to recurse down directories found inside
+explicitly given directories.
+"""
+
+# The file structure is top-down except that the test program and its
+# subroutine come last.
+
+__all__ = ["what","whathdr"]
+
+def what(filename):
+    """Guess the type of a sound file"""
+    # Delegates entirely to whathdr(); kept for API symmetry with imghdr.
+    res = whathdr(filename)
+    return res
+
+
+def whathdr(filename):
+    """Recognize sound headers"""
+    # NOTE(review): the file handle is never closed explicitly; it is
+    # reclaimed only when `f` goes out of scope.
+    f = open(filename, 'rb')
+    # 512 bytes is enough header for every recognizer in `tests`.
+    h = f.read(512)
+    for tf in tests:
+        res = tf(h, f)
+        if res:
+            return res
+    return None
+
+
+#-----------------------------------#
+# Subroutines per sound header type #
+#-----------------------------------#
+
+tests = []
+
+def test_aifc(h, f):
+    """Recognize AIFF/AIFC files by their IFF 'FORM' header."""
+    import aifc
+    if h[:4] != 'FORM':
+        return None
+    if h[8:12] == 'AIFC':
+        fmt = 'aifc'
+    elif h[8:12] == 'AIFF':
+        fmt = 'aiff'
+    else:
+        return None
+    # Rewind so the aifc module can re-parse the header from the start.
+    f.seek(0)
+    try:
+        a = aifc.openfp(f, 'r')
+    except (EOFError, aifc.Error):
+        return None
+    return (fmt, a.getframerate(), a.getnchannels(), \
+            a.getnframes(), 8*a.getsampwidth())
+
+tests.append(test_aifc)
+
+
+def test_au(h, f):
+    """Recognize Sun/NeXT .au files (big- or little-endian magic)."""
+    # NOTE: the parameter `f` is rebound here to the byte-order reader
+    # function; the file object is not used by this recognizer.
+    if h[:4] == '.snd':
+        f = get_long_be
+    elif h[:4] in ('\0ds.', 'dns.'):
+        f = get_long_le
+    else:
+        return None
+    type = 'au'
+    hdr_size = f(h[4:8])
+    data_size = f(h[8:12])
+    encoding = f(h[12:16])
+    rate = f(h[16:20])
+    nchannels = f(h[20:24])
+    sample_size = 1 # default
+    # Encoding codes: 1 == 8-bit U-LAW, 2 == 8-bit linear, 3 == 16-bit linear.
+    if encoding == 1:
+        sample_bits = 'U'
+    elif encoding == 2:
+        sample_bits = 8
+    elif encoding == 3:
+        sample_bits = 16
+        sample_size = 2
+    else:
+        sample_bits = '?'
+    frame_size = sample_size * nchannels
+    return type, rate, nchannels, data_size/frame_size, sample_bits
+
+tests.append(test_au)
+
+
+def test_hcom(h, f):
+    """Recognize Macintosh HCOM files ('FSSD' + 'HCOM' markers)."""
+    if h[65:69] != 'FSSD' or h[128:132] != 'HCOM':
+        return None
+    # The stored divisor scales down from the 22050 Hz base rate.
+    divisor = get_long_be(h[128+16:128+20])
+    return 'hcom', 22050/divisor, 1, -1, 8
+
+tests.append(test_hcom)
+
+
+def test_voc(h, f):
+    """Recognize Creative Labs .voc files by their text signature."""
+    if h[:20] != 'Creative Voice File\032':
+        return None
+    # Offset of the first sound block, stored little-endian at byte 20.
+    sbseek = get_short_le(h[20:22])
+    rate = 0
+    if 0 <= sbseek < 500 and h[sbseek] == '\1':
+        # Sample rate is encoded as 256 - (1000000 / rate).
+        ratecode = ord(h[sbseek+4])
+        rate = int(1000000.0 / (256 - ratecode))
+    return 'voc', rate, 1, -1, 8
+
+tests.append(test_voc)
+
+
+def test_wav(h, f):
+    """Recognize RIFF/WAVE files by their chunk headers."""
+    # 'RIFF' <len> 'WAVE' 'fmt ' <len>
+    if h[:4] != 'RIFF' or h[8:12] != 'WAVE' or h[12:16] != 'fmt ':
+        return None
+    style = get_short_le(h[20:22])
+    nchannels = get_short_le(h[22:24])
+    rate = get_long_le(h[24:28])
+    sample_bits = get_short_le(h[34:36])
+    # Frame count is not derived here, hence -1.
+    return 'wav', rate, nchannels, -1, sample_bits
+
+tests.append(test_wav)
+
+
+def test_8svx(h, f):
+    """Recognize Amiga IFF/8SVX files."""
+    if h[:4] != 'FORM' or h[8:12] != '8SVX':
+        return None
+    # Should decode it to get #channels -- assume always 1
+    return '8svx', 0, 1, 0, 8
+
+tests.append(test_8svx)
+
+
+def test_sndt(h, f):
+    """Recognize SndTool files by their 'SOUND' signature."""
+    if h[:5] == 'SOUND':
+        nsamples = get_long_le(h[8:12])
+        rate = get_short_le(h[20:22])
+        return 'sndt', rate, 1, nsamples, 8
+
+tests.append(test_sndt)
+
+
+def test_sndr(h, f):
+    """Recognize Sounder files: two zero bytes then a plausible rate."""
+    if h[:2] == '\0\0':
+        rate = get_short_le(h[2:4])
+        # Only accept rates in a realistic range; the magic alone is weak.
+        if 4000 <= rate <= 25000:
+            return 'sndr', rate, 1, -1, 8
+
+tests.append(test_sndr)
+
+
+#---------------------------------------------#
+# Subroutines to extract numbers from strings #
+#---------------------------------------------#
+
+def get_long_be(s):
+    """Decode a 4-byte big-endian unsigned integer from string s."""
+    return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
+
+def get_long_le(s):
+    """Decode a 4-byte little-endian unsigned integer from string s."""
+    return (ord(s[3])<<24) | (ord(s[2])<<16) | (ord(s[1])<<8) | ord(s[0])
+
+def get_short_be(s):
+    """Decode a 2-byte big-endian unsigned integer from string s."""
+    return (ord(s[0])<<8) | ord(s[1])
+
+def get_short_le(s):
+    """Decode a 2-byte little-endian unsigned integer from string s."""
+    return (ord(s[1])<<8) | ord(s[0])
+
+
+#--------------------#
+# Small test program #
+#--------------------#
+
+def test():
+    """Command-line driver: run what() over the argument files.
+
+    A leading -r option makes directory arguments recursive.
+    """
+    import sys
+    recursive = 0
+    if sys.argv[1:] and sys.argv[1] == '-r':
+        del sys.argv[1:2]
+        recursive = 1
+    try:
+        if sys.argv[1:]:
+            testall(sys.argv[1:], recursive, 1)
+        else:
+            # No arguments: test every file in the current directory.
+            testall(['.'], recursive, 1)
+    except KeyboardInterrupt:
+        sys.stderr.write('\n[Interrupted]\n')
+        sys.exit(1)
+
+def testall(list, recursive, toplevel):
+    """Run what() on each filename; descend into directories when
+    recursive is true or the directory was given explicitly (toplevel)."""
+    import sys
+    import os
+    for filename in list:
+        if os.path.isdir(filename):
+            print filename + '/:',
+            if recursive or toplevel:
+                print 'recursing down:'
+                import glob
+                names = glob.glob(os.path.join(filename, '*'))
+                # Recurse with toplevel=0 so -r controls deeper levels.
+                testall(names, recursive, 0)
+            else:
+                print '*** directory (use -r) ***'
+        else:
+            print filename + ':',
+            sys.stdout.flush()
+            try:
+                print what(filename)
+            except IOError:
+                print '*** not found ***'
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/socket.py b/lib-python/2.2/socket.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/socket.py
@@ -0,0 +1,256 @@
+# Wrapper module for _socket, providing some additional facilities
+# implemented in Python.
+
+"""\
+This module provides socket operations and some related functions.
+On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
+On other systems, it only supports IP. Functions specific for a
+socket are available as methods of the socket object.
+
+Functions:
+
+socket() -- create a new socket object
+fromfd() -- create a socket object from an open file descriptor [*]
+gethostname() -- return the current hostname
+gethostbyname() -- map a hostname to its IP number
+gethostbyaddr() -- map an IP number or hostname to DNS info
+getservbyname() -- map a service name and a protocol name to a port number
+getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
+ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
+htons(), htonl() -- convert 16, 32 bit int from host to network byte order
+inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
+inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
+ssl() -- secure socket layer support (only available if configured)
+
+ [*] not available on all platforms!
+
+Special objects:
+
+SocketType -- type object for socket objects
+error -- exception raised for I/O errors
+
+Integer constants:
+
+AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
+SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
+
+Many other constants may be defined; these may be used in calls to
+the setsockopt() and getsockopt() methods.
+"""
+
+from _socket import *
+
+import os, sys
+
+__all__ = ["getfqdn"]
+import _socket
+__all__.extend(os._get_exports_list(_socket))
+
+# On Windows, BeOS and RISC OS real socket objects can't be dup()ed, so
+# wrap them in _socketobject to get best-effort cleanup semantics.
+if (sys.platform.lower().startswith("win")
+    or (hasattr(os, 'uname') and os.uname()[0] == "BeOS")
+    or (sys.platform=="riscos")):
+
+    _realsocketcall = _socket.socket
+
+    def socket(family=AF_INET, type=SOCK_STREAM, proto=0):
+        # Shadow _socket.socket with the wrapping constructor.
+        return _socketobject(_realsocketcall(family, type, proto))
+
+    try:
+        _realsslcall = _socket.ssl
+    except AttributeError:
+        pass # No ssl
+    else:
+        def ssl(sock, keyfile=None, certfile=None):
+            # Unwrap _socketobject before handing to the real ssl call.
+            if hasattr(sock, "_sock"):
+                sock = sock._sock
+            return _realsslcall(sock, keyfile, certfile)
+
+
+# WSA error codes
+if sys.platform.lower().startswith("win"):
+    errorTab = {}
+    errorTab[10004] = "The operation was interrupted."
+    errorTab[10009] = "A bad file handle was passed."
+    errorTab[10013] = "Permission denied."
+    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
+    errorTab[10022] = "An invalid operation was attempted."
+    errorTab[10035] = "The socket operation would block"
+    errorTab[10036] = "A blocking operation is already in progress."
+    errorTab[10048] = "The network address is in use."
+    errorTab[10054] = "The connection has been reset."
+    errorTab[10058] = "The network has been shut down."
+    errorTab[10060] = "The operation timed out."
+    errorTab[10061] = "Connection refused."
+    errorTab[10063] = "The name is too long."
+    errorTab[10064] = "The host is down."
+    errorTab[10065] = "The host is unreachable."
+    __all__.append("errorTab")
+# Keep the module namespace clean; os/sys were only needed at import time.
+del os, sys
+
+
+def getfqdn(name=''):
+    """Get fully qualified domain name from name.
+
+    An empty argument is interpreted as meaning the local host.
+
+    First the hostname returned by gethostbyaddr() is checked, then
+    possibly existing aliases. In case no FQDN is available, hostname
+    is returned.
+    """
+    name = name.strip()
+    if not name or name == '0.0.0.0':
+        name = gethostname()
+    try:
+        hostname, aliases, ipaddrs = gethostbyaddr(name)
+    except error:
+        # Lookup failed: fall through and return the input name unchanged.
+        pass
+    else:
+        aliases.insert(0, hostname)
+        # Pick the first candidate containing a dot (i.e. fully qualified);
+        # the for/else falls back to the primary hostname if none qualifies.
+        for name in aliases:
+            if '.' in name:
+                break
+        else:
+            name = hostname
+    return name
+
+
+#
+# These classes are used by the socket() defined on Windows and BeOS
+# platforms to provide a best-effort implementation of the cleanup
+# semantics needed when sockets can't be dup()ed.
+#
+# These are not actually used on other platforms.
+#
+
+_socketmethods = (
+    'bind', 'connect', 'connect_ex', 'fileno', 'listen',
+    'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
+    'recv', 'recvfrom', 'send', 'sendall', 'sendto', 'setblocking', 'shutdown')
+
+class _socketobject:
+    """Wrapper around a real socket providing close() semantics on
+    platforms where sockets can't be dup()ed (see comment above)."""
+
+    class _closedsocket:
+        # Stand-in for a closed socket: any attribute access raises EBADF.
+        def __getattr__(self, name):
+            raise error(9, 'Bad file descriptor')
+
+    def __init__(self, sock):
+        self._sock = sock
+
+    def close(self):
+        # Avoid referencing globals here
+        self._sock = self.__class__._closedsocket()
+
+    def accept(self):
+        # Wrap the accepted socket too, for consistent close() behavior.
+        sock, addr = self._sock.accept()
+        return _socketobject(sock), addr
+
+    def dup(self):
+        return _socketobject(self._sock)
+
+    def makefile(self, mode='r', bufsize=-1):
+        return _fileobject(self._sock, mode, bufsize)
+
+    # Generate simple delegating methods for each name in _socketmethods.
+    _s = "def %s(self, *args): return self._sock.%s(*args)\n\n"
+    for _m in _socketmethods:
+        exec _s % (_m, _m)
+
+
+class _fileobject:
+    """File-like interface over a socket, with read and write buffering.
+    Returned by _socketobject.makefile()."""
+
+    def __init__(self, sock, mode, bufsize):
+        self._sock = sock
+        self._mode = mode
+        if bufsize < 0:
+            bufsize = 512
+        # Read buffer is always at least 1 byte; write buffer size of 1
+        # means line-buffered (see write()).
+        self._rbufsize = max(1, bufsize)
+        self._wbufsize = bufsize
+        self._wbuf = self._rbuf = ""
+
+    def close(self):
+        try:
+            if self._sock:
+                self.flush()
+        finally:
+            # Drop the socket reference even if flush() raised.
+            self._sock = 0
+
+    def __del__(self):
+        self.close()
+
+    def flush(self):
+        if self._wbuf:
+            self._sock.sendall(self._wbuf)
+            self._wbuf = ""
+
+    def fileno(self):
+        return self._sock.fileno()
+
+    def write(self, data):
+        self._wbuf = self._wbuf + data
+        if self._wbufsize == 1:
+            # Line-buffered: flush whenever a newline was written.
+            if '\n' in data:
+                self.flush()
+        else:
+            if len(self._wbuf) >= self._wbufsize:
+                self.flush()
+
+    def writelines(self, list):
+        # filter() is used only for its side effect of calling sendall
+        # on each item; the returned list is discarded.
+        filter(self._sock.sendall, list)
+        self.flush()
+
+    def read(self, n=-1):
+        if n >= 0:
+            # Serve as much as possible from the read buffer first.
+            k = len(self._rbuf)
+            if n <= k:
+                data = self._rbuf[:n]
+                self._rbuf = self._rbuf[n:]
+                return data
+            n = n - k
+            L = [self._rbuf]
+            self._rbuf = ""
+            while n > 0:
+                new = self._sock.recv(max(n, self._rbufsize))
+                if not new: break
+                k = len(new)
+                if k > n:
+                    # Keep the surplus bytes buffered for the next read.
+                    L.append(new[:n])
+                    self._rbuf = new[n:]
+                    break
+                L.append(new)
+                n = n - k
+            return "".join(L)
+        # n < 0: read until EOF, doubling the recv size up to 1 MB.
+        k = max(512, self._rbufsize)
+        L = [self._rbuf]
+        self._rbuf = ""
+        while 1:
+            new = self._sock.recv(k)
+            if not new: break
+            L.append(new)
+            k = min(k*2, 1024**2)
+        return "".join(L)
+
+    def readline(self, limit=-1):
+        data = ""
+        i = self._rbuf.find('\n')
+        # Keep receiving until a newline is buffered or limit is reached.
+        while i < 0 and not (0 < limit <= len(self._rbuf)):
+            new = self._sock.recv(self._rbufsize)
+            if not new: break
+            i = new.find('\n')
+            if i >= 0: i = i + len(self._rbuf)
+            self._rbuf = self._rbuf + new
+        if i < 0: i = len(self._rbuf)
+        else: i = i+1
+        if 0 <= limit < len(self._rbuf): i = limit
+        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
+        return data
+
+    def readlines(self, sizehint = 0):
+        total = 0
+        list = []
+        while 1:
+            line = self.readline()
+            if not line: break
+            list.append(line)
+            total += len(line)
+            # sizehint is a soft cap: stop after the line that crosses it.
+            if sizehint and total >= sizehint:
+                break
+        return list
diff --git a/lib-python/2.2/sre.py b/lib-python/2.2/sre.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sre.py
@@ -0,0 +1,311 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# re-compatible interface for the sre matching engine
+#
+# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
+#
+# This version of the SRE library can be redistributed under CNRI's
+# Python 1.6 license.  For any other use, please contact Secret Labs
+# AB (info at pythonware.com).
+#
+# Portions of this engine have been developed in cooperation with
+# CNRI.  Hewlett-Packard provided funding for 1.6 integration and
+# other compatibility work.
+#
+
+r"""Support for regular expressions (RE).
+
+This module provides regular expression matching operations similar to
+those found in Perl.  It supports both 8-bit and Unicode strings; both
+the pattern and the strings being processed can contain null bytes and
+characters outside the US ASCII range.
+
+Regular expressions can contain both special and ordinary characters.
+Most ordinary characters, like "A", "a", or "0", are the simplest
+regular expressions; they simply match themselves.  You can
+concatenate ordinary characters, so last matches the string 'last'.
+
+The special characters are:
+    "."      Matches any character except a newline.
+    "^"      Matches the start of the string.
+    "$"      Matches the end of the string.
+    "*"      Matches 0 or more (greedy) repetitions of the preceding RE.
+             Greedy means that it will match as many repetitions as possible.
+    "+"      Matches 1 or more (greedy) repetitions of the preceding RE.
+    "?"      Matches 0 or 1 (greedy) of the preceding RE.
+    *?,+?,?? Non-greedy versions of the previous three special characters.
+    {m,n}    Matches from m to n repetitions of the preceding RE.
+    {m,n}?   Non-greedy version of the above.
+    "\\"      Either escapes special characters or signals a special sequence.
+    []       Indicates a set of characters.
+             A "^" as the first character indicates a complementing set.
+    "|"      A|B, creates an RE that will match either A or B.
+    (...)    Matches the RE inside the parentheses.
+             The contents can be retrieved or matched later in the string.
+    (?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
+    (?:...)  Non-grouping version of regular parentheses.
+    (?P<name>...) The substring matched by the group is accessible by name.
+    (?P=name)     Matches the text matched earlier by the group named name.
+    (?#...)  A comment; ignored.
+    (?=...)  Matches if ... matches next, but doesn't consume the string.
+    (?!...)  Matches if ... doesn't match next.
+
+The special sequences consist of "\\" and a character from the list
+below.  If the ordinary character is not on the list, then the
+resulting RE will match the second character.
+    \number  Matches the contents of the group of the same number.
+    \A       Matches only at the start of the string.
+    \Z       Matches only at the end of the string.
+    \b       Matches the empty string, but only at the start or end of a word.
+    \B       Matches the empty string, but not at the start or end of a word.
+    \d       Matches any decimal digit; equivalent to the set [0-9].
+    \D       Matches any non-digit character; equivalent to the set [^0-9].
+    \s       Matches any whitespace character; equivalent to [ \t\n\r\f\v].
+    \S       Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
+    \w       Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
+             With LOCALE, it will match the set [0-9_] plus characters defined
+             as letters for the current locale.
+    \W       Matches the complement of \w.
+    \\       Matches a literal backslash.
+
+This module exports the following functions:
+    match    Match a regular expression pattern to the beginning of a string.
+    search   Search a string for the presence of a pattern.
+    sub      Substitute occurrences of a pattern found in a string.
+    subn     Same as sub, but also return the number of substitutions made.
+    split    Split a string by the occurrences of a pattern.
+    findall  Find all occurrences of a pattern in a string.
+    compile  Compile a pattern into a RegexObject.
+    purge    Clear the regular expression cache.
+    escape   Backslash all non-alphanumerics in a string.
+
+Some of the functions in this module take flags as optional parameters:
+    I  IGNORECASE  Perform case-insensitive matching.
+    L  LOCALE      Make \w, \W, \b, \B, dependent on the current locale.
+    M  MULTILINE   "^" matches the beginning of lines as well as the string.
+                   "$" matches the end of lines as well as the string.
+    S  DOTALL      "." matches any character at all, including the newline.
+    X  VERBOSE     Ignore whitespace and comments for nicer looking RE's.
+    U  UNICODE     Make \w, \W, \b, \B, dependent on the Unicode locale.
+
+This module also defines an exception 'error'.
+
+"""
+
+import sys
+import sre_compile
+import sre_parse
+
+# public symbols
+__all__ = [ "match", "search", "sub", "subn", "split", "findall",
+    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
+    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
+    "UNICODE", "error" ]
+
+__version__ = "2.2.1"
+
+# this module works under 1.5.2 and later.  don't use string methods
+import string
+
+# flags
+I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
+L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
+U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
+M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
+S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
+X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
+
+# sre extensions (experimental, don't rely on these)
+T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
+DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
+
+# sre exception
+error = sre_compile.error
+
+# --------------------------------------------------------------------
+# public interface
+
+def match(pattern, string, flags=0):
+    """Try to apply the pattern at the start of the string, returning
+    a match object, or None if no match was found."""
+    return _compile(pattern, flags).match(string)
+
+def search(pattern, string, flags=0):
+    """Scan through string looking for a match to the pattern, returning
+    a match object, or None if no match was found."""
+    return _compile(pattern, flags).search(string)
+
+def sub(pattern, repl, string, count=0):
+    """Return the string obtained by replacing the leftmost
+    non-overlapping occurrences of the pattern in string by the
+    replacement repl"""
+    return _compile(pattern, 0).sub(repl, string, count)
+
+def subn(pattern, repl, string, count=0):
+    """Return a 2-tuple containing (new_string, number).
+    new_string is the string obtained by replacing the leftmost
+    non-overlapping occurrences of the pattern in the source
+    string by the replacement repl.  number is the number of
+    substitutions that were made."""
+    return _compile(pattern, 0).subn(repl, string, count)
+
+def split(pattern, string, maxsplit=0):
+    """Split the source string by the occurrences of the pattern,
+    returning a list containing the resulting substrings."""
+    return _compile(pattern, 0).split(string, maxsplit)
+
+def findall(pattern, string):
+    """Return a list of all non-overlapping matches in the string.
+
+    If one or more groups are present in the pattern, return a
+    list of groups; this will be a list of tuples if the pattern
+    has more than one group.
+
+    Empty matches are included in the result."""
+    return _compile(pattern, 0).findall(string)
+
+if sys.hexversion >= 0x02020000:
+    __all__.append("finditer")
+    def finditer(pattern, string):
+        """Return an iterator over all non-overlapping matches in the
+        string.  For each match, the iterator returns a match object.
+
+        Empty matches are included in the result."""
+        return _compile(pattern, 0).finditer(string)
+
+def compile(pattern, flags=0):
+    "Compile a regular expression pattern, returning a pattern object."
+    return _compile(pattern, flags)
+
+def purge():
+    "Clear the regular expression cache"
+    _cache.clear()
+    _cache_repl.clear()
+
+def template(pattern, flags=0):
+    "Compile a template pattern, returning a pattern object"
+    return _compile(pattern, flags|T)
+
+def escape(pattern):
+    "Escape all non-alphanumeric characters in pattern."
+    s = list(pattern)
+    for i in range(len(pattern)):
+        c = pattern[i]
+        if not ("a" <= c <= "z" or "A" <= c <= "Z" or "0" <= c <= "9"):
+            if c == "\000":
+                s[i] = "\\000"
+            else:
+                s[i] = "\\" + c
+    return _join(s, pattern)
+
+# --------------------------------------------------------------------
+# internals
+
+_cache = {}
+_cache_repl = {}
+
+_pattern_type = type(sre_compile.compile("", 0))
+
+_MAXCACHE = 100
+
+def _join(seq, sep):
+    # internal: join into string having the same type as sep
+    return string.join(seq, sep[:0])
+
+def _compile(*key):
+    # internal: compile pattern
+    p = _cache.get(key)
+    if p is not None:
+        return p
+    pattern, flags = key
+    if type(pattern) is _pattern_type:
+        return pattern
+    if type(pattern) not in sre_compile.STRING_TYPES:
+        raise TypeError, "first argument must be string or compiled pattern"
+    try:
+        p = sre_compile.compile(pattern, flags)
+    except error, v:
+        raise error, v # invalid expression
+    if len(_cache) >= _MAXCACHE:
+        _cache.clear()
+    _cache[key] = p
+    return p
+
+def _compile_repl(*key):
+    # internal: compile replacement pattern
+    p = _cache_repl.get(key)
+    if p is not None:
+        return p
+    repl, pattern = key
+    try:
+        p = sre_parse.parse_template(repl, pattern)
+    except error, v:
+        raise error, v # invalid expression
+    if len(_cache_repl) >= _MAXCACHE:
+        _cache_repl.clear()
+    _cache_repl[key] = p
+    return p
+
+def _expand(pattern, match, template):
+    # internal: match.expand implementation hook
+    template = sre_parse.parse_template(template, pattern)
+    return sre_parse.expand_template(template, match)
+
+def _subx(pattern, template):
+    # internal: pattern.sub/subn implementation helper
+    template = _compile_repl(template, pattern)
+    if not template[0] and len(template[1]) == 1:
+        # literal replacement
+        return template[1][0]
+    def filter(match, template=template):
+        return sre_parse.expand_template(template, match)
+    return filter
+
+# register myself for pickling
+
+import copy_reg
+
+def _pickle(p):
+    return _compile, (p.pattern, p.flags)
+
+copy_reg.pickle(_pattern_type, _pickle, _compile)
+
+# --------------------------------------------------------------------
+# experimental stuff (see python-dev discussions for details)
+
+class Scanner:
+    def __init__(self, lexicon, flags=0):
+        from sre_constants import BRANCH, SUBPATTERN
+        self.lexicon = lexicon
+        # combine phrases into a compound pattern
+        p = []
+        s = sre_parse.Pattern()
+        s.flags = flags
+        for phrase, action in lexicon:
+            p.append(sre_parse.SubPattern(s, [
+                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
+                ]))
+        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
+        s.groups = len(p)
+        self.scanner = sre_compile.compile(p)
+    def scan(self, string):
+        result = []
+        append = result.append
+        match = self.scanner.scanner(string).match
+        i = 0
+        while 1:
+            m = match()
+            if not m:
+                break
+            j = m.end()
+            if i == j:
+                break
+            action = self.lexicon[m.lastindex-1][1]
+            if callable(action):
+                self.match = m
+                action = action(self, m.group())
+            if action is not None:
+                append(action)
+            i = j
+        return result, string[i:]
diff --git a/lib-python/2.2/sre_compile.py b/lib-python/2.2/sre_compile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sre_compile.py
@@ -0,0 +1,455 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# convert template to internal format
+#
+# Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+import _sre, sys
+
+from sre_constants import *
+
+assert _sre.MAGIC == MAGIC, "SRE module mismatch"
+
+MAXCODE = 65535
+
+def _compile(code, pattern, flags):
+    # internal: compile a (sub)pattern
+    emit = code.append
+    for op, av in pattern:
+        if op in (LITERAL, NOT_LITERAL):
+            if flags & SRE_FLAG_IGNORECASE:
+                emit(OPCODES[OP_IGNORE[op]])
+                emit(_sre.getlower(av, flags))
+            else:
+                emit(OPCODES[op])
+                emit(av)
+        elif op is IN:
+            if flags & SRE_FLAG_IGNORECASE:
+                emit(OPCODES[OP_IGNORE[op]])
+                def fixup(literal, flags=flags):
+                    return _sre.getlower(literal, flags)
+            else:
+                emit(OPCODES[op])
+                fixup = lambda x: x
+            skip = len(code); emit(0)
+            _compile_charset(av, flags, code, fixup)
+            code[skip] = len(code) - skip
+        elif op is ANY:
+            if flags & SRE_FLAG_DOTALL:
+                emit(OPCODES[ANY_ALL])
+            else:
+                emit(OPCODES[ANY])
+        elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT):
+            if flags & SRE_FLAG_TEMPLATE:
+                raise error, "internal: unsupported template operator"
+                emit(OPCODES[REPEAT])
+                skip = len(code); emit(0)
+                emit(av[0])
+                emit(av[1])
+                _compile(code, av[2], flags)
+                emit(OPCODES[SUCCESS])
+                code[skip] = len(code) - skip
+            elif _simple(av) and op == MAX_REPEAT:
+                emit(OPCODES[REPEAT_ONE])
+                skip = len(code); emit(0)
+                emit(av[0])
+                emit(av[1])
+                _compile(code, av[2], flags)
+                emit(OPCODES[SUCCESS])
+                code[skip] = len(code) - skip
+            else:
+                emit(OPCODES[REPEAT])
+                skip = len(code); emit(0)
+                emit(av[0])
+                emit(av[1])
+                _compile(code, av[2], flags)
+                code[skip] = len(code) - skip
+                if op == MAX_REPEAT:
+                    emit(OPCODES[MAX_UNTIL])
+                else:
+                    emit(OPCODES[MIN_UNTIL])
+        elif op is SUBPATTERN:
+            if av[0]:
+                emit(OPCODES[MARK])
+                emit((av[0]-1)*2)
+            # _compile_info(code, av[1], flags)
+            _compile(code, av[1], flags)
+            if av[0]:
+                emit(OPCODES[MARK])
+                emit((av[0]-1)*2+1)
+        elif op in (SUCCESS, FAILURE):
+            emit(OPCODES[op])
+        elif op in (ASSERT, ASSERT_NOT):
+            emit(OPCODES[op])
+            skip = len(code); emit(0)
+            if av[0] >= 0:
+                emit(0) # look ahead
+            else:
+                lo, hi = av[1].getwidth()
+                if lo != hi:
+                    raise error, "look-behind requires fixed-width pattern"
+                emit(lo) # look behind
+            _compile(code, av[1], flags)
+            emit(OPCODES[SUCCESS])
+            code[skip] = len(code) - skip
+        elif op is CALL:
+            emit(OPCODES[op])
+            skip = len(code); emit(0)
+            _compile(code, av, flags)
+            emit(OPCODES[SUCCESS])
+            code[skip] = len(code) - skip
+        elif op is AT:
+            emit(OPCODES[op])
+            if flags & SRE_FLAG_MULTILINE:
+                av = AT_MULTILINE.get(av, av)
+            if flags & SRE_FLAG_LOCALE:
+                av = AT_LOCALE.get(av, av)
+            elif flags & SRE_FLAG_UNICODE:
+                av = AT_UNICODE.get(av, av)
+            emit(ATCODES[av])
+        elif op is BRANCH:
+            emit(OPCODES[op])
+            tail = []
+            for av in av[1]:
+                skip = len(code); emit(0)
+                # _compile_info(code, av, flags)
+                _compile(code, av, flags)
+                emit(OPCODES[JUMP])
+                tail.append(len(code)); emit(0)
+                code[skip] = len(code) - skip
+            emit(0) # end of branch
+            for tail in tail:
+                code[tail] = len(code) - tail
+        elif op is CATEGORY:
+            emit(OPCODES[op])
+            if flags & SRE_FLAG_LOCALE:
+                av = CH_LOCALE[av]
+            elif flags & SRE_FLAG_UNICODE:
+                av = CH_UNICODE[av]
+            emit(CHCODES[av])
+        elif op is GROUPREF:
+            if flags & SRE_FLAG_IGNORECASE:
+                emit(OPCODES[OP_IGNORE[op]])
+            else:
+                emit(OPCODES[op])
+            emit(av-1)
+        else:
+            raise ValueError, ("unsupported operand type", op)
+
+def _compile_charset(charset, flags, code, fixup=None):
+    # compile charset subprogram
+    emit = code.append
+    if not fixup:
+        fixup = lambda x: x
+    for op, av in _optimize_charset(charset, fixup):
+        emit(OPCODES[op])
+        if op is NEGATE:
+            pass
+        elif op is LITERAL:
+            emit(fixup(av))
+        elif op is RANGE:
+            emit(fixup(av[0]))
+            emit(fixup(av[1]))
+        elif op is CHARSET:
+            code.extend(av)
+        elif op is BIGCHARSET:
+            code.extend(av)
+        elif op is CATEGORY:
+            if flags & SRE_FLAG_LOCALE:
+                emit(CHCODES[CH_LOCALE[av]])
+            elif flags & SRE_FLAG_UNICODE:
+                emit(CHCODES[CH_UNICODE[av]])
+            else:
+                emit(CHCODES[av])
+        else:
+            raise error, "internal: unsupported set operator"
+    emit(OPCODES[FAILURE])
+
+def _optimize_charset(charset, fixup):
+    # internal: optimize character set
+    out = []
+    charmap = [0]*256
+    try:
+        for op, av in charset:
+            if op is NEGATE:
+                out.append((op, av))
+            elif op is LITERAL:
+                charmap[fixup(av)] = 1
+            elif op is RANGE:
+                for i in range(fixup(av[0]), fixup(av[1])+1):
+                    charmap[i] = 1
+            elif op is CATEGORY:
+                # XXX: could append to charmap tail
+                return charset # cannot compress
+    except IndexError:
+        if sys.maxunicode != 65535:
+            # XXX: big charsets don't work in UCS-4 builds
+            return charset
+        # character set contains unicode characters
+        return _optimize_unicode(charset, fixup)
+    # compress character map
+    i = p = n = 0
+    runs = []
+    for c in charmap:
+        if c:
+            if n == 0:
+                p = i
+            n = n + 1
+        elif n:
+            runs.append((p, n))
+            n = 0
+        i = i + 1
+    if n:
+        runs.append((p, n))
+    if len(runs) <= 2:
+        # use literal/range
+        for p, n in runs:
+            if n == 1:
+                out.append((LITERAL, p))
+            else:
+                out.append((RANGE, (p, p+n-1)))
+        if len(out) < len(charset):
+            return out
+    else:
+        # use bitmap
+        data = _mk_bitmap(charmap)
+        out.append((CHARSET, data))
+        return out
+    return charset
+
+def _mk_bitmap(bits):
+    data = []
+    m = 1; v = 0
+    for c in bits:
+        if c:
+            v = v + m
+        m = m << 1
+        if m > MAXCODE:
+            data.append(v)
+            m = 1; v = 0
+    return data
+
+# To represent a big charset, first a bitmap of all characters in the
+# set is constructed. Then, this bitmap is sliced into chunks of 256
+# characters, duplicate chunks are eliminated, and each chunk is
+# given a number. In the compiled expression, the charset is
+# represented by a 16-bit word sequence, consisting of one word for
+# the number of different chunks, a sequence of 256 bytes (128 words)
+# of chunk numbers indexed by their original chunk position, and a
+# sequence of chunks (16 words each).
+
+# Compression is normally good: in a typical charset, large ranges of
+# Unicode will be either completely excluded (e.g. if only cyrillic
+# letters are to be matched), or completely included (e.g. if large
+# subranges of Kanji match). These ranges will be represented by
+# chunks of all one-bits or all zero-bits.
+
+# Matching can be also done efficiently: the more significant byte of
+# the Unicode character is an index into the chunk number, and the
+# less significant byte is a bit index in the chunk (just like the
+# CHARSET matching).
+
+def _optimize_unicode(charset, fixup):
+    charmap = [0]*65536
+    negate = 0
+    for op, av in charset:
+        if op is NEGATE:
+            negate = 1
+        elif op is LITERAL:
+            charmap[fixup(av)] = 1
+        elif op is RANGE:
+            for i in range(fixup(av[0]), fixup(av[1])+1):
+                charmap[i] = 1
+        elif op is CATEGORY:
+            # XXX: could expand category
+            return charset # cannot compress
+    if negate:
+        for i in range(65536):
+            charmap[i] = not charmap[i]
+    comps = {}
+    mapping = [0]*256
+    block = 0
+    data = []
+    for i in range(256):
+        chunk = tuple(charmap[i*256:(i+1)*256])
+        new = comps.setdefault(chunk, block)
+        mapping[i] = new
+        if new == block:
+            block = block + 1
+            data = data + _mk_bitmap(chunk)
+    header = [block]
+    assert MAXCODE == 65535
+    for i in range(128):
+        if sys.byteorder == 'big':
+            header.append(256*mapping[2*i]+mapping[2*i+1])
+        else:
+            header.append(mapping[2*i]+256*mapping[2*i+1])
+    data[0:0] = header
+    return [(BIGCHARSET, data)]
+
+def _simple(av):
+    # check if av is a "simple" operator
+    lo, hi = av[2].getwidth()
+    if lo == 0 and hi == MAXREPEAT:
+        raise error, "nothing to repeat"
+    return lo == hi == 1 and av[2][0][0] != SUBPATTERN
+
+def _compile_info(code, pattern, flags):
+    # internal: compile an info block.  in the current version,
+    # this contains min/max pattern width, and an optional literal
+    # prefix or a character map
+    lo, hi = pattern.getwidth()
+    if lo == 0:
+        return # not worth it
+    # look for a literal prefix
+    prefix = []
+    prefix_skip = 0
+    charset = [] # not used
+    if not (flags & SRE_FLAG_IGNORECASE):
+        # look for literal prefix
+        for op, av in pattern.data:
+            if op is LITERAL:
+                if len(prefix) == prefix_skip:
+                    prefix_skip = prefix_skip + 1
+                prefix.append(av)
+            elif op is SUBPATTERN and len(av[1]) == 1:
+                op, av = av[1][0]
+                if op is LITERAL:
+                    prefix.append(av)
+                else:
+                    break
+            else:
+                break
+        # if no prefix, look for charset prefix
+        if not prefix and pattern.data:
+            op, av = pattern.data[0]
+            if op is SUBPATTERN and av[1]:
+                op, av = av[1][0]
+                if op is LITERAL:
+                    charset.append((op, av))
+                elif op is BRANCH:
+                    c = []
+                    for p in av[1]:
+                        if not p:
+                            break
+                        op, av = p[0]
+                        if op is LITERAL:
+                            c.append((op, av))
+                        else:
+                            break
+                    else:
+                        charset = c
+            elif op is BRANCH:
+                c = []
+                for p in av[1]:
+                    if not p:
+                        break
+                    op, av = p[0]
+                    if op is LITERAL:
+                        c.append((op, av))
+                    else:
+                        break
+                else:
+                    charset = c
+            elif op is IN:
+                charset = av
+##     if prefix:
+##         print "*** PREFIX", prefix, prefix_skip
+##     if charset:
+##         print "*** CHARSET", charset
+    # add an info block
+    emit = code.append
+    emit(OPCODES[INFO])
+    skip = len(code); emit(0)
+    # literal flag
+    mask = 0
+    if prefix:
+        mask = SRE_INFO_PREFIX
+        if len(prefix) == prefix_skip == len(pattern.data):
+            mask = mask + SRE_INFO_LITERAL
+    elif charset:
+        mask = mask + SRE_INFO_CHARSET
+    emit(mask)
+    # pattern length
+    if lo < MAXCODE:
+        emit(lo)
+    else:
+        emit(MAXCODE)
+        prefix = prefix[:MAXCODE]
+    if hi < MAXCODE:
+        emit(hi)
+    else:
+        emit(0)
+    # add literal prefix
+    if prefix:
+        emit(len(prefix)) # length
+        emit(prefix_skip) # skip
+        code.extend(prefix)
+        # generate overlap table
+        table = [-1] + ([0]*len(prefix))
+        for i in range(len(prefix)):
+            table[i+1] = table[i]+1
+            while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
+                table[i+1] = table[table[i+1]-1]+1
+        code.extend(table[1:]) # don't store first entry
+    elif charset:
+        _compile_charset(charset, flags, code)
+    code[skip] = len(code) - skip
+
+STRING_TYPES = [type("")]
+
+try:
+    STRING_TYPES.append(type(unicode("")))
+except NameError:
+    pass
+
+def _code(p, flags):
+
+    flags = p.pattern.flags | flags
+    code = []
+
+    # compile info block
+    _compile_info(code, p, flags)
+
+    # compile the pattern
+    _compile(code, p.data, flags)
+
+    code.append(OPCODES[SUCCESS])
+
+    return code
+
+def compile(p, flags=0):
+    # internal: convert pattern list to internal format
+
+    if type(p) in STRING_TYPES:
+        import sre_parse
+        pattern = p
+        p = sre_parse.parse(p, flags)
+    else:
+        pattern = None
+
+    code = _code(p, flags)
+
+    # print code
+
+    # XXX: <fl> get rid of this limitation!
+    assert p.pattern.groups <= 100,\
+           "sorry, but this version only supports 100 named groups"
+
+    # map in either direction
+    groupindex = p.pattern.groupdict
+    indexgroup = [None] * p.pattern.groups
+    for k, i in groupindex.items():
+        indexgroup[i] = k
+
+    return _sre.compile(
+        pattern, flags, code,
+        p.pattern.groups-1,
+        groupindex, indexgroup
+        )
diff --git a/lib-python/2.2/sre_constants.py b/lib-python/2.2/sre_constants.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sre_constants.py
@@ -0,0 +1,259 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# various symbols used by the regular expression engine.
+# run this script to update the _sre include files!
+#
+# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+# update when constants are added or removed
+
+MAGIC = 20010701
+
+# max code word in this release
+
+MAXREPEAT = 65535
+
+# SRE standard exception (access as sre.error)
+# should this really be here?
+
+class error(Exception):
+    pass
+
+# operators
+
+FAILURE = "failure"
+SUCCESS = "success"
+
+ANY = "any"
+ANY_ALL = "any_all"
+ASSERT = "assert"
+ASSERT_NOT = "assert_not"
+AT = "at"
+BIGCHARSET = "bigcharset"
+BRANCH = "branch"
+CALL = "call"
+CATEGORY = "category"
+CHARSET = "charset"
+GROUPREF = "groupref"
+GROUPREF_IGNORE = "groupref_ignore"
+IN = "in"
+IN_IGNORE = "in_ignore"
+INFO = "info"
+JUMP = "jump"
+LITERAL = "literal"
+LITERAL_IGNORE = "literal_ignore"
+MARK = "mark"
+MAX_REPEAT = "max_repeat"
+MAX_UNTIL = "max_until"
+MIN_REPEAT = "min_repeat"
+MIN_UNTIL = "min_until"
+NEGATE = "negate"
+NOT_LITERAL = "not_literal"
+NOT_LITERAL_IGNORE = "not_literal_ignore"
+RANGE = "range"
+REPEAT = "repeat"
+REPEAT_ONE = "repeat_one"
+SUBPATTERN = "subpattern"
+
+# positions
+AT_BEGINNING = "at_beginning"
+AT_BEGINNING_LINE = "at_beginning_line"
+AT_BEGINNING_STRING = "at_beginning_string"
+AT_BOUNDARY = "at_boundary"
+AT_NON_BOUNDARY = "at_non_boundary"
+AT_END = "at_end"
+AT_END_LINE = "at_end_line"
+AT_END_STRING = "at_end_string"
+AT_LOC_BOUNDARY = "at_loc_boundary"
+AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
+AT_UNI_BOUNDARY = "at_uni_boundary"
+AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
+
+# categories
+CATEGORY_DIGIT = "category_digit"
+CATEGORY_NOT_DIGIT = "category_not_digit"
+CATEGORY_SPACE = "category_space"
+CATEGORY_NOT_SPACE = "category_not_space"
+CATEGORY_WORD = "category_word"
+CATEGORY_NOT_WORD = "category_not_word"
+CATEGORY_LINEBREAK = "category_linebreak"
+CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
+CATEGORY_LOC_WORD = "category_loc_word"
+CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
+CATEGORY_UNI_DIGIT = "category_uni_digit"
+CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
+CATEGORY_UNI_SPACE = "category_uni_space"
+CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
+CATEGORY_UNI_WORD = "category_uni_word"
+CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
+CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
+CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
+
+OPCODES = [
+
+    # failure=0 success=1 (just because it looks better that way :-)
+    FAILURE, SUCCESS,
+
+    ANY, ANY_ALL,
+    ASSERT, ASSERT_NOT,
+    AT,
+    BRANCH,
+    CALL,
+    CATEGORY,
+    CHARSET, BIGCHARSET,
+    GROUPREF, GROUPREF_IGNORE,
+    IN, IN_IGNORE,
+    INFO,
+    JUMP,
+    LITERAL, LITERAL_IGNORE,
+    MARK,
+    MAX_UNTIL,
+    MIN_UNTIL,
+    NOT_LITERAL, NOT_LITERAL_IGNORE,
+    NEGATE,
+    RANGE,
+    REPEAT,
+    REPEAT_ONE,
+    SUBPATTERN
+
+]
+
+ATCODES = [
+    AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
+    AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
+    AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
+    AT_UNI_NON_BOUNDARY
+]
+
+CHCODES = [
+    CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
+    CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
+    CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
+    CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
+    CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
+    CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
+    CATEGORY_UNI_NOT_LINEBREAK
+]
+
+def makedict(list):
+    d = {}
+    i = 0
+    for item in list:
+        d[item] = i
+        i = i + 1
+    return d
+
+OPCODES = makedict(OPCODES)
+ATCODES = makedict(ATCODES)
+CHCODES = makedict(CHCODES)
+
+# replacement operations for "ignore case" mode
+OP_IGNORE = {
+    GROUPREF: GROUPREF_IGNORE,
+    IN: IN_IGNORE,
+    LITERAL: LITERAL_IGNORE,
+    NOT_LITERAL: NOT_LITERAL_IGNORE
+}
+
+AT_MULTILINE = {
+    AT_BEGINNING: AT_BEGINNING_LINE,
+    AT_END: AT_END_LINE
+}
+
+AT_LOCALE = {
+    AT_BOUNDARY: AT_LOC_BOUNDARY,
+    AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
+}
+
+AT_UNICODE = {
+    AT_BOUNDARY: AT_UNI_BOUNDARY,
+    AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
+}
+
+CH_LOCALE = {
+    CATEGORY_DIGIT: CATEGORY_DIGIT,
+    CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
+    CATEGORY_SPACE: CATEGORY_SPACE,
+    CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
+    CATEGORY_WORD: CATEGORY_LOC_WORD,
+    CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
+    CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
+    CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
+}
+
+CH_UNICODE = {
+    CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
+    CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
+    CATEGORY_SPACE: CATEGORY_UNI_SPACE,
+    CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
+    CATEGORY_WORD: CATEGORY_UNI_WORD,
+    CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
+    CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
+    CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
+}
+
+# flags
+SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
+SRE_FLAG_IGNORECASE = 2 # case insensitive
+SRE_FLAG_LOCALE = 4 # honour system locale
+SRE_FLAG_MULTILINE = 8 # treat target as multiline string
+SRE_FLAG_DOTALL = 16 # treat target as a single string
+SRE_FLAG_UNICODE = 32 # use unicode locale
+SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
+SRE_FLAG_DEBUG = 128 # debugging
+
+# flags for INFO primitive
+SRE_INFO_PREFIX = 1 # has prefix
+SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
+SRE_INFO_CHARSET = 4 # pattern starts with character from given set
+
+if __name__ == "__main__":
+    import string
+    def dump(f, d, prefix):
+        items = d.items()
+        items.sort(lambda a, b: cmp(a[1], b[1]))
+        for k, v in items:
+            f.write("#define %s_%s %s\n" % (prefix, string.upper(k), v))
+    f = open("sre_constants.h", "w")
+    f.write("""\
+/*
+ * Secret Labs' Regular Expression Engine
+ *
+ * regular expression matching engine
+ *
+ * NOTE: This file is generated by sre_constants.py.  If you need
+ * to change anything in here, edit sre_constants.py and run it.
+ *
+ * Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.
+ *
+ * See the _sre.c file for information on usage and redistribution.
+ */
+
+""")
+
+    f.write("#define SRE_MAGIC %d\n" % MAGIC)
+
+    dump(f, OPCODES, "SRE_OP")
+    dump(f, ATCODES, "SRE")
+    dump(f, CHCODES, "SRE")
+
+    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
+    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
+    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
+    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
+    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
+    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
+    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
+
+    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
+    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
+    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
+
+    f.close()
+    print "done"
diff --git a/lib-python/2.2/sre_parse.py b/lib-python/2.2/sre_parse.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sre_parse.py
@@ -0,0 +1,738 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# convert re-style regular expression to sre pattern
+#
+# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+# XXX: show string offset and offending character for all errors
+
+# this module works under 1.5.2 and later.  don't use string methods
+import string, sys
+
+from sre_constants import *
+
# characters with special meaning outside a character class
SPECIAL_CHARS = ".\\[{()*+?^$|"
# characters that introduce a repeat operator
REPEAT_CHARS = "*+?{"

DIGITS = tuple("0123456789")

OCTDIGITS = tuple("01234567")
HEXDIGITS = tuple("0123456789abcdefABCDEF")

WHITESPACE = tuple(" \t\n\r\v\f")

# escapes that stand for a single literal character
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}

# escapes that stand for an anchor (AT) or a character category (IN);
# note that \b means "boundary" here but "backspace" inside a class
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}

# inline flag letters, as in "(?im)" -> IGNORECASE | MULTILINE
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}

# figure out best way to convert hex/octal numbers to integers
try:
    int("10", 8)
    atoi = int # 2.0 and later
except TypeError:
    atoi = string.atoi # 1.5.2
+
class Pattern:
    # master pattern object.  keeps track of global attributes
    def __init__(self):
        self.flags = 0        # combined SRE_FLAG_* bits
        self.open = []        # group numbers opened but not yet closed
        self.groups = 1       # next free group number (0 is the whole match)
        self.groupdict = {}   # group name -> group number
    def opengroup(self, name=None):
        # allocate the next group number, optionally binding it to a name;
        # raises error if the name was already used for another group
        gid = self.groups
        self.groups = gid + 1
        if name:
            ogid = self.groupdict.get(name, None)
            if ogid is not None:
                raise error, ("redefinition of group name %s as group %d; "
                              "was group %d" % (repr(name), gid,  ogid))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid
    def closegroup(self, gid):
        self.open.remove(gid)
    def checkgroup(self, gid):
        # a group may be back-referenced only after it has been closed
        return gid < self.groups and gid not in self.open
+
class SubPattern:
    # a subpattern, in intermediate form: a list of (opcode, argument)
    # tuples, plus a reference back to the master Pattern object
    def __init__(self, pattern, data=None):
        self.pattern = pattern
        if not data:
            data = []
        self.data = data
        self.width = None   # cached (min, max) width, computed lazily
    def dump(self, level=0):
        # debugging helper: pretty-print the parse tree (SRE_FLAG_DEBUG)
        nl = 1
        for op, av in self.data:
            print level*"  " + op,; nl = 0
            if op == "in":
                # member sublanguage
                print; nl = 1
                for op, a in av:
                    print (level+1)*"  " + op, a
            elif op == "branch":
                print; nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print level*"  " + "or"
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif type(av) in (type(()), type([])):
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl: print
                        a.dump(level+1); nl = 1
                    else:
                        print a, ; nl = 0
            else:
                print av, ; nl = 0
            if not nl: print
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def __getslice__(self, start, stop):
        # slicing yields a new SubPattern sharing the same master Pattern
        return SubPattern(self.pattern, self.data[start:stop])
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        if self.width:
            return self.width
        # longs avoid intermediate overflow; clamped to maxint at the end
        lo = hi = 0L
        for op, av in self.data:
            if op is BRANCH:
                # width of a branch: min/max over the alternatives
                i = sys.maxint
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in (MIN_REPEAT, MAX_REPEAT):
                i, j = av[2].getwidth()
                lo = lo + long(i) * av[0]
                hi = hi + long(j) * av[1]
            elif op in (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY):
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
        return self.width
+
class Tokenizer:
    # splits the pattern string into tokens: single characters, or
    # two-character escape sequences such as "\d".  self.next always
    # holds the one-token lookahead (None at end of input).
    def __init__(self, string):
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        # advance: load the next token into self.next
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index]
        if char[0] == "\\":
            # an escape token always includes the following character
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error, "bogus escape (end of line)"
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        # return 1 if the lookahead equals char; consume it unless skip=0
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        # consume and return the lookahead token
        this = self.next
        self.__next()
        return this
    def tell(self):
        # checkpoint (position, lookahead) for a later seek()
        return self.index, self.next
    def seek(self, index):
        # restore a checkpoint previously returned by tell()
        self.index, self.next = index
+
def isident(char):
    # true if char may start a group name: an ASCII letter or underscore
    if "a" <= char <= "z":
        return 1
    if "A" <= char <= "Z":
        return 1
    return char == "_"
+
def isdigit(char):
    # true if char is an ASCII decimal digit
    return not (char < "0" or char > "9")
+
def isname(name):
    # check that a group name is valid: a leading letter or underscore,
    # followed only by letters, digits, and underscores
    if not isident(name[0]):
        return 0
    for char in name:
        if isident(char) or isdigit(char):
            continue
        return 0
    return 1
+
def _group(escape, groups):
    # check if the escape string (e.g. "\\1") names a valid group;
    # returns the group number, or None if it is not a usable group
    try:
        gid = atoi(escape[1:])
    except ValueError:
        return None # not a valid group
    if gid and gid < groups:
        return gid
    return None # not a valid group
+
def _class_escape(source, escape):
    # handle escape code inside character class; returns an
    # (opcode, argument) tuple or raises error for a bad escape
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    if code:
        return code
    try:
        if escape[1:2] == "x":
            # hexadecimal escape (exactly two digits)
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[2:]
            if len(escape) != 2:
                raise error, "bogus escape: %s" % repr("\\" + escape)
            return LITERAL, atoi(escape, 16) & 0xff
        elif escape[1:2] in OCTDIGITS:
            # octal escape (up to three digits)
            while source.next in OCTDIGITS and len(escape) < 5:
                escape = escape + source.get()
            escape = escape[1:]
            return LITERAL, atoi(escape, 8) & 0xff
        # anything else escapes to the literal character itself
        if len(escape) == 2:
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
+
def _escape(source, escape, state):
    # handle escape code in expression (outside a character class);
    # returns an (opcode, argument) tuple or raises error
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        if escape[1:2] == "x":
            # hexadecimal escape
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            if len(escape) != 4:
                raise ValueError
            return LITERAL, atoi(escape[2:], 16) & 0xff
        elif escape[1:2] == "0":
            # octal escape
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            return LITERAL, atoi(escape[1:], 8) & 0xff
        elif escape[1:2] in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            # NOTE(review): the 'here' checkpoint is never used below
            here = source.tell()
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, atoi(escape[1:], 8) & 0xff
            # got at least one decimal digit; this is a group reference
            group = _group(escape, state.groups)
            if group:
                if not state.checkgroup(group):
                    raise error, "cannot refer to open group"
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
+
def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    # returns a single SubPattern; nested=0 means top level, where a
    # stray ")" is left for the caller to report

    items = []
    while 1:
        items.append(_parse(source, state))
        if source.match("|"):
            continue
        if not nested:
            break
        if not source.next or source.match(")", 0):
            break
        else:
            raise error, "pattern not properly closed"

    if len(items) == 1:
        return items[0]

    subpattern = SubPattern(state)

    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpattern.append(prefix)
            continue # check next one
        break

    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        for item in items:
            set.append(item[0])
        subpattern.append((IN, set))
        return subpattern

    # general case: a real BRANCH node
    subpattern.append((BRANCH, (None, items)))
    return subpattern
+
def _parse(source, state):
    # parse a simple pattern: everything up to the next "|" or ")".
    # returns a SubPattern; state is the master Pattern object.

    subpattern = SubPattern(state)

    while 1:

        if source.next in ("|", ")"):
            break # end of subpattern
        this = source.get()
        if this is None:
            break # end of pattern

        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = source.get()
                    if this in (None, "\n"):
                        break
                continue

        if this and this[0] not in SPECIAL_CHARS:
            # ordinary character
            subpattern.append((LITERAL, ord(this)))

        elif this == "[":
            # character set
            set = []
##          if source.match(":"):
##              pass # handle character classes
            if source.match("^"):
                set.append((NEGATE, None))
            # check remaining characters
            # ("]" as the very first member is taken literally)
            start = set[:]
            while 1:
                this = source.get()
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error, "unexpected end of regular expression"
                if source.match("-"):
                    # potential range
                    this = source.get()
                    if this == "]":
                        # trailing "-" is a literal, not a range
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        set.append(code1)
                        set.append((LITERAL, ord("-")))
                        break
                    else:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error, "bad character range"
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error, "bad character range"
                        set.append((RANGE, (lo, hi)))
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    set.append(code1)

            # XXX: <fl> should move set optimization to compiler!
            if len(set)==1 and set[0][0] is LITERAL:
                subpattern.append(set[0]) # optimization
            elif len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpattern.append((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpattern.append((IN, set))

        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT

            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                # "{m,n}"; falls back to a literal "{" if not well-formed
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if source.match(","):
                    while source.next in DIGITS:
                        hi = hi + source.get()
                else:
                    hi = lo
                if not source.match("}"):
                    subpattern.append((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = atoi(lo)
                if hi:
                    max = atoi(hi)
                if max < min:
                    raise error, "bad repeat interval"
            else:
                raise error, "not supported"
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (len(item) == 1 and item[0][0] == AT):
                raise error, "nothing to repeat"
            if item[0][0] in (MIN_REPEAT, MAX_REPEAT):
                raise error, "multiple repeat"
            if source.match("?"):
                # non-greedy repeat ("*?", "+?", etc.)
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))

        elif this == ".":
            subpattern.append((ANY, None))

        elif this == "(":
            # group or (?...) extension
            group = 1
            name = None
            if source.match("?"):
                group = 0
                # options
                if source.match("P"):
                    # python extensions
                    if source.match("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = source.get()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not isname(name):
                            raise error, "bad character in group name"
                    elif source.match("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = source.get()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ")":
                                break
                            name = name + char
                        if not isname(name):
                            raise error, "bad character in group name"
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error, "unknown group name"
                        subpattern.append((GROUPREF, gid))
                        continue
                    else:
                        char = source.get()
                        if char is None:
                            raise error, "unexpected end of pattern"
                        raise error, "unknown specifier: ?P%s" % char
                elif source.match(":"):
                    # non-capturing group
                    group = 2
                elif source.match("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        source.get()
                    if not source.match(")"):
                        raise error, "unbalanced parenthesis"
                    continue
                elif source.next in ("=", "!", "<"):
                    # lookahead assertions
                    char = source.get()
                    dir = 1
                    if char == "<":
                        if source.next not in ("=", "!"):
                            raise error, "syntax error"
                        dir = -1 # lookbehind
                        char = source.get()
                    p = _parse_sub(source, state)
                    if not source.match(")"):
                        raise error, "unbalanced parenthesis"
                    if char == "=":
                        subpattern.append((ASSERT, (dir, p)))
                    else:
                        subpattern.append((ASSERT_NOT, (dir, p)))
                    continue
                else:
                    # flags, e.g. "(?im)"; sets bits on the master pattern
                    if not FLAGS.has_key(source.next):
                        raise error, "unexpected end of pattern"
                    while FLAGS.has_key(source.next):
                        state.flags = state.flags | FLAGS[source.get()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                p = _parse_sub(source, state)
                if not source.match(")"):
                    raise error, "unbalanced parenthesis"
                if group is not None:
                    state.closegroup(group)
                subpattern.append((SUBPATTERN, (group, p)))
            else:
                # flags-only group like "(?i)": expect an immediate ")"
                while 1:
                    char = source.get()
                    if char is None:
                        raise error, "unexpected end of pattern"
                    if char == ")":
                        break
                    raise error, "unknown extension"

        elif this == "^":
            subpattern.append((AT, AT_BEGINNING))

        elif this == "$":
            subpattern.append((AT, AT_END))

        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpattern.append(code)

        else:
            raise error, "parser error"

    return subpattern
+
def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples;
    # this is the module's entry point, called by sre.compile()

    source = Tokenizer(str)

    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str

    p = _parse_sub(source, pattern, 0)

    # the whole input must have been consumed
    tail = source.get()
    if tail == ")":
        raise error, "unbalanced parenthesis"
    elif tail:
        raise error, "bogus characters at end of regular expression"

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern.  to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)

    return p
+
def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references; returns (groups, literals) for expand_template
    s = Tokenizer(source)
    p = []
    a = p.append
    def literal(literal, p=p):
        # append a literal, merging it with a preceding literal if any
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            p.append((LITERAL, literal))
    # empty slice keeps the template's string type (str vs unicode)
    sep = source[:0]
    if type(sep) is type(""):
        makechar = chr
    else:
        makechar = unichr
    while 1:
        this = s.get()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            if this == "\\g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while 1:
                        char = s.get()
                        if char is None:
                            raise error, "unterminated group name"
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error, "bad group name"
                try:
                    index = atoi(name)
                except ValueError:
                    if not isname(name):
                        raise error, "bad character in group name"
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError, "unknown group name"
                a((MARK, index))
            elif len(this) > 1 and this[1] in DIGITS:
                # \1 etc: group reference, or octal escape if no such group
                code = None
                while 1:
                    group = _group(this, pattern.groups+1)
                    if group:
                        if (s.next not in DIGITS or
                            not _group(this + s.next, pattern.groups+1)):
                            code = MARK, group
                            break
                    elif s.next in OCTDIGITS:
                        this = this + s.get()
                    else:
                        break
                if not code:
                    this = this[1:]
                    code = LITERAL, makechar(atoi(this[-6:], 8) & 0xff)
                if code[0] is LITERAL:
                    literal(code[1])
                else:
                    a(code)
            else:
                # simple escape such as \n, or a literal backslash pair
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    literals = []
    for c, s in p:
        if c is MARK:
            groups.append((i, s))
            literals.append(None)
        else:
            literals.append(s)
        i = i + 1
    return groups, literals
+
def expand_template(template, match):
    # expand a (groups, literals) pair from parse_template against a
    # match object, producing the final replacement string
    g = match.group
    # empty slice keeps the subject's string type (str vs unicode)
    sep = match.string[:0]
    groups, literals = template
    literals = literals[:]
    try:
        for index, group in groups:
            literals[index] = s = g(group)
            if s is None:
                raise IndexError
    except IndexError:
        raise error, "empty group"
    return string.join(literals, sep)
diff --git a/lib-python/2.2/stat.py b/lib-python/2.2/stat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/stat.py
@@ -0,0 +1,86 @@
+"""Constants/functions for interpreting results of os.stat() and os.lstat().
+
+Suggested usage: from stat import *
+"""
+
+# XXX Strictly spoken, this module may have to be adapted for each POSIX
+# implementation; in practice, however, the numeric constants used by
+# stat() are almost universal (even for stat() emulations on non-UNIX
+# systems like MS-DOS).
+
# Indices for stat struct members in tuple returned by os.stat()
# (use as st[ST_MODE] etc. on the 10-tuple)

ST_MODE  = 0
ST_INO   = 1
ST_DEV   = 2
ST_NLINK = 3
ST_UID   = 4
ST_GID   = 5
ST_SIZE  = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
+
# Extract bits from the mode

def S_IMODE(mode):
    # permission bits plus set-uid/set-gid/sticky (low 12 bits, 07777)
    return mode & 07777

def S_IFMT(mode):
    # file-type bits (mask 0170000), for comparison against S_IF*
    return mode & 0170000
+
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)

S_IFDIR  = 0040000      # directory
S_IFCHR  = 0020000      # character device
S_IFBLK  = 0060000      # block device
S_IFREG  = 0100000      # regular file
S_IFIFO  = 0010000      # fifo (named pipe)
S_IFLNK  = 0120000      # symbolic link
S_IFSOCK = 0140000      # socket
+
# Functions to test for each file type

def S_ISDIR(mode):
    """Return true if mode is from a directory."""
    fmt = S_IFMT(mode)
    return fmt == S_IFDIR

def S_ISCHR(mode):
    """Return true if mode is from a character special device file."""
    fmt = S_IFMT(mode)
    return fmt == S_IFCHR

def S_ISBLK(mode):
    """Return true if mode is from a block special device file."""
    fmt = S_IFMT(mode)
    return fmt == S_IFBLK

def S_ISREG(mode):
    """Return true if mode is from a regular file."""
    fmt = S_IFMT(mode)
    return fmt == S_IFREG

def S_ISFIFO(mode):
    """Return true if mode is from a FIFO (named pipe)."""
    fmt = S_IFMT(mode)
    return fmt == S_IFIFO

def S_ISLNK(mode):
    """Return true if mode is from a symbolic link."""
    fmt = S_IFMT(mode)
    return fmt == S_IFLNK

def S_ISSOCK(mode):
    """Return true if mode is from a socket."""
    fmt = S_IFMT(mode)
    return fmt == S_IFSOCK
+
# Names for permission bits

S_ISUID = 04000         # set UID on execution
S_ISGID = 02000         # set GID on execution
S_ENFMT = S_ISGID       # enforcement-mode locking shares the set-GID bit
S_ISVTX = 01000         # sticky bit
S_IREAD = 00400         # old Unix synonym for S_IRUSR
S_IWRITE = 00200        # old Unix synonym for S_IWUSR
S_IEXEC = 00100         # old Unix synonym for S_IXUSR
S_IRWXU = 00700         # owner read/write/execute mask
S_IRUSR = 00400         # owner: read
S_IWUSR = 00200         # owner: write
S_IXUSR = 00100         # owner: execute
S_IRWXG = 00070         # group read/write/execute mask
S_IRGRP = 00040         # group: read
S_IWGRP = 00020         # group: write
S_IXGRP = 00010         # group: execute
S_IRWXO = 00007         # others read/write/execute mask
S_IROTH = 00004         # others: read
S_IWOTH = 00002         # others: write
S_IXOTH = 00001         # others: execute
diff --git a/lib-python/2.2/statcache.py b/lib-python/2.2/statcache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/statcache.py
@@ -0,0 +1,77 @@
+"""Maintain a cache of stat() information on files.
+
+There are functions to reset the cache or to selectively remove items.
+"""
+
+import os as _os
+from stat import *
+
# Public interface of this module.
__all__ = ["stat","reset","forget","forget_prefix","forget_dir",
           "forget_except_prefix","isdir"]

# The cache.  Keys are pathnames, values are os.stat outcomes.
# Remember that multiple threads may be calling this!  So, e.g., that
# cache.has_key(path) returns 1 doesn't mean the cache will still contain
# path on the next line.  Code defensively.

cache = {}
+
def stat(path):
    """Stat a file, possibly out of the cache."""
    try:
        return cache[path]
    except KeyError:
        pass
    # cache miss: stat for real and remember the outcome
    result = _os.stat(path)
    cache[path] = result
    return result
+
def reset():
    """Clear the cache."""
    # a single clear() call; entries added concurrently by other
    # threads after this point are simply kept
    cache.clear()
+
# For thread safety, always use forget() internally too.
def forget(path):
    """Remove a given item from the cache, if it exists."""
    # try/del rather than a has_key() pre-check: another thread may
    # remove the entry between the check and the delete
    try:
        del cache[path]
    except KeyError:
        pass
+
def forget_prefix(prefix):
    """Remove all pathnames with a given prefix."""
    n = len(prefix)
    for path in cache.keys():
        if path[:n] == prefix:
            forget(path)
+
def forget_dir(prefix):
    """Forget a directory and all entries except for entries in subdirs."""

    # Remove trailing separator, if any.  This is tricky to do in a
    # x-platform way.  For example, Windows accepts both / and \ as
    # separators, and if there's nothing *but* a separator we want to
    # preserve that this is the root.  Only os.path has the platform
    # knowledge we need.
    from os.path import split, join
    # join then split normalizes away any trailing separator
    prefix = split(join(prefix, "xxx"))[0]
    forget(prefix)
    for path in cache.keys():
        # First check that the path at least starts with the prefix, so
        # that when it doesn't we can avoid paying for split().
        if path.startswith(prefix) and split(path)[0] == prefix:
            forget(path)
+
def forget_except_prefix(prefix):
    """Remove all pathnames except with a given prefix.

    Normally used with prefix = '/' after a chdir().
    """

    for path in cache.keys():
        if path.startswith(prefix):
            continue
        forget(path)
+
def isdir(path):
    """Return 1 if directory, else 0."""
    # an unstattable path (e.g. nonexistent) counts as "not a directory"
    try:
        mode = stat(path)[ST_MODE]
    except _os.error:
        return 0
    return S_ISDIR(mode)
diff --git a/lib-python/2.2/statvfs.py b/lib-python/2.2/statvfs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/statvfs.py
@@ -0,0 +1,15 @@
"""Constants for interpreting the results of os.statvfs() and os.fstatvfs()."""

# Indices for statvfs struct members in the tuple returned by
# os.statvfs() and os.fstatvfs().
# (use as st[F_BSIZE] etc. on the 10-tuple)

F_BSIZE   = 0           # Preferred file system block size
F_FRSIZE  = 1           # Fundamental file system block size
F_BLOCKS  = 2           # Total number of file system blocks (FRSIZE)
F_BFREE   = 3           # Total number of free blocks
F_BAVAIL  = 4           # Free blocks available to non-superuser
F_FILES   = 5           # Total number of file nodes
F_FFREE   = 6           # Total number of free file nodes
F_FAVAIL  = 7           # Free nodes available to non-superuser
F_FLAG    = 8           # Flags (see your local statvfs man page)
F_NAMEMAX = 9           # Maximum file name length
diff --git a/lib-python/2.2/string.py b/lib-python/2.2/string.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/string.py
@@ -0,0 +1,387 @@
+"""A collection of string operations (most are no longer used in Python 1.6).
+
+Warning: most of the code you see here isn't normally used nowadays.  With
+Python 1.6, many of these functions are implemented as methods on the
+standard string object. They used to be implemented by a built-in module
+called strop, but strop is now obsolete itself.
+
+Public module variables:
+
+whitespace -- a string containing all characters considered whitespace
+lowercase -- a string containing all characters considered lowercase letters
+uppercase -- a string containing all characters considered uppercase letters
+letters -- a string containing all characters considered letters
+digits -- a string containing all characters considered decimal digits
+hexdigits -- a string containing all characters considered hexadecimal digits
+octdigits -- a string containing all characters considered octal digits
+punctuation -- a string containing all characters considered punctuation
+printable -- a string containing all characters considered printable
+
+"""
+
+# Some strings for ctype-style character classification
+whitespace = ' \t\n\r\v\f'
+lowercase = 'abcdefghijklmnopqrstuvwxyz'
+uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+letters = lowercase + uppercase
+ascii_lowercase = lowercase
+ascii_uppercase = uppercase
+ascii_letters = ascii_lowercase + ascii_uppercase
+digits = '0123456789'
+hexdigits = digits + 'abcdef' + 'ABCDEF'
+octdigits = '01234567'
+punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
+printable = digits + letters + punctuation + whitespace
+
+# Case conversion helpers
+_idmap = ''
+for i in range(256): _idmap = _idmap + chr(i)
+del i
+
+# Backward compatible names for exceptions
+index_error = ValueError
+atoi_error = ValueError
+atof_error = ValueError
+atol_error = ValueError
+
+# convert UPPER CASE letters to lower case
+def lower(s):
+    """lower(s) -> string
+
+    Return a copy of the string s converted to lowercase.
+
+    """
+    return s.lower()
+
+# Convert lower case letters to UPPER CASE
+def upper(s):
+    """upper(s) -> string
+
+    Return a copy of the string s converted to uppercase.
+
+    """
+    return s.upper()
+
+# Swap lower case letters and UPPER CASE
+def swapcase(s):
+    """swapcase(s) -> string
+
+    Return a copy of the string s with upper case characters
+    converted to lowercase and vice versa.
+
+    """
+    return s.swapcase()
+
+# Strip leading and trailing tabs and spaces
+def strip(s, chars=None):
+    """strip(s [,chars]) -> string
+
+    Return a copy of the string s with leading and trailing
+    whitespace removed.
+    If chars is given and not None, remove characters in chars instead.
+    If chars is unicode, S will be converted to unicode before stripping.
+
+    """
+    return s.strip(chars)
+
+# Strip leading tabs and spaces
+def lstrip(s, chars=None):
+    """lstrip(s [,chars]) -> string
+
+    Return a copy of the string s with leading whitespace removed.
+    If chars is given and not None, remove characters in chars instead.
+    If chars is unicode, S will be converted to unicode before stripping.
+
+    """
+    return s.lstrip(chars)
+
+# Strip trailing tabs and spaces
+def rstrip(s, chars=None):
+    """rstrip(s [,chars]) -> string
+
+    Return a copy of the string s with trailing whitespace removed.
+    If chars is given and not None, remove characters in chars instead.
+    If chars is unicode, S will be converted to unicode before stripping.
+
+    """
+    return s.rstrip(chars)
+
+
+# Split a string into a list of space/tab-separated words
+def split(s, sep=None, maxsplit=-1):
+    """split(s [,sep [,maxsplit]]) -> list of strings
+
+    Return a list of the words in the string s, using sep as the
+    delimiter string.  If maxsplit is given, splits at no more than
+    maxsplit places (resulting in at most maxsplit+1 words).  If sep
+    is not specified, any whitespace string is a separator.
+
+    (split and splitfields are synonymous)
+
+    """
+    return s.split(sep, maxsplit)
+splitfields = split
+
+# Join fields with optional separator
+def join(words, sep = ' '):
+    """join(list [,sep]) -> string
+
+    Return a string composed of the words in list, with
+    intervening occurrences of sep.  The default separator is a
+    single space.
+
+    (joinfields and join are synonymous)
+
+    """
+    return sep.join(words)
+joinfields = join
+
+# Find substring, raise exception if not found
+def index(s, *args):
+    """index(s, sub [,start [,end]]) -> int
+
+    Like find but raises ValueError when the substring is not found.
+
+    """
+    return s.index(*args)
+
+# Find last substring, raise exception if not found
+def rindex(s, *args):
+    """rindex(s, sub [,start [,end]]) -> int
+
+    Like rfind but raises ValueError when the substring is not found.
+
+    """
+    return s.rindex(*args)
+
+# Count non-overlapping occurrences of substring
+def count(s, *args):
+    """count(s, sub[, start[,end]]) -> int
+
+    Return the number of occurrences of substring sub in string
+    s[start:end].  Optional arguments start and end are
+    interpreted as in slice notation.
+
+    """
+    return s.count(*args)
+
+# Find substring, return -1 if not found
+def find(s, *args):
+    """find(s, sub [,start [,end]]) -> in
+
+    Return the lowest index in s where substring sub is found,
+    such that sub is contained within s[start,end].  Optional
+    arguments start and end are interpreted as in slice notation.
+
+    Return -1 on failure.
+
+    """
+    return s.find(*args)
+
+# Find last substring, return -1 if not found
+def rfind(s, *args):
+    """rfind(s, sub [,start [,end]]) -> int
+
+    Return the highest index in s where substring sub is found,
+    such that sub is contained within s[start,end].  Optional
+    arguments start and end are interpreted as in slice notation.
+
+    Return -1 on failure.
+
+    """
+    return s.rfind(*args)
+
+# for a bit of speed
+_float = float
+_int = int
+_long = long
+try:
+    _StringTypes = (str, unicode)
+except NameError:
+    _StringTypes = (str,)
+
+# Convert string to float
+def atof(s):
+    """atof(s) -> float
+
+    Return the floating point number represented by the string s.
+
+    """
+    return _float(s)
+
+
+# Convert string to integer
+def atoi(s , base=10):
+    """atoi(s [,base]) -> int
+
+    Return the integer represented by the string s in the given
+    base, which defaults to 10.  The string s must consist of one
+    or more digits, possibly preceded by a sign.  If base is 0, it
+    is chosen from the leading characters of s, 0 for octal, 0x or
+    0X for hexadecimal.  If base is 16, a preceding 0x or 0X is
+    accepted.
+
+    """
+    return _int(s, base)
+
+
+# Convert string to long integer
+def atol(s, base=10):
+    """atol(s [,base]) -> long
+
+    Return the long integer represented by the string s in the
+    given base, which defaults to 10.  The string s must consist
+    of one or more digits, possibly preceded by a sign.  If base
+    is 0, it is chosen from the leading characters of s, 0 for
+    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding
+    0x or 0X is accepted.  A trailing L or l is not accepted,
+    unless base is 0.
+
+    """
+    return _long(s, base)
+
+
+# Left-justify a string
+def ljust(s, width):
+    """ljust(s, width) -> string
+
+    Return a left-justified version of s, in a field of the
+    specified width, padded with spaces as needed.  The string is
+    never truncated.
+
+    """
+    return s.ljust(width)
+
+# Right-justify a string
+def rjust(s, width):
+    """rjust(s, width) -> string
+
+    Return a right-justified version of s, in a field of the
+    specified width, padded with spaces as needed.  The string is
+    never truncated.
+
+    """
+    return s.rjust(width)
+
+# Center a string
+def center(s, width):
+    """center(s, width) -> string
+
+    Return a center version of s, in a field of the specified
+    width. padded with spaces as needed.  The string is never
+    truncated.
+
+    """
+    return s.center(width)
+
+# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
+# Decadent feature: the argument may be a string or a number
+# (Use of this is deprecated; it should be a string as with ljust c.s.)
+def zfill(x, width):
+    """zfill(x, width) -> string
+
+    Pad a numeric string x with zeros on the left, to fill a field
+    of the specified width.  The string x is never truncated.
+
+    """
+    if not isinstance(x, _StringTypes):
+        x = repr(x)
+    return x.zfill(width)
+
+# Expand tabs in a string.
+# Doesn't take non-printing chars into account, but does understand \n.
+def expandtabs(s, tabsize=8):
+    """expandtabs(s [,tabsize]) -> string
+
+    Return a copy of the string s with all tab characters replaced
+    by the appropriate number of spaces, depending on the current
+    column, and the tabsize (default 8).
+
+    """
+    return s.expandtabs(tabsize)
+
+# Character translation through look-up table.
+def translate(s, table, deletions=""):
+    """translate(s,table [,deletions]) -> string
+
+    Return a copy of the string s, where all characters occurring
+    in the optional argument deletions are removed, and the
+    remaining characters have been mapped through the given
+    translation table, which must be a string of length 256.  The
+    deletions argument is not allowed for Unicode strings.
+
+    """
+    if deletions:
+        return s.translate(table, deletions)
+    else:
+        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
+        # table is converted to Unicode.  This means that table *cannot*
+        # be a dictionary -- for that feature, use u.translate() directly.
+        return s.translate(table + s[:0])
+
+# Capitalize a string, e.g. "aBc  dEf" -> "Abc  def".
+def capitalize(s):
+    """capitalize(s) -> string
+
+    Return a copy of the string s with only its first character
+    capitalized.
+
+    """
+    return s.capitalize()
+
+# Capitalize the words in a string, e.g. " aBc  dEf " -> "Abc Def".
+# See also regsub.capwords().
+def capwords(s, sep=None):
+    """capwords(s, [sep]) -> string
+
+    Split the argument into words using split, capitalize each
+    word using capitalize, and join the capitalized words using
+    join. Note that this replaces runs of whitespace characters by
+    a single space.
+
+    """
+    return join(map(capitalize, s.split(sep)), sep or ' ')
+
+# Construct a translation string
+_idmapL = None
+def maketrans(fromstr, tostr):
+    """maketrans(frm, to) -> string
+
+    Return a translation table (a string of 256 bytes long)
+    suitable for use in string.translate.  The strings frm and to
+    must be of the same length.
+
+    """
+    if len(fromstr) != len(tostr):
+        raise ValueError, "maketrans arguments must have same length"
+    global _idmapL
+    if not _idmapL:
+        _idmapL = map(None, _idmap)
+    L = _idmapL[:]
+    fromstr = map(ord, fromstr)
+    for i in range(len(fromstr)):
+        L[fromstr[i]] = tostr[i]
+    return join(L, "")
+
+# Substring replacement (global)
+def replace(s, old, new, maxsplit=-1):
+    """replace (str, old, new[, maxsplit]) -> string
+
+    Return a copy of string str with all occurrences of substring
+    old replaced by new. If the optional argument maxsplit is
+    given, only the first maxsplit occurrences are replaced.
+
+    """
+    return s.replace(old, new, maxsplit)
+
+
+# Try importing optional built-in module "strop" -- if it exists,
+# it redefines some string operations that are 100-1000 times faster.
+# It also defines values for whitespace, lowercase and uppercase
+# that match <ctype.h>'s definitions.
+
+try:
+    from strop import maketrans, lowercase, uppercase, whitespace
+    letters = lowercase + uppercase
+except ImportError:
+    pass                                          # Use the original versions
diff --git a/lib-python/2.2/stringold.py b/lib-python/2.2/stringold.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/stringold.py
@@ -0,0 +1,430 @@
+# module 'string' -- A collection of string operations
+
+# Warning: most of the code you see here isn't normally used nowadays.  With
+# Python 1.6, many of these functions are implemented as methods on the
+# standard string object. They used to be implemented by a built-in module
+# called strop, but strop is now obsolete itself.
+
+"""Common string manipulations.
+
+Public module variables:
+
+whitespace -- a string containing all characters considered whitespace
+lowercase -- a string containing all characters considered lowercase letters
+uppercase -- a string containing all characters considered uppercase letters
+letters -- a string containing all characters considered letters
+digits -- a string containing all characters considered decimal digits
+hexdigits -- a string containing all characters considered hexadecimal digits
+octdigits -- a string containing all characters considered octal digits
+
+"""
+
+# Some strings for ctype-style character classification
+whitespace = ' \t\n\r\v\f'
+lowercase = 'abcdefghijklmnopqrstuvwxyz'
+uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+letters = lowercase + uppercase
+digits = '0123456789'
+hexdigits = digits + 'abcdef' + 'ABCDEF'
+octdigits = '01234567'
+
+# Case conversion helpers
+_idmap = ''
+for i in range(256): _idmap = _idmap + chr(i)
+del i
+
+# Backward compatible names for exceptions
+index_error = ValueError
+atoi_error = ValueError
+atof_error = ValueError
+atol_error = ValueError
+
+# convert UPPER CASE letters to lower case
+def lower(s):
+    """lower(s) -> string
+
+    Return a copy of the string s converted to lowercase.
+
+    """
+    return s.lower()
+
+# Convert lower case letters to UPPER CASE
+def upper(s):
+    """upper(s) -> string
+
+    Return a copy of the string s converted to uppercase.
+
+    """
+    return s.upper()
+
+# Swap lower case letters and UPPER CASE
+def swapcase(s):
+    """swapcase(s) -> string
+
+    Return a copy of the string s with upper case characters
+    converted to lowercase and vice versa.
+
+    """
+    return s.swapcase()
+
+# Strip leading and trailing tabs and spaces
+def strip(s):
+    """strip(s) -> string
+
+    Return a copy of the string s with leading and trailing
+    whitespace removed.
+
+    """
+    return s.strip()
+
+# Strip leading tabs and spaces
+def lstrip(s):
+    """lstrip(s) -> string
+
+    Return a copy of the string s with leading whitespace removed.
+
+    """
+    return s.lstrip()
+
+# Strip trailing tabs and spaces
+def rstrip(s):
+    """rstrip(s) -> string
+
+    Return a copy of the string s with trailing whitespace
+    removed.
+
+    """
+    return s.rstrip()
+
+
+# Split a string into a list of space/tab-separated words
+def split(s, sep=None, maxsplit=0):
+    """split(str [,sep [,maxsplit]]) -> list of strings
+
+    Return a list of the words in the string s, using sep as the
+    delimiter string.  If maxsplit is nonzero, splits into at most
+    maxsplit words If sep is not specified, any whitespace string
+    is a separator.  Maxsplit defaults to 0.
+
+    (split and splitfields are synonymous)
+
+    """
+    return s.split(sep, maxsplit)
+splitfields = split
+
+# Join fields with optional separator
+def join(words, sep = ' '):
+    """join(list [,sep]) -> string
+
+    Return a string composed of the words in list, with
+    intervening occurrences of sep.  The default separator is a
+    single space.
+
+    (joinfields and join are synonymous)
+
+    """
+    return sep.join(words)
+joinfields = join
+
+# for a little bit of speed
+_apply = apply
+
+# Find substring, raise exception if not found
+def index(s, *args):
+    """index(s, sub [,start [,end]]) -> int
+
+    Like find but raises ValueError when the substring is not found.
+
+    """
+    return _apply(s.index, args)
+
+# Find last substring, raise exception if not found
+def rindex(s, *args):
+    """rindex(s, sub [,start [,end]]) -> int
+
+    Like rfind but raises ValueError when the substring is not found.
+
+    """
+    return _apply(s.rindex, args)
+
+# Count non-overlapping occurrences of substring
+def count(s, *args):
+    """count(s, sub[, start[,end]]) -> int
+
+    Return the number of occurrences of substring sub in string
+    s[start:end].  Optional arguments start and end are
+    interpreted as in slice notation.
+
+    """
+    return _apply(s.count, args)
+
+# Find substring, return -1 if not found
+def find(s, *args):
+    """find(s, sub [,start [,end]]) -> int
+
+    Return the lowest index in s where substring sub is found,
+    such that sub is contained within s[start:end].  Optional
+    arguments start and end are interpreted as in slice notation.
+
+    Return -1 on failure.
+
+    """
+    return _apply(s.find, args)
+
+# Find last substring, return -1 if not found
+def rfind(s, *args):
+    """rfind(s, sub [,start [,end]]) -> int
+
+    Return the highest index in s where substring sub is found,
+    such that sub is contained within s[start:end].  Optional
+    arguments start and end are interpreted as in slice notation.
+
+    Return -1 on failure.
+
+    """
+    return _apply(s.rfind, args)
+
+# for a bit of speed
+_float = float
+_int = int
+_long = long
+_StringType = type('')
+
+# Convert string to float
+def atof(s):
+    """atof(s) -> float
+
+    Return the floating point number represented by the string s.
+
+    """
+    if type(s) == _StringType:
+        return _float(s)
+    else:
+        raise TypeError('argument 1: expected string, %s found' %
+                        type(s).__name__)
+
+# Convert string to integer
+def atoi(*args):
+    """atoi(s [,base]) -> int
+
+    Return the integer represented by the string s in the given
+    base, which defaults to 10.  The string s must consist of one
+    or more digits, possibly preceded by a sign.  If base is 0, it
+    is chosen from the leading characters of s, 0 for octal, 0x or
+    0X for hexadecimal.  If base is 16, a preceding 0x or 0X is
+    accepted.
+
+    """
+    try:
+        s = args[0]
+    except IndexError:
+        raise TypeError('function requires at least 1 argument: %d given' %
+                        len(args))
+    # Don't catch type error resulting from too many arguments to int().  The
+    # error message isn't compatible but the error type is, and this function
+    # is complicated enough already.
+    if type(s) == _StringType:
+        return _apply(_int, args)
+    else:
+        raise TypeError('argument 1: expected string, %s found' %
+                        type(s).__name__)
+
+
+# Convert string to long integer
+def atol(*args):
+    """atol(s [,base]) -> long
+
+    Return the long integer represented by the string s in the
+    given base, which defaults to 10.  The string s must consist
+    of one or more digits, possibly preceded by a sign.  If base
+    is 0, it is chosen from the leading characters of s, 0 for
+    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding
+    0x or 0X is accepted.  A trailing L or l is not accepted,
+    unless base is 0.
+
+    """
+    try:
+        s = args[0]
+    except IndexError:
+        raise TypeError('function requires at least 1 argument: %d given' %
+                        len(args))
+    # Don't catch type error resulting from too many arguments to long().  The
+    # error message isn't compatible but the error type is, and this function
+    # is complicated enough already.
+    if type(s) == _StringType:
+        return _apply(_long, args)
+    else:
+        raise TypeError('argument 1: expected string, %s found' %
+                        type(s).__name__)
+
+
+# Left-justify a string
+def ljust(s, width):
+    """ljust(s, width) -> string
+
+    Return a left-justified version of s, in a field of the
+    specified width, padded with spaces as needed.  The string is
+    never truncated.
+
+    """
+    n = width - len(s)
+    if n <= 0: return s
+    return s + ' '*n
+
+# Right-justify a string
+def rjust(s, width):
+    """rjust(s, width) -> string
+
+    Return a right-justified version of s, in a field of the
+    specified width, padded with spaces as needed.  The string is
+    never truncated.
+
+    """
+    n = width - len(s)
+    if n <= 0: return s
+    return ' '*n + s
+
+# Center a string
+def center(s, width):
+    """center(s, width) -> string
+
+    Return a centered version of s, in a field of the specified
+    width, padded with spaces as needed.  The string is never
+    truncated.
+
+    """
+    n = width - len(s)
+    if n <= 0: return s
+    half = n/2
+    if n%2 and width%2:
+        # This ensures that center(center(s, i), j) = center(s, j)
+        half = half+1
+    return ' '*half +  s + ' '*(n-half)
+
+# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
+# Decadent feature: the argument may be a string or a number
+# (Use of this is deprecated; it should be a string as with ljust c.s.)
+def zfill(x, width):
+    """zfill(x, width) -> string
+
+    Pad a numeric string x with zeros on the left, to fill a field
+    of the specified width.  The string x is never truncated.
+
+    """
+    if type(x) == type(''): s = x
+    else: s = `x`
+    n = len(s)
+    if n >= width: return s
+    sign = ''
+    if s[0] in ('-', '+'):
+        sign, s = s[0], s[1:]
+    return sign + '0'*(width-n) + s
+
+# Expand tabs in a string.
+# Doesn't take non-printing chars into account, but does understand \n.
+def expandtabs(s, tabsize=8):
+    """expandtabs(s [,tabsize]) -> string
+
+    Return a copy of the string s with all tab characters replaced
+    by the appropriate number of spaces, depending on the current
+    column, and the tabsize (default 8).
+
+    """
+    res = line = ''
+    for c in s:
+        if c == '\t':
+            c = ' '*(tabsize - len(line) % tabsize)
+        line = line + c
+        if c == '\n':
+            res = res + line
+            line = ''
+    return res + line
+
+# Character translation through look-up table.
+def translate(s, table, deletions=""):
+    """translate(s,table [,deletechars]) -> string
+
+    Return a copy of the string s, where all characters occurring
+    in the optional argument deletechars are removed, and the
+    remaining characters have been mapped through the given
+    translation table, which must be a string of length 256.
+
+    """
+    return s.translate(table, deletions)
+
+# Capitalize a string, e.g. "aBc  dEf" -> "Abc  def".
+def capitalize(s):
+    """capitalize(s) -> string
+
+    Return a copy of the string s with only its first character
+    capitalized.
+
+    """
+    return s.capitalize()
+
+# Capitalize the words in a string, e.g. " aBc  dEf " -> "Abc Def".
+# See also regsub.capwords().
+def capwords(s, sep=None):
+    """capwords(s, [sep]) -> string
+
+    Split the argument into words using split, capitalize each
+    word using capitalize, and join the capitalized words using
+    join. Note that this replaces runs of whitespace characters by
+    a single space.
+
+    """
+    return join(map(capitalize, s.split(sep)), sep or ' ')
+
+# Construct a translation string
+_idmapL = None
+def maketrans(fromstr, tostr):
+    """maketrans(frm, to) -> string
+
+    Return a translation table (a string of 256 bytes long)
+    suitable for use in string.translate.  The strings frm and to
+    must be of the same length.
+
+    """
+    if len(fromstr) != len(tostr):
+        raise ValueError, "maketrans arguments must have same length"
+    global _idmapL
+    if not _idmapL:
+        _idmapL = map(None, _idmap)
+    L = _idmapL[:]
+    fromstr = map(ord, fromstr)
+    for i in range(len(fromstr)):
+        L[fromstr[i]] = tostr[i]
+    return join(L, "")
+
+# Substring replacement (global)
+def replace(s, old, new, maxsplit=0):
+    """replace (str, old, new[, maxsplit]) -> string
+
+    Return a copy of string str with all occurrences of substring
+    old replaced by new. If the optional argument maxsplit is
+    given, only the first maxsplit occurrences are replaced.
+
+    """
+    return s.replace(old, new, maxsplit)
+
+
+# XXX: transitional
+#
+# If string objects do not have methods, then we need to use the old string.py
+# library, which uses strop for many more things than just the few outlined
+# below.
+try:
+    ''.upper
+except AttributeError:
+    from stringold import *
+
+# Try importing optional built-in module "strop" -- if it exists,
+# it redefines some string operations that are 100-1000 times faster.
+# It also defines values for whitespace, lowercase and uppercase
+# that match <ctype.h>'s definitions.
+
+try:
+    from strop import maketrans, lowercase, uppercase, whitespace
+    letters = lowercase + uppercase
+except ImportError:
+    pass                                          # Use the original versions
diff --git a/lib-python/2.2/sunau.py b/lib-python/2.2/sunau.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sunau.py
@@ -0,0 +1,474 @@
+"""Stuff to parse Sun and NeXT audio files.
+
+An audio file consists of a header followed by the data.  The structure
+of the header is as follows.
+
+        +---------------+
+        | magic word    |
+        +---------------+
+        | header size   |
+        +---------------+
+        | data size     |
+        +---------------+
+        | encoding      |
+        +---------------+
+        | sample rate   |
+        +---------------+
+        | # of channels |
+        +---------------+
+        | info          |
+        |               |
+        +---------------+
+
+The magic word consists of the 4 characters '.snd'.  Apart from the
+info field, all header fields are 4 bytes in size.  They are all
+32-bit unsigned integers encoded in big-endian byte order.
+
+The header size really gives the start of the data.
+The data size is the physical size of the data.  From the other
+parameters the number of frames can be calculated.
+The encoding gives the way in which audio samples are encoded.
+Possible values are listed below.
+The info field currently consists of an ASCII string giving a
+human-readable description of the audio file.  The info field is
+padded with NUL bytes to the header size.
+
+Usage.
+
+Reading audio files:
+        f = sunau.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+When the setpos() and rewind() methods are not used, the seek()
+method is not necessary.
+
+This returns an instance of a class with the following public methods:
+        getnchannels()  -- returns number of audio channels (1 for
+                           mono, 2 for stereo)
+        getsampwidth()  -- returns sample width in bytes
+        getframerate()  -- returns sampling frequency
+        getnframes()    -- returns number of audio frames
+        getcomptype()   -- returns compression type ('NONE' or 'ULAW')
+        getcompname()   -- returns human-readable version of
+                           compression type ('not compressed' matches 'NONE')
+        getparams()     -- returns a tuple consisting of all of the
+                           above in the above order
+        getmarkers()    -- returns None (for compatibility with the
+                           aifc module)
+        getmark(id)     -- raises an error since the mark does not
+                           exist (for compatibility with the aifc module)
+        readframes(n)   -- returns at most n frames of audio
+        rewind()        -- rewind to the beginning of the audio stream
+        setpos(pos)     -- seek to the specified position
+        tell()          -- return the current position
+        close()         -- close the instance (make it unusable)
+The position returned by tell() and the position given to setpos()
+are compatible and have nothing to do with the actual position in the
+file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing audio files:
+        f = sunau.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+        setnchannels(n) -- set the number of channels
+        setsampwidth(n) -- set the sample width
+        setframerate(n) -- set the frame rate
+        setnframes(n)   -- set the number of frames
+        setcomptype(type, name)
+                        -- set the compression type and the
+                           human-readable compression type
+        setparams(tuple)-- set all parameters at once
+        tell()          -- return current position in output file
+        writeframesraw(data)
+                        -- write audio frames without patching up the
+                           file header
+        writeframes(data)
+                        -- write audio frames and patch up the file header
+        close()         -- patch up the file header and close the
+                           output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+The close() method is called automatically when the class instance
+is destroyed.
+"""
+
+# from <multimedia/audio_filehdr.h>
+AUDIO_FILE_MAGIC = 0x2e736e64           # the four ASCII bytes '.snd'
+AUDIO_FILE_ENCODING_MULAW_8 = 1         # CCITT G.711 u-law, 1 byte/sample
+AUDIO_FILE_ENCODING_LINEAR_8 = 2
+AUDIO_FILE_ENCODING_LINEAR_16 = 3
+AUDIO_FILE_ENCODING_LINEAR_24 = 4
+AUDIO_FILE_ENCODING_LINEAR_32 = 5
+AUDIO_FILE_ENCODING_FLOAT = 6
+AUDIO_FILE_ENCODING_DOUBLE = 7
+AUDIO_FILE_ENCODING_ADPCM_G721 = 23
+AUDIO_FILE_ENCODING_ADPCM_G722 = 24
+AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
+AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
+AUDIO_FILE_ENCODING_ALAW_8 = 27         # CCITT G.711 A-law, 1 byte/sample
+
+# from <multimedia/audio_hdr.h>
+AUDIO_UNKNOWN_SIZE = 0xFFFFFFFFL        # ((unsigned)(~0))
+
+# The subset of encodings Au_read/Au_write actually handle.
+_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
+                     AUDIO_FILE_ENCODING_LINEAR_8,
+                     AUDIO_FILE_ENCODING_LINEAR_16,
+                     AUDIO_FILE_ENCODING_LINEAR_24,
+                     AUDIO_FILE_ENCODING_LINEAR_32,
+                     AUDIO_FILE_ENCODING_ALAW_8]
+
class Error(Exception):
    """Exception raised for all sunau-specific error conditions."""
+
def _read_u32(file):
    """Read four bytes from file and return them as a big-endian
    unsigned integer; raise EOFError if the file runs out early."""
    value = 0
    for _dummy in range(4):
        octet = file.read(1)
        if octet == '':
            raise EOFError
        value = (value << 8) | ord(octet)
    return value
+
def _write_u32(file, x):
    """Write x to file as a big-endian 32-bit unsigned integer."""
    octets = []
    value = x
    for _dummy in range(4):
        value, low = divmod(value, 256)
        octets.append(low)
    octets.reverse()                    # most significant byte first
    for b in octets:
        file.write(chr(int(b)))
+
class Au_read:
    """Read access to a Sun/NeXT (.au) audio file.

    Accepts a file name or an open file object with read(), seek() and
    close() methods.  The header is parsed immediately by initfp();
    audio data is returned on demand by readframes().  Positions used
    by tell()/setpos() are in frames, not bytes.
    """

    def __init__(self, f):
        if type(f) == type(''):
            import __builtin__
            f = __builtin__.open(f, 'rb')
        self.initfp(f)

    def __del__(self):
        # Close automatically when the instance is garbage collected.
        if self._file:
            self.close()

    def initfp(self, file):
        """Parse the .au header from file and record the parameters.

        Raises Error for a bad magic word, an implausible header size,
        or an encoding this module does not handle.
        """
        self._file = file
        self._soundpos = 0          # current read position, in frames
        magic = int(_read_u32(file))
        if magic != AUDIO_FILE_MAGIC:
            raise Error('bad magic number')
        self._hdr_size = int(_read_u32(file))
        if self._hdr_size < 24:
            raise Error('header size too small')
        if self._hdr_size > 100:
            raise Error('header size ridiculously large')
        self._data_size = _read_u32(file)
        if self._data_size != AUDIO_UNKNOWN_SIZE:
            self._data_size = int(self._data_size)
        self._encoding = int(_read_u32(file))
        if self._encoding not in _simple_encodings:
            raise Error('encoding not (yet) supported')
        if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
                  AUDIO_FILE_ENCODING_ALAW_8):
            # One compressed byte per sample on disk; u-law is expanded
            # to 16-bit samples by readframes, hence sampwidth 2.
            self._sampwidth = 2
            self._framesize = 1
        elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
            self._framesize = self._sampwidth = 1
        elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
            self._framesize = self._sampwidth = 2
        elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
            self._framesize = self._sampwidth = 3
        elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
            self._framesize = self._sampwidth = 4
        else:
            raise Error('unknown encoding')
        self._framerate = int(_read_u32(file))
        self._nchannels = int(_read_u32(file))
        # From here on _framesize is the size of a complete frame,
        # i.e. the per-channel sample size times the channel count.
        self._framesize = self._framesize * self._nchannels
        if self._hdr_size > 24:
            self._info = file.read(self._hdr_size - 24)
            # The info field is NUL-padded; keep only the text before
            # the first NUL byte.
            for i in range(len(self._info)):
                if self._info[i] == '\0':
                    self._info = self._info[:i]
                    break
        else:
            self._info = ''

    def getfp(self):
        """Return the underlying file object."""
        return self._file

    def getnchannels(self):
        """Return the number of audio channels."""
        return self._nchannels

    def getsampwidth(self):
        """Return the sample width in bytes."""
        return self._sampwidth

    def getframerate(self):
        """Return the sampling frequency in frames per second."""
        return self._framerate

    def getnframes(self):
        """Return the number of audio frames, or AUDIO_UNKNOWN_SIZE."""
        if self._data_size == AUDIO_UNKNOWN_SIZE:
            return AUDIO_UNKNOWN_SIZE
        if self._encoding in _simple_encodings:
            return self._data_size / self._framesize
        return 0                # XXX--must do some arithmetic here

    def getcomptype(self):
        """Return 'ULAW', 'ALAW' or 'NONE'."""
        if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
            return 'ULAW'
        elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
            return 'ALAW'
        else:
            return 'NONE'

    def getcompname(self):
        """Return a human-readable version of the compression type."""
        if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
            return 'CCITT G.711 u-law'
        elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
            return 'CCITT G.711 A-law'
        else:
            return 'not compressed'

    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype,
        compname), i.e. all of the above in one tuple."""
        return self.getnchannels(), self.getsampwidth(), \
                  self.getframerate(), self.getnframes(), \
                  self.getcomptype(), self.getcompname()

    def getmarkers(self):
        """Return None; .au files have no marks (aifc compatibility)."""
        return None

    def getmark(self, id):
        """Always raise Error; .au files have no marks (aifc compat)."""
        raise Error('no marks')

    def readframes(self, nframes):
        """Return at most nframes frames of audio data as a string.

        u-law data is expanded to linear samples.  NOTE(review): A-law
        data is returned undecoded even though getsampwidth() reports 2
        for it -- longstanding quirk preserved from the original.
        """
        if self._encoding in _simple_encodings:
            if nframes == AUDIO_UNKNOWN_SIZE:
                data = self._file.read()
            else:
                # Bug fix: _framesize already includes the channel count
                # (see initfp), so the original's extra "* _nchannels"
                # read nchannels times too many bytes per call.
                data = self._file.read(nframes * self._framesize)
            if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
                import audioop
                data = audioop.ulaw2lin(data, self._sampwidth)
            return data
        return None             # XXX--not implemented yet

    def rewind(self):
        """Seek back to the start of the audio data."""
        self._soundpos = 0
        self._file.seek(self._hdr_size)

    def tell(self):
        """Return the current position, in frames."""
        return self._soundpos

    def setpos(self, pos):
        """Seek to the given frame position."""
        if pos < 0 or pos > self.getnframes():
            raise Error('position not in range')
        self._file.seek(pos * self._framesize + self._hdr_size)
        self._soundpos = pos

    def close(self):
        """Make the instance unusable.  The underlying file object is
        dropped but not explicitly closed (as in the original)."""
        self._file = None
+
class Au_write:
    """Write access to a Sun/NeXT (.au) audio file.

    Accepts a file name or an open file object with write(), tell() and
    seek() methods.  All parameters must be set before the first frame
    is written; writeframes() or close() patch the header afterwards so
    the stored data size matches what was actually written.
    """

    def __init__(self, f):
        if type(f) == type(''):
            import __builtin__
            f = __builtin__.open(f, 'wb')
        self.initfp(f)

    def __del__(self):
        # Patch the header and flush when garbage collected.
        if self._file:
            self.close()

    def initfp(self, file):
        """Reset every parameter to its initial, unset state."""
        self._file = file
        self._framerate = 0
        self._nchannels = 0
        self._sampwidth = 0
        self._framesize = 0
        self._nframes = AUDIO_UNKNOWN_SIZE
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._info = ''
        self._comptype = 'ULAW' # default is U-law

    def setnchannels(self, nchannels):
        """Set the number of channels (1, 2 or 4)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels not in (1, 2, 4):
            raise Error('only 1, 2, or 4 channels supported')
        self._nchannels = nchannels

    def getnchannels(self):
        """Return the number of channels; raise Error if never set."""
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels

    def setsampwidth(self, sampwidth):
        """Set the sample width in bytes (1, 2 or 4)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth not in (1, 2, 4):
            raise Error('bad sample width')
        self._sampwidth = sampwidth

    def getsampwidth(self):
        """Return the sample width; raise Error if never set."""
        # Bug fix: the original tested self._framerate here, so an
        # unset sample width went unreported whenever a frame rate had
        # been set (and vice versa).
        if not self._sampwidth:
            raise Error('sample width not specified')
        return self._sampwidth

    def setframerate(self, framerate):
        """Set the sampling frequency in frames per second."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._framerate = framerate

    def getframerate(self):
        """Return the frame rate; raise Error if never set."""
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate

    def setnframes(self, nframes):
        """Pre-declare the total number of frames (optional; when it is
        correct the header needs no patching afterwards)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nframes < 0:
            raise Error('# of frames cannot be negative')
        self._nframes = nframes

    def getnframes(self):
        """Return the number of frames written so far."""
        return self._nframeswritten

    def setcomptype(self, type, name):
        """Set the compression type ('NONE' or 'ULAW').  The
        human-readable name argument is accepted but not stored."""
        if type in ('NONE', 'ULAW'):
            self._comptype = type
        else:
            raise Error('unknown compression type')

    def getcomptype(self):
        """Return the compression type set with setcomptype()."""
        return self._comptype

    def getcompname(self):
        """Return a human-readable version of the compression type."""
        if self._comptype == 'ULAW':
            return 'CCITT G.711 u-law'
        elif self._comptype == 'ALAW':
            return 'CCITT G.711 A-law'
        else:
            return 'not compressed'

    def setparams(self, params):
        """Set all parameters at once from a 6-tuple
        (nchannels, sampwidth, framerate, nframes, comptype, compname).
        """
        # Unpack explicitly rather than with a Python-2-only tuple
        # parameter in the signature; callers still pass one tuple.
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype,
        compname) as one tuple."""
        return self.getnchannels(), self.getsampwidth(), \
                  self.getframerate(), self.getnframes(), \
                  self.getcomptype(), self.getcompname()

    def tell(self):
        """Return the current position in frames written."""
        return self._nframeswritten

    def writeframesraw(self, data):
        """Write audio frames without patching up the file header."""
        self._ensure_header_written()
        nframes = len(data) / self._framesize
        if self._comptype == 'ULAW':
            import audioop
            data = audioop.lin2ulaw(data, self._sampwidth)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        """Write audio frames and patch the header if sizes changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
                  self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        """Patch up the file header, flush, and make the instance
        unusable.  The underlying file object is not closed."""
        self._ensure_header_written()
        if self._nframeswritten != self._nframes or \
                  self._datalength != self._datawritten:
            self._patchheader()
        self._file.flush()
        self._file = None

    #
    # private methods
    #

    def _ensure_header_written(self):
        # Write the header exactly once, before the first frame; all
        # mandatory parameters must be set by then.
        if not self._nframeswritten:
            if not self._nchannels:
                raise Error('# of channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('frame rate not specified')
            self._write_header()

    def _write_header(self):
        # Map the chosen parameters onto an .au encoding code and the
        # per-sample on-disk size.
        if self._comptype == 'NONE':
            if self._sampwidth == 1:
                encoding = AUDIO_FILE_ENCODING_LINEAR_8
                self._framesize = 1
            elif self._sampwidth == 2:
                encoding = AUDIO_FILE_ENCODING_LINEAR_16
                self._framesize = 2
            elif self._sampwidth == 4:
                encoding = AUDIO_FILE_ENCODING_LINEAR_32
                self._framesize = 4
            else:
                raise Error('internal error')
        elif self._comptype == 'ULAW':
            encoding = AUDIO_FILE_ENCODING_MULAW_8
            self._framesize = 1
        else:
            raise Error('internal error')
        self._framesize = self._framesize * self._nchannels
        _write_u32(self._file, AUDIO_FILE_MAGIC)
        # 24 fixed header bytes + info + at least one NUL, rounded up
        # so the header ends on an 8-byte boundary.
        header_size = 25 + len(self._info)
        header_size = (header_size + 7) & ~7
        _write_u32(self._file, header_size)
        if self._nframes == AUDIO_UNKNOWN_SIZE:
            length = AUDIO_UNKNOWN_SIZE
        else:
            length = self._nframes * self._framesize
        _write_u32(self._file, length)
        self._datalength = length
        _write_u32(self._file, encoding)
        _write_u32(self._file, self._framerate)
        _write_u32(self._file, self._nchannels)
        self._file.write(self._info)
        self._file.write('\0'*(header_size - len(self._info) - 24))

    def _patchheader(self):
        # Rewrite the data-size field (byte offset 8) with the number
        # of bytes actually written, then return to end of file.
        self._file.seek(8)
        _write_u32(self._file, self._datawritten)
        self._datalength = self._datawritten
        self._file.seek(0, 2)
+
def open(f, mode=None):
    """Open a .au file for reading or writing.

    f is a file name or an open file object.  mode defaults to f.mode
    when available, else 'rb'.  'r'/'rb' yield an Au_read instance,
    'w'/'wb' an Au_write instance.
    """
    if mode is None:
        mode = getattr(f, 'mode', 'rb')
    if mode in ('r', 'rb'):
        return Au_read(f)
    if mode in ('w', 'wb'):
        return Au_write(f)
    raise Error("mode must be 'r', 'rb', 'w', or 'wb'")

# Backwards-compatible alias.
openfp = open
diff --git a/lib-python/2.2/sunaudio.py b/lib-python/2.2/sunaudio.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/sunaudio.py
@@ -0,0 +1,44 @@
+"""Interpret sun audio headers."""
+
+MAGIC = '.snd'      # magic word that starts every Sun/NeXT audio file
+
+class error(Exception):
+    """Raised by gethdr() for a bad magic word or header size."""
+    pass
+
+
def get_long_be(s):
    """Convert the first four characters of s to a big-endian integer."""
    result = 0
    for i in range(4):
        result = (result << 8) | ord(s[i])
    return result
+
+
def gethdr(fp):
    """Read a sound header from an open file.

    Returns (data_size, encoding, sample_rate, channels, info); raises
    error for a bad magic word or a header size smaller than 24 bytes.
    """
    if fp.read(4) != MAGIC:
        raise error('gethdr: bad magic word')
    # The five fixed fields after the magic word are 32-bit big-endian.
    fields = []
    for _dummy in range(5):
        fields.append(get_long_be(fp.read(4)))
    hdr_size, data_size, encoding, sample_rate, channels = fields
    excess = hdr_size - 24
    if excess < 0:
        raise error('gethdr: bad hdr_size')
    info = ''
    if excess > 0:
        info = fp.read(excess)
    return (data_size, encoding, sample_rate, channels, info)
+
+
+def printhdr(file):
+    """Read and print the sound header of a named file."""
+    hdr = gethdr(open(file, 'r'))
+    data_size, encoding, sample_rate, channels, info = hdr
+    # Strip the trailing NUL padding from the info string.
+    while info[-1:] == '\0':
+        info = info[:-1]
+    print 'File name:  ', file
+    print 'Data size:  ', data_size
+    print 'Encoding:   ', encoding
+    print 'Sample rate:', sample_rate
+    print 'Channels:   ', channels
+    print 'Info:       ', `info`
diff --git a/lib-python/2.2/symbol.py b/lib-python/2.2/symbol.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/symbol.py
@@ -0,0 +1,95 @@
+#! /usr/bin/env python
+
+"""Non-terminal symbols of Python grammar (from "graminit.h")."""
+
+#  This file is automatically generated; please don't muck it up!
+#
+#  To update the symbols in this file, 'cd' to the top directory of
+#  the python source tree after building the interpreter and run:
+#
+#    python Lib/symbol.py
+
+#--start constants--
+single_input = 256
+file_input = 257
+eval_input = 258
+funcdef = 259
+parameters = 260
+varargslist = 261
+fpdef = 262
+fplist = 263
+stmt = 264
+simple_stmt = 265
+small_stmt = 266
+expr_stmt = 267
+augassign = 268
+print_stmt = 269
+del_stmt = 270
+pass_stmt = 271
+flow_stmt = 272
+break_stmt = 273
+continue_stmt = 274
+return_stmt = 275
+yield_stmt = 276
+raise_stmt = 277
+import_stmt = 278
+import_as_name = 279
+dotted_as_name = 280
+dotted_name = 281
+global_stmt = 282
+exec_stmt = 283
+assert_stmt = 284
+compound_stmt = 285
+if_stmt = 286
+while_stmt = 287
+for_stmt = 288
+try_stmt = 289
+except_clause = 290
+suite = 291
+test = 292
+and_test = 293
+not_test = 294
+comparison = 295
+comp_op = 296
+expr = 297
+xor_expr = 298
+and_expr = 299
+shift_expr = 300
+arith_expr = 301
+term = 302
+factor = 303
+power = 304
+atom = 305
+listmaker = 306
+lambdef = 307
+trailer = 308
+subscriptlist = 309
+subscript = 310
+sliceop = 311
+exprlist = 312
+testlist = 313
+testlist_safe = 314
+dictmaker = 315
+classdef = 316
+arglist = 317
+argument = 318
+list_iter = 319
+list_for = 320
+list_if = 321
+#--end constants--
+
+# Reverse mapping (symbol number -> symbol name), derived from the
+# generated integer constants above.
+sym_name = {}
+for _name, _value in globals().items():
+    if type(_value) is type(0):
+        sym_name[_value] = _name
+
+
+def main():
+    """Regenerate this file from the C grammar tables via token.main()."""
+    import sys
+    import token
+    if len(sys.argv) == 1:
+        sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
+    token.main()
+
+if __name__ == "__main__":
+    main()
diff --git a/lib-python/2.2/symtable.py b/lib-python/2.2/symtable.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/symtable.py
@@ -0,0 +1,255 @@
+"""Interface to the compiler's internal symbol tables"""
+
+import _symtable
+from _symtable import USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM, \
+     DEF_STAR, DEF_DOUBLESTAR, DEF_INTUPLE, DEF_FREE, \
+     DEF_FREE_GLOBAL, DEF_FREE_CLASS, DEF_IMPORT, DEF_BOUND, \
+     OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC
+
+import weakref
+
__all__ = ["symtable", "SymbolTable", "newSymbolTable", "Class",
           "Function", "Symbol"]

def symtable(code, filename, compile_type):
    """Return the top-level SymbolTable for the given source string.

    compile_type is as for compile(), e.g. "exec".
    """
    raw = _symtable.symtable(code, filename, compile_type)
    top = raw[0]
    return newSymbolTable(top, filename)
+
class SymbolTableFactory:
    """Create SymbolTable wrappers, memoized per (raw table, filename).

    The memo holds only weak references, so wrappers go away once all
    callers drop them.
    """

    def __init__(self):
        self.__memo = weakref.WeakValueDictionary()

    def new(self, table, filename):
        """Build a fresh wrapper of the class matching table.type."""
        kind = table.type
        if kind == _symtable.TYPE_FUNCTION:
            return Function(table, filename)
        if kind == _symtable.TYPE_CLASS:
            return Class(table, filename)
        return SymbolTable(table, filename)

    def __call__(self, table, filename):
        """Return the cached wrapper for (table, filename), creating
        one on a cache miss."""
        key = (table, filename)
        obj = self.__memo.get(key)
        if obj is None:
            obj = self.new(table, filename)
            self.__memo[key] = obj
        return obj

newSymbolTable = SymbolTableFactory()
+
def bool(x):
    """Coerce any value to exactly 1 (true) or 0 (false)."""
    if not x:
        return 0
    return 1
+
def is_free(flags):
    """Return 1 if the flag bits describe a free variable, else 0."""
    referenced = flags & (USE | DEF_FREE)
    bound_here = flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)
    if referenced and bound_here:
        return 1
    if flags & DEF_FREE_CLASS:
        return 1
    return 0
+
+class SymbolTable:
+    """Wrapper around one raw _symtable table entry.
+
+    Instances are normally created through newSymbolTable, which picks
+    the Function or Class subclass based on the raw table's type.
+    """
+
+    def __init__(self, raw_table, filename):
+        self._table = raw_table        # raw table object from _symtable
+        self._filename = filename      # file the source code came from
+        self._symbols = {}             # name -> Symbol cache for lookup()
+
+    def __repr__(self):
+        if self.__class__ == SymbolTable:
+            kind = ""
+        else:
+            kind = "%s " % self.__class__.__name__
+
+        if self._table.name == "global":
+            return "<%sSymbolTable for module %s>" % (kind, self._filename)
+        else:
+            return "<%sSymbolTable for %s in %s>" % (kind, self._table.name,
+                                                     self._filename)
+
+    def get_type(self):
+        """Return "module", "function" or "class"."""
+        if self._table.type == _symtable.TYPE_MODULE:
+            return "module"
+        if self._table.type == _symtable.TYPE_FUNCTION:
+            return "function"
+        if self._table.type == _symtable.TYPE_CLASS:
+            return "class"
+        # Only reached for an unrecognized type code.
+        assert self._table.type in (1, 2, 3), \
+               "unexpected type: %s" % self._table.type
+
+    def get_id(self):
+        """Return the raw table's identifier."""
+        return self._table.id
+
+    def get_name(self):
+        """Return the table's name (e.g. the function or class name)."""
+        return self._table.name
+
+    def get_lineno(self):
+        """Return the line number the table's block starts at."""
+        return self._table.lineno
+
+    def is_optimized(self):
+        """Return true for a function scope whose `optimized` slot has
+        no OPT_* flag bits set (cf. has_exec / has_import_star)."""
+        return bool(self._table.type == _symtable.TYPE_FUNCTION
+                    and not self._table.optimized)
+
+    def is_nested(self):
+        """Return true if this scope is nested inside another one."""
+        return bool(self._table.nested)
+
+    def has_children(self):
+        """Return true if this scope contains nested scopes."""
+        return bool(self._table.children)
+
+    def has_exec(self):
+        """Return true if the scope uses exec"""
+        return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
+
+    def has_import_star(self):
+        """Return true if the scope uses import *"""
+        return bool(self._table.optimized & OPT_IMPORT_STAR)
+
+    def get_identifiers(self):
+        """Return the names defined or used in this scope."""
+        return self._table.symbols.keys()
+
+    def lookup(self, name):
+        """Return (and cache) the Symbol object for name."""
+        sym = self._symbols.get(name)
+        if sym is None:
+            flags = self._table.symbols[name]
+            namespaces = self.__check_children(name)
+            sym = self._symbols[name] = Symbol(name, flags, namespaces)
+        return sym
+
+    def get_symbols(self):
+        """Return a Symbol object for every identifier in this scope."""
+        return [self.lookup(ident) for ident in self.get_identifiers()]
+
+    def __check_children(self, name):
+        # Child tables with a matching name are the namespaces the
+        # name is bound to (function/class statements).
+        return [newSymbolTable(st, self._filename)
+                for st in self._table.children
+                if st.name == name]
+
+    def get_children(self):
+        """Return SymbolTables for all scopes nested in this one."""
+        return [newSymbolTable(st, self._filename)
+                for st in self._table.children]
+
class Function(SymbolTable):
    """Symbol table for a function scope, with lazily-computed views."""

    # Lazily-filled caches; None means "not computed yet".
    __params = None
    __locals = None
    __frees = None
    __globals = None

    def __idents_matching(self, test_func):
        # Collect the identifiers whose flag word satisfies test_func.
        hits = [ident for ident in self.get_identifiers()
                if test_func(self._table.symbols[ident])]
        return tuple(hits)

    def get_parameters(self):
        """Return a tuple of the function's parameter names."""
        if self.__params is None:
            self.__params = self.__idents_matching(lambda f: f & DEF_PARAM)
        return self.__params

    def get_locals(self):
        """Return a tuple of the names bound locally in the function."""
        if self.__locals is None:
            self.__locals = self.__idents_matching(lambda f: f & DEF_BOUND)
        return self.__locals

    def get_globals(self):
        """Return a tuple of the global names the function uses."""
        if self.__globals is None:
            mask = DEF_GLOBAL | DEF_FREE_GLOBAL
            self.__globals = self.__idents_matching(lambda f: f & mask)
        return self.__globals

    def get_frees(self):
        """Return a tuple of the function's free (closure) variables."""
        if self.__frees is None:
            self.__frees = self.__idents_matching(is_free)
        return self.__frees
+
class Class(SymbolTable):
    """Symbol table for a class scope."""

    # Lazily-computed tuple of nested-scope names; None = not built yet.
    __methods = None

    def get_methods(self):
        """Return a tuple naming the scopes nested in the class body."""
        if self.__methods is None:
            uniq = dict([(child.name, 1) for child in self._table.children])
            self.__methods = tuple(uniq.keys())
        return self.__methods
+
class Symbol:
    """A single name within one scope, wrapping its _symtable flags."""

    def __init__(self, name, flags, namespaces=None):
        self.__name = name
        self.__flags = flags
        self.__namespaces = namespaces or ()

    def __repr__(self):
        return "<symbol '%s'>" % self.__name

    def get_name(self):
        """Return the symbol's name."""
        return self.__name

    def is_referenced(self):
        """Return true if the name is used in its scope."""
        return bool(self.__flags & _symtable.USE)

    def is_parameter(self):
        """Return true if the name is a formal parameter."""
        return bool(self.__flags & DEF_PARAM)

    def is_global(self):
        """Return true if the name lives in the module namespace."""
        return bool(self.__flags & (DEF_GLOBAL | DEF_FREE_GLOBAL))

    def is_vararg(self):
        """Return true if the name is a *args-style parameter."""
        return bool(self.__flags & DEF_STAR)

    def is_keywordarg(self):
        """Return true if the name is a **kwargs-style parameter."""
        return bool(self.__flags & DEF_DOUBLESTAR)

    def is_local(self):
        """Return true if the name is bound in this scope."""
        return bool(self.__flags & DEF_BOUND)

    def is_free(self):
        """Return 1 if the name is a free (closure) variable, else 0."""
        used_or_free = self.__flags & (USE | DEF_FREE)
        bound_here = self.__flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)
        if used_or_free and bound_here:
            return 1
        if self.__flags & DEF_FREE_CLASS:
            return 1
        return 0

    def is_imported(self):
        """Return true if the name was bound by an import statement."""
        return bool(self.__flags & DEF_IMPORT)

    def is_assigned(self):
        """Return true if the name is assigned to in this scope."""
        return bool(self.__flags & DEF_LOCAL)

    def is_in_tuple(self):
        """Return true if the name occurs inside a tuple parameter."""
        return bool(self.__flags & DEF_INTUPLE)

    def is_namespace(self):
        """Returns true if name binding introduces new namespace.

        If the name is used as the target of a function or class
        statement, this will be true.

        Note that a single name can be bound to multiple objects.  If
        is_namespace() is true, the name may also be bound to other
        objects, like an int or list, that does not introduce a new
        namespace.
        """
        return bool(self.__namespaces)

    def get_namespaces(self):
        """Return a list of namespaces bound to this name"""
        return self.__namespaces

    def get_namespace(self):
        """Returns the single namespace bound to this name.

        Raises ValueError if the name is bound to multiple namespaces.
        """
        if len(self.__namespaces) != 1:
            raise ValueError("name is bound to multiple namespaces")
        return self.__namespaces[0]
+
+if __name__ == "__main__":
+    # Smoke test: analyze this module's own source and print, for each
+    # top-level identifier, its Symbol plus two sample predicates.
+    import os, sys
+    src = open(sys.argv[0]).read()
+    mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
+    for ident in mod.get_identifiers():
+        info = mod.lookup(ident)
+        print info, info.is_local(), info.is_namespace()
diff --git a/lib-python/2.2/tabnanny.py b/lib-python/2.2/tabnanny.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/tabnanny.py
@@ -0,0 +1,327 @@
+#! /usr/bin/env python
+
+"""The Tab Nanny despises ambiguous indentation.  She knows no mercy.
+
+tabnanny -- Detection of ambiguous indentation
+
+For the time being this module is intended to be called as a script.
+However it is possible to import it into an IDE and use the function
+check() described below.
+
+Warning: The API provided by this module is likely to change in future
+releases; such changes may not be backward compatible.
+"""
+
+# Released to the public domain, by Tim Peters, 15 April 1998.
+
+# XXX Note: this is now a standard library module.
+# XXX The API needs to undergo changes however; the current code is too
+# XXX script-like.  This will be addressed later.
+
+__version__ = "6"
+
+import os
+import sys
+import getopt
+import tokenize
# Refuse to run against a pre-NL tokenize module: process_tokens() below
# relies on the tokenize.NL pseudo-token to classify blank/comment lines.
if not hasattr(tokenize, 'NL'):
    raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")

__all__ = ["check", "NannyNag", "process_tokens"]

# Module-level option flags, set from the command line by main().
verbose = 0        # -v: chatty progress reporting
filename_only = 0  # -q: print only offending file names
+
def errprint(*args):
    """Write the arguments to stderr, space-separated, newline-terminated."""
    pieces = [str(arg) for arg in args]
    sys.stderr.write(" ".join(pieces))
    sys.stderr.write("\n")
+
def main():
    """Command-line entry point: parse -q/-v flags, then check each path."""
    global verbose, filename_only
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qv")
    except getopt.error, msg:
        errprint(msg)
        return
    # Repeated flags accumulate, raising the quietness/verbosity level.
    for o, a in opts:
        if o == '-q':
            filename_only = filename_only + 1
        if o == '-v':
            verbose = verbose + 1
    if not args:
        errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
        return
    for arg in args:
        check(arg)
+
class NannyNag:
    """Exception raised when ambiguous indentation is detected.

    Carries the offending line number, a diagnostic message, and the
    raw line text; caught and reported by check().
    """

    def __init__(self, lineno, msg, line):
        self.lineno = lineno
        self.msg = msg
        self.line = line

    def get_lineno(self):
        return self.lineno

    def get_msg(self):
        return self.msg

    def get_line(self):
        return self.line
+
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    # NOTE: the parameter shadows the `file` builtin; kept for API
    # compatibility with callers that pass by keyword.

    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "%s: listing directory" % `file`
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # `and` binds tighter than `or`: recurse into non-symlink
            # subdirectories, or check any entry ending in ".py".
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (`file`, str(msg)))
        return

    if verbose > 1:
        print "checking", `file`, "..."

    # NOTE(review): f is never explicitly closed on any path; relies on
    # refcounting to reclaim the file object.
    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError, msg:
        errprint("%s: Token Error: %s" % (`file`, str(msg)))
        return

    except NannyNag, nag:
        # Ambiguous indentation found: report according to verbosity.
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print "%s: *** Line %d: trouble in tab city! ***" % (
                `file`, badline)
            print "offending line:", `line`
            print nag.get_msg()
        else:
            # Terse mode: quote names containing spaces so the output
            # stays machine-parseable.
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print file
            else: print file, badline, `line`
        return

    if verbose:
        print "%s: Clean bill of health." % `file`
+
class Whitespace:
    """Normal-form model of a string's leading whitespace.

    Supports comparing indentation strings for equality/ordering under
    *every* possible tab size at once.
    """

    # the characters used for space and tab
    S, T = ' \t'

    # members:
    #   raw
    #       the original string
    #   n
    #       the number of leading whitespace characters in raw
    #   nt
    #       the number of tabs in raw[:n]
    #   norm
    #       the normal form as a pair (count, trailing), where:
    #       count
    #           a tuple such that raw[:n] contains count[i]
    #           instances of S * i + T
    #       trailing
    #           the number of trailing spaces in raw[:n]
    #       It's A Theorem that m.indent_level(t) ==
    #       n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    #   is_simple
    #       true iff raw[:n] is of the form (T*)(S*)

    def __init__(self, ws):
        self.raw  = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        # b counts spaces seen since the last tab (or since the start).
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                # Grow count so index b exists, then record one more
                # occurrence of (b spaces followed by a tab).
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                # First non-whitespace character ends the prefix.
                break
        self.n    = n
        self.nt   = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1

    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)

    def indent_level(self, tabsize):
        """Return the column this whitespace indents to at `tabsize`."""
        # count, il = self.norm
        # for i in range(len(count)):
        #    if count[i]:
        #        il = il + (i/tabsize + 1)*tabsize * count[i]
        # return il

        # quicker:
        # il = trailing + sum (i/ts + 1)*ts*count[i] =
        # trailing + ts * sum (i/ts + 1)*count[i] =
        # trailing + ts * sum i/ts*count[i] + count[i] =
        # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
        # trailing + ts * [(sum i/ts*count[i]) + num_tabs]
        # and note that i/ts*count[i] is 0 when i < ts

        # NOTE: i/tabsize relies on Python 2 integer (floor) division of
        # ints; under Python 3 semantics this would need //.
        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i/tabsize * count[i]
        return trailing + tabsize * (il + self.nt)

    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        # Checking tab sizes up to one past the longest space run suffices.
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

    # Return true iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # XXXpost that.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way.  I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
    # XXXwrite that up.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return 0
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return 0
        return 1

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a
+
def format_witnesses(w):
    """Return an English phrase naming the tab sizes in witness list `w`.

    `w` is a list of (tabsize, indent1, indent2) tuples as produced by
    Whitespace.not_equal_witness() / Whitespace.not_less_witness().
    Returns e.g. "at tab size 4" or "at tab sizes 2, 8".
    """
    # Use the str.join method and a list comprehension instead of the
    # deprecated string.join() module function and map(lambda ...);
    # both are available since Python 2.0 and the local `import string`
    # is no longer needed.
    firsts = [str(tup[0]) for tup in w]
    prefix = "at tab size"
    if len(w) > 1:
        prefix = prefix + "s"
    return prefix + " " + ", ".join(firsts)
+
def process_tokens(tokens):
    """Scan a tokenize token stream; raise NannyNag on ambiguous indents.

    `tokens` is an iterable of 5-tuples as produced by
    tokenize.generate_tokens().  Maintains a stack of Whitespace objects
    mirroring the tokenizer's INDENT/DEDENT stack.
    """
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    JUNK = tokenize.COMMENT, tokenize.NL
    # Stack seeded with the empty indent; parallels tokenize's own stack.
    indents = [Whitespace("")]
    check_equal = 0

    for (type, token, start, end, line) in tokens:
        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            # A new indent must be strictly greater at *every* tab size.
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here!  what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch!  This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
+
+
if __name__ == '__main__':
    # Run as a script: check the files/directories named on the command line.
    main()
diff --git a/lib-python/2.2/telnetlib.py b/lib-python/2.2/telnetlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/telnetlib.py
@@ -0,0 +1,593 @@
+"""TELNET client class.
+
+Based on RFC 854: TELNET Protocol Specification, by J. Postel and
+J. Reynolds
+
+Example:
+
+>>> from telnetlib import Telnet
+>>> tn = Telnet('www.python.org', 79)   # connect to finger port
+>>> tn.write('guido\r\n')
+>>> print tn.read_all()
+Login       Name               TTY         Idle    When    Where
+guido    Guido van Rossum      pts/2        <Dec  2 11:10> snag.cnri.reston..
+
+>>>
+
+Note that read_all() won't read until eof -- it just reads some data
+-- but it guarantees to read at least one byte unless EOF is hit.
+
+It is possible to pass a Telnet object to select.select() in order to
+wait until more data is available.  Note that in this case,
+read_eager() may return '' even if there was data on the socket,
+because the protocol negotiation may have eaten the data.  This is why
+EOFError is needed in some cases to distinguish between "no data" and
+"connection closed" (since the socket also appears ready for reading
+when it is closed).
+
+Bugs:
+- may hang when connection is slow in the middle of an IAC sequence
+
+To do:
+- option negotiation
+- timeout should be intrinsic to the connection object instead of an
+  option on one of the read calls only
+
+"""
+
+
+# Imported modules
+import sys
+import socket
+import select
+
__all__ = ["Telnet"]

# Tunable parameters
DEBUGLEVEL = 0

# Telnet protocol defaults
TELNET_PORT = 23

# Telnet protocol characters (don't change)
IAC  = chr(255) # "Interpret As Command"
DONT = chr(254)
DO   = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)

# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end of record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
+
class Telnet:

    """Telnet interface class.

    An instance of this class represents a connection to a telnet
    server.  The instance is initially not connected; the open()
    method must be used to establish a connection.  Alternatively, the
    host name and optional port number can be passed to the
    constructor, too.

    Don't try to reopen an already connected instance.

    This class has many read_*() methods.  Note that some of them
    raise EOFError when the end of the connection is read, because
    they can return an empty string for other reasons.  See the
    individual doc strings.

    read_until(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.

    read_all()
        Read all data until EOF; may block.

    read_some()
        Read at least one byte or EOF; may block.

    read_very_eager()
        Read all data available already queued or on the socket,
        without blocking.

    read_eager()
        Read either data already queued or some data available on the
        socket, without blocking.

    read_lazy()
        Read all data in the raw queue (processing it first), without
        doing any socket I/O.

    read_very_lazy()
        Reads all data in the cooked queue, without doing any socket
        I/O.

    set_option_negotiation_callback(callback)
        Each time a telnet option is read on the input flow, this callback
        (if set) is called with the following parameters :
        callback(telnet socket, command (DO/DONT/WILL/WONT), option)
        No other action is done afterwards by telnetlib.

    """

    def __init__(self, host=None, port=0):
        """Constructor.

        When called without arguments, create an unconnected instance.
        With a hostname argument, it connects the instance; a port
        number is optional.

        """
        self.debuglevel = DEBUGLEVEL
        self.host = host
        self.port = port
        self.sock = None        # connected socket, or None/0 when closed
        self.rawq = ''          # bytes received but not yet processed
        self.irawq = 0          # index of next unconsumed char in rawq
        self.cookedq = ''       # data with telnet negotiation stripped
        self.eof = 0            # set when the remote end closes
        self.option_callback = None
        if host:
            self.open(host, port)

    def open(self, host, port=0):
        """Connect to a host.

        The optional second argument is the port number, which
        defaults to the standard telnet port (23).

        Don't try to reopen an already connected instance.

        """
        self.eof = 0
        if not port:
            port = TELNET_PORT
        self.host = host
        self.port = port
        # Try each address returned by getaddrinfo until one connects;
        # `msg` keeps the last failure for the final raise.
        msg = "getaddrinfo returns an empty list"
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg

    def __del__(self):
        """Destructor -- close the connection."""
        self.close()

    def msg(self, msg, *args):
        """Print a debug message, when the debug level is > 0.

        If extra arguments are present, they are substituted in the
        message using the standard string formatting operator.

        """
        if self.debuglevel > 0:
            print 'Telnet(%s,%d):' % (self.host, self.port),
            if args:
                print msg % args
            else:
                print msg

    def set_debuglevel(self, debuglevel):
        """Set the debug level.

        The higher it is, the more debug output you get (on sys.stdout).

        """
        self.debuglevel = debuglevel

    def close(self):
        """Close the connection."""
        if self.sock:
            self.sock.close()
        # NOTE(review): sock is reset to 0 here but None in open();
        # both are false in boolean tests, so callers are unaffected.
        self.sock = 0
        self.eof = 1

    def get_socket(self):
        """Return the socket object used internally."""
        return self.sock

    def fileno(self):
        """Return the fileno() of the socket object used internally."""
        return self.sock.fileno()

    def write(self, buffer):
        """Write a string to the socket, doubling any IAC characters.

        Can block if the connection is blocked.  May raise
        socket.error if the connection is closed.

        """
        # A literal 0xFF byte must be escaped as IAC IAC on the wire.
        if IAC in buffer:
            buffer = buffer.replace(IAC, IAC+IAC)
        self.msg("send %s", `buffer`)
        self.sock.sendall(buffer)

    def read_until(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.

        When no match is found, return whatever is available instead,
        possibly the empty string.  Raise EOFError if the connection
        is closed and no cooked data is available.

        """
        n = len(match)
        self.process_rawq()
        i = self.cookedq.find(match)
        if i >= 0:
            i = i+n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        # select() accepts this object directly because it defines
        # fileno(); loop while the socket stays readable and not EOF.
        s_reply = ([self], [], [])
        s_args = s_reply
        if timeout is not None:
            s_args = s_args + (timeout,)
        while not self.eof and apply(select.select, s_args) == s_reply:
            # Resume the search just before the old end of cookedq so a
            # match spanning the boundary is still found.
            i = max(0, len(self.cookedq)-n)
            self.fill_rawq()
            self.process_rawq()
            i = self.cookedq.find(match, i)
            if i >= 0:
                i = i+n
                buf = self.cookedq[:i]
                self.cookedq = self.cookedq[i:]
                return buf
        return self.read_very_lazy()

    def read_all(self):
        """Read all data until EOF; block until connection closed."""
        self.process_rawq()
        while not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf

    def read_some(self):
        """Read at least one byte of cooked data unless EOF is hit.

        Return '' if EOF is hit.  Block if no data is immediately
        available.

        """
        self.process_rawq()
        while not self.cookedq and not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf

    def read_very_eager(self):
        """Read everything that's possible without blocking in I/O (eager).

        Raise EOFError if connection closed and no cooked data
        available.  Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        while not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()

    def read_eager(self):
        """Read readily available data.

        Raise EOFError if connection closed and no cooked data
        available.  Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        while not self.cookedq and not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()

    def read_lazy(self):
        """Process and return data that's already in the queues (lazy).

        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise.  Don't block
        unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        return self.read_very_lazy()

    def read_very_lazy(self):
        """Return any data available in the cooked queue (very lazy).

        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise.  Don't block.

        """
        buf = self.cookedq
        self.cookedq = ''
        if not buf and self.eof and not self.rawq:
            raise EOFError, 'telnet connection closed'
        return buf

    def set_option_negotiation_callback(self, callback):
        """Provide a callback function called after each receipt of a telnet option."""
        self.option_callback = callback

    def process_rawq(self):
        """Transfer from raw queue to cooked queue.

        Set self.eof when connection is closed.  Don't block unless in
        the midst of an IAC sequence.

        """
        buf = ''
        try:
            while self.rawq:
                c = self.rawq_getchar()
                if c == theNULL:
                    continue
                if c == "\021":
                    # Drop ^Q (DC1/XON) flow-control characters.
                    continue
                if c != IAC:
                    buf = buf + c
                    continue
                # IAC: next char is either a doubled IAC (literal 0xFF)
                # or a negotiation command.
                c = self.rawq_getchar()
                if c == IAC:
                    buf = buf + c
                elif c in (DO, DONT):
                    opt = self.rawq_getchar()
                    self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(opt))
                    if self.option_callback:
                        self.option_callback(self.sock, c, opt)
                    else:
                        # Default: refuse every option request.
                        self.sock.sendall(IAC + WONT + opt)
                elif c in (WILL, WONT):
                    opt = self.rawq_getchar()
                    self.msg('IAC %s %d',
                             c == WILL and 'WILL' or 'WONT', ord(opt))
                    if self.option_callback:
                        self.option_callback(self.sock, c, opt)
                    else:
                        self.sock.sendall(IAC + DONT + opt)
                else:
                    self.msg('IAC %d not recognized' % ord(c))
        except EOFError: # raised by self.rawq_getchar()
            pass
        self.cookedq = self.cookedq + buf

    def rawq_getchar(self):
        """Get next char from raw queue.

        Block if no data is immediately available.  Raise EOFError
        when connection is closed.

        """
        if not self.rawq:
            self.fill_rawq()
            if self.eof:
                raise EOFError
        c = self.rawq[self.irawq]
        self.irawq = self.irawq + 1
        # Reset the queue once fully consumed.
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        return c

    def fill_rawq(self):
        """Fill raw queue from exactly one recv() system call.

        Block if no data is immediately available.  Set self.eof when
        connection is closed.

        """
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        # The buffer size should be fairly small so as to avoid quadratic
        # behavior in process_rawq() above
        buf = self.sock.recv(50)
        self.msg("recv %s", `buf`)
        self.eof = (not buf)
        self.rawq = self.rawq + buf

    def sock_avail(self):
        """Test whether data is available on the socket."""
        return select.select([self], [], [], 0) == ([self], [], [])

    def interact(self):
        """Interaction function, emulates a very dumb telnet client."""
        # Windows cannot select() on stdin, so fall back to threads.
        if sys.platform == "win32":
            self.mt_interact()
            return
        while 1:
            rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
            if self in rfd:
                try:
                    text = self.read_eager()
                except EOFError:
                    print '*** Connection closed by remote host ***'
                    break
                if text:
                    sys.stdout.write(text)
                    sys.stdout.flush()
            if sys.stdin in rfd:
                line = sys.stdin.readline()
                if not line:
                    break
                self.write(line)

    def mt_interact(self):
        """Multithreaded version of interact()."""
        import thread
        thread.start_new_thread(self.listener, ())
        while 1:
            line = sys.stdin.readline()
            if not line:
                break
            self.write(line)

    def listener(self):
        """Helper for mt_interact() -- this executes in the other thread."""
        while 1:
            try:
                data = self.read_eager()
            except EOFError:
                print '*** Connection closed by remote host ***'
                return
            if data:
                sys.stdout.write(data)
            else:
                # NOTE(review): flushes only when no data arrived; looks
                # like write-then-flush was intended -- confirm upstream.
                sys.stdout.flush()

    def expect(self, list, timeout=None):
        """Read until one from a list of a regular expressions matches.

        The first argument is a list of regular expressions, either
        compiled (re.RegexObject instances) or uncompiled (strings).
        The optional second argument is a timeout, in seconds; default
        is no timeout.

        Return a tuple of three items: the index in the list of the
        first regular expression that matches; the match object
        returned; and the text read up till and including the match.

        If EOF is read and no text was read, raise EOFError.
        Otherwise, when nothing matches, return (-1, None, text) where
        text is the text received so far (may be the empty string if a
        timeout happened).

        If a regular expression ends with a greedy match (e.g. '.*')
        or if more than one expression can match the same input, the
        results are nondeterministic, and may depend on the I/O timing.

        """
        # `re` is imported lazily, only if an uncompiled pattern is seen;
        # the None placeholder makes the local name exist either way.
        re = None
        list = list[:]
        indices = range(len(list))
        for i in indices:
            if not hasattr(list[i], "search"):
                if not re: import re
                list[i] = re.compile(list[i])
        while 1:
            self.process_rawq()
            for i in indices:
                m = list[i].search(self.cookedq)
                if m:
                    e = m.end()
                    text = self.cookedq[:e]
                    self.cookedq = self.cookedq[e:]
                    return (i, m, text)
            if self.eof:
                break
            if timeout is not None:
                r, w, x = select.select([self.fileno()], [], [], timeout)
                if not r:
                    break
            self.fill_rawq()
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)
+
+
def test():
    """Small interactive test driver for telnetlib.

    Usage: python telnetlib.py [-d] ... [host [port]]

    Default host is localhost; default port is 23.  Each -d raises the
    debug level by one.
    """
    level = 0
    # Consume leading -d flags, shifting them out of sys.argv.
    while sys.argv[1:] and sys.argv[1] == '-d':
        level = level + 1
        del sys.argv[1]
    host = 'localhost'
    port = 0
    if sys.argv[1:]:
        host = sys.argv[1]
    if sys.argv[2:]:
        spec = sys.argv[2]
        try:
            port = int(spec)
        except ValueError:
            # Not numeric: treat it as a service name.
            port = socket.getservbyname(spec, 'tcp')
    tn = Telnet()
    tn.set_debuglevel(level)
    tn.open(host, port)
    tn.interact()
    tn.close()
+
if __name__ == '__main__':
    # Run the interactive test client when executed as a script.
    test()
diff --git a/lib-python/2.2/tempfile.py b/lib-python/2.2/tempfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/tempfile.py
@@ -0,0 +1,244 @@
+"""Temporary files and filenames."""
+
+# XXX This tries to be not UNIX specific, but I don't know beans about
+# how to choose a temp directory or filename on MS-DOS or other
+# systems so it may have to be changed...
+
+import os
+
+__all__ = ["mktemp", "TemporaryFile", "tempdir", "gettempprefix"]
+
+# Parameters that the caller may set to override the defaults
+tempdir = None
+template = None
+
+def gettempdir():
+    """Function to calculate the directory to use.
+
+    The chosen directory is cached in the module-level 'tempdir'
+    variable, so the filesystem is only probed on the first call (or
+    never, if the caller pre-set 'tempdir').
+    """
+    global tempdir
+    if tempdir is not None:
+        # Fast path: a previous call (or the caller) already chose it.
+        return tempdir
+
+    # _gettempdir_inner deduces whether a candidate temp dir is usable by
+    # trying to create a file in it, and write to it.  If that succeeds,
+    # great, it closes the file and unlinks it.  There's a race, though:
+    # the *name* of the test file it tries is the same across all threads
+    # under most OSes (Linux is an exception), and letting multiple threads
+    # all try to open, write to, close, and unlink a single file can cause
+    # a variety of bogus errors (e.g., you cannot unlink a file under
+    # Windows if anyone has it open, and two threads cannot create the
+    # same file in O_EXCL mode under Unix).  The simplest cure is to serialize
+    # calls to _gettempdir_inner.  This isn't a real expense, because the
+    # first thread to succeed sets the global tempdir, and all subsequent
+    # calls to gettempdir() reuse that without trying _gettempdir_inner.
+    _tempdir_lock.acquire()
+    try:
+        return _gettempdir_inner()
+    finally:
+        _tempdir_lock.release()
+
+def _gettempdir_inner():
+    """Function to calculate the directory to use.
+
+    Builds a platform-specific list of candidate directories, then
+    probes each by creating, writing, and removing a small test file.
+    The first usable candidate is cached in the module-level 'tempdir'
+    and returned; IOError is raised if none works.
+    """
+    global tempdir
+    if tempdir is not None:
+        return tempdir
+    # Last-resort candidate: the current working directory (os.curdir
+    # if getcwd is unavailable or fails).
+    try:
+        pwd = os.getcwd()
+    except (AttributeError, os.error):
+        pwd = os.curdir
+    attempdirs = ['/tmp', '/var/tmp', '/usr/tmp', pwd]
+    if os.name == 'nt':
+        attempdirs.insert(0, 'C:\\TEMP')
+        attempdirs.insert(0, '\\TEMP')
+    elif os.name == 'mac':
+        # Classic MacOS: ask the system for its temporary-items folder.
+        import macfs, MACFS
+        try:
+            refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
+                                             MACFS.kTemporaryFolderType, 1)
+            dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
+            attempdirs.insert(0, dirname)
+        except macfs.error:
+            pass
+    elif os.name == 'riscos':
+        scrapdir = os.getenv('Wimp$ScrapDir')
+        if scrapdir:
+            attempdirs.insert(0, scrapdir)
+    # Environment variables take priority over every built-in candidate
+    # (inserted last at position 0, so TMPDIR wins over TEMP over TMP).
+    for envname in 'TMPDIR', 'TEMP', 'TMP':
+        if os.environ.has_key(envname):
+            attempdirs.insert(0, os.environ[envname])
+    testfile = gettempprefix() + 'test'
+    for dir in attempdirs:
+        try:
+            filename = os.path.join(dir, testfile)
+            if os.name == 'posix':
+                # O_EXCL guarantees we never open a pre-existing file
+                # that another process or thread may own.
+                try:
+                    fd = os.open(filename,
+                                 os.O_RDWR | os.O_CREAT | os.O_EXCL, 0700)
+                except OSError:
+                    pass
+                else:
+                    fp = os.fdopen(fd, 'w')
+                    fp.write('blat')
+                    fp.close()
+                    os.unlink(filename)
+                    del fp, fd
+                    tempdir = dir
+                    break
+            else:
+                fp = open(filename, 'w')
+                fp.write('blat')
+                fp.close()
+                os.unlink(filename)
+                tempdir = dir
+                break
+        except IOError:
+            pass
+    if tempdir is None:
+        msg = "Can't find a usable temporary directory amongst " + `attempdirs`
+        raise IOError, msg
+    return tempdir
+
+
+# template caches the result of gettempprefix, for speed, when possible.
+# XXX unclear why this isn't "_template"; left it "template" for backward
+# compatibility.
+if os.name == "posix":
+    # We don't try to cache the template on posix:  the pid may change on us
+    # between calls due to a fork, and on Linux the pid changes even for
+    # another thread in the same process.  Since any attempt to keep the
+    # cache in synch would have to call os.getpid() anyway in order to make
+    # sure the pid hasn't changed between calls, a cache wouldn't save any
+    # time.  In addition, a cache is difficult to keep correct with the pid
+    # changing willy-nilly, and earlier attempts proved buggy (races).
+    template = None
+
+# Else the pid never changes, so gettempprefix always returns the same
+# string.
+elif os.name == "nt":
+    # Backquotes are the Python 2 spelling of repr().
+    template = '~' + `os.getpid()` + '-'
+elif os.name in ('mac', 'riscos'):
+    template = 'Python-Tmp-'
+else:
+    template = 'tmp' # XXX might choose a better one
+
+def gettempprefix():
+    """Function to calculate a prefix of the filename to use.
+
+    This incorporates the current process id on systems that support such a
+    notion, so that concurrent processes don't generate the same prefix.
+    """
+
+    global template
+    if template is None:
+        return '@' + `os.getpid()` + '.'
+    else:
+        return template
+
+
+def mktemp(suffix=""):
+    """User-callable function to return a unique temporary file name."""
+    dir = gettempdir()
+    pre = gettempprefix()
+    while 1:
+        i = _counter.get_next()
+        file = os.path.join(dir, pre + str(i) + suffix)
+        if not os.path.exists(file):
+            return file
+
+
+class TemporaryFileWrapper:
+    """Temporary file wrapper
+
+    This class provides a wrapper around files opened for temporary use.
+    In particular, it seeks to automatically remove the file when it is
+    no longer needed.
+    """
+
+    # Cache the unlinker so we don't get spurious errors at shutdown
+    # when the module-level "os" is None'd out.  Note that this must
+    # be referenced as self.unlink, because the name TemporaryFileWrapper
+    # may also get None'd out before __del__ is called.
+    unlink = os.unlink
+
+    def __init__(self, file, path):
+        # file: the open file object being wrapped.
+        # path: its name on disk, kept so close() can unlink it later.
+        # close_called: guard flag so close() is effectively idempotent.
+        self.file = file
+        self.path = path
+        self.close_called = 0
+
+    def close(self):
+        # Close the underlying file and remove it from disk.  Safe to
+        # call more than once (explicit close() followed by __del__).
+        if not self.close_called:
+            self.close_called = 1
+            self.file.close()
+            self.unlink(self.path)
+
+    def __del__(self):
+        # Best-effort cleanup when the wrapper is garbage-collected.
+        self.close()
+
+    def __getattr__(self, name):
+        # Delegate any other attribute to the wrapped file object.
+        # Non-int attributes are cached on self so later lookups skip
+        # __getattr__; int-valued ones are deliberately not cached --
+        # presumably because they are mutable state on the file object
+        # (e.g. 'softspace') -- TODO confirm.
+        file = self.__dict__['file']
+        a = getattr(file, name)
+        if type(a) != type(0):
+            setattr(self, name, a)
+        return a
+
+
+def TemporaryFile(mode='w+b', bufsize=-1, suffix=""):
+    """Create and return a temporary file (opened read-write by default).
+
+    On posix the file is unlinked immediately after creation, so it
+    disappears as soon as the returned file object is closed.  On other
+    platforms a TemporaryFileWrapper is returned, which removes the
+    file when the wrapper is closed or garbage-collected.
+    """
+    name = mktemp(suffix)
+    if os.name == 'posix':
+        # Unix -- be very careful
+        fd = os.open(name, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0700)
+        try:
+            os.unlink(name)
+            return os.fdopen(fd, mode, bufsize)
+        except:
+            # Don't leak the descriptor if unlink or fdopen fails.
+            os.close(fd)
+            raise
+    else:
+        # Non-unix -- can't unlink file that's still open, use wrapper
+        file = open(name, mode, bufsize)
+        return TemporaryFileWrapper(file, name)
+
+# In order to generate unique names, mktemp() uses _counter.get_next().
+# This returns a unique integer on each call, in a threadsafe way (i.e.,
+# multiple threads will never see the same integer).  The integer will
+# usually be a Python int, but if _counter.get_next() is called often
+# enough, it will become a Python long.
+# Note that the only names that survive this next block of code
+# are "_counter" and "_tempdir_lock".
+
+class _ThreadSafeCounter:
+    """Monotonically increasing counter with atomic get_next().
+
+    The mutex passed to the constructor serializes all access, so
+    multiple threads never observe the same value.
+    """
+
+    def __init__(self, mutex, initialvalue=0):
+        # mutex: any object with acquire()/release() (a real lock or
+        # the module's _DummyMutex).
+        self.mutex = mutex
+        self.i = initialvalue
+
+    def get_next(self):
+        # Return the current value and advance, all under the lock.
+        self.mutex.acquire()
+        result = self.i
+        try:
+            newi = result + 1
+        except OverflowError:
+            # Old Pythons raise OverflowError on int overflow; promote
+            # to an unbounded long and keep counting.
+            newi = long(result) + 1
+        self.i = newi
+        self.mutex.release()
+        return result
+
+try:
+    import thread
+
+except ImportError:
+    # No thread support: substitute a mutex whose acquire/release are
+    # no-ops, so the counter code is identical either way.
+    class _DummyMutex:
+        def acquire(self):
+            pass
+
+        release = acquire
+
+    _counter = _ThreadSafeCounter(_DummyMutex())
+    _tempdir_lock = _DummyMutex()
+    del _DummyMutex
+
+else:
+    # Threads available: use real locks for the counter and the
+    # gettempdir serialization.
+    _counter = _ThreadSafeCounter(thread.allocate_lock())
+    _tempdir_lock = thread.allocate_lock()
+    del thread
+
+del _ThreadSafeCounter
diff --git a/lib-python/2.2/test/README b/lib-python/2.2/test/README
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/README
@@ -0,0 +1,372 @@
+                      Writing Python Regression Tests
+                      -------------------------------
+                               Skip Montanaro
+                              (skip at mojam.com)
+
+
+Introduction
+
+If you add a new module to Python or modify the functionality of an existing
+module, you should write one or more test cases to exercise that new
+functionality.  There are different ways to do this within the regression
+testing facility provided with Python; any particular test should use only
+one of these options.  Each option requires writing a test module using the
+conventions of the selected option:
+
+    - PyUnit based tests
+    - doctest based tests
+    - "traditional" Python test modules
+
+Regardless of the mechanics of the testing approach you choose,
+you will be writing unit tests (isolated tests of functions and objects
+defined by the module) using white box techniques.  Unlike black box
+testing, where you only have the external interfaces to guide your test case
+writing, in white box testing you can see the code being tested and tailor
+your test cases to exercise it more completely.  In particular, you will be
+able to refer to the C and Python code in the CVS repository when writing
+your regression test cases.
+
+
+PyUnit based tests
+
+The PyUnit framework is based on the ideas of unit testing as espoused
+by Kent Beck and the Extreme Programming (XP) movement.  The specific
+interface provided by the framework is tightly based on the JUnit
+Java implementation of Beck's original SmallTalk test framework.  Please
+see the documentation of the unittest module for detailed information on
+the interface and general guidelines on writing PyUnit based tests.
+
+The test_support helper module provides two functions for use by
+PyUnit based tests in the Python regression testing framework:
+run_unittest() takes a unittest.TestCase derived class as a parameter
+and runs the tests defined in that class, and run_suite() takes a
+populated TestSuite instance and runs the tests.  All test methods in
+the Python regression framework have names that start with "test_" and
+use lower-case names with words separated with underscores.
+
+All PyUnit-based tests in the Python test suite use boilerplate that
+looks like this:
+
+    import unittest
+    import test_support
+
+    class MyTestCase(unittest.TestCase):
+        # define test methods here...
+
+    def test_main():
+        test_support.run_unittest(MyTestCase)
+
+    if __name__ == "__main__":
+        test_main()
+
+This has the advantage that it allows the unittest module to be used
+as a script to run individual tests as well as working well with the
+regrtest framework.
+
+
+doctest based tests
+
+Tests written to use doctest are actually part of the docstrings for
+the module being tested.  Each test is written as a display of an
+interactive session, including the Python prompts, statements that would
+be typed by the user, and the output of those statements (including
+tracebacks, although only the exception msg needs to be retained then).
+The module in the test package is simply a wrapper that causes doctest
+to run over the tests in the module.  The test for the difflib module
+provides a convenient example:
+
+    import difflib, test_support
+    test_support.run_doctest(difflib)
+
+If the test is successful, nothing is written to stdout (so you should not
+create a corresponding output/test_difflib file), but running regrtest
+with -v will give a detailed report, the same as if passing -v to doctest.
+
+A second argument can be passed to run_doctest to tell doctest to search
+sys.argv for -v instead of using test_support's idea of verbosity.  This
+is useful for writing doctest-based tests that aren't simply running a
+doctest'ed Lib module, but contain the doctests themselves.  Then at
+times you may want to run such a test directly as a doctest, independent
+of the regrtest framework.  The tail end of test_descrtut.py is a good
+example:
+
+    def test_main(verbose=None):
+        import test_support, test.test_descrtut
+        test_support.run_doctest(test.test_descrtut, verbose)
+
+    if __name__ == "__main__":
+        test_main(1)
+
+If run via regrtest, test_main() is called (by regrtest) without specifying
+verbose, and then test_support's idea of verbosity is used.  But when
+run directly, test_main(1) is called, and then doctest's idea of verbosity
+is used.
+
+See the documentation for the doctest module for information on
+writing tests using the doctest framework.
+
+
+"traditional" Python test modules
+
+The mechanics of how the "traditional" test system operates are fairly
+straightforward.  When a test case is run, the output is compared with the
+expected output that is stored in .../Lib/test/output.  If the test runs to
+completion and the actual and expected outputs match, the test succeeds, if
+not, it fails.  If an ImportError or test_support.TestSkipped error is
+raised, the test is not run.
+
+
+Executing Test Cases
+
+If you are writing test cases for module spam, you need to create a file
+in .../Lib/test named test_spam.py.  In addition, if the tests are expected
+to write to stdout during a successful run, you also need to create an
+expected output file in .../Lib/test/output named test_spam ("..."
+represents the top-level directory in the Python source tree, the directory
+containing the configure script).  If needed, generate the initial version
+of the test output file by executing:
+
+    ./python Lib/test/regrtest.py -g test_spam.py
+
+from the top-level directory.
+
+Any time you modify test_spam.py you need to generate a new expected
+output file.  Don't forget to desk check the generated output to make sure
+it's really what you expected to find!  All in all it's usually better
+not to have an expected-out file (note that doctest- and unittest-based
+tests do not).
+
+To run a single test after modifying a module, simply run regrtest.py
+without the -g flag:
+
+    ./python Lib/test/regrtest.py test_spam.py
+
+While debugging a regression test, you can of course execute it
+independently of the regression testing framework and see what it prints:
+
+    ./python Lib/test/test_spam.py
+
+To run the entire test suite:
+
+[UNIX, + other platforms where "make" works] Make the "test" target at the
+top level:
+
+    make test
+
+[WINDOWS] Run rt.bat from your PCBuild directory.  Read the comments at
+the top of rt.bat for the use of special -d, -O and -q options processed
+by rt.bat.
+
+[OTHER] You can simply execute the two runs of regrtest (optimized and
+non-optimized) directly:
+
+    ./python Lib/test/regrtest.py
+    ./python -O Lib/test/regrtest.py
+
+But note that this way picks up whatever .pyc and .pyo files happen to be
+around.  The makefile and rt.bat ways run the tests twice, the first time
+removing all .pyc and .pyo files from the subtree rooted at Lib/.
+
+Test cases generate output based upon values computed by the test code.
+When executed, regrtest.py compares the actual output generated by executing
+the test case with the expected output and reports success or failure.  It
+stands to reason that if the actual and expected outputs are to match, they
+must not contain any machine dependencies.  This means your test cases
+should not print out absolute machine addresses (e.g. the return value of
+the id() builtin function) or floating point numbers with large numbers of
+significant digits (unless you understand what you are doing!).
+
+
+Test Case Writing Tips
+
+Writing good test cases is a skilled task and is too complex to discuss in
+detail in this short document.  Many books have been written on the subject.
+I'll show my age by suggesting that Glenford Myers' "The Art of Software
+Testing", published in 1979, is still the best introduction to the subject
+available.  It is short (177 pages), easy to read, and discusses the major
+elements of software testing, though its publication predates the
+object-oriented software revolution, so doesn't cover that subject at all.
+Unfortunately, it is very expensive (about $100 new).  If you can borrow it
+or find it used (around $20), I strongly urge you to pick up a copy.
+
+The most important goal when writing test cases is to break things.  A test
+case that doesn't uncover a bug is much less valuable than one that does.
+In designing test cases you should pay attention to the following:
+
+    * Your test cases should exercise all the functions and objects defined
+      in the module, not just the ones meant to be called by users of your
+      module.  This may require you to write test code that uses the module
+      in ways you don't expect (explicitly calling internal functions, for
+      example - see test_atexit.py).
+
+    * You should consider any boundary values that may tickle exceptional
+      conditions (e.g. if you were writing regression tests for division,
+      you might well want to generate tests with numerators and denominators
+      at the limits of floating point and integer numbers on the machine
+      performing the tests as well as a denominator of zero).
+
+    * You should exercise as many paths through the code as possible.  This
+      may not always be possible, but is a goal to strive for.  In
+      particular, when considering if statements (or their equivalent), you
+      want to create test cases that exercise both the true and false
+      branches.  For loops, you should create test cases that exercise the
+      loop zero, one and multiple times.
+
+    * You should test with obviously invalid input.  If you know that a
+      function requires an integer input, try calling it with other types of
+      objects to see how it responds.
+
+    * You should test with obviously out-of-range input.  If the domain of a
+      function is only defined for positive integers, try calling it with a
+      negative integer.
+
+    * If you are going to fix a bug that wasn't uncovered by an existing
+      test, try to write a test case that exposes the bug (preferably before
+      fixing it).
+
+    * If you need to create a temporary file, you can use the filename in
+      test_support.TESTFN to do so.  It is important to remove the file
+      when done; other tests should be able to use the name without cleaning
+      up after your test.
+
+
+Regression Test Writing Rules
+
+Each test case is different.  There is no "standard" form for a Python
+regression test case, though there are some general rules (note that
+these mostly apply only to the "classic" tests; unittest- and doctest-
+based tests should follow the conventions natural to those frameworks):
+
+    * If your test case detects a failure, raise TestFailed (found in
+      test_support).
+
+    * Import everything you'll need as early as possible.
+
+    * If you'll be importing objects from a module that is at least
+      partially platform-dependent, only import those objects you need for
+      the current test case to avoid spurious ImportError exceptions that
+      prevent the test from running to completion.
+
+    * Print all your test case results using the print statement.  For
+      non-fatal errors, print an error message (or omit a successful
+      completion print) to indicate the failure, but proceed instead of
+      raising TestFailed.
+
+    * Use "assert" sparingly, if at all.  It's usually better to just print
+      what you got, and rely on regrtest's got-vs-expected comparison to
+      catch deviations from what you expect.  assert statements aren't
+      executed at all when regrtest is run in -O mode; and, because they
+      cause the test to stop immediately, can lead to a long & tedious
+      test-fix, test-fix, test-fix, ... cycle when things are badly broken
+      (and note that "badly broken" often includes running the test suite
+      for the first time on new platforms or under new implementations of
+      the language).
+
+
+Miscellaneous
+
+There is a test_support module you can import from your test case.  It
+provides the following useful objects:
+
+    * TestFailed - raise this exception when your regression test detects a
+      failure.
+
+    * TestSkipped - raise this if the test could not be run because the
+      platform doesn't offer all the required facilities (like large
+      file support), even if all the required modules are available.
+
+    * verbose - you can use this variable to control print output.  Many
+      modules use it.  Search for "verbose" in the test_*.py files to see
+      lots of examples.
+
+    * verify(condition, reason='test failed').  Use this instead of
+
+          assert condition[, reason]
+
+      verify() has two advantages over assert:  it works even in -O mode,
+      and it raises TestFailed on failure instead of AssertionError.
+
+    * TESTFN - a string that should always be used as the filename when you
+      need to create a temp file.  Also use try/finally to ensure that your
+      temp files are deleted before your test completes.  Note that you
+      cannot unlink an open file on all operating systems, so also be sure
+      to close temp files before trying to unlink them.
+
+    * sortdict(dict) - acts like repr(dict.items()), but sorts the items
+      first.  This is important when printing a dict value, because the
+      order of items produced by dict.items() is not defined by the
+      language.
+
+    * findfile(file) - you can call this function to locate a file somewhere
+      along sys.path or in the Lib/test tree - see test_linuxaudiodev.py for
+      an example of its use.
+
+    * use_large_resources - true iff tests requiring large time or space
+      should be run.
+
+    * fcmp(x,y) - you can call this function to compare two floating point
+      numbers when you expect them to only be approximately equal within a
+      fuzz factor (test_support.FUZZ, which defaults to 1e-6).
+
+NOTE:  Always import something from test_support like so:
+
+    from test_support import verbose
+
+or like so:
+
+    import test_support
+    ... use test_support.verbose in the code ...
+
+Never import anything from test_support like this:
+
+    from test.test_support import verbose
+
+"test" is a package already, so can refer to modules it contains without
+"test." qualification.  If you do an explicit "test.xxx" qualification, that
+can fool Python into believing test.xxx is a module distinct from the xxx
+in the current package, and you can end up importing two distinct copies of
+xxx.  This is especially bad if xxx=test_support, as regrtest.py can (and
+routinely does) overwrite its "verbose" and "use_large_resources"
+attributes:  if you get a second copy of test_support loaded, it may not
+have the same values for those as regrtest intended.
+
+
+Python and C statement coverage results are currently available at
+
+    http://www.musi-cal.com/~skip/python/Python/dist/src/
+
+As of this writing (July, 2000) these results are being generated nightly.
+You can refer to the summaries and the test coverage output files to see
+where coverage is adequate or lacking and write test cases to beef up the
+coverage.
+
+
+Some Non-Obvious regrtest Features
+
+    * Automagic test detection:  When you create a new test file
+      test_spam.py, you do not need to modify regrtest (or anything else)
+      to advertise its existence.  regrtest searches for and runs all
+      modules in the test directory with names of the form test_xxx.py.
+
+    * Miranda output:  If, when running test_spam.py, regrtest does not
+      find an expected-output file test/output/test_spam, regrtest
+      pretends that it did find one, containing the single line
+
+      test_spam
+
+      This allows new tests that don't expect to print anything to stdout
+      to not bother creating expected-output files.
+
+    * Two-stage testing:  To run test_spam.py, regrtest imports test_spam
+      as a module.  Most tests run to completion as a side-effect of
+      getting imported.  After importing test_spam, regrtest also executes
+      test_spam.test_main(), if test_spam has a "test_main" attribute.
+      This is rarely required with the "traditional" Python tests, and
+      you shouldn't create a module global with name test_main unless
+      you're specifically exploiting this gimmick.  This usage does
+      prove useful with PyUnit-based tests as well, however; defining
+      a test_main() which is run by regrtest and a script-stub in the
+      test module ("if __name__ == '__main__': test_main()") allows
+      the test to be used like any other Python test and also work
+      with the unittest.py-as-a-script approach, allowing a developer
+      to run specific tests from the command line.
diff --git a/lib-python/2.2/test/__init__.py b/lib-python/2.2/test/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/__init__.py
@@ -0,0 +1,1 @@
+# Dummy file to make this directory a package.
diff --git a/lib-python/2.2/test/audiotest.au b/lib-python/2.2/test/audiotest.au
new file mode 100644
index 0000000000000000000000000000000000000000..1ad28ce1fdd781e4370fdf68577e8a35b4ed8a8e
GIT binary patch
literal 23493
zc${3k$&TE}mIe5_yYTA0Oi7_BMxW=269|GJ2!d!XY{7-CY{_i{-1Vk^pwV4bDWy_U
zYKjQOD8)SZ1c$@nAP53naKQyvT=E0>8P?Z4q^wuX2!-68G}?6Ux#wIny6}JfpZ~|-
z{`R-(-~Q%Y!vFuv-~Lwl-yi;eI9xZ+^E65F+;tq+fp-}kId$M&o~CIAuUzU<N?ka^
zt|Ek82Pg9^C$?={h+vyI#Gwwsa1cz8X_^*7wv8~t#74-14-A97b1<?UI0ctj7CXT%
zgKOXod(Da=ht=R<c5il#jag$HhqZ}S0e7`Av<TWo;4PFfO=Mw1l3P;jrIEk1{Nd<&
zyv&`{v~^W#HtLmntz4-#8v|9O at hlkK-(Or at -`=0yU7t^9VVvm_-&0jl#ws!~+)h+v
zMG<(}HdP(_4i5auEb>w-H<kWC)VOY6lqD-6G<0c}rEwDH&>ZTfnVVx`P)wp|5qsl_
z7sSL;T7$OK=Q+t#B|#7d-9bmtG{ZJ^Qxhe)lcq_g4Wo(?404zSDRrGNikE2^22o0F
zUFdcDg8`>18X?Hg6|UFo_Ii?TrQUopoBGQjiu^G57UTJ3G+#t%W>c8DG)bK-#Ay-{
zLNkKWEVXmjG&HH-=AcDzE6ql{B8&V$MTAa$I6UZtZW@@z;c_~=_cE+XjaI+Q>n=^B
zFf}Z(J7|i%J`9bKIEJmLy}l`GS>P|P?w1QMHk|~MbeZ~NKgbhDRt(*8qd1%{!X$A;
zlNiViGrWkQn>nxMrj>h<vy4q$*V}@r7&Ojkl0!R1UeQgsaWoDHHso4c9dIPh?PUnF
zt~7fxFCy2XM3aSXrP66P728syOi>k~4ow_rwr;zYVH!MVND`qboqJ%ynAfrG#EUST
zgApT1u}wSn7U6Ij#W__i8`-HJh07$zSdoZ<v)Io@^Vm-#%c6#phO=-u at x3^=GMX*t
zqw(<0Tg0(tr3fd{BAd+fh`0hzB$GnXSwN|&wR?O=)J)6Hz{KQ!qtk71vaXX18HUtr
zaqX^ZU at J<&0+mKXY_w(F)T7M6rdF at Zjh<p4HOdXFsEwB19_Yw1!pL$>)bGI at +IdK)
zUKTlyC}P=g;vgH&63?Zw<jN>d;q4^yDB)$-vg5$d?`Dx7r=mtJI|`QJ#V80uDk(@d
zLf>DUUC)R2KF)$H84ahyZ#VPFI0h3zX6PsWWC2F6i;^O9mP?#C#fT`~cDGyW>)_{+
z1Fal1+I*vD!RSY}tV`We6{bj*ILXy`S?-q`?N(JmI?hv5wZ(qBIcN_|1KQ>y$CR5r
zzTZc#4K6@%YRJk!7EQ;EgNYY<S*BUoLebP8-`y{RsY8jGk;L;x^PACpo`$M!BP*Kt
z<D2n3Tu|HA!AhcN9^K3Xa9KgMR5P2$)8Q=6LPM1~Q$d;Q<zu+DWw-k(r=pZNvlN at Q
z&>HllzL60hT+<@F)T&xt)yX60J|?be)Y at i8wj<ZRhnAaWsb%&>6cc+CB2!n3b*<C0
zD79t*V4z&8sNJ at KvARqwO;<`abI`Zb)S3sTq6?*x(Q2C{#>4w4N~692WuoOU8IB0n
zajS1xb~;;l^N3>85~zZ5GRENdff`diO>+x|#-)ymQ!r9W;xtIC7(fY1Q<Oy1&leG3
zJr<~q;~<Xb<3;Q{(0yG|lfa){Pl71ZI1wy{EW>Fs44h2EZ4(K(n<PO1h{d=2@<7c=
zL<6Ge9jT>ihO1CLcM(+`$BwZptCnE$!a!94F$~kS4P6DWFflfCQ;=nOfL&)f at sk+K
zoYaz3<mP^!TZW=^dz3T7Ji?CJ72AR$(QKUMu}&np*TpiO<;yhJ6r&1nFhHMwyYLfa
zt3=032j-HZKr$lP1=usFtRO+xNFL7<C)Xri<v7<!r{O#au;KEmOTzKZB$!eM>8j|^
z<Z3#G+gX_^IT~0)N|riNz%?_8%(8i&nH=g2Fz-gozzL8EMqI<P<HbwtD=?RU>zd`;
z)Hm@!ZC1^;PTkOqHA!tMoNAE~UdFB~I5plCqhPuuw}1qKB3d}T59iY~(Q&Vn^EjT&
z$UU@)M|DTU6Z;+orbtV&%u#2U24mZo^+C&(0IqN1NowS69jj?_=cP9|QWdF9y3UN=
zz=-r|N!5GAy-(6R+mj`sZ4Z)pI1GbOp$)|x;B*?$33io+CK^JzbW at n|zS>gcULK6I
zFiAO7N0OjKUV;`W)~iU;JMPS<3nyr%bs{O+JaV!$;BYA$p!yikgc)|5by?AB?qZ?`
z%CwxW$4!6h4&65H%(U4tWvv#(x9yR5yJ>Ij at 7xylQ(L%qxJu=qefj*|A}Y;{;5lD^
zTl~25&M%3Vq*kmPUMGe3OQkffBpa2B-4eflq-_NK(Tghoy0Z7nZ#}JGTyyIh_p0~d
z`P<&6rr#8c%5U0L^@I1Um`>D}iuCIF*Xr-i)8fh5R&l2MmWp#XTX&<&jmqHFLdZUM
zpBB at ORBg6@D;=+WI)1h>OXzm}aP98P^Ue0jUbFUjU42f(SI$l;jio0<mVSQ`4|@kn
z at b&StO?9ucroOh;c8*Su9^K*Ei}=u3D}CAxo^<iuENd(4Bnvw2-YdVz*LQL0>NF7w
zM_aXbyZK<_Xut_)C;9fNRN46Y&L%I@(YpHnJKnh at Tb)<?!7qgau at b&N;vX;V<?CsG
zbK`2#=uUQ{4R!Kv=L8oEFXo3ko&Kk|@~-b#+w){g)8{D_H(IY|FFNLa at A=|{i`|L*
zYw6>MfIOB5FU)uM{*Jk`-haJ!^7x6=Hjdt3)}G7j;cNP}u;xB1j&2Tu*nC?4^XuZ-
zW9OO{W(zcKo_^a<PLc6^YvuzB+*Xx$*IK2VYfa at +Ilac4J1=&w-hB7|QQuwcyiG^N
zTEW<eUmpkmd at f$!?9N|SgkGWj?(poNk9w&_kN3-qwbmD;&$ZI_wtl;$%4YTcT}5cz
zk19KNhCbhSsxLNfU!6veHs at jg<9#e^rSf3<+j0BRCWPVd-kZhtv*z9F^R*p8N(F!P
zdii8Mc>k$$zP;I;AJ6T=&e7XSY5nNS?xWf)o6`36bZy)CufxIPPIz2IpGK3;j+$?^
z{WqW7e{PevH-)Jfh}#h=nm+&aaQlTwhsP~DebM(jn#`AfTm1TBOyAD7Jb%l~x|iij
z@$;YE?ZVC8_vdMbQ>8jFUT&HHap?WsnY{DY_;>MOr+V;YtMSL~`QJ-}*YCH31)pwJ
zzjETEoma=xf4kjzHC(Gr<7 at uud$YJ!ieG(CA77*&`xW;~NqwGvfBLBS$LFKR#f{f{
zefjp9uC0Gci@#L<bN}SuxZ3N_4Y0?c{Nx>}JX at Q-J}Ir0_udEV=BXDotTXkWg`NN0
zzx+F0d%xeb&*b5H`CNbUy!`R~;q&$9Cw~mf*JIRPpDcDBb<lr&$^X5n|NdnYO}e+8
z?UU|Wv9R~{aQ*p{)3?{%O8a0|@3!?PJ at Oy>cmFon`SgA>Q at bbqwdGcUuYG)X`gDC`
z_~vL^OU=VAZqQq|%kSPDKH?tjzKz;D-;ORG4KxUO#p6HDH at 4S~-;CF+e6;_htcio_
z&ha0&<(F$W?=Cj-bLZqqoA#UH#;3QJPt?apyLm at Gx<6cNO$HSh?srGO-u>h7JwM$(
zKE8iixh|G!o$qgNp6kEdzMC``+4a%dRv?;%t>~49%1_ANX(^)Q!;5Ewtics}|L at J&
zqs8WzgTYnHKi_#yzHbYS at _$|Kt=Z4gx9huIVOM*CjX~QiyxTq8xcz(fA-s1Av%4p}
zfQ-hI{(lCaUKU?|Jf0k_JA0*aGkd9RzxZ?azPkR8^v`cs&c at C0Go4m~t>=^1qgeQN
z{^08^t(V`f*E_{BU+lj6e6(Z#divqZ#q{xe^cXd_`;UI*es7;YdAxOeW8ZI>e{46@
zp6Q6oS8xBEw*Ps2a5-$PeVc6F$LOy7%hs#S<9~m=@%<w4e|_<`wbffMmV3_Wf9!uO
zmB0P=<<s5z#YLf1X{y5Gm#^t at t@!VE?T-16r<T4^Z=uE1I{Lrgy)AXf+pm9o>qmd^
zU80es*8T8tuhsjzb4iajcg*q5`gXBX_-EynH(OhKq)$#JmGURG^Ih|&`nvxcKHGjQ
z-hJOo%EIq*&95lMj<);uC|vueaya}m-1I*5dbWSv-D<tg|9oEghdAqgf3%f<oF;3I
zPM>=JdGeKixi&rgI<A&Ztjc1I$c<m6SLvnvOj*Py&9$|p{+!>gb8dF@|GfQN-ddjS
z{(hMG at 5cDMFm$!j;pe@%_`hDxjnmSLwWPN7{PCklkDd*`Ue+G{Qc0$7ov!kQ+GxP*
zt?v81*Ne+v&W at zt8<P0-WU^VS4SM#6*MFXGJyGvY-+m|f>m=xJTWa}nuz#VH{<mVc
z_@*S4Pd;2*Ri#+q=C8k<l^(t5;-hJ=v^=%VEu&E>@c-%T$!nX{X!krSo!yP^A9Y{W
zO3l~%yJqiKE<5=cy1}=zNy)}(_2tQ*pYFB452~#%)$*o0Hj7&@#4Ymb;Ip{>WGBBr
zlFNj~^tn)xdzJUQZ)V0n%OQCubw&Bm-fXQudsf!}>twgJRwzymkCyyw=r1;;N~ON}
zY4?rC|Fh!eZ;Zg4?B4h2=Ch45eYO8l+jw4?e*d(v$D^a^POnhdDtvo=a^C*6)TSSz
z#GD+QjQE|MVqN+D&(A^q*P at wxNGTfMrkkzev+WZ7?RwXGx>><ruQMeYo#Zb!Up#qQ
zxO#JT+yBRQ3GL>t;$Pn=J1 at 5?jppIoy`l9hvbFc})Vn!5vpO5kN>%C4PhXwFuSLb$
zGbx!|nbk&Nv&08~9Df1d-1fz<mL8 at PT54`>6s6#|{f`J<_ at lj>aCCp0l-lK;vNihc
z=o8)E*ts$G18tIx!eV`MqbR1YPQIWQo7?ehZ<vjy-)0?QXRB;Y{y5sRHy*FwNxP=H
zRHt6OxVE-YY5#e6nC|@Y!l-^QjC<wgl&`(qD7MwTHwP!;FAb&rPH}F8Z{HvYuWc8E
z#p~OB>G9^KeR_*8(df==RM&PY?d<iz$7p9`J(j<a`8d2fuhgEutd!j2*H;IfwdS^Y
zV&C486K_!~Jg<~R`fm5*vi|aA*!o0ASJ~x7t-tZ2R-{+IpY97Ui<{(P`u&0)&%gC|
zcA6DA`}6(YmAJm4R`<fu4W5pWP<q}fNsG6qyR^9RR4^{QnG5v1R^8etRp{*>_q+0T
z>4hCmzm1boa$hdJESD<i<n76^x>?w+P3)_QH6)An=6bEvHP1fp50w{Nt<sG%o+t~;
ztik8ShIMp$K$|btnw4QNUs5mOx~1(_SqeWNeF{2T&xPLY-Nn6k at 9SK%#&^-=?br8<
zLHVhyo*f?FPvVIrG>ff at Hh;JOr#C39@#4kt>CB6LuHP&-tLpUa{`<LHdLoJEU(e@(
z8%k2UQYsC?cgJ6r()Ocn>*Dmr10gPQm11G1k$pJ)GO at NFwd#jQXJZR#nNZy<tQYLB
zy9aah@=>X{e|fVI&>-tHwhHUp`tke2k-ha~t8{vOI#Ee~-W`-nTU*lb?dfS;EIjSq
zobTN^IloY?PPwqd`|nP^E_yqU`_9qcWvt0)qDhr<sqP-U`+V-MKdp2R&X1>k+!>mA
zt6V6k0Zf3uz4MrNPxfvtfio^Oy#|5Dy8h$KY4&WZ)IYvCo;CH}c#^@`ReH&%y-$nU
z`kJU6os7)BVjY`lrBp6kcYhomx=(jD`op`^WnGv33#+ at cS!&YVy-!|!{V~s-o-I&M
zvyLTH*e;ay>pzYU^B0Ae&B@|)(Zu|1YWLT-3Z2EDC%bWV>yfOSA1zH)$^y0fVrR4F
z?0)((R at Wc5>&K at TQ{BM}xBsHJUcvhxzTA-QM~%|y;kS{A at Y3wRD8c7@A3tA#i`Gil
z``>PI>?G1)v$WkvK7aanLAHLa)-S&toG)mOy3Ln`?S}vU>z7+~``7Yd{ONE!m13?`
z-71v2!?y>Yr~Qpz%hK at U!E`*BNktgB3V-~^m)&J&?P=M%{Ic(jx$&U5T`X4AZ*RWr
zdG)pDWpcT at ACCFqpuAHoROI6~U%tkrXTaO1Uk<ZT>%Lap-rgw*7k?b=Mc|u2%)TBu
zx9w#Uz^_!~79UQ&ME$~}YI}ZovRrg9>Gz(O3q5P^!}}Y%@Jo4Xc6WG(v*uMrFYJ`I
z_|g0OFL|%{E8n~PeCyLr=5hK~xmYt#|JXgwD;rz2 at o4YP4jShUaJO2q<9_|}{yr=}
z=E_Gf$(A$tW-cqGLV1vUzWFe>%a7E`$@iP3X&Gm>S1y$b0@*wNJVDJzYVq=FHdEP&
zpj0Yu^ZL=@?p;=S(%rgWcr(F9=XO>u?G$+V`qRP5qP(@WGf&U&lt3H$bWqsZsrKhz
z&UV9o?Xk#*r^_Tm;f;fK%H^VnPQHA)NJ`HtJJa;$##mUx$m!R%%Ux-7aI(7)>R^16
z<7kni=+1Gvjb2$qlh0Sz>Wl5|_TA*@Ru9x at VF~3zrPj;u_IB^m;-*-=9^YNdo;C-C
z-fq-;-16{bcPUq&D81>4KhudmcU-PoEp`U}{^%&Hbf3yNJdRUa#)~|aTfM4a-+#V2
zb<3Tn3dt^AOXH1@;$E{>@0pXm`;(+9tO>SzlG=Jlj~ozx<ziKg_m`(`MOYJ!_#|@8
zzU`%f*e(^D>TGv-Ov}9|vXPvot|5}?e4=zf2bkmi^Zn(3e~Pr(w`Hs$<H{RawQ6ag
zdq;=+W3#m(b??WsWj7axBj03sU@^SdJGvr;Vy82j-_5&J7~d|Pa<SAFle<sH*Lks3
z?hoVJxMQ~#w;?X>6v{n3`EYS=mH2JzZh01(t~|L8?P77K)J3zs*{QKDZ4o*>$lRRH
zZ>D)0X0#$kC(*T0tUnV5(KpZ2Jn!V<K5Ms|0$yH^hFW!dty#(f>T$VoeKSkywfexl
z|9)_~unL>Cst>SN)Z3ot=sVCjbNc=L$?2jZcFDK#RlpN2P1NqQ7cX`iTCnfK$higo
z{$lrj7R2|*xBg&zYr87ZvA*n<+b?u3yo_ho+&dknS*O|7^84d2Clj+&Xz^Z(^g2?O
z6Gz>ybans!_#;eo%icEfY-ngCJUuua8~s|l*Au(N-iw#(YmHiVJ3aPJ&kv3+gI;5x
zc$brdC=xfD<zC_ at xUUY-$e<M<=7i3UgQEaVDv_Em1+~(zXoG5Tqtw;ntE=~4_rE4r
z)5Xam@@1N4KGqB6R!fugpzk!ccOLDmS31Q;RndpzT(5~{G9TT1y52u^PRQun_;lzH
zB!^OcbM573xh4(LT+j+-j-TBPul$?)lf$9UahmOFX4)my?fSERS4#=O0X at 0!BGJt0
zVmSLUA9-cB+Rqh)I-DYSn$)R at C=D(zj}GQDU7E(X*Yk0*@b5<{?KJv51=&MwlnY at K
z+I<%;oJDrKaP&r7z{w)JF(PY6dr|MTY(*5QI*y52?+cg)(Qz>G+G$Gy?VDJVs?3Pi
z+b*{h)v{-~CT*9?WmVItZb$L;B9|q@$<pQY;?B$a?f$?fCK5Dc`?=Ar4HVl=r{~{>
zzNK1L5T=PmT;gGHnU08Pyd2)l7g)EEKMzBjM)^3^xUy)FJd3VIUc~i#1IxB-1?6;<
z8C*p|Gz^1pcPwVeI(2MQQVE$-v9ev`O=Rd-*~IU;UBe}rYgu`Go5^Lc7*RFkiM)uo
zWlJ^fEO+fBm<3s{*;Z{gLo at f%vuK&`sy0Hhn+4;zKWJ%j9J%RaI1R*3tt at bwqT+0M
z>ltE8p?Q+I*ZKDwGN~IfazRuF;bNL~dmWj&OB!4s9e)Q)={mZt^P1u$X1%mk=?SKe
z at 27_+^BaxgAWnT_u^5fxUaQv8MP0HcBj7?>tIG3=YUuHNytKJ$d!R_NoQ=<~r$po%
znyedozPP<wsC>D>^?5-I$KP(}d9zpHTJ2uT^+)$<uT?Cy%GEaKPfjkD(O}T!2b?5(
z7vH~wxwI?Ia-}WIM+c|Vg~)Z=^;(aM&W;bSV#nzBn+;AM-RvJ+UdG4}x-CvjuTKuo
zrxB65<#MeTEDpb3p3XB at XqKuC&6|9`8c%$auWT3VYCJw4-;DiGY?TW2ft~oX$#4>y
zT)l$CbWTU($uz*-Zn-9Qb%gxoGBvo4(RQ((y5V9 at Rk_9~eU;}-mr|YUgA3qHBU!wl
zSmqSj&_zzv5z(PxSW1bA4L3=^9ym2Osik4pMY<pu at iLgBBu*EZDGNG+2_p{8DAGhR
zvM0+lh$FB%PVA~Ni88Qag4EtX3B1u|HdzL<-01aXBMhP-4*b+s20iTDPp)U<;dE;A
z{idD;qoo%HDOP)(UP>-+rZ at 9tm`k-zO$y at CJdJaMXXBjYXWzpSMV44ERvNOm94`<O
zo3&D*()Q!i>&zE=old3H at 6+!93X<3;*Q&LiFrOS=FLAd~=R2IH2Uo+f-Kdmn-GOMT
z-uc<xqTQ-gk%2Xo&F|-E(C+FcrbNq+?$3st%_~Wk>&OgmW=j|z%W{aNYq!zq9mzS#
zN?qMmlk3qq8}OV&C`JlCy#GGx==E-%=ccJd^DvUSoUA#-P<olWoME}&;$eJ=WCp=9
zA8>-emO=&YB0rn9&061ZZO!VEB=i)Cmkk at 4)<8dA98Q{gNy at _z5ye|9rkbkD$U>GT
zM at e`?x>{A9rJgIQneY3Cq_uU!qKY!MhD!xTSD4!=X=`>gUpiv1B3dL+`ACe0u_D*{
zfsr6yw_|_i2>p&0`+?FZ8^QVMvRT|2c$0gh*S4-MzPrs*S at CWsq+046o_@Hm6xW65
z at G@7M+2#2+y-=!U=f`)#c42(^@lvU6_l=W#n)I#9`OI!t`Xs)O`Pz2n;_Tb9{j at AB
z_vQen`N`>0+ITUTeLHmuFACR3Cuw!9YF`}2y=`%L@*Nj;ww$Z)w|wDw`S$BoQhnUf
zPfloeU74I5(Pn8|363XH`FTZqd-6?uT1vlOdG+Vw)zOh(UoUmN!+T?Uvpjoy5m$a4
zEWUo at S}&8&_eaKhZD55*eyv=r&)!_*?MK4(*HM|@3O`Iw++wH2g-5sDVx=;DeW4B>
zDTjym>ryp*x17WUq2I{9-3U9?B7JqNb{_Lb$Mm@@Ctns$D071%y}l%c`i}hSK<ln4
zM;H1tG0IL8#dG_OEqCUnh0<2<)z^W%X6!GO4f`S;r%LSBt2=tC#ht=-_4i##Sm%%P
zVv$^3WO3r8l?o?P8Pi&^{Qi?0m6ZkB-oeq~D4uDkvTbNs)dL6_2Y2L1_9p61!BVHE
zCr9JFQM5avW_hM1o}S|*8BY8{QS7G6$>HT4?(^JWU}lloF^-RO;w^9HJ9Slp7<PGk
z8)&j5^ES=pzBc)mn0~&Pb*r+P%qf}AV$?M)0zZ-{+OeZ!lE%7b+fiiO2~8~?VS)@p
z<s~V}RLwExu>!=w!#t%{s`Ig|Xeu}8^tmHCaO3o(Rq<5QuSM5!CX1LWX<f5h?=Swa
z2Fqo9vsO6)CfWUR)sjS&FID`W!uQmJ+4IS4Fs#3f^HLU{?Go-r7c2c#;AGs<-i7_k
zk#)OS$vVPvzJFv>f6y%nSv{B2hWe&GIlp$7YdS8Q^RwNV?2~$-i*fT#&CB_pFE19O
zu>U0N7xKx$SC>o%<%*mL;<+gnmS2ATKJ at L%)7x%ga`N at e4X1^jo`gkwF0^-M$G;qZ
zv!&9 at tL@Us;n_{NuwGWAHX?M$_qQfr|MB%ulzZ!wo$A+v>G9A}*Q8!6qWQfbZ|ZM;
zxqOEQbel at 0gZ<^nwbELz7^P+6jMY}9__|#EJ$xB*TWIIvb98djCdG~@M1gy&H-wGx
z{y*OD?HEd@$H{+&NB5&nz165&VW7^0dZGMkr}Z{kyXUu!-teP*a3 at I>siH-(K9B45
zZTsCXpWg5EPkW_-ax{zf(l)9zE5skrZ+cTKci%j^e)qh6AgyQF at YdLK22rQZ*@>D@
zI~}Psc=PDw-G=hHy=mq1JL3Z)exuv8Q!%(xM7O&AN9)Bap>!0k(LlJdjwZFSQcM)r
zw3b>&Zf~ECetA86ay(qaz8uBJ!~Sivp_ehJnMlawPW#Oh=k2=oS$a7NdS3QD<Hx=7
zjHeRuS{BEZ_C~+H3LfuykDXZy&rbsJxY@|cqHOr7;@b7<t2O at 3Gx3xB>@2Os(R?vj
z7>$`YpmL-OcE3LR{O|9N9-SUO702E7#pGu2RVvI%O{|(zH8#2Go9B%;&p0SP3`*oC
z8!MB+U@;IgA+Z#r+rB*i<@d>>{j10Bu-V at C7tT??INoWy20zrXrZwOE8ohrMe`#%;
zhK1?5;D^(qlGWXy9cq26(*FJN{N3aD&z-F+<N5KGad&DLZyRODZsk)|bcA=$?DuQw
zVSn?I7DnG%<2b2MqfGLFnY6{m_v>GNUpzZId~9D73-9&8!CgZRx70z}bbMV>-oEU-
zEq1?>wd;Im`L!3`n=<c~71QN>VbFcAzIbDmhbNCFX+i(oaSwnW<C3T#E;dDLeDe6q
z4c|Mc)Mnkz<$393tk at k~(`-XeWtDzeyFE6`$9Vgu-w3~}$%$sGW_O_4Ty9$G-LiZ>
zcZ{pP?8 at RaFozztWt9_hq*Fv>@Y3ZHlc9zn;CZ>8r at 4kSUAA2dXsBh|K(i@|Bg>?w
ziLs3>4OuuhB`wtjMbUB at tQyf7Hgc?)*g}|ECMLF_0J-B7lqpUa+ca}A5Nd21k!xTV
zBep5WuxyC3W}Z`s(wM-<1koH37h{V$I-w?U9g?!`QAwN=$3&(s8>VIu2MT1aYh%#6
zh*F$HDKLaIM>r)G2n`3Zy;BZEbj)_4xNOgriyhVigbW9}#57FPurM?f!zq%(KRAQ1
z<zmwT+GRTyvNaue9bDzumYcbnkvJCHW&vm51=UrPU=^C{8VF>LhOiAMGy(os4VT);
zb#iJYo?*Mxg7MT4g6ebICWPkDMcak`P at o`A4%9ET4a)dDadT*IiuJ_H6 at YW3pfop8
zMxiY(GN3Ck!Wps?tT}P&BuS2v$TU*K8_t6E)Mc)-9bHJx9Hd*scG$KqQ_%?2Mq=Mf
z97@@qFH>WCObF3+OW|xsks%@^Hi!+FL?WBH#crZ2D)iscbwjlD(8Mw(Mr6R4i!f6v
zf^On0^-z#xw&_?n#SW*ay2|saYr!;29<?Hv0dryFWsKud7<-xHW_Cs}m9;$0aGrzZ
zgr?~d-Laub7%Msn{1J%1FiHZ19EsC8IfNOpl??laAEz=AW{w6{<#?)=z*I{b8T2*2
z4fk|)Ofj?&Kpbb5>E?t|+r at Mo$#IefxpS2W*;waDv&*TDZCgaNEZuPpBQ*_ at x?Yy#
z5lpZptGeVW9CSX5$t*Foh!~dZN`1b|Co|KHh#5q&8>7riO&c6S6D`b#Gh;qUiOp*e
za8qmtA$6!BIksvcN!K8ZTe_7a%MWv+NeY-*o=4Hr4}sn%{xr%GFi{I7mK{b}?!YwU
z7Sj9Jz$8H=2Y*y$qzSSlNUEsyx`w10G`9w9n;W+Cg_HYP5Q9mgG$F38V+(B4)NJIW
zFzI0JF2N2+J}2_0!{y8)Bqh;2g8A^>bRGg}O=mt+0&FXxYGuTU!G%BzD^%l<&=p!;
zp`$8&NfMA~XtH7`k}%+TL6SI)?}KvDx*Xqb54!N7EC{MB%A%qIWS9o{yQ!+GpsR{X
zG(y175dcc+rnyU#G!CNB3l<9y74zw6GMh{$!|`Z7pZVTo?%mxFZ-;mAdp#TvC!@(?
zGV^8&VBo$F<+41ZZbrdzK^N#S%21mHASQ at Hf6#CD+O2M@(P}iB&04DlFIufmtJmpv
zoA9#DiGnl`1n2}e5Cuh+;1}AdseoXHZ39LS>Ja8h1QVDonD!{iqBIMWaOwMtz+VRN
zcez;3rqjuII+ at QG^W}2s!%1)IEtcNG^F1%}{J;xCC>%m(vm{}gKHZ$D4h9 at -d!=D$
znxd?#h96ZCMUcciw2T*d_L?2=hJCVnEr_Bhfue>o8gyHQ_F5)D6u2%l+i?inK+5)%
z(iOxq(9G$NBVYZ(=@m{sT<Egn=er*zSJ$$V*{XQ{uyHj@*}?ko<9PUBb&6feO090i
zwvs*^Y|pCA_NZFs!!}iR=>KwPtPekr|MBt9kA8j*ro{dl55Md%*<r1=u_BvojwP%c
z(1TTT6F6cRcsRn<5k<Zi02nU4r4JJ~U%~_hQ3{5bPv?H(%@=;=<nRf|ab4g<Ji41i
zvw1F7%6%I}!zJpMcM6q$esUXRR%7esX2<d7_ep>A#U^~59Gp9?N_lIi#CvB8qSZIn
z%WaNBqwjQci!-^t5KpoSuc at _;ovIz#8QoU7a7=q0eVJo=Gu4}b8afWoj&G#sX5{MD
z;%@0(olbJcOM)bg?oalE$=!{o>;AXl at ceWP9u1n(o4eWQ#1EHqAnx>b>IF-xsu1s}
zWA)4CVWg?4R^Dn)W=M3+=C;A{hAy@?c7!9`E^C7qJKQyu%2+5CWWER9P}nM~M<6KG
z{!ZDtG=;J;s8wQ(Gc~PT?YYT8L+x1-NA4$vAS9+izp1WA6j^o_hsRCiFHLuOHgxQp
z+huwcn#pjMC)l69)#l;#Qi+Bq8ORNfgm;#GyGQ|dgQL?v_L6~|-^{VOj6^vn{oEsb
zzonvsoD*WLD4?k&h;yYP`&@%)e6`my-Tju>v$<{EoDLLu-fZhmy-Q at F*{iC?mv&$D
zq%G5!v~_dV?pUOw0io&itMc%uG#G}ZO5z7<7DjC at F~o4 at Vu|nL%UzDU9#@*n;nJQ3
zk!nTM&hHZd87cpGXX1mnfTv?RS_}cNW6O+YOPWcNzZ*;5v0ca}85u7946#IFmPRR+
z`O%Fc-Yo}JBiG{C4GCw+0t8#^AgQCxvyMB`+bvt{<pexLlsUj68Q5H>-IE{~JDED*
z<!*<UG((UzQ4nOsGSmU!fF at h6C0mB3XxOrd17ybm5JzmgIL#dhwN6YiP4g at XgCqnv
zVmOq<z8?pE6vtthCIL9c9L|J6oWMtM>chVzVeX&0$;!ENhXPD6?{aPUwG0bELxI-V
zE9YabsOhRI1AKwI%7A8oWC{S8Br6g?8k}NSr81lY@(9PudG!bXHCNlzE!#pCuoi#}
zAb2cZL408E;D<#G$IY`8nzX{4{3raxDFda|zaLNpSFsi|3;-|!9D0BwV0z99Au!uj
zzk*5tC59V}o-vNb!arhr-JwBj|GZ)SYx5WdHmrvOK3#3AXV3;e?3ZoKe}Fl-6Eu~w
zefc)4j>X><3_=NeRmy%i1$et^5If5{NGZcv2B36>!3?9x>eq1}o*rO*AuD8M1y(&<
zUHyRI?3F`U>)4#E2<{4tSD?7M{GmBe{X<jOptENJEC$ODn5!x>iz$+<vd#0dszO{v
zK$>B4bjF*p2}TA}MPPz3YYJo1 at XBR<$yOEuMuXXAeXzwmPGB}5Tx4;WxDJK*2cM=e
zIgFO)ls&A#9yG`@u+fCN*iG0|2=E5}z^x$mQVg|PK>5L at kwGlNh8Wa_Z9!qv08 at m2
zrmmZYVyHF?2rx;8Z8*rl>?LM%suRO8v4cT<xKPewPf5TC8Kw=dFx(S{!)1?Cz^Gav
zWY~ib&@~pO?VLT30oDXWU^O31ra&XmYVv?rF?I!v4L*hufe~g}!lq!s&_!Z{)N<Ic
zu?evq#B3Z26BG0XyA8oiTY;*;$gvR^CNu_!Gh*`#?STOy2x_#Mba64<78+(EXY~{Z
zka)zbZe^MVn^eXCOi&BX$~s^UScQyZVD^{?dT1a!2etv_p(RXxIc&O^g}AH9WCehj
zz!Wnu`9U3lRDc>{e=wV4%;W{Ddu9Bq+c7U<WC=>JJ2C5E&zIOM4`Y8}09G>xM0dqf
zpxmnT%4gW)DHf}g8LvYU at E@G8T4C(r65uyFQ()8yNQUhQM5>9?7>tG00^VzyB+NV*
zE|i$MqSyds8Bi-^vIk#)w1>dlRWnvJeqcEMB6Jzi1EXiMbvYf|L~}wz>-7~km`9cb
zu%&8n4!i#SG|DX?1sXRHs5c9+1Z+`a)C*M5V(vi;v&2$(l`bY;ssk_69{7nx{UD{_
z)s7}gcsU$<jv<PQW&lS8Yju}FLN%gmf~eCmgluBS5=+Rb%;yM{O;n@<y|%LX?bOHM
zl_qmpfO;84ftzuL!YjNJPj2Ut%}WZ{A+TX^f8sA_%4xvAIYbw?v&iJZn=LltFobxz
z3}#ZMVzqCf<!G6T15gn<n at MEKT$oG)l=HD5TlVt)cBXMXPUKW3evvvDgp=`AH}B~N
zXIlQHA6tqf*%k#nc91q`k?1U%%2Eyr%1DB-Pi$G2O|U#C&cJR>v|u9$^2<OLjx$iA
z1i}D#kwa`#2MZw#!5LT~hFQ!GfpIW;%k=o_i6$mX*rYSWhW;?j(ltW`Yy!6az-<{m
zS+>C{VYX+kupNvP5CyaF1y at ke-r!jd!!gQ2CDfKPlXciLRq$~Nf*k<B&LILZ_q17S
zz!D)85Q7p76yA10W#)w44o+GYD~~}&14%Uje%M?&E7Nxv9A<!up!_WY;A3iF>&!GU
zG&lyU1=oTB2G~Ma*P&}Vbdd!_$H~wN5x}>=moba&EYw(lnU3YM5CdSbN<f%4WhoHw
zbPFlSe0VgB<yM%#LNCF1a^_2x3E+bORsh<N!(0Shi*?l^EJE5WMFHyEMKon;6c{m=
z<lxo-KaOLvd~-&A1~gWgo!Kl5z!Wl2W`JVrFg7sdDOAF|Q-iial?KrPq$oqT91)h)
zfnd*~D=|z4u_ObQl2ZmkPz<1(Er-H2>?Hhxu{dn5AYg#gYtS0TP;dquip>NAx>cZs
zv4iX3s+GiI0SeBz*rWiztPhk~2YbSeJqHJ{VHz3)a}1G!J!faK(C9FfWvL$qfG#T!
z$juOty at If47=XDDCmwpsazJP`(#!}fli``mf;$*Bi-HJ0XcU`888&JTW>Fp}6?@bS
zMgb<vW_*oB7Zz1nzycU#frp7X7O2TXPK&jQnLPtn_B0%25uU6_1%uxeAcN7-2WW+X
zW at yd=3EY|8di6jbkQj!`jBhZ*T=B)#u?kW4gN9noFIk25)wn(c6apcexepBRUuD?1
zuPDx{%ui;uy350j9vIx}_$ddm{qsWmA==pZM<xG5HU2n$D99hzKD6j3<$9oo<Y%e>
zaXdUL`Lo{t{`mj*=4TD~FQ!Qz$~ojmADn-=<EjAp$=dLbrmdQXe>TMV(cT~2^5?A|
zbnuTyE?4yLFMjsl{d*W{^7B)6gC7&K>hLNKyFX9=7>l3HbAFE0>d{d9$7KFc#6PO~
z>- at izBY!pbr;29G at _})>|I+*a9=cTl>!E26Metvf at S{<yax0ttYf2wR1T(t5nsxYb
zR&<|DyUPd;W{?l)N*T>S51q+Cl2DeFQVnLb*wQ6IB>sF6*|NwhB+Al&K;)8j0+xwD
z`{F^f=OnXPYUrS&81Mm<m>{I}ep_B9fn#U}8vso;6jOIDM+>3VRQ$+uS4f{Bt|w at q
zna*dl-E0|4XF&ja6B6m&0Z;N^cAc5Mu7+bLfEoeWvDh3KBz8|{*`UgsQwrkGwNf{i
z8UrbH+}qnk?KDwHlML7tO#-dc;h@~`TdcRMBnlwVI4SXCtx at ZfB)dP)<a*hKo*)&N
z;UZS6Z1pxdy>|MQK{O0u5CE;Fp<1bkSsEVRo6Uk8+{Cu2TBbA22IaEorvC9*Ef+O!
znCgy!4QHH5t(r=*=;}Cc)!WO3Zz7slP7r8tTZ20DvpZaF3cl}}B+G43QJT~f6+{-{
z_0VkalIH;%&lV(299<P5&e^t_A54W-!yO_{3GaPdi!)Gt5aUd(-!}FxP`&O<+`cs(
z#-fx3mS8x#th#1Hygv!rsy3x%A)3!61wyx^xu)2+F)z3WV at cvYuiG%w8PMs4BXY8<
zn}lkOeta}iY$L;!UgAv=2KuFHh{}M<1}CNGp-fYccAJ(>LG(@>MbQb6e?#SX=YH;>
zW$wr=of<h!$B7F;R#za_DvFg)a}!9VEA$~kfS8&?d;zMifVt?Z?aorm3G-YYAi@~_
zk}X at fSOL@2R7AoE86kvg*`z82fH(nsWzXXg#F(NPfki|7)fI at 1IdMX=TE{RT)T?Z@
zAs}{=C%I-ojUa~twz at z}4U`wKLDgAe6gx2yb>Q}(q7suaa}zqOGREjGkQLM16wJ)9
z9FXoBb|I!gU{z#Amh*dJL?qJ&C^CTUW>g{6k}YT*=sZE5OsO$)96`)tIf>?uhI6c#
z88z5by#l at riFu1vQ4cddoJWS7=DOg9j*N)h9+30MFwbDvQH;CZBoXw$k-5}Ig6oQP
z9#6)y{aqGC;83zRPx>^rL?uoPEqC~aV#JqvK21fzff;gUu>zd~Lr5VUQl%pbXgWvk
zjig&(7sv;vBC)Cf8HZqOa05<GzZ)=xiija56X3bfa!#TYs4fxvf?#=bJG#{sESj1T
zd6`2qjbpOW!MY%bl6_~V6JIw?AoF$@vPT2 at 3N{QNMyez#dKlTkl}T(>)RZ)dfzN9y
za5o?~EZeNokrCfSI7gZ);Vh(V7a=uO2(B=&nt~=ancQh1^juZh^X7hJQQJ_Es;HWx
z>h4^OuM-?3hAsfT#LF-?*(M-EQdLQo?U=~(8zl*;pomlv9S>AtZb<-tk|8UqGgZ^u
zg*J__rdU+cLl-n9QsDNIsw=Xy5b1S7f{3zJ4pqy6;edabtZkMiFC1ktBVnATik<5c
zn4!kf&^EAa;4&ynGSEywr&plOzB*v_<pHa(!M=$g&$A?_Y`qQm1XGeaHuV*<gHJr<
zUs<+-MGPiD8L<N7U(u$A(3%5vV&@rEv1}F2K?RH^+15%xQbt=DUB`?I0z+aX9=v$9
za$vIX#9{=PIbp(`u`s4+iOmY-I5A8XgCUsc24TF83Eg~^vj7`lD%I6p*K&+Z1M=z0
zP$7geL(23BE^>9KGzIRXV+SZ|W~3AWV*`wcEa((l^aJ%tk)fJ0wH&r=C+C>~K5M`=
z#AZ*7XBLy*80kRwp)f=>&_}M8>aOJ)>{(-rWd(o^1I4#86P*4b+Oh5+#yZ*74fc~F
zJ7 at WW+yeFu)Ev41<Q!ABl4r8j6~ah{3)~yvBun#z#t93?&<7J~rUk-ClfeQo4m_HN
zIpc0Xxxr(QuIS9zj6Aloah!r!h3JP>JGEh)bxG4iL32GRzKTek+LGothUq1SovVUu
z2%Mzf=bCq7n^__ at mCzCMvz(8DF5q{6VBl;lgcq3^MRHrQHQ5~n5WuW~<?@_msL7p>
zT*Z14DP7%CWp|oFn73qvL=FU<J4N|)s=0~LQ%zIPmdk7zJ78q003jVMV`Vwi0daaV
zXdNf^oG_-qYE?loc+m~uiclemB`~7l#-Wo&tJA6~ix6T|GYOIFQ;39?2D-w^6POSz
zORCIsvW>J9fPd}*91z2K$VI}q7z#u at 7%jm5Fv>$Op{cvdYQSi^OyjXc0|Z2uEfuA)
zAG3^@WiZ!aNft0?dzDTEGiF}`!q$Kui1ZKEh*=U2G`?vt>}Lx%EdRF>BCBl7sw$S5
z1a9TBZxvyy#_R%?-C=na at J{BYKlmlVE6)Ew%2yN>+YclR{p5M8q$<lXGKssQj*Q8%
z^ABGxg4yZSBGAK{&R;bC2U|ry==cxb?))V04;1;~@*gbtfg3YM$vpQjlDb-k|BF!n
z;Fk}q{eRH-zxe;E)QVQK<P2UB+kce|Kd*Qo(mxZknEb5chjt+Uq8u>(i_Zh~V at Woq
zvf(b!R%HD}%UKSAu&idDWe>||582y?0b at hUGEMBZHY4_IR`7#lTaCH>url?KE_?XK
zmxtsp{)<ijcWRlf<~^k3*dM0!a+VBT<*nIQy68H at ET;b+C^9HMhpy71tG-(-DME0X
zCfTYyLM)ez*)rp*0$o!T5YH(XJ^+-h+cZfjF`4hLvU0kbC(9sWn%0K6N#lsx><dQB
zRkBr)(|8`G2s3VEl3+GZu(tY^7#(gc%312Ajsa5J&=efb0 at siq(lpR+)r`Vr<RY+K
zOIMs=91uk`5QJYDOkc3FFrkJHqc15^zPR&TSwb#Lr at -yi)Prgv#1VrN#bhyxZJt*Q
zQ4nPV;;$broLmPEt?&k(++6#q2BAz+72VWjYnVVpkQ`fS_myNa^m9wp1c}pi+c5O`
zXo)CJeOqWFdNmxUnW+g}SFtoh*2sD6ir7kVx54N0yJ-v at P3>_qVR_2@`Yx3cC(66E
zejHpaL#U?T><&y*R(0F|Zfj1OyFz(Dm-pk5H_doX5E0g(%gHH4YJ{;^Y?!e(oy0T3
zb^DwGyaM9Kc|y4{iL`pt@`BsZbezfkfxx~*0?=?2pxzQCa;>CfbT}PPGJ)ecURNzJ
zrHcuX$626Mn|kUFXLlYIxL&uf$QsQ2H{Tq1c_<az7Nu|!NPr>$hFS*9``Oe{r*wfz
zEz=~Ea5{HXAn?2*vBcwSL^R)>;(A9jK&DK_f#X7S&{Qx)lwML|MM0v}6w^r3G>8c?
zL|Ib|7=9~DZ8f(2T$CZS`EJJ6S`ka=vG at fs#WhkR&MhEadB`##m;#>$wALZIDiFqW
zq9v|jXE at FBgk^ddr~;D#%at{VMu?>h8C%XWmPKS~B}->Mq)L+RnzjX0h1zaL;e5uj
z at hr$<MFVbbm?&pU4PgIHw)%D<2-<|@YoLL^`N0_-oP!=sGQ{%l4zz-h)tU)hKvwC9
zjIbPF#^N~6sg<vmB at CAAfp7&u)}pKA8fD+H!~h`!HEOd=EY`rARRls57?TYY!Dh-H
zLu at x(KsSTMZ=x9h1Qd)urEIAINL2>VlEEDTY#_RWb0j(-4<NYZuq6k_U1d|)E)EC@
ziXIsm$}szm8{1BOk^>)eK>X^!1P#?Qay<u`3PF`IvJ^Yu<r>Sp8KZCsY{(<LVYqln
zLEBg=Xa!Zr^P{EZa+xIcDM{yEqG+NlaH5s^M>D at mct;-4(D&w<#e=%wR6AOn-9|mD
ziFw%$*mrrzvd;C|UFY`tY>De`4+~c6Ez$_7y1{k2vg4hfh8B-Rtt<Pp1r9-9Dbk?H
z+ljXu(2^~<T$N0&`~(3u$b7w_FTHf1q^+!CD_lC9coRp<&Aw3Y*dYy$Q?aJ>41=fl
zV{h)7)Z}@7;6`z9y1-?x;S2_H;NjSk<UzaJYnwrq+ at 0v%T%j-+liS54uyi8v;JeF+
z_<N32ryLSm+5IS<=Rj^?{H$<6(d{KFC4HyQo5LG#l$x>uj at cdLfCrzXZY>3$=q?v`
z%ey6jwT5`rjR?8DQg)oSUTbyI)LF!VELtiOg%l|GhtQ~;InIC&{iS;C<3UdxG+I3*
z8rcWAqYkN}3PU=}j&BSg|5A at v^}d!KcFG{JE1m9=GpDB`A$I^DZATUt- at nSj)G;cu
zV6?2^T<g11tHmp#h{h-1aB`Mr8bk#>zfE<`>@=F4MzyNoBRVia`l4(&3GPk<P at 0A+
zf<zGvBcfdnNx&}&M%2rJUS{kIx{@sLrV&U!wvHkReeh%nB3ed3=SN8#v2-$Bkq!lv
z7Gp at 7$U=aD7!!k!**=pv%~^^SNUAI;Oq?+0Lmj}GGyvVgSedSaa1h!006-MMGK-Qx
z6tgIaLcjw6QZNI<lx<lR6$z*?f^e9{pf95|ibEV)F(QDsf-Zt+5>!nE!EGC^1CA0W
zVeFyMh+HD&sv#MI!FD}@$}wG*9aw-!Us_8XJ4-dSFeq<O>K5EVVlkYUECU at -$4inF
z2WA at Qj*)0o0rWA6?ylDQ7#difn^Lw^?J%NG%#?Fg25u_D5KuNI1~rlC5HP8m=s8<L
z0Mek_wr(j8yq~Q&f~$iPQ=m6m4ALWaY*6tCf)vOPtOICTZthy4lxnfe<dy9Jm>|&0
zHU;dFU_PW!bcvQxN6tWBfh;mXYeL)u)L123p$QOOGC&ppr$nWzl{w3#2xU43-H_PG
zp$-%RJ;HJ(P>TV;#kO`5ro-3^{O}cXWJP2ch9aFwQcO}eN#Z!qEyCC_V at nozwxX&g
z$<ioEUCP!%*g61PlQdNYL^35oI?o-VKP=%hNrw$Ud=1(2#Uj at v#iU?dx|K%>@OEQ5
ziA;_0IMb9QJIxG1Opfp6x|9a4so^Y2Q>1nJ3gbja!%?BfB_Xk-a2BZ+c9iZw1MnhU
zbT|~N4LcoYZ7sWXkm2S5-%@9zrP-<U)X~?8RqC6uZwPug$xOux{7eH$G at 06sVlTV9
zADL1|W8XzbArPAIB2FB>r$@6`thME2z6f(iMGDH(glSy|xHwEXz%q=I+UJZc2<DNi
zSh|(Q5f};E=E&ARfnt#uE3%~JF^n130VM#0br_l$bd070lR_>5AeAhjlOU?{h)4om
zq>iiEF^D7x=ImywtbxN>E<`Z}`?D}NUA99IOfO5Yr7CRWhAa!ZrLWebKpS`qFG)!j
zgb`hlWJ~UJL>+>Y3Q-5=X`Ux}>c-wYfezCwPQepxtSUSwYG{>Oz?f!f0__g{#d4+9
zfC%V<!2FgQh#ICgVJrLCr2vb$lX=T1wLp5q9Ti!HP|tNF4Ga&6vE>4Eq+n{vbTUm{
z1qX5L>I%|L34#u<>87kBCK*zH4t4o{=wRMpJ6yn=^BfqdVp%y+*(Np3$eGge$BPW}
zz|$b8DZpTe)YsUe8$@=teUJDdWl!AMLb-3anI(%r$FZcEEJ*<>5rn6NoH&V>m%c04
zbm%LfS)74hmE4Ra!GXQ2;y||D)D7-jSE(DTZ3gKiv2{_$U at SC(&}~W_r+~H7TTsyr
zL9qZTqUqAMWCeT&YG8s=mqkqnzA#Fl-nIz at +m1(9voM9m8aPi92Q+iP-RTRm9bYF%
z)$|OokN809veacOuTc_)K?2n&ydas*oUn|QE%XFE16`Z_e@&b*jvFxyMUS+A5d=nH
z7??r at 3sow0fVpIv6kE7(<<bYxF%-T}N?rumAe*r)QzA$F|9PG-KW7y$(%%ms&8rWm
z)>=HWzT2+XYk^{Ue*gXFJ?<DMXO-*a^7VE3<)`t2FExVK2|9<yNU#eHG6c%yH%Abj
zKsl2qq)!f9=XQAWgEOcu*iVEnudf_;z&|~mkZ-2EG84<=Z;0A79&Z}z7Y1NB5nfML
z!*#=XKHD76$MZGdR!Ap*`R(xkx(Nufu<3x1-t#$S&0&5T#Y_e(hBA#KO?dG`A_x%k
zhG6oO8{AZbK#D*3kJNJCAqZJCtk~ms{oV`anHKj=%%f+TjXvbz;C}j`No%z5U{KC(
z=$f9ZL(ZZf^kkL?StP?K93_55JMUT4g<kwY=v6NtoirV?9w>STlgE|ykBh@?B|GKa
z=C^c>6octQX3t-08g#Z>Zp#+h08SVTwE<1f>NWr^oizT5_4FQ4TxWP)ZT(ozo(%SI
z#{3Q!x(05x9*b&%lhi=VMmxf&^ySd^0g!ci8dDjW#l93AQ_k7XF at dWr)re~Qy{lkW
zKd<muR2l=c%G%QelhnbFB3$FHvp8B_N7TXQ?UpDQ0X1IP)-91MeWMIE57W1npc*e|
zmUvF0v_#hr4wK!`6hY={sl&)+)d%;Tt_AXZLKKR(4^2q!91T&iK=}@oiou~IM)6Sg
z at S)e!VTDfRpx&53anpBzWz<-yGqzf~o#N4$+B`?x|8di$nCz{z0b$QJR%XyRcfp3Z
z-z4mU?czdI64KqziYEa8IO?1{(uoc2haE<w$YBr7<e9oE4-tV)=A}>-0^1$zqQB>Z
zhO$Qpa?kbcV=g%kXu@~OBeggN4t|kdFFu)UatNgZ-R?EHfhj(WyRGy3GYH-uA2_0r
zmju1XUa400XS21mNQx*<KJd^Jf+;>lVxA(GwsT1UTb0f~-Af-XG^Lnijno$z5p(ub
z0;$m~giY3x3ZbPw*B6PSi at m6vdt~j^_VI$VwyfA0(%pLp1Nq9dV3t;MMlRSe=E2}p
z7zRG at X-nj4`vfl*rI6$kX1%W*C{b57mR>E;&qG%OQF`GguM4*YfpS4`?5i^*e#-T2
zoL5K6SSX7*bj;((dGi$?FSPSywW071WqatZeQ*32+^C>~M;vQ!Fq;JwP{mUI%z57-
zE)gDPEFOyz*j?6T%d#hh`aa(8Htad4FubXv8);SBCwSs2ebM{?CA5(Z!4dE5_7kJX
zrOBLZQ?=?;0<@l#!74G9JGCS_k-BDU at mJ072P`NyuTS|_y0OY{-oI7_UZFU|S6RMS
XY at 3gEoUH7s+P27(s<La%7GUxpA}0A(

diff --git a/lib-python/2.2/test/autotest.py b/lib-python/2.2/test/autotest.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/autotest.py
@@ -0,0 +1,6 @@
+# This should be equivalent to running regrtest.py from the cmdline.
+# It can be especially handy if you're in an interactive shell, e.g.,
+# from test import autotest.
+
+import regrtest
+regrtest.main()
diff --git a/lib-python/2.2/test/badsyntax_future3.py b/lib-python/2.2/test/badsyntax_future3.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_future3.py
@@ -0,0 +1,10 @@
+"""This is a test"""
+from __future__ import nested_scopes
+from __future__ import rested_snopes
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/badsyntax_future4.py b/lib-python/2.2/test/badsyntax_future4.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_future4.py
@@ -0,0 +1,10 @@
+"""This is a test"""
+import __future__
+from __future__ import nested_scopes
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/badsyntax_future5.py b/lib-python/2.2/test/badsyntax_future5.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_future5.py
@@ -0,0 +1,12 @@
+"""This is a test"""
+from __future__ import nested_scopes
+import foo
+from __future__ import nested_scopes
+
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/badsyntax_future6.py b/lib-python/2.2/test/badsyntax_future6.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_future6.py
@@ -0,0 +1,10 @@
+"""This is a test"""
+"this isn't a doc string"
+from __future__ import nested_scopes
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/badsyntax_future7.py b/lib-python/2.2/test/badsyntax_future7.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_future7.py
@@ -0,0 +1,11 @@
+"""This is a test"""
+
+from __future__ import nested_scopes; import string; from __future__ import \
+     nested_scopes
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/badsyntax_nocaret.py b/lib-python/2.2/test/badsyntax_nocaret.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/badsyntax_nocaret.py
@@ -0,0 +1,2 @@
+def f(x):
+    [x for x in x] = x
diff --git a/lib-python/2.2/test/data/PyBanner048.gif b/lib-python/2.2/test/data/PyBanner048.gif
new file mode 100644
index 0000000000000000000000000000000000000000..1a5c87f647fbf33e5b46103119c9fd42afbe9e5d
GIT binary patch
literal 954
zc${<hbh9u|oW`KcaD;*3|NsAAfBbp<@yCPLU#~s+c;@cAL)TvKIRAX@$tQ~sJ(#)s
zZvU2Bt!uAUEx%l_;C$N5GYljG9S{Msi-Gn31LeMy%y}8B*5$n3SD<sgB=>&By7x8j
z{~uUT;GrYce_~0|$((r{tZl_!y0Q#RM^`;uQ6(RHpD7?imZ|-N_{uUC7WQk3?{;NH
zGBwmSM64;~s?+3SXG-Nyu3{`#=d5DO(qd+qsKHY;ql=l7x86TUlZ}C6Vbu~P?#atE
z!mD*BPwkz#Y9{O4c}n3W8Y~Pf8hs4Am3i417pt>w-Kbi1Ms*cq6L0#=1@(C;T5FqB
zI`?aIZdcj6`MlEcyPC%tUL`!Oy6(*WPKk$;osA=QZI3!%)vK*u3l>dh6n=W+r`7Uw
z87|f;mIVuzy^!H%3ug*Ry3lZtiKX(3gJD90!wDVbQ~xb(3ZIH_hKecjIdrrNEMq*+
zw2q_9fVDYU$~z!BlZkPe2`868z>JleY9_Cwc0?5}jhn}yqqg&PsRyI=95n_eg{;)6
zUEV25u at h%3nrgtVIQJB900ZOA{Gyhb0w)(u-^0G>fNh4rBnQ=ooY0O-yFUwdHXcyA
zcQ9fSL*pbiE|<mbrb;)bzx9z3j>%fBFT}IRa?h1uSC4v4gI;fuEi5h!m!cM^#(GWk
z)Zh>@cyMCD9RtCd at Fy#;tZHd4VqVvKa97jak0*L2zVCdJ5Y(_=Q)s7-#`Zma8C79D
zo;Ht8`8 at AtY}j?kT1V}M at K+x0X?v|380XI|kgij|@Ti-SU1381e@~x>;pe#a#%!0U
zXwY=(GJ4Nj6vE9Z<1CQzTvD~cRgFbh!Lm9)<52PQi2^*0N<0}(#WyA{yw8=v$ncnh
z+d+JqmhwJ{=D_5=P12nQ8`P(o{r5R^k3)Fb0*4reni+>CorqY};~^rp-QCqN<%yI2
z*O?PMZ25C$KIG7ORpFGNZL#5oqLzx1{A}5?6FH@{b6!U9EDAX8v_MD4Qm%i7#Dfb-
zsdf$w5#n=CsHs&-%;03a{`Ya%Vl9osnUM at JSCTmYaBgm3`mo}MbD!^1!{bb=HZ5YG
ztMBtklXLq~s~K|E)2dc3vF%_z&XjSEA)ROPg<XeMXvnN&a_w^2=u%|eA$i!|aP8Mp
z^-yc8<*x;{eJhCGsOd9FSI>FE&BHUVGZpCVa=fskIb!Cda_v1<Cw3q1HK^#)7ku++
QLJp6_)!*-V6&V<;0S=;nMF0Q*

diff --git a/lib-python/2.2/test/data/msg_01.txt b/lib-python/2.2/test/data/msg_01.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_01.txt
@@ -0,0 +1,19 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/test/data/msg_02.txt b/lib-python/2.2/test/data/msg_02.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_02.txt
@@ -0,0 +1,135 @@
+MIME-version: 1.0
+From: ppp-request at zzz.org
+Sender: ppp-admin at zzz.org
+To: ppp at zzz.org
+Subject: Ppp digest, Vol 1 #2 - 5 msgs
+Date: Fri, 20 Apr 2001 20:18:00 -0400 (EDT)
+X-Mailer: Mailman v2.0.4
+X-Mailman-Version: 2.0.4
+Content-Type: multipart/mixed; boundary="192.168.1.2.889.32614.987812255.500.21814"
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Masthead (Ppp digest, Vol 1 #2)
+
+Send Ppp mailing list submissions to
+	ppp at zzz.org
+
+To subscribe or unsubscribe via the World Wide Web, visit
+	http://www.zzz.org/mailman/listinfo/ppp
+or, via email, send a message with subject or body 'help' to
+	ppp-request at zzz.org
+
+You can reach the person managing the list at
+	ppp-admin at zzz.org
+
+When replying, please edit your Subject line so it is more specific
+than "Re: Contents of Ppp digest..."
+
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Today's Topics (5 msgs)
+
+Today's Topics:
+
+   1. testing #1 (Barry A. Warsaw)
+   2. testing #2 (Barry A. Warsaw)
+   3. testing #3 (Barry A. Warsaw)
+   4. testing #4 (Barry A. Warsaw)
+   5. testing #5 (Barry A. Warsaw)
+
+--192.168.1.2.889.32614.987812255.500.21814
+Content-Type: multipart/digest; boundary="__--__--"
+
+--__--__--
+
+Message: 1
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Date: Fri, 20 Apr 2001 20:16:13 -0400
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #1
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 2
+Date: Fri, 20 Apr 2001 20:16:21 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 3
+Date: Fri, 20 Apr 2001 20:16:25 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #3
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 4
+Date: Fri, 20 Apr 2001 20:16:28 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #4
+Precedence: bulk
+
+
+hello
+
+
+--__--__--
+
+Message: 5
+Date: Fri, 20 Apr 2001 20:16:32 -0400
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+To: ppp at zzz.org
+From: barry at digicool.com (Barry A. Warsaw)
+Subject: [Ppp] testing #5
+Precedence: bulk
+
+
+hello
+
+
+
+
+--__--__----
+--192.168.1.2.889.32614.987812255.500.21814
+Content-type: text/plain; charset=us-ascii
+Content-description: Digest Footer
+
+_______________________________________________
+Ppp mailing list
+Ppp at zzz.org
+http://www.zzz.org/mailman/listinfo/ppp
+
+
+--192.168.1.2.889.32614.987812255.500.21814--
+
+End of Ppp Digest
+
diff --git a/lib-python/2.2/test/data/msg_03.txt b/lib-python/2.2/test/data/msg_03.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_03.txt
@@ -0,0 +1,16 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/test/data/msg_04.txt b/lib-python/2.2/test/data/msg_04.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_04.txt
@@ -0,0 +1,37 @@
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+Received: by mail.python.org (Postfix, from userid 889)
+	id C2BF0D37C6; Tue, 11 Sep 2001 00:05:05 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="h90VIIIKmx"
+Content-Transfer-Encoding: 7bit
+Message-ID: <15261.36209.358846.118674 at anthem.python.org>
+From: barry at python.org (Barry A. Warsaw)
+To: barry at python.org
+Subject: a simple multipart
+Date: Tue, 11 Sep 2001 00:05:05 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Make a door into a window
+
+
+--h90VIIIKmx
+Content-Type: text/plain
+Content-Disposition: inline;
+	filename="msg.txt"
+Content-Transfer-Encoding: 7bit
+
+a simple kind of mirror
+to reflect upon our own
+
+--h90VIIIKmx
+Content-Type: text/plain
+Content-Disposition: inline;
+	filename="msg.txt"
+Content-Transfer-Encoding: 7bit
+
+a simple kind of mirror
+to reflect upon our own
+
+--h90VIIIKmx--
+
diff --git a/lib-python/2.2/test/data/msg_05.txt b/lib-python/2.2/test/data/msg_05.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_05.txt
@@ -0,0 +1,28 @@
+From: foo
+Subject: bar
+To: baz
+MIME-Version: 1.0
+Content-Type: multipart/report; report-type=delivery-status;
+	boundary="D1690A7AC1.996856090/mail.example.com"
+Message-Id: <20010803162810.0CA8AA7ACC at mail.example.com>
+
+This is a MIME-encapsulated message.
+
+--D1690A7AC1.996856090/mail.example.com
+Content-Type: text/plain
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com
+Content-Type: message/rfc822
+
+From: nobody at python.org
+
+Yadda yadda yadda
+
+--D1690A7AC1.996856090/mail.example.com--
+
diff --git a/lib-python/2.2/test/data/msg_06.txt b/lib-python/2.2/test/data/msg_06.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_06.txt
@@ -0,0 +1,33 @@
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+MIME-Version: 1.0
+Content-Type: message/rfc822
+Content-Description: forwarded message
+Content-Transfer-Encoding: 7bit
+Message-ID: <15265.9482.641338.555352 at python.org>
+From: barry at zope.com (Barry A. Warsaw)
+Sender: barry at python.org
+To: barry at python.org
+Subject: forwarded message from Barry A. Warsaw
+Date: Thu, 13 Sep 2001 17:28:42 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Be dirty
+X-Url: http://barry.wooz.org
+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Return-Path: <barry at python.org>
+Delivered-To: barry at python.org
+Message-ID: <15265.9468.713530.98441 at python.org>
+From: barry at zope.com (Barry A. Warsaw)
+Sender: barry at python.org
+To: barry at python.org
+Subject: testing
+Date: Thu, 13 Sep 2001 17:28:28 -0400
+X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
+X-Attribution: BAW
+X-Oblique-Strategy: Spectrum analysis
+X-Url: http://barry.wooz.org
+
+
diff --git a/lib-python/2.2/test/data/msg_07.txt b/lib-python/2.2/test/data/msg_07.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_07.txt
@@ -0,0 +1,83 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+Hi there,
+
+This is the dingus fish.
+
+--BOUNDARY
+Content-Type: image/gif; name="dingusfish.gif"
+Content-Transfer-Encoding: base64
+content-disposition: attachment; filename="dingusfish.gif"
+
+R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
+7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
+qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
+oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
+RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
+/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
+YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
+6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
+M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
+B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
+z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
+JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
+ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
+n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
+eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
+qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
+wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
+Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
+doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
+lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
+E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
+Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
+isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
+ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
+Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
+I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
+BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
+pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
+B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
+1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
+YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
+FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
+ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
+zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
+ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
+oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
+cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
+7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
+GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
+FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
+pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
+6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
+kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
+UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
+9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
+xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
+fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
+SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
+VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
+fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
+J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
+l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
+I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
+yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
+qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
+MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
+uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
+WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
+YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
+MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
+cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
+BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
+
+--BOUNDARY--
diff --git a/lib-python/2.2/test/data/msg_08.txt b/lib-python/2.2/test/data/msg_08.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_08.txt
@@ -0,0 +1,24 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-2"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/test/data/msg_09.txt b/lib-python/2.2/test/data/msg_09.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_09.txt
@@ -0,0 +1,24 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: text/plain
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/test/data/msg_10.txt b/lib-python/2.2/test/data/msg_10.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_10.txt
@@ -0,0 +1,32 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+
+This is a 7bit encoded message.
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+Content-Transfer-Encoding: Quoted-Printable
+
+=A1This is a Quoted Printable encoded message!
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-1"
+Content-Transfer-Encoding: Base64
+
+VGhpcyBpcyBhIEJhc2U2NCBlbmNvZGVkIG1lc3NhZ2Uu
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="iso-8859-1"
+
+This has no Content-Transfer-Encoding: header.
+
+--BOUNDARY--
diff --git a/lib-python/2.2/test/data/msg_11.txt b/lib-python/2.2/test/data/msg_11.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_11.txt
@@ -0,0 +1,7 @@
+Content-Type: message/rfc822
+MIME-Version: 1.0
+Subject: The enclosing message
+
+Subject: An enclosed message
+
+Here is the body of the message.
diff --git a/lib-python/2.2/test/data/msg_12.txt b/lib-python/2.2/test/data/msg_12.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_12.txt
@@ -0,0 +1,36 @@
+MIME-Version: 1.0
+From: Barry Warsaw <barry at zope.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Lyrics
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/html; charset="iso-8859-1"
+
+
+--BOUNDARY
+Content-Type: multipart/mixed; boundary="ANOTHER"
+
+--ANOTHER
+Content-Type: text/plain; charset="iso-8859-2"
+
+
+--ANOTHER
+Content-Type: text/plain; charset="iso-8859-3"
+
+--ANOTHER--
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="koi8-r"
+
+
+--BOUNDARY--
diff --git a/lib-python/2.2/test/data/msg_13.txt b/lib-python/2.2/test/data/msg_13.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_13.txt
@@ -0,0 +1,94 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="OUTER"
+
+--OUTER
+Content-Type: text/plain; charset="us-ascii"
+
+A text/plain part
+
+--OUTER
+Content-Type: multipart/mixed; boundary=BOUNDARY
+
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+
+Hi there,
+
+This is the dingus fish.
+
+--BOUNDARY
+Content-Type: image/gif; name="dingusfish.gif"
+Content-Transfer-Encoding: base64
+content-disposition: attachment; filename="dingusfish.gif"
+
+R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
+7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
+qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
+oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
+RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
+/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
+YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
+6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
+M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
+B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
+z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
+JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
+ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
+n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
+eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
+qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
+wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
+Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
+doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
+lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
+E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
+Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
+isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
+ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
+Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
+I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
+BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
+pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
+B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
+1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
+YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
+FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
+ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
+zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
+ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
+oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
+cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
+7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
+GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
+FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
+pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
+6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
+kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
+UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
+9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
+xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
+fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
+SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
+VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
+fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
+J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
+l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
+I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
+yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
+qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
+MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
+uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
+WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
+YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
+MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
+cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
+BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
+
+--BOUNDARY--
+
+--OUTER--
diff --git a/lib-python/2.2/test/data/msg_14.txt b/lib-python/2.2/test/data/msg_14.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_14.txt
@@ -0,0 +1,23 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+I'm sorry but I'm using a drainbread ISP, which although big and
+wealthy can't seem to generate standard compliant email. :(
+
+This message has a Content-Type: header with no subtype.  I hope you
+can still read it.
+
+-Me
diff --git a/lib-python/2.2/test/data/msg_15.txt b/lib-python/2.2/test/data/msg_15.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_15.txt
@@ -0,0 +1,52 @@
+Return-Path: <xx at xx.dk>
+Received: from fepD.post.tele.dk (195.41.46.149) by mail.groupcare.dk (LSMTP for Windows NT v1.1b) with SMTP id <0.0014F8A2 at mail.groupcare.dk>; Mon, 30 Apr 2001 12:17:50 +0200
+User-Agent: Microsoft-Outlook-Express-Macintosh-Edition/5.02.2106
+Subject: XX
+From: xx at xx.dk
+To: XX
+Message-ID: <xxxx>
+Mime-version: 1.0
+Content-type: multipart/mixed;
+   boundary="MS_Mac_OE_3071477847_720252_MIME_Part"
+
+> Denne meddelelse er i MIME-format. Da dit postl¾sningsprogram ikke forstŒr dette format, kan del af eller hele meddelelsen v¾re ul¾selig.
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: multipart/alternative;
+   boundary="MS_Mac_OE_3071477847_720252_MIME_Part"
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: text/plain; charset="ISO-8859-1"
+Content-transfer-encoding: quoted-printable
+
+Some removed test. 
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: text/html; charset="ISO-8859-1"
+Content-transfer-encoding: quoted-printable
+
+<HTML>
+<HEAD>
+<TITLE>Some removed HTML</TITLE>
+</HEAD>
+<BODY>
+Some removed text.
+</BODY>
+</HTML>
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part--
+
+
+--MS_Mac_OE_3071477847_720252_MIME_Part
+Content-type: image/gif; name="xx.gif";
+ x-mac-creator="6F676C65";
+ x-mac-type="47494666"
+Content-disposition: attachment
+Content-transfer-encoding: base64
+
+Some removed base64 encoded chars.
+
+--MS_Mac_OE_3071477847_720252_MIME_Part--
+
diff --git a/lib-python/2.2/test/data/msg_16.txt b/lib-python/2.2/test/data/msg_16.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_16.txt
@@ -0,0 +1,123 @@
+Return-Path: <>
+Delivered-To: scr-admin at socal-raves.org
+Received: from cougar.noc.ucla.edu (cougar.noc.ucla.edu [169.232.10.18])
+	by babylon.socal-raves.org (Postfix) with ESMTP id CCC2C51B84
+	for <scr-admin at socal-raves.org>; Sun, 23 Sep 2001 20:13:54 -0700 (PDT)
+Received: from sims-ms-daemon by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8Y at cougar.noc.ucla.edu> for scr-admin at socal-raves.org; Sun,
+ 23 Sep 2001 20:14:35 -0700 (PDT)
+Received: from cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8X at cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+Date: Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+From: Internet Mail Delivery <postmaster at ucla.edu>
+Subject: Delivery Notification: Delivery has failed
+To: scr-admin at socal-raves.org
+Message-id: <0GK500B04D0B8X at cougar.noc.ucla.edu>
+MIME-version: 1.0
+Sender: scr-owner at socal-raves.org
+Errors-To: scr-owner at socal-raves.org
+X-BeenThere: scr at socal-raves.org
+X-Mailman-Version: 2.1a3
+Precedence: bulk
+List-Help: <mailto:scr-request at socal-raves.org?subject=help>
+List-Post: <mailto:scr at socal-raves.org>
+List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+	<mailto:scr-request at socal-raves.org?subject=subscribe>
+List-Id: SoCal-Raves <scr.socal-raves.org>
+List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+	<mailto:scr-request at socal-raves.org?subject=unsubscribe>
+List-Archive: <http://socal-raves.org/mailman/private/scr/>
+Content-Type: multipart/report; boundary="Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)"
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: text/plain; charset=ISO-8859-1
+
+This report relates to a message you sent with the following header fields:
+
+  Message-id: <002001c144a6$8752e060$56104586 at oxy.edu>
+  Date: Sun, 23 Sep 2001 20:10:55 -0700
+  From: "Ian T. Henry" <henryi at oxy.edu>
+  To: SoCal Raves <scr at socal-raves.org>
+  Subject: [scr] yeah for Ians!!
+
+Your message cannot be delivered to the following recipients:
+
+  Recipient address: jangel1 at cougar.noc.ucla.edu
+  Reason: recipient reached disk quota
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: message/DELIVERY-STATUS
+
+Original-envelope-id: 0GK500B4HD0888 at cougar.noc.ucla.edu
+Reporting-MTA: dns; cougar.noc.ucla.edu
+
+Action: failed
+Status: 5.0.0 (recipient reached disk quota)
+Original-recipient: rfc822;jangel1 at cougar.noc.ucla.edu
+Final-recipient: rfc822;jangel1 at cougar.noc.ucla.edu
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
+Content-type: MESSAGE/RFC822
+
+Return-path: scr-admin at socal-raves.org
+Received: from sims-ms-daemon by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ id <0GK500B01D0B8X at cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
+Received: from panther.noc.ucla.edu by cougar.noc.ucla.edu
+ (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
+ with ESMTP id <0GK500B4GD0888 at cougar.noc.ucla.edu> for jangel1 at sims-ms-daemon;
+ Sun, 23 Sep 2001 20:14:33 -0700 (PDT)
+Received: from babylon.socal-raves.org
+ (ip-209-85-222-117.dreamhost.com [209.85.222.117])
+ by panther.noc.ucla.edu (8.9.1a/8.9.1) with ESMTP id UAA09793 for
+ <jangel1 at ucla.edu>; Sun, 23 Sep 2001 20:14:32 -0700 (PDT)
+Received: from babylon (localhost [127.0.0.1]) by babylon.socal-raves.org
+ (Postfix) with ESMTP id D3B2951B70; Sun, 23 Sep 2001 20:13:47 -0700 (PDT)
+Received: by babylon.socal-raves.org (Postfix, from userid 60001)
+ id A611F51B82; Sun, 23 Sep 2001 20:13:46 -0700 (PDT)
+Received: from tiger.cc.oxy.edu (tiger.cc.oxy.edu [134.69.3.112])
+ by babylon.socal-raves.org (Postfix) with ESMTP id ADA7351B70 for
+ <scr at socal-raves.org>; Sun, 23 Sep 2001 20:13:44 -0700 (PDT)
+Received: from ent (n16h86.dhcp.oxy.edu [134.69.16.86])
+ by tiger.cc.oxy.edu (8.8.8/8.8.8) with SMTP id UAA08100 for
+ <scr at socal-raves.org>; Sun, 23 Sep 2001 20:14:24 -0700 (PDT)
+Date: Sun, 23 Sep 2001 20:10:55 -0700
+From: "Ian T. Henry" <henryi at oxy.edu>
+Subject: [scr] yeah for Ians!!
+Sender: scr-admin at socal-raves.org
+To: SoCal Raves <scr at socal-raves.org>
+Errors-to: scr-admin at socal-raves.org
+Message-id: <002001c144a6$8752e060$56104586 at oxy.edu>
+MIME-version: 1.0
+X-Mailer: Microsoft Outlook Express 5.50.4522.1200
+Content-type: text/plain; charset=us-ascii
+Precedence: bulk
+Delivered-to: scr-post at babylon.socal-raves.org
+Delivered-to: scr at socal-raves.org
+X-Converted-To-Plain-Text: from multipart/alternative by demime 0.98e
+X-Converted-To-Plain-Text: Alternative section used was text/plain
+X-BeenThere: scr at socal-raves.org
+X-Mailman-Version: 2.1a3
+List-Help: <mailto:scr-request at socal-raves.org?subject=help>
+List-Post: <mailto:scr at socal-raves.org>
+List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+ <mailto:scr-request at socal-raves.org?subject=subscribe>
+List-Id: SoCal-Raves <scr.socal-raves.org>
+List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
+ <mailto:scr-request at socal-raves.org?subject=unsubscribe>
+List-Archive: <http://socal-raves.org/mailman/private/scr/>
+
+I always love to find more Ian's that are over 3 years old!!
+
+Ian
+_______________________________________________
+For event info, list questions, or to unsubscribe, see http://www.socal-raves.org/
+
+
+
+--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)--
+
diff --git a/lib-python/2.2/test/data/msg_17.txt b/lib-python/2.2/test/data/msg_17.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_17.txt
@@ -0,0 +1,12 @@
+MIME-Version: 1.0
+From: Barry <barry at digicool.com>
+To: Dingus Lovers <cravindogs at cravindogs.com>
+Subject: Here is your dingus fish
+Date: Fri, 20 Apr 2001 19:35:02 -0400
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+Hi there,
+
+This is the dingus fish.
+
+[Non-text (image/gif) part of message omitted, filename dingusfish.gif]
diff --git a/lib-python/2.2/test/data/msg_18.txt b/lib-python/2.2/test/data/msg_18.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_18.txt
@@ -0,0 +1,6 @@
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
+	spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
+
diff --git a/lib-python/2.2/test/data/msg_19.txt b/lib-python/2.2/test/data/msg_19.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_19.txt
@@ -0,0 +1,43 @@
+Send Ppp mailing list submissions to
+	ppp at zzz.org
+
+To subscribe or unsubscribe via the World Wide Web, visit
+	http://www.zzz.org/mailman/listinfo/ppp
+or, via email, send a message with subject or body 'help' to
+	ppp-request at zzz.org
+
+You can reach the person managing the list at
+	ppp-admin at zzz.org
+
+When replying, please edit your Subject line so it is more specific
+than "Re: Contents of Ppp digest..."
+
+Today's Topics:
+
+   1. testing #1 (Barry A. Warsaw)
+   2. testing #2 (Barry A. Warsaw)
+   3. testing #3 (Barry A. Warsaw)
+   4. testing #4 (Barry A. Warsaw)
+   5. testing #5 (Barry A. Warsaw)
+
+hello
+
+
+hello
+
+
+hello
+
+
+hello
+
+
+hello
+
+
+
+_______________________________________________
+Ppp mailing list
+Ppp at zzz.org
+http://www.zzz.org/mailman/listinfo/ppp
+
diff --git a/lib-python/2.2/test/data/msg_20.txt b/lib-python/2.2/test/data/msg_20.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_20.txt
@@ -0,0 +1,22 @@
+Return-Path: <bbb at zzz.org>
+Delivered-To: bbb at zzz.org
+Received: by mail.zzz.org (Postfix, from userid 889)
+	id 27CEAD38CC; Fri,  4 May 2001 14:05:44 -0400 (EDT)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=us-ascii
+Content-Transfer-Encoding: 7bit
+Message-ID: <15090.61304.110929.45684 at aaa.zzz.org>
+From: bbb at ddd.com (John X. Doe)
+To: bbb at zzz.org
+Cc: ccc at zzz.org
+CC: ddd at zzz.org
+cc: eee at zzz.org
+Subject: This is a test message
+Date: Fri, 4 May 2001 14:05:44 -0400
+
+
+Hi,
+
+Do you like this message?
+
+-Me
diff --git a/lib-python/2.2/test/data/msg_21.txt b/lib-python/2.2/test/data/msg_21.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_21.txt
@@ -0,0 +1,22 @@
+From: aperson at dom.ain
+To: bperson at dom.ain
+Subject: Test
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+MIME message
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+One
+
+--BOUNDARY
+Content-Type: text/plain; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+
+Two
+
+--BOUNDARY--
+End of MIME message
diff --git a/lib-python/2.2/test/data/msg_22.txt b/lib-python/2.2/test/data/msg_22.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_22.txt
@@ -0,0 +1,46 @@
+Mime-Version: 1.0
+Message-Id: <a05001902b7f1c33773e9@[134.84.183.138]>
+Date: Tue, 16 Oct 2001 13:59:25 +0300
+To: a at example.com
+From: b at example.com
+Content-Type: multipart/mixed; boundary="============_-1208892523==_============"
+
+--============_-1208892523==_============
+Content-Type: text/plain; charset="us-ascii" ; format="flowed"
+
+Text text text.
+--============_-1208892523==_============
+Content-Id: <a05001902b7f1c33773e9@[134.84.183.138].0.0>
+Content-Type: image/jpeg; name="wibble.JPG"
+ ; x-mac-type="4A504547"
+ ; x-mac-creator="474B4F4E"
+Content-Disposition: attachment; filename="wibble.JPG"
+Content-Transfer-Encoding: base64
+
+/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB
+AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAALCAXABIEBAREA
+g6bCjjw/pIZSjO6FWFpldjySOmCNrO7DBZibUXhTwtCixw+GtAijVdqxxaPp0aKvmGXa
+qrbBQvms0mAMeYS/3iTV1dG0hHaRNK01XblnWxtVdjkHLMIgTyqnk9VB7CrP2KzIINpa
+4O7I+zxYO9WV8jZg71Zlb+8rMDkEirAVQFAUAKAFAAAUAYAUDgADgY6DjpRtXj5RxjHA
+4wQRj0wQCMdCAewpaKKK/9k=
+--============_-1208892523==_============
+Content-Id: <a05001902b7f1c33773e9@[134.84.183.138].0.1>
+Content-Type: image/jpeg; name="wibble2.JPG"
+ ; x-mac-type="4A504547"
+ ; x-mac-creator="474B4F4E"
+Content-Disposition: attachment; filename="wibble2.JPG"
+Content-Transfer-Encoding: base64
+
+/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB
+AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAALCAXABJ0BAREA
+/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQA
+W6NFJJBEkU10kKGTcWMDwxuU+0JHvk8qAtOpNwqSR0n8c3BlDyXHlqsUltHEiTvdXLxR
+7vMiGDNJAJWkAMk8ZkCFp5G2oo5W++INrbQtNfTQxJAuXlupz9oS4d5Y1W+E2XlWZJJE
+Y7LWYQxTLE1zuMbfBPxw8X2fibVdIbSbI6nLZxX635t9TjtYreWR7WGKJTLJFFKSlozO
+0ShxIXM43uC3/9k=
+--============_-1208892523==_============
+Content-Type: text/plain; charset="us-ascii" ; format="flowed"
+
+Text text text.
+--============_-1208892523==_============--
+
diff --git a/lib-python/2.2/test/data/msg_23.txt b/lib-python/2.2/test/data/msg_23.txt
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/data/msg_23.txt
@@ -0,0 +1,8 @@
+From: aperson at dom.ain
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/plain
+
+A message part
+--BOUNDARY--
diff --git a/lib-python/2.2/test/double_const.py b/lib-python/2.2/test/double_const.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/double_const.py
@@ -0,0 +1,30 @@
+from test_support import TestFailed
+
+# A test for SF bug 422177:  manifest float constants varied way too much in
+# precision depending on whether Python was loading a module for the first
+# time, or reloading it from a precompiled .pyc.  The "expected" failure
+# mode is that when test_import imports this after all .pyc files have been
+# erased, it passes, but when test_import imports this from
+# double_const.pyc, it fails.  This indicates a woeful loss of precision in
+# the marshal format for doubles.  It's also possible that repr() doesn't
+# produce enough digits to get reasonable precision for this box.
+
+PI    = 3.14159265358979324
+TWOPI = 6.28318530717958648
+
+PI_str    = "3.14159265358979324"
+TWOPI_str = "6.28318530717958648"
+
+# Verify that the double x is within a few bits of eval(x_str).
+def check_ok(x, x_str):
+    assert x > 0.0
+    x2 = eval(x_str)
+    assert x2 > 0.0
+    diff = abs(x - x2)
+    # If diff is no larger than 3 ULP (wrt x2), then diff/8 is no larger
+    # than 0.375 ULP, so adding diff/8 to x2 should have no effect.
+    if x2 + (diff / 8.) != x2:
+        raise TestFailed("Manifest const %s lost too much precision " % x_str)
+
+check_ok(PI, PI_str)
+check_ok(TWOPI, TWOPI_str)
diff --git a/lib-python/2.2/test/greyrgb.uue b/lib-python/2.2/test/greyrgb.uue
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/greyrgb.uue
@@ -0,0 +1,1547 @@
+begin 644 greytest.rgb
+M =H! 0 " 0 !   !    "0   )(     ;F\@;F%M90                  
+M                                                            
+M                            !  "      #_                $ %-
+M\        0]<$ %%Z! !2>P                                     
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                          H    +!@  # L   T1   .%@  #QH  ! >
+M   1(P  $BD  !,O   4-   %3H  !8_   710  &$H  !E0   :50  &UD 
+M !Q?   =90  'FH  !]P   @=@  (7D  ")^   CA   )(D  "6/   FE   
+M)YH  "B>   II   *JH  "NP   LM0  +;D  "Z_   OQ   ,,@  #',   R
+MT   ,]4  #39   UWP  -N4  #?J   X\   .?0  #KX   [_   /0(  #X'
+M   _"@  0!   $$5  !"&P  0R$  $0G  !%+0  1C(  $<X  !(/@  24( 
+M $I(  !+3@  3%(  $U8  !.7@  3V0  %!J  !1<   4G4  %-Z  !4@   
+M588  %:,  !7D0  6)8  %F;  !:H   6Z0  %RH  !=K0  7K(  %^X  !@
+MO@  8<0  &+*  !CS0  9-(  &77  !FW   9^(  &CH  !I[@  :O0  &OZ
+M  !L_P  ;@,  &\)  !P#@  <1(  '(8  !S'@  =",  '4I  !V+P  =S, 
+M '@Y  !Y/P  >D0  'M*  !\4   ?58  'Y:  !_8   @&0  (%I  "";P  
+M at W4  (1[  "%?P  AH4  (>*  "(D   B98  (J<  "+H@  C*@  (VN  ".
+ML@  C[8  )"Z  "1OP  DL0  )/*  "4T   E=8  );<  "7X@  F.@  )GM
+M  ":\@  F_@  )S\  "> @  GP@  * .  "A$P  HA<  *,<  "D(0  I28 
+M *8K  "G,   J#8  *D[  "J00  JT<  *Q-  "M4P  KED  *]>  "P9   
+ML6H  +)P  "S=   M'H  +6   "VA@  MXL  +B/  "YDP  NI<  +N=  "\
+MHP  O:D  +ZN  "_M   P+H  ,'   #"Q@  P\P  ,31  #%UP  QMT  ,?C
+M  #(Z   R>T  ,KS  #+]P  S/P  ,X"  #/"   T T  -$2  #2&   TQX 
+M -0D  #5*@  UB\  -<U  #8.@  V3X  -I#  #;2   W$T  -U3  #>60  
+MWU\  .!E  #A:P  XG   .-T  #D>0  Y7T  .:#  #GB0  Z(\  .F4  #J
+MF0  ZY\  .RD  #MJ   [JX  .^T  #PN0  \;\  /+%  #SR@  ]-   /76
+M  #VVP  ]^   /CF  #YZP  ^O   /OU  #\^P  _@$  /\'  $ #  ! 1( 
+M 0(6  $#&P !!"$  04G  $&+  !!S(  0 at X  $)/  !"D(  0M'  $,3  !
+M#5$  0Y6   !!@   04   $&   !!0   00   $$   !!0   08   $&   !
+M!0   08   $%   !!@   04   $&   !!0   00   $&   !!@   04   $&
+M   !!@   0,   $%   !!@   04   $&   !!0   08   $$   !!@   08 
+M  $&   !!0   00   $&   !!0   00   $$   !!    04   $$   !!@  
+M 08   $%   !!@   00   $$   !!    08   $%   ! P   08   $%   !
+M!@   08   $&   !!@   04   $&   !!@   00   $&   !!@   00   $&
+M   !!@   08   $&   !!@   04   $%   !!@   08   $&   !!0   04 
+M  $%   !!0   00   $$   !!0   04   $&   !!@   08   $&   ! P  
+M 04   $%   !!0   08   $&   !!@   08   $&   !!0   00   $&   !
+M!0   00   $&   !!@   04   $&   !!@   00   $&   !!@   04   $&
+M   !!@   08   $$   !!@   00   $%   !!@   08   $&   !!    08 
+M  $%   !!@   08   $&   !!@   08   $&   !!    00   $$   !!0  
+M 04   $&   !!@   08   $&   !!@   08   $%   !!0   08   $$   !
+M!@   08   $&   !!0   00   $%   !!0   04   $%   !!0   08   $%
+M   !!@   08   $&   !!@   08   $%   !!@   08   $&   !!    08 
+M  $&   !!@   04   $$   !!    00   $&   !!@   08   $%   !!@  
+M 08   $&   !!@   08   $%   !!@   08   $&   !!0   04   $&   !
+M!    04   $&   !!@   04   $%   !!@   08   $&   !!@   04   $&
+M   !!0   00   $%   !!0   04   $&   !!@   08   $&   !!@   04 
+M  $$   !!0   00   $&   !!@   08   $%   !!0   08   $%   !!   
+M 08   $&   !!0   08   $&   !!0   08   $&   !!0   04   $&   !
+M!0   04   $%   !!@   08   $&   !!0   08   $$   !!0   08   $&
+M   !!0   08   $&   !!    08   $%   !!0   04   $%   !!OZ*@EA6
+M*3156$5:6TUF<DE8/R4M,C ^16:"=7UP2S]I<FM;6&-B13EO:$]$+59<<G=%
+M1%IR?7IQ:F%C6EIH245?9UA/<GT^65IB:G)N=HIM6C])6&]B:# .-GV&<F)A
+M9()@,B4M%3EZ8FAB8F!"24UC8VM8+2 =&SEU at WIMBDS^$@D5+6EJ6UHM)3(^
+M16)R<FE:8F=J<FM/6%QB8F-;6E9)6E9B:G-K6V-I8UA)/F%J9F)B;G5U?7IH
+M/SQ?8W-S:4E@?'-J8F E&$R(844^:6]B:%0I$E at I#A(M259?:'-Z;TAH8$1$
+M/F)O6%A%6V$_3X"*8"<V:VIT?'U at +3\Y at DQ6 44!:@"(>HII,#9$6%D#.?Y$
+M=W5H8%D_*1@@+3!0<F9K8E at _8W=B:V-B5BTE16%H7D4M,&]W2$Q:6W)\;5MR
+M:UI:8#!%:W=G16-Z*5A666QZ;FZ!8F-8'3!B8EH;#A5-;EI/8UQF=SXM/#(V
+M6C)H at FMK<FQ;8V)K:TE5.Q4 at 8HJ*=85R/!@.(FAN8F [$BOU6%5$:GUZ8TUC
+M:VMC7F)J;VM at 3V!J65E-1$AJ8S]87&A-/CY;8&!H8F)Z>7)U:EA%:5MK at 4\Y
+M8H-V>VE8-CQ;@W V+4QO3#=64!U)1# 5&#M$8'!C<G!83#Y$:%A-;EA8+39+
+M6%IZC'I+/FEJ;GV">$DV.TE@ 3D!1 #^<8)P-EAA8FI?+1LE<()F8V-?22DM
+M.RU-:F)B3#8M3W!B;W)8.3DG&SE at 6%@I'6)[:F- at 36IR6T]R:EII/B=8<GIK
+M6$]G/U9$6G)R;7)R4')H,BU8<E8M*SE5:&!'6FEB>U at V<&!:/QM)<G)Z at W):
+M9EMB<E9A1!(51'J*=75]_G0R#AA):FIW:" ;26AB<75N8D5$8G)J8E]0>G)@
+M17=[8FIH23Y at 9T5%8FE-.3Q-7V)K5B=B@')Z:FE;;$UF at TLV:H-N?7-825IW
+M>W!)24]K224V5D16:6$@&#(\16E-6SDM7VI:=WIZ:FM at -ALE+39F?'I836M:
+M9GV"@FI%/X)%:P$V 5@ \')M>F-;6V)V=RT.)6.(8F-P<F$V-F ^6FI?36X[
+M&RU;6H"":3XV/" R23961#9;<WAI7%]G8UE%<$U%:#XI16)R=VA)1U9,+UAG
+M:EM<:V-R9E at Y18!J5DEH:G!C8$E1;WIB25A))VY at -C)8=VMJ374#8OY)22T5
+M$BUB<FIJ=6U@,B V36)\=R4.&S9B>7)K9DU%3&MJ8FA%9GI,+6]W:G)K6C]9
+M:$U5:G)H8$0Y25M<224E8&MR9EM;<'-Z=TE9>H)N?'9 at 7EEJ<FY0>H* :D5;
+M23E):&\P)RE86%I at 8Q42,&A%<'IU>GIJ7"T@%25-:ER-339A:FIZ?()Z6UA@
+M<P%8 6D R')1:F)88UIM?EP;)59S1$UW?7 V.6A-3$Q627)H-C(V1'J"<E]5
+M53Y452 _8%A)6&-96F]R6DD_:$DY8VE>6F)M>F]),$PM(@-)Z5A)6')R9FE-
+M5G*#6DUB:FI$6E8M3'UZ24UN7EI--BU;?V)K6G)R:V))6#\R+39 at 6FAC9F9J
+M7#8V/V)U=RT@&!56<FUJ:E]8.5IA8F](2'4V&SE,:GMM:$E%8%8V37-J:6!)
+M6&IH/B 5+0-$QTU-;WI_8"T[7H)];FMB6UY::FQ;?8F*<EIC838P:5DB*1M)
+M8$56:R(I1#DV8VYRAX-]=U\V,CYJ>DPY(#9L<FU]=7=@36][ 7H!< "B at 6YB
+M44A at 66-R=S\T.5\M.6]]@&):<&]:14D^>7)@.RU?>@-R_G!F3&!N("5)8EI%
+M/S9%:H!B3#Y)6$E;>GIL<GIZ<E4V8$E$24D_/C8Y8%MB:E!B8H)Z7EEJ=3XY
+M61LI=GI%15Y[>G$_&S9Z8VMB=75R8D]/8VE63&);8V]F<F9914E86FIW7U4[
+M,F-R<F9Q:F-/6"]%;V)B:U0@(#9;>G)Q8:0M24DG16M<8VAB8WMR9S(8.UA$
+M-EA$871L/B4I,'6"8&-H168$:YMN@(9]9F-A/R5?7"4K("TY54UJ26QI+2U:
+M6EX#>HU]:UI)26*#83(8(EQZ V:%:F!-:W(!>@%W /Y[AG=@6#E-6F)S:%A5
+M16EO<GAY at GN"BG!)-!)B9F)0+5IS7E!M>V],67-5("U)8%@K%39@>G-R8#]8
+M36N";D1/<H)Z:UA-25I)+2TV6# V/TUJ8FMH:H)J6EMC8$E5,#QR:TU9/UIZ
+M at FT@2'-F=FIB8FUS8DUC<X-U:W!R646K>GIB44UG:%EJ at G)B6FIJ:VZ >VYK
+M:#LV7EEJ=V-$$B5J>G5N=TD@&RU?:@-:N45(>WMK+1A)<FI8:V-)84L;&"D_
+M>&U:9FDV37-S;&E;=7A]=G)K6UA-7#9$/S0;/V!B16IH+45;6 at -CEFMK=6M:
+M259B7C\M-F!L/TQK8UIB:FH!8@%$ /YG at GUS6#!$8&!:6#E$+6!K:VIR>G=D
+M?7=M3R5823DR+5EB:$UB>G!)37IO/S(R.TLE&"UA:G)W33YM36J":S8V46YU
+M>F],1%Q6,B4R7D0R-C!61%!@36IR:VEB8UMA259J64UB/S9K>FLE)5IK:V(Y
+M/F)R<EM:4'V*;6AJ6DG^<GMJ:V)Z<DQ$=7!F:%!;8F9V?7UU<F<^231C<F)$
+M%2EB=7)0>H [&SQ83%IH8#8G:GI@&PXM8FIK>W=I8#\8%14M<5I,8W!)/FEZ
+M<F]F4UR">'IL;G):6DE,65XE+55-=VIF>FI;:&!%/S\V/V ^)3Y,8VMB8E at R
+M+3EK;VAH at G)Z 7<!60#^:W5V?$T_24UA/S8V.QLV359:7&IR669B36AF6$D[
+M("4V/V!88G=O6DUR<FA$13P\*PX;26]B:DDV:%IC?EHP26U;4WUZ6D169$T\
+M+5A:26 ^/V9H:%E;8FMC14UJ:F-B:$186UA::G)A031%36)6+24O6FMR8D1B
+MA6)%3%I?_EQM:FUK;G=G67%Y<FLY5EQ<8F9Z=7QZ64(M6W):8#8M3&)H*5"#
+M:"TY138W8FH_*6AZ6B48)4U::G5Z:V=9*Q at .(%MP:%AK83]-<7IZ9F!-=75S
+M6WJ#8UMA6&!I/"DV57!<2')K<&MC6#]$/DEC6DD^+6-R:VE-*2TE17!C6X)B
+M=@&" 6D BF)B9GLV-EAJ:%H#2?X_25986%9:<G)O:41@<V-852DK-#9)6%EC
+M:F=R:VIK8%4^-C .#BUH:F)%3W)J<G\^&"5?84QR<EY)-CY663P^3&!R:"=@
+M>G)Z64QB:EX_5F]J:V=>24EB<G5Z=GIR8F!8650I&R4Y<F]G;X)H/C98=V)'
+M7U!:<F9P:F9R>FKS5$QB:V)<9EN"@G!:26]O3&A>259)6CY,>GII7UE%-CEC
+M23YB>F<[.T1965I636)J:$DE& D;2UA,6ED^/RU%:UMR>G9U:DQNBG=C6TQH
+M:U4[+5AN<UI:8W)N;G=C34UH:VMJ5B4Y:V-K6T1).RU?6DQ':P%] 6L _E]%
+M8( V+3EJ;4QA24E>8&-J:&!6<(!Z=T]8:WIJ8#8[,BU)9U],3&)[<6)G:5 at M
+M.SD@&!(_;VU)37)W at H)4(!@E25MF:F)9-CLP6$4Y15MV<B(P:G)O-BE/>GIJ
+M6$1B:%MC8%9<=7U]>W)S:F-I36A5.Q@;6FIU at H)J/BU>??YR658^+6I:7&Y;
+M3W%W:$1C=6YL63]R<F]B9')G.5IB;&]H3$1C at FYS>VA64TMC9UI?9F(P26%-
+M8UI:8&IR>FT\*Q at I5DU-25M%/DE83%9J>GIF/AU:DH)P84E:8DL^66EF?6XY
+M8')K4'MR6#9::X!]8S8P8VMF84Q(:&!?3T6"6EP!:@%P /YH/TU[-"4=4SXP
+M:#8V6FIJ8EM)+UN':G)R34V":F)@7#P at +6AW8DE:=WI;8&!8-D0_.S(I+6!J
+M6%1::H)]=U4T*S98:VIC<$DP26%))S]-<GH_)5IO7R(2/WI_<E8 at +41:8EIB
+M6G5]?7IJ7%IQ=DU,.3LG&#M9<H-\8BTE8FVN9FIB2#9:65EJ64E:<EPY3')P
+M<FH^6F)@6F)J21MA3%%K:F at Y-VUK9GIW;V!?6P-KSV!%&SM?/T5B8FIR?8EZ
+M8U @+6A8+2U at 24E%26!86FMK1"TE5I**@FA823\M-G!P6GIZ,#!0:D5:7&8E
+M+3EZBG)9.5!B65M--FER<$DP8F\!: %O /Y)25EI6%0[.QL;6#()3&IJ:V$V
+M&TU_:FU[8F)K33]J:T08%4AZ<%E:<GMJ;V-)-DQD/BTR-DA:35]83VIJ8F!%
+M.RTY66MB<F(M/V9 at -C9:>GMB,$E:/A at 2*6B":T4E"2U-641-8W6&?6YB.2E;
+M;EI8/BD8#A@@1'IZ5B([<G+^:7)J63]>7V)B3%QC:T\^27)Z<F at V.5M;7&IJ
+M6A5?6C]G8F- at .6!H66YU;FAC34U<<F@^&RU(/CY/8VYR<HEM8EDE&U9M("UM
+M138_-CY)6EAF*2!58GJ%DGI)-B 8)7)W:VIN8$M8:F-O:F@[*259=7IR6%9B
+M8EM%+4QO;68Y at FAQ 7<!20#S6$5 at 1&IW8&%%+3])-%]K:VA%1#!H:V-Z>FIK
+M:U]):&L[&!@^<FL_-DQR>W)O6C8R;V%%.SY66E9$6F)J;V!:6E9)238_<G5W
+M.39 at 63\_:'5R>F%0638K,#9(:'<M.Q at I8&A;/T1,;7IJ3242-F=-35DM*0,)
+M_AL^64DE3()R8G)H8%MB8%I;8G)Z<D0E27!R=G)>3UI;9WML:T1-9TQJ8UIJ
+M8W!H36J":F)W:4E/=V]:2U4[(C]836ER=81U<EM$-$EI("UH6DD[*PY-8E9)
+M$B!-8V)N at HIG*0X.%5""?6I9:&!-:F9U>FM at 255@65EZ:FAC<(I at -C9)7V)J
+M8FYT 7H!< #^6#Y)16=O3%EP83])-F)Z=V at P1$E)-C!Q at FIK:V-K@'=)("56
+M8EH_+2=CBFIJ6D0_36)/1#9-:&]%8&EJ<FA;:')R8S\M8G9Z6#E?.2U%8G)D
+M>H*"8CLR25A@:FIH, at D@6&IJ:4PY37)R9U4 at +6!@9UI852D.#B R,#PM3'5J
+M_D<Y2$5@<&I:35YU=7)-+3]K<G5J:V]V9UQR;UM816!B<FM964Q at 6EINBG)9
+M<GI-8'IZ8%AQ2!(E5C!):FEUAH)J64M$8#L\:6IB8#(.&UEB138_858V-EAN
+M8DP_.QT_=8)W6F-6-V)U;FYF;U]-8VQ/56IO6F-B-D15+2=@>H)Z@@%U 7  
+M_E8Y6556:$DP8G=I9S96<G)C25AC624 at 3'5R:&)J;GIM<$0E.3XY/T4M.7)>
+M8TD^245-1#DV/UEB9EA;<'=B35Z"BGIK/DE::FM$8#8;-DUB9VUXA6M$0C])
+M8&=:;QL)"45536]O6$5K<G)W2S9F7'IL8$E402 K,ALI*3]O:^I)+24V36MJ
+M8%IP?7!F8U168G)R8EIJ=W%;<')J14EC9G)R9TE$14U:9H)W6F-B1&)Z<EE8
+M<F E+3L_24U0:WU]7%E?6FAD26!;8F]4*39O>F)8/C8_4"T_<F<[+3M426IU
+M<F- at 14E:=75Z W*31%AP:2(Y8TE9:UI%1!L2,&)PB@%M 78 _F ^8%XG.50E
+M,$QF>E9::VM:36EL9BT;-G!R7%IF>G9K:VY+1"4 at -EA)36=6:4U6:&!/6F!)
+M6$U$:$E/8VI at +3EK=H6"84E-:F]'-TDI&S9)1W-R?7)98$D^1$E89S05#BU)
+M36MW;UIG66:"8S!@.69Z:D4P840M/#(V.S99<KE@/R V3V)B2$ARB'5F8F)@
+M6F)J6UA?;VI<<FMJ6DEK=W)B8$D_/DE:8FMO8FA-1%IR:456:W)H64D$6,-;
+M:VYB66IO;7IC85E-:G]?3VZ*>F]8(AM)6$EU<DDE-F!'8FM/9TDP:6MF<GI]
+M?69A6W)[)4%)'3EM:UX_,!@8+5EZ 6X!=0#^8E]K:"4E7%4G+41B;UQW<VM0
+M:6MK6" M8VPY33]<<&-;8%I8.14M13!C6"M at 8F)R@%!B;V-L83Y%8&E:16 ^
+M+38Y;H)B3%EK=V =2U at E*3\Y6G-U=5IB6D1)/U9 at 540@-DU98G)R9G)),&I@
+M'4 at V5GIZ8"U%841)6EI835EF_ED\,C]%6&M at 8G)V;G)N:W)K239 at 8V!88&%;
+M6E]C<G]Q:5 at V-CY;:V-C;GIJ2458:FM;:G)R:$1$:&MK8U at _66%@8G!C9FYR
+M85A98VM:6V9K8UMH+1 at B239F>G)/:&I,1%I$6E at M6&IJ9&UUBEQC9G*"14A8
+M*2)8:F):8$DI*8(^:P%F 6H _EIH<& M(#]N23\^1&IR>FMR6D]9:F0I)5I:
+M+5A%1%E?/S!(24DR.S8;.3X;/F)-<G])36]R=V$M)5A_;SE:;6$V)4]W:$18
+M:W)@)419,BD^24]K9F968F=)35A)6EA9/UE at 6$Q/:F9S8#!,8"5)/T50;G)8
+M16%M/F)K8U\Y3?YP6#8_.T5 at 9G)U=6-K8UYF<%026'!A15EA65A@:')]?8EA
+M.RDM16MJ8W)Z:T586E!P8%IP<F at _.6J&A7))/UYI3TAR<GI_>G)P8'!C23Y@
+M8DU/=UDK*38[6W.$=WIZ:$187#YG8E]B:F)3>H9:/V)Z=58P/BD8275B7&9@
+M/TN"/D4!<@%B ,PV6%@;/C\_8%I823Y8:GIR<6(_.5MI/C996"U89EA-6$DI
+M'39)6$DV+40[)2T_1&]V2S9:>HI>&!@P<H%+.5MR8UAC>G\G87)F:#8[ S^+
+M,#];6U8O1&!Q:%8#2?Y-36!P8VA96F]M;6M,5F(_26 R'4]W7SY-<D1>;G%;
+M+3ES<4LV-CYA:WQN<FAL<&EK=UX8.7]P/TEA:&!J;6I[=8IW5!LK+3E8<7J#
+M8D5/13]::$E-8VE9.6"#BFL_+5EK8$UB7&U\?7J#>&I9.3YA8TU-:F!)*24_
+M3%Z#>GJC at G)'5DDP8FUB65YK7&9U<U9%:WUJ150E#C9R8B(^1%IN at V$!- $@
+M /XR6&9;15A8259:6$DY6&IZ=5XV,$E666=K83M)5DU-6V!4+2U)6EA67$DV
+M-E4^6'=K63XP:HI)#@D517IP1#E::W=Z=7\P5G=Q:E8M)55H24EH338V3$1B
+M>FE)3T1%.5B$>G);:H*"9G)J65MC6V\[&SYB5DE)6CEQ<VQA1$O^=X)N/!LV
+M8&Z"=FYL9GJ =H!C&!M8:38P6H!O6FA:;V9]@W \+2T;)6."B%I'6V%%8&M)
+M,$5I83];>GYW7BU)8FAA6TQ?<FIJBHIO6DUF>G=-6G-H3SD5+5L_8F-;:W)B
+M8$DV6FIJ6EAN=5I9:&))6X)R:FA/%1):32DV.3]9 at GIR 8 !50#^2T]K<%AG
+M63X_6&)?23]-<G-K/C8_241;;FI8:&A)/EIK;TM8:$PY8%\P*2(V.7* 6T]5
+M*3]U8"<8("U,=VD_/UMZ>W)Z8#EJBG59( D@,#968DD=/F]R=6A-:W)N3S9-
+M>GYH+TQZ at FI:=W),9FUI7C]@34UN238Y>GIF6T]I_G-]>V$8%3=R>G-C34]]
+MAG9]<#PI.5]0)2]Z@&-C8FA@<G9V:%XV&!@_>H):/U!T6W=[8"TP85LV16MN
+M>G at _/TEI6DU).6)837:&;V-$4')U8VMR:T1L241U63Y$/CEC=WIR6&AJ8V9S
+M>GIB35A-.6.'9EQ98#8 at .4U$2T5)3()C9@&& 7( _FE%:WI;:F,^.6-B7U]8
+M36IU<6!)26!@6FMK:'-O54EA8F-)8')@+6!G-C(G$AM:@FA%/S]9;VM84%!)
+M6EQ?/C94:FYN>W)-1'UV3R 8#A at T8&]),#E,<F at Y25IR8S8V6$A:82U$>GUN
+M46IR7&)M6T]:<%DY9E\M&VMK8VE%6^=R:7IJ*QTM:FYK22<P=XI]?7MI7C9%
+M6"TG:FM@<%!$67AC5CE9;C0@/VMR.38Y:&B"BFTI-F9:/R5/6W)X238V84DY
+M.QTV-C!C@&UL1$UF:F-[>V-A<&YW?7(^+2DM6')Z<FIO<F-B W*6:V!)-BDY
+M>F)B6EA$.TEB.3]9<$0^3 *# /YR.4UZ;VIG.2M;8%]B<FIJ=75O5E9O:EM;
+M6DQF;TQ:8V- at 7UIN8CE?9T55+14;17EO-BU+8'=K25A at 6V-;64D_26-D:X%J
+M8D5R at GAG.Q@@1'=R8&EM3V9:&QLP<F(=+5]%-D])16^"=6A1:G)R6TLV/U]F
+M5DQ8-CE-8$Q8+3#G6V-W=BTT15MJ:T08*6N"=7U[9F at V/FE)26E-1'5I/D=K
+M;C0;/G]F/TUJ:#\V+3Y(>H=R/TMI8D4 at +45CA'!5-D4_2U4R-C!:<GV#9F-A
+M7T11>GUF7&-R at GIP2S8\,DEK:SDM:XAO30-;ED]9854V+4UZ7DE)1#Q$5CXM
+M/GQ?)24!<@&# /YK24UZ:G!,/CY at 85!:?V9J:W)O36)W:VEH9UH_66%:66)J
+M:V]P8UAB65A-7UY$3')W238V.6MH24E:8%]<8F]B1$U-7GIB:EQF=8)Z6QL2
+M+6]$1&J(>F9?*0X;66 ;+6A%7V@^&T2"B')B2&V*6EQ)0D1B:EI).SY/8%I,
+M+27^16-J:C<_9F-C:TL at +6MZ=G!W;%@I+V]P6FDV(F)Z;F];6"4K:(5W86!P
+M:F=5*S8_:G)<14UO:ELR&Q at Y?(1W/S _6& _+2D^669Z,CEB<4E%=GUU8&AZ
+M?()P1#Q926%R;%D^9WUZ;%I-23\_:68\&R)B;CM58&%I6DD\26M@@B4. 4P!
+M=0#*3#E:>DUW8#Y(6G!B37%M=VIJ8CE%<GMQ<W=K/C]66$U;:U%I6UM>:T]6
+M3&-O;VAF<F)51#9;:5EH8DT_15IZ=TDV)TUW<FI036(#<OY?.S]<)2UC?7UN
+M7U5!-F9B-C9H,%!R/A(E:()F3#Y:BFH^1&A08V];-C8_8&MS:4E)65YN>UI@
+M:VMQ8#8E+4]K>V-(:ED8%4R"<UDE&U9V<H)_51(E:W5S6TUL<FMB83YO;UY/
+M655;>&$R&!4[8GB*:3]/:6-)-#Q866-K-BFS/F9:1&IZ;F)K<GIZ8S\Y8%II
+M>&!/;G)U=GUR8%A$-G-X22DK24TP:6IJ>G(Y66-S:2T. 38!< #^)1U@>FER
+M9EI-6&]W26)RBH%Z<$4V6G)B:G-K34M81#Y$9SD_13]-;$U61$E;=WMH7&)@
+M63Y::5I_;T0M-D1J>FE$(#EJ at GMF6F-,6G%W:F-I)39B>G)J3&)J4')[8&!V
+M.5IH6#0K27)B241B at GI/26A68')8)25$<HAZ8EAI_FE13V)::FEC=W ^)398
+M37)Q3&)%'1 at V<FI)*39:6U%RBF88&#EB:6)C:VIC36DV7&I-8V8Y-EYI,A4E
+M6%!Z?7)C;'IJ<BT^:6]B:&%$.6]D34]O8D1 at 8EQF63PM5C]:=V%B<G9N<G5R
+M>G=)&V]W8#Y at 838E5F-;>H))/V)[=X)>( $; 3\ UQ at 5-EJ <F-$+V1O6B=:
+M<GR"@G-M35AB34AJ=T5)6$U at 8&))1#\V16AI:%@P+5ER=F!:6E at V16=9AWI8
+M+3]@8F9W839)8'5]<EMC23]9<F1B=S8V8GIR6 at -C_F)J=7IQ:2U)24UA25MF
+M3456:X6"9E]9+S9A52LE,FZ%=S\_36YQ22]%:EI;>WIO3V%A16-W7F at V(" _
+M<6M6-$EI-C!9>G=%(!LV3UIR<EY:3V]85FA87G-97EIO/Q at V:%IO4%QW;G=K
+M=3XV36],-UI-.6MJ3$1H<"U-33E-<*98+3\R,&)F<G5V8VEC6EQZ5!)9?5M%
+M6W%926MH2')S22 Y at HUZ7@$R 2  _BDK,CY]B&]$'3=@/" ^8G6"@&-:7')K
+M5CLY<&-86EAK<5 at W66 [-F)R>G=)+3E9<FM$138E/V!%>GUH26)Z=VMK=V)%
+M26MR<F)B8TDV8FM99F]-:W5<1&!18WMP3WJ";3\V-CE:6G=K1"U)8VEZ=7)B
+M," V.3Y5/W)]<DE86_Y<=7=8.6 P/G)M;6IZ:SE:=V--/RDI/F-:5"TY9D0I
+M16IJ=#0@)2TV:WM;1&MR<DQB<&%K<7!B<F<M/W=R8S8T:W-L<G)@6%MI225/
+M/TEC8D4E16 V5DD^.7-Q1"TV/F%K?&YR:&QP:6MW7A at Y?W _27%I3&IM16I:
+M-A(G8H*"BG<!20$R /Y97D5/:()];U9)/RDI8%IJ?()J6%IR:F)$,%IC8&EK
+M:W)@/UAH/B V;WIZ;UA836]P6#88%24_,'!R8D5<?7):6GJ 6D5C8FER:G)H
+M1%9J9F)H8EIN:4Q)+TU[<F)]DGH_.RT^36)R:#X;+4DR6X)R<E@@)R M7DES
+M?690<GO18EQK;TU8)2U%15!M@&\Y37)L6UD_+39$/S\M/FM8/TEP8FM>,B K
+M-FJ":4EK>G598'-L8VMI35!H3UES@&M$+4UR:VIR;W)Z>V)%6TDY26AO!#:K
+M5DUB2W>";CP;-F!N at G9N;&9Z@': 8Q@;6&DV,%J ;UIH35I9;C0;)V:"?0%H
+M 5D _H!B65Y88VY[>F))(!AJ<69N>F)K<%QJ:FE5.4U)6FIJ<F]925I$*1M,
+M<G5R8%A)<'IW1!@8&R ;;%])/UJ"<#0_<H)R6F)C6DA<<GI at 5F)K6TUH6$5-
+M/CPB-F]07GV, at E8^+41;;W)O12TR.R4V>FUK:"D5&" [6&AR1$%UBOZ 6S]:
+M8G!%-CPT26)F<EAL;FMA8&%+84E)5" =16)),')_:VE5*RU)8WIZ>'M[<$E-
+M<VMI44U9,#!8;W9[<FA5-T]A1&)D>H6#<EIC838P:5DB*39)341H:7-]>V$8
+M%3=R>G-C34]]AG9]<#PI.5]0)2]Z@&-C6V)A?8!>-D6"=W4!; %K /Z"33E@
+M15A,<'MK5" 28WIK:W=B:FE,6')Z:#XM)24^8G)W:F%//D0[25IJ<FH^,&*"
+M>F at _.24@(&AG1#]:>G(^.6-N<F!B:V V/UEK:$U9>F=%8EH^+1L@%39-+2=J
+M at GQ--BT[.6MK<G)624DI)7IR9FM5%14@,DE at 6C9A@X#^DG<P,#]K639%/UIO
+M3%EB>FY0:G%;8GIB8V0T'39O339O@&YC:4E$/U!C<G6"<EA)36M[:U at _53 E
+M6&YV<FIK6EAA12=84&Z AGUF8V$_)5]<)1LM=VA-6UMR:7IJ*QTM:FYK22<P
+M=XI]?7MI7C9%6"TG:FM@<&=0:WI]@&E/@EIB 7(!>@#^@TDG639).6EP8E4@
+M)5IZ>F)J:F)8+39B9'Y;-"D8($5C<F];2RU):6A@:7IZ1"=:@H)Z:$DE+45H
+M650V26AK35EB<G)H:69J6$E$14TY7W)Z.5E;8#(.#A at T22T;37J"62T[.QLY
+M36* :EE at +1MR<FMO:385&"U)6$DB8HAN_G2#8SLE8#\V56!I<F8_1'=K1%QI
+M+3!W?%M8-BM+=U at Y<WI>6EIH<4E%:7)N?7-%63E(?7-P83X@&TEZ at W);7T5;
+M>& Y6&);=7A]=G)K6UA-7#8;)5IR9EHP6V-W=BTT15MJ:T08*6N"=7U[9F at V
+M/FE)26E-1'6(:7=K<G)I/((;)P%F 8, PGM))4D\+1M-8#DV("58<H)O:F)6
+M.38V/C=R8U1$," M1&-H5BT8-F]P86-T at E@I1'*)@GMR+3!F<%I-54E)84]@
+M6 at -R_FE;:G)B7SXV+4]N at F)R:F->,A at R86A5*45K?&LP.3<I)2=->FMM;S\@
+M17!R:W)<*2 T56$V$CEJ>GV&<E at R8#\\7V-S<VE)8'QS:F)@)1A,B&%%(!LY
+MAV P:G)A6D5;@4\Y:W)SAX-B7S=)<&9P82D.#BUB>GIC8$5C at FM/8[MR9E-<
+M at GAZ;&YR6EI)54E88F]I)45C:FHW/V9C8VM+("UK>G9P=VQ8*2]O<%II-B)B
+MBGIK8&EK8#8.#@$P 6( _C9Z@&!9+2<V840I("E8<GIZ<D\_.5AF7D]U8E]8
+M1#(R,%9B8C\@+6AO659J>FHY+4UZ?7IZ/RU:>FM:8E at _241-/V)K<FAH8W)D
+M6V0_/$EF?$=K=69R21LR:H)O/C9B9(!F8$0_52)%<EQF at F,[+4EF<FMH-B5>
+M?6-9*SYCBOZ-BGML36)816E;:X%/.6*#=GMI6#8\6X-P-A at 8-H)K/V%H6DQ?
+M8VM8/VMJ9HJ/<FD_8&%(<6$I%1 at M36IK:V at Y37MP6$UJ9F!-=75S6WJ#8UMA
+M6VMS6F-I25E>;GM:8&MK<6 V)2U/:WMC2&I9&!5, at G-9)1M6=79G,B4@)3Z"
+M=U4"%0#^26I[@&YR:$5 at 1#LR+59B<G* 6U]H86QW:6=%:G=86$E$3V)J:%X_
+M8&E96FIZ=6A$/V)U:F9A15EV=FMC8V%-1#8M3V)N15MR@&)B3U9)6&MJ1$U-
+M7&HV%25,9&(V)6."@GU];6I5%39R9U!Z<EH_5F!J;U82%5J':'!926*#_H6&
+M?8%A6VE;;$UF at TLV:H-N?7-825IW>W!)*RE):VM;84U at 16)J:DDT8VA/>HUV
+M:#E,1#!I8T0@(#]I<TU(<V9%8W=),$Q;<GIV=6I,;HIW8UMB:W)@6F)I:5%/
+M8EIJ:6-W<#XE-EA-<G%,8D4=&#9R:DDI-EI<6V]).2<8+8)O;0$. 2D _DE9
+M9'=09H!B6$0R,D1)36]K:D10;V)9;W)@+6AR8U8_1%E:6598/T4_16!K<G=B
+M14]O<W!:64E-<G=B7FIO:$TV)4EO83DP:'MR7%]H6&M[>G)P36)J524M6%]$
+M&QM9>GV%?&IK/Q at I<'IF<W=B3VA?7&<Y& DG<FIN:6!-9H%Z WW\<F%;6W!S
+M>G=)67J";GQV8%Y9:G)N4%@\26):6E at Y6&%B9G!+'4UH.6:1 at F M+55)86MF
+M-B V8W<V)5IO6UMP6# ^5FIZ>F8^'5J2 at G!A5G!I245%36YQ22]%:EI;>WIO
+M3V%A16-W7F at V(" _<6M6-$EI3S9K,$1:)39W at P$M 2< L4E-6FE$2'IR:TDM
+M+4E>1&-W34E-6F)96W)P+5EW:U\V+4E?/SY)6%@R(E]C:WI;-DD#<LMH23XV
+M:W)G6F)R<F M&#=H24@;/FIB/V)J67*%?7:!:V)B:4E89W!)&!@V:69]@FI/
+M&PX;8()U=W)O6EI/8E at M.RD;16-B6U at _47,#?;!Z:D],;WI_8"T[7H)];FMB
+M6UY::FQ;8%I/6$E-8#X^6W)L9E45+5PY6H.*<"4;2W #=\EC/#=C>EDT/DU;
+M:6]A23966FMK1"TE5I**@FA)7&-)/DE;7'5W6#E@,#YR;6UJ>FLY6G=C33\I
+M*3YC6E0M.69A1&]%-CL;&TR" 6@!/@#^:$UC<$E%:W6'9R4I5%A%6G1$2&A;
+M7F!%7GU)37]R:EE$5DU).SY)8#\E86QZ>F(V/UQ[<FI8+1MC<G)K<GIU<$0M
+M/UP[5#8M23DM36);9(J$5'5R8FMK6F=W;TDE("U8669U>EDI#@X^@GUZ<E!,
+M.3!H8#(_5CXV6EDM-EA at EDU>=7V#=5M$871L/B4I,'6"8&-I168$:YEQ<FLY
+M/UAN/BT_6FM(530T/S9%9G!W)1M+ W+*:6%6.6.#@7!?/DUU>FI;*4E:6&8I
+M(%EJ?862>FEG9F)%6GMB7&MO35 at E+45%4&V ;SE-<FQ;63\M-D0_/RT^:W=8
+M9&=8.R ;+6@!=P%< +E:,#EH/CEG>H-W24E at 7T5-:%E9:&AC8$E/<FI,:VMC
+M8VAG7TT^/E9@/S!I<')J6$D^17)U<E at E&%@#:O=Z>G:#<E]?24E:;5\^+2U%
+M6D=B?8E:4&IR<FI,67)K6#8I-DE9:69J:3P8&"5B<G)K6E at M)5A?1#]F7C9$
+M+0X at 15@M,%MK?8)[8TEA2QL8*3]X;5IF;2U)<W-L:6MN;#8V27!8/S9%6D58
+M9FE$-D5B8TTE&S]S7 at -;R& V-F:$>W=A6W5[8UHM36)621(837)F=9**<FMN
+M>V%%BH!;/UIB<$4V/#1)8F9R6&QN:V%@84MA24E4(!U%>&)B7UA)-BDI.0%H
+M 5H W%8V+4DM+4ER at WI086%B8%E at 8&-R8FA at +2U;?W)H8$]88VIH6DE)6&]-
+M+45B<FA-24E-8VIH5"T.259B:W)Z>X*":V!)25IJ>E8V/TE%35IR>F ^5FIT
+M<DU8:FM: TG^7$];6EQH6#8R/CYB:F!)240^6%8^-E966%4T#ALM6$$E+3!K
+MBI)[86 _&!45+7%:3&9W23YI>G)O<EQC.RTV8VEN86-:/SE::ED_:7IW-B4I
+M/U$Y)S9C<$D@,')B9F]C;GI[=SYB;U8I%2!$<EE:>GUR:FZ%=S^ DG<P-DE@
+MH5 at V13]:;TQ98GIN4&IQ6V)Z8F-D-!TV=V]:34E%1#\V+0$_ 5@ _B='4$16
+M34U::GUZ:CE88FI:.6IU;E at I("UGBG9L:3XE/F]B5FAK<FI at 6%MM<FM;5EAA
+M=VM@/S8_8&!W>G5R<FE at 36!:6%IJ<FIO8$]-6VMR<EA-53]@<FYS6TD_8')O
+M62TI.5IU<FE;8VQK6#\V/TU;<%DK)6EA66A+-"M+:?YI/!@./X*- at G)G62L8
+M#B!;<&A8:V$_37%Z>H)[6T0R-EAJ;W!R<&%$8W)83W*&@#\V27%A-A@;8&Q$
+M(#)I8#]:3411;H,Y8G)C,B Y37IF86EN9T=RBG)5;G2#8S\Y6$D\56!I:V$_
+M36MK3%QI+3!H<&!8-BM):VD^-C(M/FJ";S8"/P#^6%A)-D1-36ER=7IK-CY8
+M45@^:GIF330\-T5Z>FYC,AL at 8UM%8FIJ;FQC;VUN;%E);W)J;EI)/SY)6G*"
+M at G5K<VM at 6F!B8&AM8W)R6T5C>WIR63E>56!S<G);)25@=G)C1" E26MZ=U!C
+M?75I1"D;-FAP:3\E16!8:V V+45R_H!5#@X_=7UU<6AF/RL8*4EF;UDY15@^
+M6F9R=HIP7C\V26)J<G-Z=UAJ:UIH8W6#3UE?>W1$*2U;:#\@,EIW6UM>-C!(
+M9EA::FQ/-D1)<W-B6FP^&T1]:EAZ?89R6#]82SQ?86EI63]8:6-B8F E+4UK
+M84L at -DUK:#\E*2DM3()C30%/ 6$ _F-C1#8V/TMH<F9H@%4M/C9$:&9U8B<E
+M86E86G5Z:#8@)SM8;G!J3UIB:WIN:FD^-G)R64<_6#LE*39:>GUU:FYK8EI@
+M8VIK:F)1>G0;27)R:G5%15IP<W9W8#0 at 6FY;3U at E&S]A6ULG1'IV<FD_&"EA
+M:%YG1#\_+8-L224P:_Y[/ X.5G5[;FQH<69$*2M88V-P)1U$24EB:%!N;&%)
+M/UE:8GAN>H=P;&Q/:&QW?EMA.5MW:4MA<'=9*39%<G)C:5Y526!P;UI-35IJ
+M6FIJ139//SM)8V=;<G)K3$]6:&YR;UA5-B ;/UA:8FA6/CY)25MJ<&M;6V R
+M)3M)8VB"9D\!5@%I +=C8EH^+2([<%YC6W9C,#8@&UAZ:SD5)6EM:D]/@&Y8
+M.S(M,&^">F)-24AZ>V!;)2!9:UA)/C\V R"(+5QR;FIJ;FH#8OYF>FUK8W)W
+M+4M:3U%V<&%@3VIF<F->/V%K14]I1#]B<G)F-CM0<G. ;#(E8%I8:&EI6#^#
+M:TD;/GAZ21 at 81&IZ:UA%8GA:.S]88V%K7E5+8&!(;U at _36$\*6EF7EIF at WUN
+M8W=J8EEK at F-F,C96:6F!<G)I25A+8H!L8V)I8W&[:VYS22=6>FI66T0E-F!I
+M:5]B8G)R;UE-6W)F9F],8#P8#BE)6&)O6V!)/SY-<G9R8D]K6B V36QR:F@!
+M:P%R /YK3&)M$A(V5DAA6V9W/BTI&"UN:24.)6AN7&MK>G)B22TM1%E]@G)?
+M6%9J>VM6.T19:%I-6%E)-CP\,CYB;UE<>FIK:V!-:DQ9>W=Z36!A345:;W)W
+M86E:6F)B;&MH6%EP84]B>G6 <FE%8EYN:F8_6F-I:VYS<&"":T0828/^BG%8
+M14E?>VQ$&S!S<%E+,%MW:VEQ8$UF24UH/S9$)2U965 at _6W)K:&%J>G P.7-W
+M<$LM.5A;9W)C3UMI/V)Z<EH^3%YR35""9R55=W!%65@\25AI<F)08FIR;UA-
+M:G)R:G!6:FT_'2!)6VYZ<F]I2418=W)R:UIC:$M51%MC at D50 7H!=0"T9T1J
+M at QL21$U$8FM<:V-85#0M8%\M(#9L<F)O=GIM8DDK(E8_8F)C65]U=G5J5CY)
+M/FEG301 at _FAI=5]/838R=V)/3%A%6#8^;G)R9FIW:DTV,&*"@G-915A><G):
+M24UR;%EC<F-R;75R=V-/6F9K<%M>:VMJ7F9_6T0I)V*(@X* 3TAR<#(2'5EJ
+M8UY$/F)B=V]625I8.4E at 83(@+5E8/S]88#9%9UIR<#PE1X-R<$LV6'!C8<AA
+M25!F/UIB5CDI*4];35MZ=S8G8&,P2U]96C9/<F=%/T=B;VA::W)Z<FMJ;W)@
+M/RU$6'%Z;UYK34EB6VYK:EIB4&)W8F%)&QT!: %F ,!B3&Z1)1A56UIH<F-1
+M8F):6$M$/S8Y3W)U<G)J;6I)+2L826!8.38P6G)]@VA%,"TM8W=I65M@:G*!
+M>VM/6#(M W+384U-6#X_:&-B;6MU>F(R%2]WB'I/.5I,;VMA13YI8UAH>FM:
+M2&IZ at W%%66MM<D]/6F]C3U%J6#9$*2=-<HJ*;%%J:RTG)38P25I$)24M;G)C
+M86,#6>5H=S(@-&!H8$E8/Q at E2UEP:U at E-G-S>W _5G)\:4U:344_:6 ^)14I
+M25@^1&IZ<D1).398:%I8/TEJ8TE$.41G:F-G:FMR<FMK<F)6/EA@:G)G35I@
+M36%)6G)G3VE)479]<F$I&P%9 6H B%HP6HI8,%AC!&O^85A035AH*1LI1&!J
+M;H)U<F-P1!LR)3YM6#0I*3Y99GI?/D0I(#YC>E!96UIN at G5B35@_27IZ at W!-
+M85M%/V!8.6)F>XAC)0D26H-W8#D^.V):84U/3T4^6GML8V-H>HIL,$AZ=7)%
+M/TEC8F-C:6$M1#8I-DUUBGI/4'-87EY5]"DM5#L8&RU9<FMZ at 6$_.6)0.RDV
+M26IR8V!$+3Q5:'=R7B4W:W)]<F%C6V9[8EL_+3!O;4DI&"5$22T;/FZ"?U0=
+M/$EB7&$\-C]:6F!H<69N at G!)17> <D]%35AB:W=W:EHP+V!;6C8Y6EHY<4]-
+M8GIU<F$V 6$!<@#^:#];;G)A36)R:E!C<G);24]?.1(2)4EC<HJ">FMH7CLV
+M86!H230T/%A98VM)+39;(!(V:%I86EMN>W);35 at V6&YUB&(T8W!824\^)3 _
+M@)%P*0X827)F:V$^/VA98VQA2T0V-V=A8&MZ=8-C-D5[@G=@/SE)87)Z6FDV
+M/SXM_C9-8G:"6TUL<%MA:5Q)83\R+39)6F-Z at G!826U-+3M$15IK<W)9/UA8
+M8W!S8S957GJ":UMQ6T1RB'IA/S9I:4DR*2T_82L@)6*%?6 at M-EMJ<DLW/$M9
+M1$1K<V)>>W=A6'*#=6$^/EA:;G=R:F I$C]/6#X_240G8VE86%!F>H)W30%:
+M 7, _FM;8VIZ:U9B>FM:8W)V>&!-3%48#@XI27J2A'5R:EA)28B#<DTR+3YI
+M;V)H5"D26"D.$BU)5E]H<WIR:&)B-CE%9H-@,&)R:VQH23()&VJ)>DDT+5AK
+M7G)S/DEP8F-K83]97CY915AB=UQZ<458<GMZ<T0M1%ER?75F8VA%19]@:V)>
+M11U,:G)P1!LE8GIB45A8/AU8844_,&-0.2U) V"K6V)R<FYK;&E5/"U-=7US
+M8VQS8UMB:6QV=6UC6CXV6&A at 35]J6ELY27R";0-RL'M$+55 at 7C\^8WIP6W)R
+M8V-K>W)B/UAA36IP<E!8,A at E26-H9U9$.UMF:F-$4')U8P%K 7( _F-06EY\
+M=T=B>FM88G)U>F]-8%@;"0X8-FN*BFU>338V1(J%@F]4+2U,;TPW5E =240P
+M%1@[1&!P8W)R;VMK6"T;/WAP.6)U9')R3#()&%F"=UM>6$5),&!O.5AA:6EB
+M6#!::#]A24U97$UR>%M98VQZ:D0I-C9C?7%;3V V/Z!Z;FAC624M25Z(;CLR
+M6W5R:V-O-BU)6$54/E at V(!M%6P-8PDQ-6V-C:6-926!Q:WIR7G)K6TE/86=R
+M=69A8#])=X-J9G!R6#8E/WN";G5R:F!>)4)@6$D_3'5N6F- at 1&MC<FIO8 -C
+MF'IS>FA at 230I6%IR;V!-:&EC:FQ$369J8P)[ /YF33M-=GU$35I,-&!J;7-U
+M4&IP,A4.'3QO at HUZ12TI+4EZ>()U<EY)3VM))3961%9I82 8,CQ%:4U;<F)B
+M7F @&"5-8UAN:D]F at ELV&"!@>G=$.7!I-A(V8#9/15AI340;/UXG/T1,9T1)
+M;')C22<_<FLP+40V/W=R6EMF+4G^=U1W>F8M("5:>WMH25IR:7> =V!%3UI)
+M8FAF+0X8/TU%/DU)-C]88%I:6$E@>F)K<G)N8DE%241)8V)-8V-88H*1<FR!
+M?&$I("UJ>G=Z6SXP8"DK/SX_-CEJ<TQ8:$]@86EI8F)H.4V">FIB8E8^1$DY
+M:6]$16)K6EQF8V%?@D11 7H!?0#^=7)816IZ:$E)+1@^54U[@%!,<D4P%2 R
+M3'IV>F$R(!LV:W9V:F9R>H* :D5;23E):&\P)RE86%I at 8VUJ6%M;1&YF)25B
+MBG(_:H5Z<#8M67I[8"4P<&T;-FA at 839/<F)>*3YQ52DM27!;64]B:FD\,DQB
+M239$2S9)8V-B<$D^]&E(<H)X7C E8VUK:#E-:4A9?8)R;EE/3VAK8S0.($E-
+M24E865IB6#8E/EI at 6V ^,&:#;G%I245$-EMF.4]I85IB at G)4;FYB+1LB36IJ
+M?&]F6T\R*T1+539)8W)%/F9W85A;<FMB/Q at E;W)H6$Q8/D1)(F!K UB)7&)(
+M,CEB<4E% 78!?0#^;GIZ:%MF;S V13DV1#ERBGI::E95+3QA8W)J;F V*2D\
+M3W!N;&)D?8F*<EIC838P:5DB*1M)8$56:VUB7UXV.7MB&Q4W=6]':&AU?4DM
+M35QV at 2T2+5@I&U9C9B=9>&IP/S!S;24V640P8G%F;GIJ6TE':&%82TE/245$
+M<F]/_F!H:VYS6SL^8W)K8SPY86%::WJ#>&A1<'=R9U45)4U:.4E;6V)V=RT@
+M-EA at 6E@^&T1K37* :6%@15AP64UB8UL_9H-K8V)P7C(R=$Q$8GMZ@&-5-D5A
+M85AA:7!5-EIK8UA%8W-O-A at E86]J8%I935]))6ES<6)89G%/-BD^9H):1 %J
+M 7H _F1F;FUR6U at E%2T\/SXY<(:(:UI:9CY$;GIJ:7)A/RT_845I7&IK4&Z 
+MAGUF8V$_)5]<)2L at +3E536IR:UY)&RUC- at D.'6IZ8W!$68A+/%M:<()A.2 E
+M/!@E6&85/G-'7UE)<G(M/VD[%2MP;G6*;F-P:VQO:$U'64DV,&MW6OY-:FQC
+M65XP-D=F<G!),%IC:6-K at H1N36)K<FMF1$EP:C(V6&-:;7Y<*3))6$U)9BTE
+M/SYC<W-L8DUH8UA8:F9 at +6J&<V-%:F)).VI,6&F">H->8FT^-D596VEK8$]C
+M:F)I/TEC<VE%1&-P:&]R:%IG6E5B<GIJ1&:"8V%$.6^"9$T!3P%O /YZ8EQ;
+M:VE6/A4@)54V/&A]?6I%2UA at 15F(6UEW:3\M6'!-8TQB@&);=7A]=G)K6UA-
+M7#9$/S0;/V!B<F]@2QL_:$0G'1MBBGU[85B"/S9A<'J*>F M+44@%2TV"25@
+M/C Y8&YZ6W!K530_6T]F at VM/>HA[;G)925E4)15-<F/^86MC:V-K64LV16)Z
+M6R4_:VMI8W.*?6M:3%MP8TQ6<FM81$Q at 66-R=S\_158_/F]M,B4_66-F<UQC
+M:#E$6&)N:"U,<G=J+4E65DE:7VAK<GJ(04UZ;BTM-D5I:4U8:6MS<DE46UYS
+M:U9(<$UP=W)K:F9C.3]N>UQB=5Q:33EK at FI, 40!:P#^@W)'6G%C8F$R%1 at V
+M/CYF?75K+2TV<&A-=UMI=V$_-DQB6E at P,G=R9E-<@GAZ;&YR6EI)3%E>)2U5
+M36!W6TP^8(" :R4)-GI]=G)B at UDP.6IZ?8IK/EEA-"DK%0X;8%@V+6!W=UIR
+M:&!@86M;6FYI,&.*AGIJ8#Y%/A at .+5]0_E!J9GAR<7=S8%59:U\M/V-<;W!F
+M?8)U<EI)8FL_1&QK6%A-.4U:8G-H3TE)6FB#AVDM/T5:9GIB8F<M(C8_=W)+
+M16)N<E at V1$1)16-O:DE>>UEM>FU;1#8_:%@M+3E<@H-,16%%8VE85EE)8&IR
+M=7)O8AL2.7IW;G)B3S])8X)B10$E 44 _GI[159C15IR;S\Y)2U$8W5H<BD5
+M&UAK;W)(9VQ%/F-I6CE)-D5J:F9 at 375U<UMZ at V-;85A@:3PI-E5$8EL^86QN
+M?'<\("U:>GIF8G=Q351L>W*">V-F:5Y)4$$@,EA at 1#9-9G)B35E96$UZ<EI:
+M82=:?8=Z9F-823\5"1 at _2ZDV-UIZ7&-[;F]@8%I-66%C6V-L:WUR>G9P6V)J
+M7E]Z:T]833E$8&!:6 ,_U&-RBGUR:5L_36-Z>EQ853(;)VMR7CE$8FMC2SY4
+M/R<V4&Q536(Y<HIF6UDV:8!5&Q at I-F:(:$]:15E at 14PY5E9;9GIZ<F ;%1M;
+M;F9M:EM):W!J/@$; 2< _H"*:F-96FER9EA at +2U%:WMR<C\I&#)(>G5B<F,Y
+M56:!=S8V8'=P3%MR>G9U:DQNBG=C6TQH:U4[+5AA34DB/V]R9G=/-DM-;8-R
+M9FUW33EI<FMC9G=O8F-96V8E55M)26!O<'=H-C\V+2=K<FQT<$1K?8I[8UMI
+M84DI"0XM1?X@&U9J/UJ!<FAB8%M)6F%C<5MA9G)R=G5R<F]D26^"<FA-23])
+M36$_-C8[+6IF at GIC:F-56UYNB6A86V8I&T5B<4E)2&A;8#DE+3P;-FEI:E at B
+M6H-L338E8H-O/" R539KB&]A25]H6DU%259(6G5Z=TE$,AM)<&-R9C]->H*"
+M:UX!1 %4 /Y]BGUL4&=R<EY9.39$-CEP;FMH:4LM,&N!8V9@)TE)2'(M'5IZ
+M6#E6:GIZ9CX=6I*"<&%)6F)+/EEI6G)R1"5B>W)R338_+2]J<EQ@<FQ@:%IC
+M6#E::5 at _/EII*4E4(")H>G)[<F-H51 at 826),8UQ:<X*%?6MH:G!B7C 8("G^
+M("!)8RTP=V):34E915IQ<EYC8%IK8DA]>VI_=S9K?75Z9C8V6&IH6DE)6#9J
+M;75[;T\V6&%P;'):/TEI:4L^6FM at 8%MA/VA4*2U@,B5M669Q26.#:UDV/%MZ
+M<V8\25 at V/WIN6V%H<&]H6%9 at 6$UJ?7!486DG+VA;7G-;3W*'@G!, 6@!:@#^
+M:X)\:F%::GIR:"<V=$0;-F-:<VYH/R5:>VMK9R4V5#]6&Q at P7T(M6%IK:T0M
+M)5:2BH)H6$D_+39P<#EHBF at 5/FI<;U@^52 @1%@Y6')Z>W)B;&)@7V-:-BU)
+M83])- at X5,$5,4&1U=7<I#B(^5#\G)UIZ=75F:VMN>FM>.R E_CPV6W$\)5A-
+M:$E$/"5+ at W]/.4UB<& G<G-C<G-%;'5R;V at V+3EJ;4Q>24EF8UQ0:F<^)3Y/
+M;&QC238V379W34UJ6G!@8U5B5C8M340R:$M%>W%L at W):24D^6G5Z<G%@1#]F
+M9D]A8W-S;&E68V Y4&IH7EA8539 at 345R<F)F=8*"8P)- /Y%:WIU:UM89()O
+M)2UQ:3LE245K:FM@,E9R8EMK24EA3TD^1#XY-AU)6EAF*2!9:GV%DGI)-B 8
+M)7)W6%N*=RD_84QK;5AA/#Q86#8P7'J*>GIW:W)W;V)+/"U%25A5.3(V*2(^
+M1%IN at V$T("5821L817=N:EY/:&9U;VE4.S;^23YA>F@[-DEW8#L@%2UO>UD@
+M+6!S:#]H:V)J:V%J7U!G5C0E'5,^/DD^-F!H5C1-23 \1%IS<F)653Y%:X-K
+M5F!%:VM/=6I5.R5%,#]Z2S]R>G)]<V-A.QLE:H)]@F-F;VM:7F%C<W5K8T1B
+M<$Q%6UE>/RT_-RTY:W!5.T]F at GR# 5L!.0#^+5AN>FUK8D]M<#\M8H!P/CY)
+M7UI9639-<V]<8DQC=$Q$8FM9+3PM36)621(837)F=9**9RD.#A50 at G)B=8!A
+M8ULO3VIK8SE8;& \&S!S at VYV>FQR>GIR:44E+41@<7!A/RTI-CD_67IR@%4R
+M6&9;1%EZ<FI9-EAO9&9O7E]+_E at Y37ER225$>V at R%1 at M86\\#AL^:W)W>W)J
+M8EYA:DDP:UA85#L[)2U$.S9-:U]%63\G,$E/;'- at -TE)2V.#@FIC86Y[6W)K
+M6D0 at 23(V=6);:6!J>GUQ:44K)4UR>G)C;'-C6V)I;'9U;6,G1')?6F%6338E
+M-S8K+4]Z=5HY8H)0>@%H 38 M1L_<&YF=7IB4& M%3!J@%])5$5%5CX^16)R
+M<E at P37),6&EZ9SLV/F)O5BD5($1R65IZ?7<T PZY-G)Q<GIK8G-C/CE:@&@M
+M3'=B1!@517-R64];<G5U at GA4)3(_86-O8%A$1$M%24QC9H9R2T]K<&IB W*R
+M838P:FA/:W-K85 at V1&MI11LM85A5/#1);V<I#A4R36)ZB7)J8EE>8DLP8$1J
+M=V!A1#8#/]588V=)2U0V-#9%6%!)&R4M/TQQ at FU<;&Y]<H!:860V83X_4&MG
+M=UQ,>FYK:6%$8&EJ;F-><FM;24]A9W)U9F%)8&)-36M at 6TLV7EY$+3EJ at VHE
+M6DEB 6,!+0#^(#!>:$UN at FIB6" .*4EN9$UH138^)TE81%QR8C9)8EIH:W)F
+M84DY8G)C,B Y37IF86EN<V8P#@XM:VUU at G)9:FI8-F%]:T1$@&])'14E3'IL
+M65E;:V9V=TD;("=@6FQ at 6$DV1%EP1#Y, at X-I16MZ<FMK9G=I539C8TMC>WIR
+M_FQ at 6&MP9C0I)2=8:4EC>FD@"1 at I/T1F?7I;34U at 3UA:245G;TQ9<%X_23]:
+M:6!664D_7DE88%]$+3(I-C9$;FI:<W)F=8!/86)6/EA61$E@<6A624E$8%IJ
+M359B6C\M6&AJ:$U%;WIJ=VE8:%])26):6EE):'!8,B58>G<R6();:P%/ 38 
+M_C]+8&Y;3WUR46 \%2U69FI;;F [(!4W7S]);' _1$5C;VI;4&- at 6%IJ;$\V
+M1$ES<V):;'IJ9BL2+6)Z;7EZ:VIO/R)@>EQ>,')J5C)!/"=J<F]H6EM19V(M
+M(" I/DQQ=VE)-BT^?%\E)7*#<CE->G5R:FV"<%XP6%A+:W)M??Z)<EEB>WMF
+M6!@8+4]A<'YP-BDM-EA at 8%!O:UA%6CE/;(-R6D5B:&MH64E,6EM)35HV+5IK
+M<&)B6#!$6%Y56&)98(" 6VIR16]J64E95F!-1&)Z:4E/1$U$<H!J65 at V(#Y8
+M8F]6.6ER8W)Z8EE:23E)/S=89FAR6"48/'-]83Z"3( !6P$V /XY.5EN>VIZ
+M<F)C6C8W/D5R?79K6"LM6&)626IP23<G-E!L6$588G!O6DU-6FI::FI%-D]Z
+M>FI5)3(Y:G]B8GIF;T0I/VIB5C]K>F$M1%8V8'-Z;6)-:')8*2]8;CLP8()Z
+M8$D\.7)C)0Y,=6M)37IZ<F9M at G-K/R4M8'-K6WK^DG=/8WJ"=6Y!&#(M2WIR
+M34E-6&!@8FM at -GIJ36$M-F-Z8TU88F)M>G-Z<&!8/DUF-A at V:WIR8U\^.6!X
+M<'1 at -BUU@G-K<&)R=V):23E6<$DT:G)R:E at M)VJ)@F)654$V/CEC<$U(:UMF
+M>FI6:&@[+4DE16%R>F at E#AUB;GLI at C!R 6,!/P#^*1L^;WR"@GIF:UMA64E%
+M8G6&=4\G/FMS:W!R<F-+/!LV:4TV2W!K;G-))U9Z:E9;1"4V<GU:5"TV6&J 
+M8EMN3UMB7DEK<EE/<GIK7B<P-D]Z at FY@16IZ:"DE19!6&RUR at DPY86A[:24.
+M-G!,.5IZ>W)C7&YL<F$@%3EC:T]J_HJ"6%9J at X-U<$M?7DEH:UA88&MK:&EF
+M:EAR=6MJ.R)%9U9;8FMJ:VMN?8)W:3\_8EX;-F!K=7=L83]/:DU%)3(I375[
+M;V)R at GIN<EXM,&AH-D59;8=6&Q4^@HIZ8F%?8#8B/EIC-%I@:')J66IF23)I
+M/"U)<GUP-!@;/EIN/X(_:P%K 58 _B4K66M]>WUM:&!$6EAC8FERAGU-("U;
+M=6YZ at H)S:F R)6TY%2UA35""9R55=W!%65@\27![;UXR)3EB at G!J9F)07FM)
+M6G5C8GIN8W<V/#(Y@'IC-C9:9G=5)3^";B ;28)P23]BBH%>(!L_)1U@>G=R
+M:V]U=G5F-"LM3VE%1/YZBFDP17Q[=G)I3&)R26%/6UIN<G)I6VIR<WMN8U4;
+M+5E/<GIC8UQ;:WN#?7!86&IF+2U836)X:6)-6FM@)1LV6%I<:F-6:GIR<GIJ
+M8E]B:T0I)4>"5B .&UEZ;6IH47=A/D1):%A63%!B8VEJ3UA8:V$E,&-U<FE5
+M/$UP>UF"87(!:@%$ .X@/VA;@G5]9EQ;3#XV3VE99GUZ6B(5.6)?;H.&@FM-
+M1#)H1!LM/D59>G<V)V!C,$M?65IS=6I:12TE+V9]<FUK3V)B8%IK<'=S7&MU
+M1%Y$,G*":"4;+3)J:5E);'-F)2EK;4D at .8*->EXI&Q45- at -:_F)K>G5Z<DM$
+M24MA65AQ at G V/F-F=6)@)T5P/TE$33]B:W)H:&-R>WUS8S\8&"TP8G)%34U/
+M8W)Z>W=J:G=>,CQ+.45P:UM@:7)P63]/>'-R:F)-7V]J:FM09']Z<E at V-DQN
+M8D(I("U at 3UEM1&-P6U at _67=I239)6&!@,"=A:8]I-"E/8V-K:%E$8W9K<'H!
+M<@%) *0I/VM/;GJ"<F-)/TP^,&-B:FYN3U @.SXV3WJ"AFM%,#]Z;S\#-LTW
+M:GIR1$DY-EAH6F-R<G=N63\M+6-N;F)C6VA:8W!J:G5K3UIH/FEX6F]Z5A at .
+M&#QC:VQ at 6VIW6#9@:C82)V*&C8(\(!TI,CY$/S\T:P-Z_E at P85E)26ER;7IC
+M:6E(9G-9-EEI84U$-BU/8FY%6V]Z=G5U<C88#A at V8W-6/T1::V]B36IW<G!$
+M/U]I-C9<:F)H:V)K=VA18W6$>F(_5FIB65],1W&">GIO:5MF:VA>/"U$.415
+M(C]P8E at V3WQZ<%A98EI@/S)8:W-5*2TY;8EL338;+69P=W0!<@%@ /XM)V)C
+M6FMU?(-K/EA6'3E,<X)N6V%)5DDI-F=UBG)),C9U>F])+1L2/FZ"?U0=/$UB
+M7G)K9GIZ<FQ+-F)N<DQ$-FE at 1&AN at GI064DM)VF(;G6":308&TMK;G)-14QR
+M:%9O=VXT&R=FA85I55]>14]>141$6VYZ@&(E3%I%27#^<E%J8FIX145R:V)K
+M=V]H338E26]A.3!H<FMJ8F)5(!4;26MZ:UD_16N!62DY7')H/S]9=UDV-EY%
+M34DM1&QR82M:?8)K*3]J:%9)245$:GI\@GIK:FIR=$LV-DE at 5!LE6%D^66-\
+M=6MB66]@6F%@:7)[:40@&V!P/Q4."2U;@D5' 7(!8@#^+41O<EE,6VV*<D5)
+M8#8V.6F">G=S<&I at 6%5;9GU[83X^4'I\<%0I&"5BA7UH+39,66-L=VQO<75Z
+M8EAH9F-@/R!%:#E%6H*"1#X\& E$>GI]@WIA)1@^8W)P/C!$<G=:3V)]@%XV
+M17=U;&N 8EE>7CM):&]F:WIR2V!%+3YW_H%N8E%:=V-86GIZ<G]R<F M&#=H
+M24@;/FII:$U%:6,@)6AZ?6L_-C):@F]$-D]O8$E))U]L/"T[254M&!TY:W E
+M-F9_<BTG36]A24E$(DAQ;8)Y;FIJ=WMP5"T_;VT_-DU at -C]C=75B14AC2$UI
+M6G)B8UI>-"!-;U42#A at M38(^)P%, 40 _BD^66MO8G)R>G)H5DU>7#!$@7UN
+M<G5R35MW<&)R=6I:1#]:9&1X83(E36U39VE)8%QK7&MS<G)N;EI0:EI)85XE
+M-FA at 15A[@F at V&R K,&)K>H)Z8S(@)3!@:V8V-FAR9CE$>GV :4]:8G)Z at DTY
+M8$DE&T5R<F9N9FMP11LE1/Y[AG=@6FMH242 @WJ&>G5P1"T_7#M4-BU-:&MA
+M2UIC.2UB at GUK,ALE6()Z:%A;=W-B52 V8$DR+6%U53PM-UMQ7C98:F-),CD^
+M23X^238Y7UEJ;F]H8G)]@&<[-F!G/S9?;U0P:G5R6C ^6$1,7SEH36%%8FE+
+M<7M@*2DR25B"6& !:0%> /Y</S8P.3]R at G)K6TDY8V])+5EZ9EYF:#8Y=8)C
+M36]H6VEA6$4_@'(V-G!C)TAO26MR8UQC7FYR:UI)66-I2V)O0BD^5C9$>H1R
+M1!M862<Y36:"?7)C7D$@/EM at +2)$23\E-FMN<FL\&R=F at X-))UE)*0XE9W)R
+M<&!K>&8[.UGH9X)]<V-:8"4;8']K<GIZ at W)?8$D_6FU?36-O8V%P61T;27)V
+M:3P5%3]W>FI816)P6FEF-EM6/" Y;UM%25AA<6@^-C!::&!4+39%.SE86&!B
+M9FI at 7EEM>H)B84E65BTM6'9G/G&">F(#/HE)8V V5F-?:T<#8HEA6$]@35IB
+M7VL!> &  /YW6TDR$AM@>FQC8#8E6W)P246 :$U08" ;6H)K3U9,37!_<$U/
+M?W<^-FAB25A at 16-K8EI;6VYZ<FI)/DQI8&E[:#LG/BTM<(J"22)-624M26%R
+M=7-R:V$_26%F-BT_/RD5+6)Z>V\M#@XP8GM))4E<,A4 at 6&ER=V-C:VE)3VGV
+M:W5V?'!?52 .+6)H8W-Z>H)R8TD_3VIZ7VMH36%K82D at 6')R:UDR&S9J>FUA
+M6&%;15ES65MH53L\340P+3YC<&M?/S9>8H!O,BDV/TE665IB<G%H6EEB9'EB
+M3$1)1#(M16=,6'*"@FM6+24V8FI:6F):<F-:6 at -BAVAF7$]I:V(!9@%U /Y[
+M<FE$"14V6F)<6CPM26MR<F*"<$Q:8"L2+6-W:& V.5N#>SDV:H!K36!(8&1,
+M8')<:FMC7F9Z<VI$(#EA.6)Z<F-)-B 83(^*<%E+84D_36-H7')Z<F=;66)K
+M6$M96#8I*45R?7<_(" M1&M>+3!>1"DO15M;>G)C8DE%6FOM8F)F>WAK7B 5
+M(%EO8G)R:6!-8%M86FIR:G):/DE18$1):VYK<4]$*39K>G%B8VYA/UAF:5YK
+M6DM88$DV(" _5F-R6V!H36IJ23(V24U624E-9W%Q<FAB6EIR9SDV23PV/TDV
+M36IZ?7)J/R V5 at -:D&)B:FI-6F),3']W:$588VH!8@%F /YR<FM>(!@R/D5:
+M6CXM6&-B:W*#>F%::%DI&#EO7%@[/TUK<BT;-F)R:F ^5F Y6H)J<VQF:T1C
+M;&$^&S]8)4QD;7)I22 .+7EU=G$Y17-P8F)G36)N<FYP;&-C6F%;:'!>,#9B
+M;G)H2UA9:&QH22TR.SQ)6UM,9G5O6$4^6'"\7T5@@()V9U at T,EER:W5K;&A@
+M6EA-3VAM8V-:-BTM-C)6@&)>=T0M("5@<FYI6F%A6%MC8W)Z8#8Y:6$_ R"7
+M,%I at 1&=K8"T8&#M;;G)L85A)245B<$D#/Z=%66A;6%AA85QZ<$]$:W5;3&-8
+M6"TI/EIC7T1$8$PV)UI><&E64&T!<@%B *]Q8EIB.Q@[6#!$22TE7FE/3'N"
+M>FI::W))("UO6C8^6&!03#LR)2]-6DUA8VE:3 1R_EQK36-K8%4R-EXM8%MB
+M:FQ)& XB6DU;51T_>GQF6FU at 8%QB<'A[8U])86%B>W)</TE-8V=H<F-R<FIB
+M52 5+6AI:%@_<FI+63\P;V at _37M]?7IR7D5+241B:UY::V8V)2E)<F)K:%0[
+M*14 at 17IF9HA@*Q@@6GI[<%M%245:<<U(:GIO+1L^6$LM*2LG640P6G)A-Q at 8
+M+6%]=FYP:5Y)-D5R<$LV/DDV-CX_6GAX8VMK7BTY;GMP8UE)/#1$86=8+3EB
+M240V+1U-?V)-9@&" 68 IH%H4&%$-DE8.3LR&"!)6CDG<(I\8EAR>FDV.6A:
+M+39C=V8^,$D[ R74+6%J<'MI36)N>W)L6FIZ:V _/UE)8&-K:VI)& DR7BTM
+M( D_<FU6.6)C8EPY:7=Z:UE5:7!P:EQZ:$0[2UIC>F9>>FIN<#D.)6)O:%D_
+M:V P22T2 TG^66EK=G)[=V)A/QLV36!?67<\(!@B8F)R>FA>*0X at 26QC:GIO
+M-B V:7N">FA:23\Y6V-A=W-5-#E8638V539+,A)%:V-%,!TE16MR8EA at 8F)R
+M<6UW8S8V52T5("4P:GUZ=6YW/AM,BHI[<%I865IN<F ^/F!-87!+("5B:#8Y
+M 7L!=@#)@G)B/UA;:&MI62T8-#E;5B)@B8IO6G*(=F!:6UDR+6F#>F _-DDT
+M'0X;24Q:>H5:/UZ#>G)B:WIU:TQ%541836!R>FA>,C]H)0,)_B)B?V Y7V)H
+M:$E89G)?,$EI8&-O4&]O/S1+6F^#:%MR:W)K.Q@@1&-I3UYC6# V(!@E&R(^
+M8VQH3W6)>UE$(!4M;FE;8EPG#@Y636=J4&M5&"!88%I-<F,M)55R<G5Z>W!A
+M84E%:G![B&]@6&!?-BU<6E at 8#B5 at 8FEA1#0G1(]J34U at 9FIR;VAN<UE>63D#
+M&*(I1&)B7&MS5!4G9&UV>W)K;VMN=FIB6D5$38!K5#)$7R(M 6(!;0#):V-R
+M7S]->GQS8EA$6#!$;"(P;8)Z8&-]9D5K>E at M($F"<G)P63XR#@D at 0B=);H6*
+M:W)]>F9B;'IF>GI@,"=;4QU->F!R;H!H( ,5_B V:X!C8$5%;G!%8'%8,DEC
+M6$QWB'IK1" M/V-[8#E::WIO/"D^6$589&-P6F!85#(T*2D2,&-,/DV"?VE5
+M( XE<(5P6V!>.Q at M66--,%A>&Q@\66)@=TLE(#MC8V)R>VM><FDP8F)R at GIR
+M9EIM,C9F85E$1#Q$6G)K:54E+9M-,#YB8VUR9F)8:VMP;$DG%0XT8$TY.5IB
+M530#+99%=W9N:6M><FY_<D14.VMW6V%R:T0V 4D!6P#^33Y'8SXY<G)C<&)@
+M:$U5:#]$6V):6WN"8SE@>EHV-DER;FYJ:50K"14G+15)6V: =85]<69F>W)9
+M:8)W/B!62"5):DAC6WIP,C \.RLE37MB63\^9F\^6'!H1%E at 8VAF@X):/CDE
+M.UAF6"TM17)S52 Y;T4_:FYZ36-P3SX__C\[*RU).S(\;FUP62<8+6:#=UI;
+M6&!!%3Y925EA:%4\1$UJ8F Y6#PT.45:<G-B37J 6$]:6EQ;;U\_6"TG6FMH
+M8%A>7DQS8W-4&Q at M239-:FYN8F958VAR<VDV(!(T;%L^6F-98V-5*Q@;5G)L
+M4&)L:VU]<DQC6$UJ6EQZ=H)I10$P 5L _EXM&UAB6W!-,&!J8EI,;')-3'I-
+M)S9K=6-$:')B6%E<<GIJ8EIF/!@G," .*38^35N(>F)<:W)R;VYR:V!83UMH
+M8FIB<&IJ7&T_/U at R($V :DU:36MI.S]@=5 at V,$UP2')[-B(V)2U)8$DT+55L
+M at 6]$27)9+4ELBD5%;5HV1?Y-3%Y$/TE$.6](:&!$1#YJ at X)L8F)R;#Q+6$1-
+M6UIO:FE::&@V&TE>+2 V6')P6SEF at VM@7&A%/UAH65A",C]C=T at Y3W M8F-O
+M51 at 2*5HM-F)Y;UYK3TU9:7-[6#0G-&%;.6J!;'-R9CP@%39R<UI<<G5[?6([
+M644[6UMC9G6"=7,!8 %9 /Y8.Q)$>F9B/C]$8&!,16-R3"EZ83(W26-/2W!K
+M8FIK:F]Z;6I/:$D8&" 8%2 V1!LM>WI96G!/9GQU63]B>UA-<UI::G*$>EIJ
+M53Y:/!LP<FIJ9TQ:8SXE.7=P/BU$8V. @U4I+3(B.6\^(!LY:X!B8GAZ9RTE
+M285?.6))*4G^6T=B9E5964]P6$P^1&A836YZ=VMJ<W=H35M)56!%8WIZ9F9I
+M-BE)6$0R.UAW8V%H<GMN<FIJ6$U:8FQC8#X_66I8-DE924]C6D4T("U:-B5%
+M at GIB8E5$.4]N at W!+23E8239BC8.#@G-A3R4V:W)G8FY[:X-G16D_/V)K9EY<
+M at E%R 7(!30#^/QL.+7)R6D1 at 669@2S9$<F])>FI%24E9-C!<:F-R:UIJ>GIK
+M8VU9/#)$1#0T25P2(&IW2&)]6F)Z<F@^068^1']H/TU9;8*";UQ)540K*6!:
+M9F]@:6A4,#):8E5886!>@HER/B4\*25C10X.%4E-+41Z>F<M%2=J:UIO824V
+M_F!@65I-24U+:G=@23EC=TDR/VMR9EYN=TA-63YA26%N at G!:63]A?4E+7C]H
+M=V)@9G*#>G: <V!@7TQF<G!?66%J85A+6%E96TDV-"4_8#8;+6I]<F)%6#LP
+M6GIZ:T\P23LE18R,AGUS8UHT,EA<9F)<:%9U9S]B.3E6=VIL:8)%6@%J 5L 
+MZE at I&"5B7&)-66-Q;'!--EJ"@WUK23\V23DI.6)C<W-K:GJ#8V)P8UE@:F]I
+M7VI6#B5?<DM'<F-C:FUS=UM8&RUW>F):7V)LC'I:6%A)1$EC<&]K8G!P8%59
+M9D1@:G);3'J2BF\\1$0P6C0##OX5%0XV>GIB1#1$:%E88W%)/D]I5EIO6$5)
+M1VYR6#]:>F\I%2U96#D_=E8^6"TV3VQJ>GIM/REB?TT_:3Y$<F)-6VZ#<F-Z
+M@&=-1"E,<WIK8&%;:6- at 6UA@6$D\+2U)6C8E*4QJ at VM%/C8@,&MV;FD_1"D@
+M+8*-A8-S8UI864^36FIF35IF:FIC9CX=)6MK<W=-3 )K *EB7C(R6#=913E8
+M6V!R<S\G8HI];EM/1#]$.S=A;FQR;69Z>F)%6DE%<@."GVMR;1L_3&)I.5 at Y
+M3V!B=8-S7 at D816YW8V-H3WIS14P#6/Y at 67)R7UER<F);:6 at V7WJ":4UZDI)Z
+M645927 V,#D@#@D.-H*":DE+<8-96&-J;V](83DY>'!%16A9:VEH:FUZ5"<@
+M+5I3/F)-154M%2U,6G)S;S8I8FDM,'%8+4D^-%]F<EM)46UW8S\E.6QZ>FM@
+M8%]O<&!88UDV-C(V8&*R/D1$-C)J8U at R,BLI369N<G!)$A(I;85U?7Y at 36MQ
+M2$U>8UI8:G5F at G5<+1LP47IS33D!:P%R (1U<FA$ UK^545$&RU:<F$B.7I\
+M;G=P8U4M)2 ^;UI at 9F:#9FA8+14E67IZ?7)D<B4M/UAB23LE)3M$6X)N7SDI
+M-G=N<EE)650E%4UK8F%K8VMC:UER6DD^85DR376%<EYNBH9U8%A827=:6GA5
+M(!4.*7!Z8T5)<H9H,#)@9GIK9RDE3%\G^2!W1#EGB8I]>W!F-C9;:T5H8EI@
+M-BLT+3Y68F \26!@)25B9BTG1$]F>H!R8$UB<G!9/$MB;7MR:EMA8F)C6&MA
+M,A@@-FAK8%MH2S(^6V<R-#PV/UA)16MN01T;3VM1>XIC16:">FMC8S P<GID
+M<F9I850V7V]:25@!8P%V +)Z>W=H9FI985 at M$B V26-$,&)M<GIN<& T%3))
+M6$U)67)U35A8%0X5,%A>9EH_7V8^- at -:_DDI%24M27IR6$L_6')Z:F ^13X8
+M#BU?4%!J9GAR<7=O:%XV-CPI-EMU;F-D?8)R:VMI16MM<G)Q64$P+6!F6TE;
+M=WUW239;8G5K;%A635 at M(%PV&SEZA7Q]>G=886QJ/V)O4&-%25LV24DW/S8V
+M3VE5-EA-+25):6YVB8)P3,M-8VA)/DUI:G=R8T]W:4]C:7IL11 at 5+6!B:')P
+M<%8^3&I/568\+3\W+3EL=U4[3TU9>HIP35QVA7UZ;#PE;()U=7-O8DE):G!)
+M/DT!7 %R /Y9=VIF>FI;:& M("D at -F ^)3Y,<WUN<F R%3Y@/DE826MJ2454
+M%0X at +4598F)6175S25AI8UE)-C9$6'=W6TE)6FYU:F-823\5"1 at _2S8W6GI<
+M8WMF:W=>-D0\)4QJ1UIJ;6)><G)H36MU=V-P<%A>7V!-6SY/>GUR<%A;;'K^
+M<F9R<FE:53)551LE6G5T;F9Z;&MR:D0W=W)C8VM86&!8-BTV+2U-:5EA-BU5
+M86IB4'J%>U8^65A)56%B66I_8TUP:V)B:X*#21 at .+5MC6V%KA7Y]@V9-24PV
+M)3<V*25/>WA:.2]B at WYU6EIJ<(9];6E)9W5M;7UZ63)I>FHV at C9? 6(!9P#^
+M17!<2')K<&MC5CY)-DEC6DD^+6-[;7!-*2!)9#X_7EIL3TE)8#\R4%Q;86)J
+M=TURBF)'<&YC:%9)6$QO=V)824]R=FM;:6%)*0D.+44@&U9J/UJ!:6)R6#!;
+M:#9$6D1$65L^-&MJ8&-[<6):;'!,6F- at 8%L[6'5[8')C15J!_G)13&U[:U@;
+M-E4[-DEH:FMC<VMD=6\^'5]R8VIS6EIR:TDE*3(;-E at _3S(V6X%_:UIF at X!-
+M-DE8/TU at 838R:EM$<&]B6V-Z>E at E'3Q at 8%I96WN&AXIW8V!;2S9>7D0M.6Z#
+M:B4;,GIR:V-H6UQ]?5QB6'=F:VY]>DPK8WEG+8(G60%J 5\ _DUN<UI:8W)N
+M;G=I359H:VMJ5B4Y:V-K6T0V5F]<26%I<FI at 6GIP3$EO7&-:87(^48)R2&IL
+M8VMF8G!:67IO6EA;=GIK:&IP8EXP&" I("!)8RTP=UA68U at M26IP8VAH8%E8
+M/C!;8T]C?6!%36)K8DE)/UIJ3%IM>6-B8#E9>_YK.2E%<G]F,C8^14DV.5M9
+M8W=B8G)J8#8V1$Q:8F-K=7)O53(I(#QF138E-C]H@()B6GJ"5ATP7&!).5E5
+M/UA5+7)R6DU1;61O83P^7T]-/CE-=8V*<EM96EE):'!8,B58>G<R&"EB3#E+
+M6F!;<H-Z6T58-V)S;G5J36)D83:"+4T!6@%$ /XV9GUN.6!R:U![<E at V6FN 
+M?6,V,&-K9F%,/TEB;VMB<FMM<EIR?6(P3TU88&MK-B=F<F!B8UYR>F)W6#!B
+M:%E:6FYU9FMK;GIK7CL@)3PV6W$\)5A)6&%C24EKBGIK>G]N:TDE2&-:8G5I
+M/SE,:FI6/RE$<EM88G)O8U at Y6H#^:SLE+5E[<EMB7F!@.R<[-CEJ:UIH/V)P
+M1"U)245;=W5C=WQ@/C(G6V!$)3(M+5QZ8TU/<E8;(%9K:# V6$M at 859Z<T5/
+M6FA$67-X85M8:%@T+5IZAGII/S=89FAR6"48/'-]84E5440E+3])3U!Z at EI5
+M-B4^7$=F>GAR5EA$@BUF 5@!'0#^+5IZ>C P4&I%6EQF)2TY>HIR63E08EE;
+M34L_1&)P7&=13W)B3VYR23\^/UER;UM)35QJ8$E(:G)N@&$V25M-5EIN:EY/
+M:&9U;VE4.S9)/F%Z:#LV6V-:8&!8:HAS3&*"@GII.S!89W)U:4DV,&!H:%4M
+M+V!;15MK:6);8W)Z_G)@/#Q/9W)J;FEI<F9$-B4E36QC24EP9SXV9EA$3VMS
+M8FYW3&%K,#EA6#]!("4^8&]I35E)(!4^=W=$+3 V6&-H<FD_26%825IM?7)R
+M87!C53=/:WUR:4DE16%R>F at E#AUB;GMZ>V-)/#9).6EI;'HY+3(R25I)16:"
+M at G=H-H(=6 %A 24 FEAK:FY at 2UAJ8V]J:#LI)5EU>G)85F)B6T5- V#^8F)K
+M3UAJ<F)J;DU6224P:'IO<FM9:FI-/T1$7'MZ:6MB23E)<FI9-EAO9&9O7E]+
+M6#E->7)))4UB7T])37*(<# Y<WJ"@VDV.6:"=7=)+3)834U at 6#XY345J:E9$
+M6FI_;75J84U86EMB<&-F=W=K1" 8*4]?1%A_<D0M8$DPXTLY8G)R8RD^=TDG
+M35 at _53(T1#Y:<&)H5BL.+7" :38E-#EB<&)?/S]824]C;FUQ<FEC8T]+66-N
+M8EQI/"U)<GUP-!@;/EIN?8IX8#]+/R);<VIK)0XM+4507SEB at H5]<C\@/P%J
+M 6$ \X)]:EEH8$UJ9G5Z:V!)56!967IJ:&-P8#8M8G)U9F)R:$E9;G)K:F)A
+M-A451')N=7I:8G> <$D\/W)U=7)D23\_<G)A-C!J:$]K<VMA6#9$:VE%&V)H
+M8%@_.5QZ<F!+8&)Z at VL_)3]U<GEH/BU8/RT_6TD#+==B@"TG7V!-3X!R6TUK
+M=V$Y:&]J<GIW6T0@'2U66FM_=FE8:44V53(V=W]9)QM@;4186T5F1$EH35!L
+M;VYB1"LT3'!P1"4T/%!W;FI$/TD_15IK:G)[>F,#6ZYI8FM836MA)3!C=7)I
+M53Q-<'N%C7AC145>/%MJ2"T.#CX\/SDM,&*"A7%R8U4_ 3D!:P#^=8)W6F-6
+M-V)U;FYF;U]-8VQ/56IO6F-B-AM$:FUK669Q5E]P:VA/:G [#@XE245G=TQ)
+M9H:(:%8Y:'IR>G(V/UYF=VE5-F-C2V-[>G)L8%AK<&8T1$U-26!@3&M9<GQG
+M36MK9&E:/TQF=75J6%M82UEH840E$CEJ/R Y1!LG_F]R.3!?@W _/UMB=7IZ
+M:F$T&" V36N <FMC:TDV/"DE:HIO.Q at P8&E]:3EJ;VEP6$]:6H)K13<V1&AW
+M82DM14ES>GII8TE$,"]65EQVBG)C:VMR;FI836EI-"E/8V-K:%E$8W:-DH!O
+M33E-6&-0.3D=#CY@=38)%3E[A6YB38)).P$= 3\ BVIU<F- at 14E:=75Z W+^
+M1%AP:2(Y8TE9:UH[6%EJ?V)J<E9'6VIP145K9RD=.SLE/U\G-FIZ>FD^&T5K
+M6W)M/C8_;8)P7C!86$MK<FU]B7)98GM[9DDY+25);UIP8FV(>F-K6C]R>FDV
+M/TUR at V]A8%MA8V]@/" M,F)$.3PK+5MW+1 at P<G5>,$E:<GIZ\G)R52 @&RU6
+M:VYM<W!9/SP;&T>%>EHT)2U);G V1&UO8#E)5B]R:UDT&RU$8F]5)2TG8G)U
+M?7I at 638@+3(V:XV#8F9N<FYZ8V%K<U4I+3EM;$TV&RUFAI*- at VH_-EIS84EA
+M00X;.6I<(!L_<H-U9S8E.P%4 4D WF)K3V=),&EK9G)Z?7UF85MR>R5!21TY
+M;6MH3T1/>GIY>FIF5FI[:3!;<DDM.4DR($DR&V%O6CXR($1 at 15IB340M;8)S
+M:S\E+6!S:UMZDG=/8WJ"=5H^( XE7U9-8&($<OYA.5MR:DDR(DAZ:V)K8DA:
+M<G=;3S\Y8& _3UXV7X!N1#9C<G M15EU<F):;FY$/#(K16!18W)O.2U>,CQB
+M?8)P/CD@)4UW23!?8CX;.6D_6V _-"<I-C9%9C08)4E?:GI]6V-8.RTE,F)U
+M at UQ9>W5N<T5;<GMI1" ;8' _%0Z>"2UL?'!]@F,V17AR34AF("4I,G1>&S!9
+M;75W21LV 6 !1P#^1%I$6E at M36IJ9&AUBEQC9G*"14A8*2)8:FA8.4UZ?'5]
+M at G);6X*(7UEC8F!68VXE/CP;.6I9/#(V9F))/DEB23]<;FQR82 5.6-K3VJ*
+M at EA6:H.#82TG%2UM/C!).2<^;WIP25A::F$_*39F8FMK:4U;<GIJ:FAC8UHB
+M,$0P_DUZ>F Y66MP-#]B=6MA6$]C8%9A-#9-24UH6"T8:5]I:VIR at T0R&Q@^
+M:EA6:&([&S9P6%A>-BT\1%4^.6%$)41$8&IR;EIP6S8I&S9 at 7&Y,2&YN:W M
+M.6)C6EXT($UO51()#B!,6T]4>G(Y+6M]6TUR1#8M-FAR62M86F9Z<H)/: %J
+M 4P _D187"]W8E]B:V)3>H9:/V)Z=58P/BD8275:/CE%>GM(4'MR8EIU at G)C
+M3V%B6&IR-DA$.SY9:F0I-G=R6TD^6#D_:W)S<F8T*RU/:45$>HII,$5\>U@;
+M*1 at E9#X^6#P8&V!Z>G!H6F)I2V9 at 7EIA8FEJ<GMZ<G)Q@&M)&!@;&_XY<F(Y
+M,#E?<%A):7]R:VMI8D5-<EXI.5A8:$DE(')Z<FA,4(AW1"D;26A?:H-J23Q@
+M<FIQ83\\.T]I23]-1!LM56AR<EHY:T\M&!4M341:15EL8V-P23]-845B:4MQ
+M>V @%2!5:5I8.6)R5#9C?6Q%:V$Y*5A6<G))6%E;<X2"=WH!>@%H /)'5DD;
+M<G5B65YK7&9U<U9%:WUJ150E#C9R=SDE,&M_23!B8F-R=7IR:DU-14EC8S9:
+M:&EI3V)W6#!J:UA%23XV/V)R=75P2T1)2V%96'&"<#8^8V9)*2 8*58V:6I)
+M("UG=7IN8VEA8#9@:F!,23E)66T#>OYM<H6"8S(=#A at _:V)$/CDY7V);9GIK
+M67&(=UI;:G!>2TE)84D@#FF$@G))-VN*:#LM26!@9GIJ3%MP>FZ!<TD[/T5I
+M2S!+/RLM6&EW@$U+84LT("M$2SDV.6!N:F-R:4]>84E:<&9Z?6@\-%B#>V-C
+M7E%R9SE1<G=;44TY)SZ+.4QH6&)H3%Z#>GH!@@%R /YB8$DE6FIJ6EAK<F)9
+M:&))6X)R:FA$&QM:>E\R&SEB/BU88&IM>G5F=7))16EP6S9$<FUW5C=K8V-R
+M:DD^/S995D5F<GIZ6#!A64E):7)M>F-I:4 at _9FLV-E@V8&!)+45R>H)G26-J
+M8SY$;TTM25XM+45N=6Y9;GB*<U at K("#^5G=B:&);65A-6V=K8DQF at H)U<G)F
+M9F)?6&-5& Y- at HIU7BU)@F\Y,C9)7W=R8$QJ<VYF:7)A-CPM6%@E,BTV7F8V
+M6GIP<FI9/C]>:6E>-C]C8D0Y=7=)6%I)6%IF at GUN6S9/BGM1;&M16V)>25MR
+M:U at _3$M97E8_.6N"6S]B at F-; 6L!<@#^<G)H6FAJ8V9R<G5F6%A-.6.'9EQ0
+M:#(@.6)17"4R/B451&IB47IZ8FQJ3$EP<&I$,F)N=U\^33]B<GIB6%A%:6-)
+M6VYZ@&(E3%I%27!R46IB:GA%*6* :#X_24U)/E1H<FUR5CQB<G)B35I$&RU8
+M1#8P6G%B3&!NB7MI238M_C]H8V-H359:-DAK<F%97FY]=6Z%>V9B6UM9/!@.
+M-GI]:FE83X)R138E.59J:U]6:FMC;UA9:$E$-EA at .RDR.TE?+39H;UMN:V!9
+M8VMR:5E)8$\;&V)R2$DW+3E%6FUP:U _17U[2&-R:V!<:&EH<G!-8#8Y;7)H
+M(B5CBG59/H)$/@$P 6, VW)Z:F)K<F-B;G)R:V!)+14Y>F)H8F)).S]-3VE)
+M/CPI(#9O:$5R at H)U:$E):VIJ:39$<H9R:V at P25YJ:V-82UMB:&]F:WIR2V!%
+M+3YW at 6YB45IW8S1A>H!J34T#28M8>GIR8S8I6&IR8P-8S40M+38V1&-C:EI:
+M8W5R:&!825Y:8&)K23 ^.RU$:FE866)Z:UQ]>F- at 6$D_+2 5(FMZ;VM)*7* 
+M:E\_/DU:6EA at 8V-I<E9,<&E886Q@ S_"24U8/$E96%EH<FYW:F)J:W!A32T=
+M&TUR13LK*S=88F-C6S\_36Y\6$5O6DEC:&)G;G%97!L;26%(#A)%@GUR/BTE
+M 24!5@#^:VM)-FN(;TU86UA+66E>-B56>EXV/SP[.S8V;G=C7FY5-DU@:G)<
+M3%AS6"TW65MJ8!LM:FYB:E!)/T]K:V _/V!%<G)F;F9K<$4;)41[AG=@6FMH
+M9FMP7S\I+45B:E]).4E:<W)H8%A)24UC>VM:36IK4&E:8V):65A:8V-8_F)0
+M8()K1%!88SL at -C(Y8G!K65IF855+23\V/T0K&!M?<3])2%A0>FMC9F-8:&-A
+M8%@_+5IR:W!J:5D_8%M:6EMC>FEA34Q;9FIR;FQ%46]H23M$8V-S:54M04E)
+M1&EL:3\V36-P22=-/S!::VIK8TU::6$V16M8+6!]>GIP2X(V/ $K 3\ _G)L
+M63YG?7IL6DE%-C]K:388(F)N.UE at 86%5)3YH141C;V=+36IZ8TU:>FDE+59A
+M:5@@)6N#BG=8*2U9=W)@24EH)6=R<G!@:WAF.SM99X)]<V-:8#E$:VDM#A at V
+M9G)C32U%64EB;7II23])8W5Z8F)J:DQI8EI/6F%)35IC;/YB23]J<F9O>')9
+M,BT\-C!R?6MJ9F-B6S\[/T]C1 X.-F-)/TM8369K:W)K8V-R:6-8,A at M3V-R
+M:V-/26MN8UYC9FYN:U at V+4QB:F!I/SYH:5 at V/X"">WMI-CEI22=%:VE)/T^#
+M=V%9:5D_36-G:T4G36Q[8V9J6UEL>GV"<$2"/%D!20%A /YX8$]N<G5V?7)@
+M6$0V<WA)*2M)33!I:F9[=#Q$/RT_9VYS:UIR at VLY.69]/T1>:VMA,"E?@HV 
+M51@@67)C:5E;:2!8:7)W8V-K:4E/:6MU=GQP7U5+/EIH/SE$25IC9F(I+38;
+M/FM]<UL_/V)]>EI,<FYC<GII145P83E)37KK@&,Y6F9>;7UZ:D<_52(;6G9R
+M<G-[>EL^2UM@:EDG#B!)23];:6%:9FYU<FMB9G)S;DDK&"5+;&-92UE\>F-/
+M6V)J;GIO/R4^6&)/144_86E%-D1ZAV]Z=ED^=W T/VE;15DY=7)86FMC:5 #
+M8)([&"U(?8!S=GIG8UIR>F,_.6 !6@%I /YW86)R=FYR=7)Z=TD;;W=@/F!A
+M-B568UMZ?W%C/QLE26MR9EMRAG V-EMR:UMJ<G)H1$1K at XZ#82M%7V)B:6-K
+M:S=%6UMZ<F-B245::V)B9GMX:UYI6&!S:W-K8$]L<V)91" .-G-V<F(Y/FA^
+M at DDP8WIS8V)T.R5H=TU/26G^A',_16->6VUZ6UM)3#=$8VYN=75]BF]%26-K
+M8UIN/!LR+3Q96VA@:7-F>H)K7&=R>VDY&!(\;&A86V%[@F,V5&%-8GM[8#))
+M65E$/$4_258I%2]Z?%MF<&-8>GMA66M/16M93TPY/F!:7U5-14EC*Q at E3WUM
+M;8)N8&!99ED\@BU6 3\!6@#^8F9R=79C:6-:7'I4$EE]6T5;<5E):VA(<GU]
+MB5DK%2E%:VIB=8-O1%I:4'!@6G!R:#\Y:H:&<D4W:6E/2')O<GI)6UM,9G5O
+M6$4^6'!?36)Z at G9G8V-K:F9R<F-)4&-$>W B#BUK<FYJ/BU,>H)-+41Z=UY:
+M9S8 at 16-'65AK_G)V:45,9FM:83),6$L_6FQJ;G5V=8)Z8UE;8V-8>F\M*1 at E
+M/SE-37!R6FMU=6MF:X!R+0X.+5I:16MK=7I:/EEA2TU]?5II<G!I/#99:6A$
+M& X237-%25E%8'9N<VMH6UER at W Y-C!86UDE+3P^8UY!*REB:F)H=69?.4UP
+M6((M/P$R 3  _F%K?&YR:&QP:6MW7A at Y?W _27%I3&IM16I[<))Z5!LK+3E8
+M<7J#8D5/13]::$E-8VE9.6"#BF,V)UEK8$UB7&U\:&EH6#]R:DM9/S!O9TE8
+M>GI]>DQ(:V]B7&IS8S8P)WMU/!@M;'IU9DT[.6=Z1#9)8FMC:V]<545835MG
+M>OYN:W!:24QW;&%+6&->*3E>:V-N=7)]@F]A/EI;8'*#63LE*2DM/DEJ:V-C
+M9G)K<W)Z<RD.%1 at V.RU;4%!K345C:6%9?75%:FYR:#8R6'J)51 at 5&TUS23 V
+M)59[<F-C<&MK;7V$638_65H_("4T15M0;V8T.6AK7FYQ6RTY<W&"1"T!-@$^
+M ,Q@;H)V;FQF>H!V@&,8&UAI-C!:@&]::$U:=U&&BG@\+2T;)6."B%I'6V%%
+M8&M),$5I83];>GYW7BU)8FAA6TQ?<F)O:%D_:V P22T2 TGB66EK=G)C6UYK
+M:$U0<G Y-S9R;FDM)5AZ at W-A3SE:>DP^54Q:;')F66$Y85I(:WIN<FM-/SEA
+M8W!J;VI,,B4Y;VMN<FY]BG)@-D5/37)U7EE)12LK/TM at 6G)Q3UIR at WMQ<#P#
+M(,HO-RU8,")$24U:8W%R?79;:VIJ8"TE.76*<#PT,DUW:308&#YR;F!B;VIR
+M:FV&=$M8:6$V-#0K+4DY3'-5-DQB<7-L841+=X)N/ $; 38 _C=R>G-C34]]
+MAG9]<#PI.5]0)2]Z@&-C6V)H6'IZ=FA>-A at 8/WJ"6C]0=%MW>V M,&%;-D5K
+M;GIX/S]):5I-23EB1&-I3UYC6# V+24I)24^8VQH3W5Z:U%B:%AI8CYA;VYF
+M:DDE/VN*@F-B3%YZ:EA)25YJ;G)>12)>8$QJ<OYF;7=;240V.6Q@:VI-52 ;
+M3&)B<VY\AW)6/S])16)U6VEC63P\65Y).7)W63]K at H9U7%98/RLR559-*1(I
+M-EE915IN?7MK<G)C6B .&TAZ=VEA84UR;%4@(#]B:VAC8F9W<W)U@%M:>W<^
+M/EE%-CX_26YX84E0>GIF6T]I<WV">V$!& $5 ,LM:FYK22<P=XI]?7MI7C9%
+M6"TG:FM@<&=01%EX8U8Y66XT(#]K<CDV.6AH at HIM*39F6C\E3UMR>$DV-F%)
+M.3L=-EA%6&1C<%I at 340$-OXE.6-,/FUU>DU%6T]A33E@<GIJ:F$_.4EU at F99
+M8WN#<DTO.V%T>H-[6"T_6$UR;EI9<FI(22TR86)B3TUP)0XB+31J>G5]>EI;
+M85 at V86M@:V-%239%83\;3&IH-DQR>'5;4&MF/"U/=4\M&!@W9E5%1%!U=FMR
+M<F)@,B K3V^Q:W)R;5M<:TU$/$E:66)H6UQK9V]K<DU%<H%86&QP6DEA:6I]
+M<3Y-<G)F:45;<FEZ:@$K 1T VD5;:FM$&"EK at G5]>V9H-CYI24EI341UB&D^
+M1VMN-!L^?V8_36IH/S8M/DAZAW(_2VEB12 M16.$<%4V13]57BTV,%9N<DUC
+M;T166T]%3&MB3')[359K3U at R+0-R_F%-35@^/VAC8FUK<G=B6'N*;DDE&S!B
+M?8:*:V%8141N>F<_6GI9.2T_5%MB6#EZ12L5%1M$;5Q;>F)>=V V8F!C:V))
+M5#\[65D@(DEI239;<G):36MP239%@$D=("E+;4]524QB<GIN<EY18%Y@:G!B
+M9G)Z8V%C.55 at 2U@^1*-66EEC3&)R:SDM8G-/8GN#:$5F at G)N<CY,8U]$8"TP
+M6V-W=@$M 30 _F9C8VM+("UK>G9P=VQ8*2]O<%II-B)BBGIN;UM8)2MHA7=A
+M8'!J9U4K-C]J<EQ%36]J6S(;&#E\A'<_,#=/838I+41:9CEC9V)J:V,V-EQB
+M26MR:UIB35 at _27IZ@W!-85M%/V!8.6)F>GIH27**=5I$)1LY9'6&>EIA845B
+MAOYZ341J:54V2U1-8&D[<GAF.R Y25M$)V)B47-O-F)P:6)::6E/2V-L-B!8
+M:FE)3V)T345>;D\_6H)6%14M8%8P26%;8%QF;'M-.5!F9FQR:EQ::FMI8#9-
+M=6--/SE%/DE@/TUN:$D[.T at P4'V#<T5:=7)V>V$_6V _3"TE16.":FH!-P$_
+M /YK:W%@-B4M3VM[8TAJ61 at 53()S624;5G5V<H)_51(E:W5S6TUL<FMB83YO
+M;UY/655;>&$R&!4[8GB*:3]/;&E%-#Q68&-I6F)G:G)K3UA<8EMK6EA86TU8
+M-EAN=8AB-&-P6$E//B4P1'J :TE:@G5523Y86%%F;V=%37=6*WW^?')O6D5A
+M84U:3'-R24QK<V-$6F--6%]:8FQJ:B=:;GI).6MS8UYS=$4;3()R3$5::DE+
+M<&Y at 65I]8!(.&#\[("5,:&- at 8%YZ:T\[16-R>G5C7UIC6TL_.75[:UI+6EE+
+M34],:6-I9BT@($5J=GMS8UI(>GUW36-R<VE)25E>@FY[ 5H!8 #^:6-W<#XE
+M-EA-<G%,8D4=&#9R:DDI-EI<6U%UC6X8&#EB:6)C:VIC36DV7&I-8V8Y-EYI
+M,A4E6%!Z?7)C;'IJ<BT^:6]B>F--8VMK8UYB:G)I8SD_6VAB8C8Y16:#8#!B
+M<FML:$DR"1MJB7)).75J/S!A8EE@>W)C3T]J21M<_G5T>V)A:&,_64AN=U at G
+M369 at 6&-;,#!C:G)Z=U8R16)W22U-:VYC;&L\#C9U:CE%6%\P.7)C66QJ<V,T
+M%25)254T26)A1%AL;6YZ8DE-:H:";%M;8TTP+2)B at G)F6%IC:5MA66!A;&(M
+M("!)35MF>W183':"@&-RB'IB6&EI48)/8@%: 6H _EI;>WIO3V%A16-W7F at V
+M(" _<6M6-$EI3S8G67UX12 ;-D]:<G)>6D]O6%9H6%YS65Y:;S\8-FA:;U!<
+M=VYW:W4^-DUO3&YB141B<FIB7U!Z>F ^/W=O:VM8+1L_>' Y8G5D<G),, at D8
+M68)W6SEC8CXV6G!:37IR345K>G!)1.I<=7A,35!P7UI;:W5:-D5 at 6%IJ8S8V
+M3VMU?7=)-C];<EE)25IJ9FI-&S!)<V at Y6&-C53]C13!:<H-S<3PV6#M+86)B
+M6"4B3&MZ at G)I.4A]?6]/6&M:,BTV8WUZ:$0Y8G%L8V9L:G)B+2TT!#^2;GI;
+M8X)V=F-NA7<_/TUN<4DO 44!:@#^,#YR;6UJ>FLY6G=C33\I*3YC6E0M.69A
+M1"DY:F9T-" E+39K>UM$:W)R3&)P86MQ<&)R9RT_=W)C-C1K<VQR<F!86VE)
+M:V9-14QK:F)H169Z3"TM;V)B7F @&"5-8UAN:D]F at ELV&"!@>G=$/UI/,$UI
+M;&A::$PG'5J"<VM)_EER;E at V-F-J8VAR@G _7EI$36MO/BU/=W)N>U at M-EMR
+M4%EC8V)C8408+5MZ=UE:6V-Q8V,_("=0?8-\8U9:.3]S:UI9, X;7'I]@&,_
+M.4AB>EI98&%)+3E1:G)L)QM$>H%J:VQJ=6 at _1%98,"TE16);9WMS9EYR?7))
+M6%M<=8)W6 $Y 6  _B4M1450;8!O.4UR;%M9/RTV1#\_+3YK=U at _27!B:UXR
+M("LV:H)I26MZ=5E@<VQC:VE-4&A/67. :T0M37)K:G)O<GI[8FIJ7U at Y6F%B
+M;TA(=38.&SEJ6%M;1&YF)25BBG(_:H5Z<#8M67I[8$5;8#8V1&)W<EXM%1 at Y
+M:UIC6^QR;FMB53]-:VIB<GIZ26-C1$]B324E27-R:WMP.R!):4188V1K8F%I
+M/"E-<WMP:V%B:W-S63PI+6N">VA18DLY8V%%3S<.#C9PBH9J6UXP57IK:&-P
+M83LP24106AL.'6*#>FI<:G5<26%I:40#/Y$Y.7!R>EM(<WUF4')[8EQK;P%-
+M 5@ _D4V/#1)8F9R6&QN:V%@84MA24E4(!U%>&)),')_:VE5*RU)8WIZ>'M[
+M<$E-<VMI44U9,#!8;W9[<FA5-T]A1&)D>H6#<F9Q:F-/6"]%;V)B:U0P("!B
+M7UXV.7MB&Q4W=6]':&AU?4DM35QV at 3E;:V-5+3=R at V,_1$196V!C:OYU:F);
+M8FM19FI:7G)]<G-C36QA'1 at 8-G%<4'IV4$EO=UA-86I<67)S8RU$9FYJ>X!R
+M8V9[<EA$15AN>UM/:&):8UI83S0@)R\Y=HEP<&D_3&M;:VM[@6D_24E+/A at .
+M&S]N<G)/8W9 at 26!K8F%A8VDR'45F=TU-<G)$076*@%N"/UH!8@%P /Y9-D4_
+M6F],66)Z;E!J<5MB>F)C9#0=-G=O339O@&YC:4E$/U!C<G6"<EA)36M[:U at _
+M53 E6&YV<FIK6EAA12=84&Z AGUN@'MN:V@[-EY9:G=C8402:UY)&RUC- at D.
+M'6IZ8W!$68A+/%M:<()93&Z"=U at R3'ER<&E)/CEA<&;^<$=B<5M<8V];1#]>
+M=89Z:S]F6 X5*S]J6D1K:UQ(>H)R8&]P.3)K<FI$,%AB6G:#>V-:>GIK85A8
+M7F(Y1&-J:F%6:VD^*418-DQK8VMO:6IA86Q1<H)Z:$UB:5 at T-#)):%I:6U%N
+M<&%:6$1-6VN!9C(I8W(Y17IH-F&#@))W at C P 3\!:P#^/S958&ER9C]$=VM$
+M7&DM,'=\6U at V*TN#=U at Y<WI>6EIH<4E%:7)N?7-%63E(?7-P83X@&TEZ at W);
+M7T5;>& Y6&);=7A]9G9]?75R9SY)-&-R8F%$%6]@2QL_:$0G'1MBBGU[85B"
+M/S9A<'J#22<R>HIN845<<G5W6CLE1'):_F,O3(!S8V)K6D0R,&9\;FIB8UDK
+M*4EP:VE98$EK6W9U>FMZ>TDY8UM)-BU)6$US?7UR:FIB7&E>?&Q9.45-67)6
+M.6)022U9:V9-3$5C8UXV:&M6159)+T5K:FM;655)3VMP.3\_2&ML6UD_+2(P
+M<GMA-CE)*3!R22)BB&YT at X)C.P$E 6  _C\\7V-S<VE)8'QS:F)@)1A,B&%%
+M(!LY>H=@,&IR85I%6X%/.6MR<X>#8E\W27!F<&$I#@XM8GIZ8V!%8X)K3V-R
+M9E-<@EQB9GIU?'I90BU;<EIH8#9W6TP^8(" :R4)-GI]=G)B at UDP.6IZ:S8;
+M)5YZ:F(V27MZ<DU+16!J1?Y8.3EZ?7)B33E)52U-=6IC6UE-+2U%6V-O8TDM
+M8VMV>GIJ;GM:67)C-BDP,D0_8W)]@G9S:UA at 47)R6#9$545R:$5-/CDM37)S
+M8T5$6TTP-EQP;%H_.39%<F]N6UM925A0=3XE-C9C:UI89BD.$CER=G%)/" M
+M:382.6IZ?8:"<E@!,@%@ /Y816E;:X%/.6*#=GMI6#8\6X-P-A at 8-GN-;#]A
+M:%I,7V-K6#]K:F:*CW)I/V!A2'%A*148+4UJ:VMH.4U[<%A-:F9 at 375K8EQF
+M6X*"<%I);V],36A>8EL^86QN?'<\("U:>GIF8G=Q351L<V at M&"!)66-)&RUI
+M=FM-6&-R:DW^;3D=4&UR<$D at +5@M-FIN<UM at 8#E>85I-7&A>+5AR;FV"9&9Z
+M8#EF<CXM,BTM.SE1>X9R<GIB8VEB8TDI67%/9W)L:TD_/SEB<FM-5F Y(#Y$
+M8UYB124_169U>F);639)6VMA-TMA<W=K7&D[&!4E6X.1;%88+6%9*SYCBHV*
+M at GML 4T!8@#^:5ML36:#2S9J at VY]<UA)6G=[<$DK*3]Z?6M;84U at 16)J:DDT
+M8VA/>HUV:#E,1#!I8T0@(#]I<TU(<V9%8W=),$Q;<GIV=6YK6DUR<F]B9')G
+M.5A:8DU)(C]O<F9W3S9+36V#<F9M=TTY:7)H/"D8,C]-/QLE66-;84UK<FI-
+M_F98-%919W-I54M-+25B;GIL8EHY:V-?241 at 824Y8G)U at EY0;F,M16))2S8T
+M("D;)V)]<F)U<VMR:T4V)6B"4$UB at W)A86-<6G)R6F-R7C]?6&MC6S\@+3];
+MAH9R3&-)16]R8S]/:V)K;FMK83P8&T5ZDH)A)2UH<%E)8H.%AH)]@0%A 5L 
+M_EM;<'-Z=TE9>H)N?'9 at 7EEJ<FY06#Q%8VM-6EHY6&%B9G!+'4UH.6:1 at F M
+M+55)86MF-B V8W<V)5IO6UMP6# ^5FIZ>G)F:F)-8F)@6F)J21M)84QR<D0E
+M8GMR<DTV/RTO:G)<8')L8&A:8V9F1"U)85\\+4E)15A(:V9>8_UB6EEK8&-F
+M<W)T:#8 at 3VQ[=6M-.6ID3S8M.6$_-#!9?8)N:W!C2TE;36%+1"TI("!-8&MH
+M:FQS>FLM%25C>FI)17-R8V-J:EEH9FMK<FI at 8EIK<VQ).RT=.8:"@EM;.2UG
+M;%D_3ULY.4UB:VM>&!(M7&YZ;#]):FYI8$UF>@-] 7(!80"O3TQO>G]@+3M>
+M at GUN:V);7EIJ;%M at 6D]86DE,8#X^6W)L9E45+5PY6H.*<"4;2W #=_YC/#=C
+M>EDT/DU;:6]A23966FMK1"TY6H**@FA823\M-G!P.6B*:!4^:EQO6#Y5("!$
+M6#E8<GI[<F)L:6II-CE-3S\[.3 V/D1L:UQW=FI;4$AH8F-L?6Y+("U@>GIJ
+M34]R<F@\("E86UDR.7*">G)P9F!-7UIK:4DM,CD_:6++8G)I6G)U:"TE16-F
+M>V%)36--6'!M6UI$:GMP3TUB:UMB<F!5630M?69J<G [&SYJ:UAB6S(R-CEB
+M:44@&"5)6')J14UC8EM8/U%S WT!>@%J )!;1&%T;#XE*3!U at F!C:45F!&N:
+M<7)K7#D_6&X^+3]::TA5-#0_-D5F<'<E&TL#<OYI858Y8X.!<%\^375Z:ELI
+M25I89BD at 66I]A9)Z238@&"5R=UA;BG<I/V%,:VU883P\6%@V,%QZBGIZ<F-K
+M:F-C6UDV/DD[-BT^8FM9@(:#=%@^6%I:<H)N8T0;28"";$5;;FYH12DI25YR
+M2S]P=G)N;&M:2$Q$8FM@/S9)6FN$8U!B; -BR&-4/FAB7GIW6#\[+4UW9G-K
+M3V=V@&!/9G)K6G)R36!A7X)8,EIB22 M:X)P9F<^+38=254E.40_64]P8C8P
+M6EDM-EA at 35YU?0&# 74 L'MC26%+&Q at I/WAM6F9M+4ES<VQI:VYL6C8V27!8
+M/S9%6D589FE$-D5B8U8@&S]S7 at -;_F V-F:$>W=A6W5[8UHM36)621(837)F
+M=9**9RD.#A50 at G)B=8!A8ULO3VIK8SE8;& \&S!S at VYV>FIK:V-J:V-8/UA8
+M53]5:$TO<H6&>F%913E$;'IU8DD2+6:":C]B<FY at .S(T/TA<33!C9FI:4&]%
+M-DE+84]:6#];8&!A5LU9:EM)35AB8VA:85IM=TD@%3EO.6)R9FEF<F-I;&MK
+M:G)S33]%<(IK-S8I(!4 at 37*"<FIF3U0[1$LW/TMA8UEB83\G1"T.($58+3!;
+M:P%] 8( SY)[86 _&!45+7%:3&9W23YI>G)O<EQC33LM-F-I;F%C6C\Y6FI9
+M/VV ARD8)3]1.2<V8W!)(#!R8F9O8VYZ>W<^8F]6*14 at 1')96GI]=S0##OXV
+M<G%R>FMB<V,^.5J :"U,=V)$&!5%<W)96UEK:U%$:U!)/CY58UI0;ELM:HA\
+M7EMJ6!LB7V!J8E at K($1Y;EM>>FMI23([.T1:6EA>6FI:,%@V*39%;U]$2TE-
+M34E,34]Q2RTV84UK8V-P/U%P+0X.+6 V.4QB:6E-16MH16VN;6IZ:SE:=V--
+M/RDI/F-:5"TY<FQR=8-X3TE823E;:5AA84E)7BL.&RU8024M, %K 8H _HV"
+M<F=9*Q at .(%MP:%AK83]-<7IZ at GM;6D0R-EAJ;W!R<&%$8W)83W**D#8T27%A
+M-A@;8&Q$(#)I8#]:3411;H,Y8G)C,B Y37IF86EN<V8P#@XM:VUU at G)9:FI8
+M-F%]:T1$@&])'14E3'IK6DUB:TTY;TPP)S)):VA(:FLV3?Z#=4U)6VLV-E]B
+M6TAC;38V:69K;GIB7&Y+/C9%8V9S;U" >F-N9BDV17)825MQ:$D^23XG7T0M
+M+4E%8FQC6EAP<40@'31>8%M;8EII-BU68#9%4&V ;SE-<FQ;63\M-D0_/RT^
+M8EYN=9"";G!I6#E/6$E;8#E)=U at K*TMI:3R"& X!/P&" /Y]=7%H9C\K&"E)
+M9F]9.458/EIF<G:*<&!>/S9)8FIR<WIW6&IK6FAC=8-/65][=$0I+5MH/R R
+M6G=;6UXV,$AF6%IJ;$\V1$ES<V):;'IJ9BL2+6)Z;7EZ:VIO/R)@>EQ>,')J
+M5C)!/"=B:V!-8&]:16I:+2 V3W=[:&!:)S#^<G!$25AC6EIR<FLW6WLV)U4Y
+M4%!J:UMK8DTV15I<<H!:9GUG;F at _-D5Z6C8W;H!P25E$,EA$-UE)1%EZ=T5%
+M8G):1#L_6%M:8W-:3#]$6&8M-$EB9FI;8VYK86!A2V%)250@'5A/:G"&A&MV
+MA&Q)65 at _14T_27IH-BU%<H!5@@X. 3\!=0#^>VYL:'%F1"DK6&-C<"4=1$E)
+M8FA0;FQJ84D_65IB>&YZAW!L;$]H;'=^6V$Y6W=I2V%P=UDI-D5R<F-I7E5)
+M8'!O6DU-6FI::FI%-D]Z>FI5)3(Y:G]B8GIF;T0I/VIB5C]K>F$M1%8^6FEC
+M35AZ8$UC32TI/TAQ at G=F8395_G)N/T1 at 6UA08F9R:&.#7BU)2UL_.7%R:UI9
+M6$U-.4V";EIF;EYC6$0G;F V,EAW:V)B6V!H3#YI83XT8FI:3TQJ345)/TU%
+M36!S<DQ6/DE;5C]:;UE:8G)K4&IQ6V)Z8F-D-!TY)TE>>H--4XJ"8F- at 23L_
+M26F#;$DE,&M[/((.#@%6 74 _GIK6$5B>%H[/UAC86M>54M at 8$AO6#]-<&$\
+M*6EF7EIF at WUN8W=J8EEK at F-F,C96:6F!<G)I25A+8H!L8V)I8W%K;G-))U9Z
+M:E9;1"4V<GU:5"TV6&J 8EMN3UMB7DEK<EE/<GIK7B<P-D]H8TE)>G)R:C8;
+M&S8V8HIZ7FEA8_Y_=T]@6F%835I>:G)DA75:6VEO1")$<7-:15E-839$>H)B
+M4$U at 6TUN7EI--BU;?V);645%6EDP2VQ>-CE$86A-35A+6$E8-C!):GIR:DL_
+M6%A@:7)A35IR:T1<:2TP=WQ;6#8K52 @5GIZ14U]>G)R;6DM545K at VM)&SYX
+M>DF"&!@!1 %J /Y[;$0;,'-P64LP6W=K:7%@369)36 at _-F%$)2U965 at _6W)K
+M:&%J>G P.7-W<$LM.5A;9W)C3UMI/V)Z<EH^3%YR35""9R55=W!%65@\27![
+M;UXR)3EB at G!J9F)07FM)6G5C8GIN8W<V/#9)<FM-5G)Z?7I>*R M.UIV at F9K
+M;E[^8D at P8TUC8%M/6F)J<FYZ;F)S<VQ)169[6DE$16I/:6IZ;F,Y26A%7GMZ
+M<3\;-GIC8%@[)3!H23!R>FE++3Y86#Y$538P<CXR-F%B;(!81#])8W-S:4M@
+M>'-J8EDM)4R(844@*40I&#ER<F9R>FM-7G-P&RTP8X)K1!A)@XIQ at EA% 4D!
+M7P#^<G R$AU9:F-A1#EB8G=O5DE:6#E)8&%>,A at M8%@^/UA at -D5G6G)P/"5'
+M at W)P2S98<&-A84E09C]:8E8Y*2E/6TU;>G<V)V!C,$M?65IS=6I:12TE+V9]
+M<FUK3V)B8%IK<'=S7&MU1%Y$16MR1$1F=76&8S\_.UEK<GUR;GUI_D0V-D]5
+M14U:8V)B:&!K<FI(<FYJ8$U:;FIP5CE)6W)R>G)A.4EO63]:>H)M($AS9D5B
+M<$4Y6%@[>XIZ:UY51$LM)2T2&V)@-AM$23]W8UE)/UMK at 5@^8H-V>VE8-CQA
+M=VD\("E4,B4M3VIF at 8-P.3YR>BL8&UM_6T0I)V*(@X*"@ %/ 4@ DFIK+2<E
+M-C!%8$D;&QUU=6-A8P-9_FAW:"D at -&-O8$18/!@E2UEP:U at E-G-S>W _5G)\
+M:4U:344_:6 ^)14I25@^1&IZ<D1).398:%IC<G)W;ED_+2UC;FYB8UMH6F-P
+M:FIU:T]::#YI>%IO>CXM8FI1=4PV6#8_8&YZ?6YX<D5+:6YA,$5/25]M>FMJ
+M<FIA:W)J8NM at 6"]$=8=8+4]U at GUZ6CY-<F(_-FMZ:R4E6F];6VM8.4E%67V-
+M>F)W<&)I1"D2(#Q9;U0;-FE;:VQC6#E-9H-+-FJ#;GUS64E8<'-K23(T854W
+M/$U96GJ$<S8 at 4'5$& XY:E at V1"DG4'*2D@%L 5$ BE!S6%Y>52DM6#L##OYJ
+M<FYZ at 6$_.6)08#LI+41J=V- at 1"T\56AW<EXE-VMR?7)A8UMF>V);/RTP<'55
+M& X;1$DM&SYN at G]4'3Q-8EYR:V9Z>G)L2S9B;G),1#9I8$1H;H)Z4%E)+2=I
+MB&YU at DD26G)>3"<E23LR14UF>VUN<CDM8G=8*2TV&SYK?7KS17-K8EQN<FAC
+M5BDM1WIR+4QM?7IR345;>F-+/V-U84$T/DU;:6]A23DG3W6*=EQV<F-S:54K
+M,E9B;V%$6'IR:5M-+1MS>G=)67)];GQV8%Y9:FYK6E@\<G=>16-B66Y[<$DI
+M.6A))PXV:6$M1#P5&T1]D@%[ 4\ _DUL<%MA:5Q)83\@'15C8F-Z at G!826U-
+M/RT\1"U(:W-R63]86%YS>V,V55YZ at FM;<5M$<HAZ83\M<H%8$@X5/F$K("5B
+MA7UH+39,66-L=VQO<75Z8EAH9F-@/R!%:#E%6H*"1#X\& E$>GI]@S8E6WIR
+M6"4 at 1%A6/CM:=5Q9?_Y+*45P+5E$( XV<W9R/VAP8EIK;F-96EY>36IB+6=W
+M>G5B1$UH:DU$86]R6E9P7SY-=7IJ6S8R/VI]?6MM<F)[<F8\-DE9:3\V6F)R
+M>G!)*R!Z?V P/UYU=6YK8EM>6FIL6V!:.6AN26)Z:V9Z>FA)/UE)*0XE6FDV
+M-CX5&"6"67T!@@%; /YC>G=A3V-O8VE%+3(8/SY0<H!S:VER:T0E/V8_.6!R
+M=G!96DU-8W)I87$_7'J 47)N/TAZA'!-176"81T.&"U)2S8V36U39VE)8%QK
+M7&MS<G)N;EI0:EI)85XE-FA at 15A[@F at V&R K,&)K>H(V-FN">F,[%399<%I)
+M87)C1'+>8S8^:#)[<"(.+6MR;F!H:UMC:VUI2#YC>G)@/B5B=7UR33)%<&L^
+M-F)Z>V!)>W=A6W5[8UHY9DE,;H)U:F)(:VY;65E;8%@V+3DP3&MR8UY5=&P^
+M)2U):G)J8VA%9 at 1KGG%R&T5H,#EZ=5QN?7)B24]))0X826$M(AL;.458;@&#
+M 7, _G)Z at GAH8W-S?&A81"D@/DU-;7IR>GIR6#=)<V$V6')V>G!@255%3$U-
+M<E at Y7(!W<G=))5!\<#D_=8-X1#1$-C]@/SYH8R=(;TEK<F-<8UYN<FM:25EC
+M:4MB;T(I/E8V1'J$<D0;6%DG.4UF at E4P8X)]:DD2&SY[;V%C<F]-6_Y;/DEH
+M9GMU/!@M;'IU:FQK8VAR9F)))TQ]>W V+6)V at GM6.T1B;T0V3')R:%AB9F]C
+M;GI[=R)86"U9=72!:UIK8C):8VIK6$E<6$M88$Q9<'AA2QL8*3]C:&)F;2U)
+M<W-L:6M at -C)F+1U9:EIB=G-I6%E4*1 at K87)A1" ;/VF"<FL!=@%W /YR=GI[
+M<F9R=H-R<G0[&#9)35MK<GJ#<F!+6&-9-CMB<GU[85E9-BTM,&);/SEB=UEI
+M5"!$<G-,/VJ">F%9>&99:$E%:&))6&!%8VMB6EM;;GIR:DD^3&E@:7MH.R<^
+M+2UPBH))(DU9)2U)87)B/F-]>FYH*14T8F9K;G5U:%O^8%98:7-R;FDM)5AZ
+M?5Q-:V: ?6I:239-:W)R251B:WV";VAK8VM-241K8EI)8#]:3411;H,[26$V
+M2V]J<F)8;&(P16)F<F--84U::5DG)UE[8#\8%14V86!-9G=)/FEZ<G)O6C8[
+M<%D_6'!H:7!K:UMI83PR/VAW:V-;&RUC at G)S 6P!8P#^67)J<W);:W=[9FV(
+M62 [-DU,36MN=FYL85M;3S\W16J*>D]-8E4R+55Q3S<I.6]@84L;)VM[<$EO
+M at FQ6-&-Q:W)H6%I(8&1,8')<:FMC7F9Z<VI$(#EA.6)Z<F-)-B 83(^*<%E+
+M84D_36-H6C]-<FIR<$DG.V9'66]Z=G5L_F-A9FEK;F9J225):W9:-D10=8)R
+M6TU98&-G<EMI8F!B?&EMB'IR6F-S:6%)-G=;6UXV,$AF:6AI;&IR;&M-3&)P
+M644Y1')W8F-%.5@^& X=1&=9*Q at .(%AO:%AK83]-:G9V<F,M*6MC3U%U=GM\
+M=W)K=V!%6&!98FA<;2T;28)J>@%O 4@ F4MB34QR8DAG<G![>FM5/!L@(#]A
+M2#E%<F,#6/X_,ATY:X!8,#]N83=C>D]$/TEC>WU9(!M:>G=-<HES8#8Y8VMB
+M;V):159 at .5J":G-L9FM$8VQA/AL_6"5,9&UR:4D@#BUY=79Q.45S<&)B9T5G
+M3UEB;7)926!R7CQ)6#E'>G)K<FQC<FIF841%6FMC/C ^8G%U8EE;3U9-8FSD
+M8F-:<'MK6VYP=3E, at F%8.5AR<F-I7E5-6UI%;GIV;G)B/RU)@')8.QLY?7)L
+M6DE9+2 G%1MH9C\K&"E)8FM at 2458/EIF<G)P*QM66"4;.5]R?(!K;G,_,&-R
+M6TL^,%@^+2T^:@%W 5@ R6]J245K<$U/:VMN:6!B9CPP&S]W32TP8$LV16MI
+M/R4E16]I2S!F>&EL=FE815MG>F]%.T1I<FM-<HI[8E at _/U at O35I-86-I6DP$
+M<OY<:TUC:V!5,C9>+6!;8FIL21 at .(EI-6U4=/WI\9EI at 6& Y+5IZ<EMA=WIC
+M6UY$(BE'8UIJ8TU85E9C8$U:8FM;.41H<G)J<&)91#Y9:V-:3&YU<FM%6G5N
+M8'5%-D5;8H!L8V)A8VE+/UI]>F-I6CLE27=U;VXV+6]K;%M0;42S)T0P&VAQ
+M9D0I*TEA8V!)/D5)26)C8F-$.UAC& D8+4UN?WIN8TE+9VI><%@\1$M5/"5%
+M 7(!8@#!<VM,87=L86%X<45%34]C2UM537IA-DE9+2 V at X%M530V6%E96')X
+M=VYK<%H_6UMM9RTM5G!R32=X>G=:3&AA338#+?YA:G![:4UB;GMR;%IJ>FM@
+M/S]926!C:VMJ21@),EXM+2 )/W)M6DU:6%@M&T5R9DUC<H)<2&@^("TY245/
+M3"U%8FYF6#8M-F)C238_3&-Z=7=C+1LV:&(E37=:8'<Y1&"#B6XV+6%C8GIR
+M6CY,6VI-/V.$@FM/138V6F9K=W*\.39P<F]I67IU2UY>.T5B>%H[/UA at 8V->
+M6$M at 8$AI6TU915EP.QT=(#),<HA\3TEW>G)R>FE5/SYI:#Y8 5@!4 #C<$4P
+M8X!B6GN*@UD^25AA15AA3&MI6%A$%0X;@8J"=V$_-BDV88B"=7)F:T\^83]-
+M<$DM/VIO-AMP at WA-.6ER6#8@&RE)3%IZA5H_7H-Z<F)K>G5K3$551%A-8')Z
+M:%XR/V at E PG^(F)_8$5835 at R*41J2%A:8H)O6EH^-C])24U/22DR3VMO8$0R
+M-$U-13P;&S]Z=7IR1!@@6V @17)@6F at Y16!V>V(E+6A96F)6.2DI3ULY6'"#
+MA8);26!88&!><V@^.6MK6UIP<GI\<G!?&S!S<%E+.5MK:VEQ8$UF24UH/VM)
+MFD5K7BDK)RTP6X-Z5EES;G)N at WM@/R=J<TE% 44!80#C<40;.7)I37*-BF]6
+M7$E::5E,6&!L<5M5)PX@>H:*@W=9*0X5.8*,A'UK8$M9639);&A987%O2R5O
+M>G-96&-F8#8@&"!")TENA8IK<GUZ9F)L>F9Z>F P)UM3'4UZ8')N@&@@ Q7^
+M(#9K@&- at 25E)-DE?1$M,68-P;&M)56%C8TD_:5P\1&-Z<F!)/S8R)2D.#B)0
+M>H)Z8$$[4& V8'=C6F V)UIU<ED;*59):6 ^)14I25 at M37=;9(A,/G-Z:&%1
+M:FE86FZ"8#E:;FMS9E\Y$AU9:F-A1#]/8')O5DE:6#E)8')LFF%J<3(I*38\
+M6WIR3&ER<&AB=HIZ6#9C=V _ 2T!7@#^<5@;(&)P.4^&CG),14M:=VE+14]I
+M:U9-/!@86G)U?8%F( D)(FN,A8-R8%EA8F!;8TU-;(*"8T5H8F)I<VMC6#08
+M%2<M%4E;9H!UA7UQ9F9[<EEI at G<^(%9()4EJ2&-;>G R,#P[*R5->V)86$E@
+M241-15E%18B*;F-(25MRX')@.5!I345H<G)B8ED^(!@5%1TR1%IZB%DV8&M-
+M6&N">FI98#9,;FMI/UYH37!U51 at .&T1)/EEJ,#=O/B)CBGQU8&-K13M$>G(V
+M27-K:UH_+2LE-C!%8$0E)39J=6-A8P-9G6AJ<G!S?6E!)2TY8WMR8&%9445%
+M6GV#:EAB<G!+ 2T!3P#^<VT;&$5H85ERBGI8/$E$:X)C,$]Q8TU)/S0R.5A>
+M9G-@& D))7"*?'UZ245C9FIJ:38M7H*&:T5)/TMF>FM:6#P8)S @#BDV/DU;
+MB'IB7&MR<F]N<FM at 6$];:&)J8G!J:EQM/S]8,B!-@&I-/S!85C8M/V Y)W**
+M>G)8.3]L]&UR;DE;645::FM[<FE)-#DP(#150D5RAT0B6'I%6FAN<GIM:FAC
+M8DAO15AT8'*!6!(.%3YA/V)F53E>1!4Y>H""<6-8+14;36\^88*"<V [*T19
+M52DM6#L@("U:<FYZ at 6$_.6);8V)R?75>,B =2'*">G-F S:)1')[:V-B8VQ%
+M 3P!3P#^:VI>03]97V. ?6YI85M84&II-DUI6DU$2UE925E:8G)N-C0@)6*#
+M at G)W6#Y/345:<"T;67U];TTR)39J at W=),DD8&" 8%2 V1!LM>WI96G!/9GQU
+M63]B>UA-<UI::G*$>EIJ53Y:/!LP<FIJ.R5$6"T;-FAA/EIR7'IH/C9-_D]J
+M<FMI6C\Y35Q[A7IB5EE$&R5).4MZB#D;26M-6DQ-3&I;<G:$:C]B/RU,<G6"
+M81T.&"U)-FUW:UI)6#8_8VU]?8-K*148-D158GQ]>V]$+5EA:%Q)83\I("E)
+M6V-Z at G!826UP33ENAVUB/SP at +41NAH-W8&$^-EEB15IO8X)@-@%) 6D _DUB
+M=W%)13YB>G5K<G=C:V)O:%E at 6SXM/%E-6C V8&%F<#]+1"5$<G)F=&EI6#\G
+M/EX;&#EJ<G!>540V8XB 83]9/#)$1#0T25P2(&IW2&)]6F)Z<F@^068^1']H
+M/TU9;8*";UQ)6$4@*6!:9CXI25@@$BE(;'!><DAQ<D5)6-0_3%EJ=V(_-CE-
+M9G5U9W!O:#0;-BU)=H9A*3Y;5F-;85MB3'=]AGM;6E@[.6IU at WA$-$0V/QM,
+M>G5O1%9%6'!H4%R*BF9%,B4E6&=M9G=V6#9;6V,#:*E)-#(I/CY0<H!S:VER
+M;V _7G]:6F!8654^36Z$<V=W8$MA6T]><FI-&P$^ 5H _CE%<G];/B))8E!:
+M<GIW<F9S8UIH8"T;+5E at 83XM.6EK;$M87C9):V$Y7'IU8EDV/E@\-C]-6EY9
+M8& P2&YR8G=C66!J;VE?:E8.)5]R2T=R8V-J;7-W6U@;+7=Z8EIB9FJ,>EI8
+M7DE526-P;S8\7F$T&"4B2&I-:U!G<E]B8OYA6#Y9<VD^-DEG3TAB7F]B:4DK
+M)2 V:G5R84E at 8&-;6UA,.7*"@G);8V-F6&E;:5]6854\*RDM3'IZ6E at M1')R
+M,#!R?')K8"4 at 6&IO4'*#<UAS:6EL<G-H6$0I(#Y-36UZ<GIZ8F-C;&]%.596
+M8VE;6$Q[<V. @5M98UIK>G*"62 !+0$^ /Y$-TQR840@)TDY.5IR:FUV>W)>
+M:VA5,BT_6FIA,B5-<VMA86E@:'A@(#")>F))6&!@:7%I24M;2V-H,$ER63)R
+M<EYJ:6-[8#9((B),<G):25MP.45;6FM:36-I8G)K<FI%.45B9W5;8H:":S\M
+M-F!;238[/$]:6FM$26)C:F+^:G)/16)C/R4V8T\V-D5?6F--1"D5,F)J7&A$
+M6G)J8D1823!J=7I[4%!C:&MK:FI?;W->/UYM.QU@?7<_&S9B9C8V<&9B6' V
+M(#E?9S9-<GYZ>W)F:W-[<G)T.Q at V24U;:W)Z at V)K;'-I538M6'!W<V%-<G-6
+M<H5K6V!,:VQG at FE5 38!.P#^;V),8V]@7CXV2UA96TUK=7Q[:VEJ<&E>26)<
+M8S8I-F!:8FAQ9FZ"<3LM at 8-R3T5,8&IZ=TU-;UMJ:4]:>F8M3&MC8G)R=6MA
+M7C0T-D]O:EI,=TPM36)B6E%B<TU%2&IR8#];6TUB1#EMBH!--RU/6$D^25MP
+M8V: :#\Y8X)8_EIF:VMJ8E$E-F)I22TV6&%H6%@T$BU86E9,-C!M<V\V-EA@
+M:F)R?69::$Q;8F9N:%MR:#!%<F88+6J(/Q4I25E98X%S:$A9/BDR25 at P16)F
+M;G-R7&MW>V9MB%D at .S9-3$UK;G9P:W)R8V$[(CEB=8-R6%A[:6]R:G=J:W!;
+M8()B8@%9 5H _G5Z:%QR7FA>,%N!>V$P6FYZ?6)/36N"=UMJ<DLW/%5>.41K
+M<V)>>W=A17*#=6$^/EA:;G=-36);9UM)269Q7D5(8EIB<FMB8%%?6$DY7')R
+M6FI8)3!B:&-I8VQ913!-7()N<D\V6$U$:W6":#<[65M)/CE)<&='>G-)'5N*
+M8OY$1V9Z;6MC+3]B<7)8-E5(6FMK5B R9FMH6"TI6WMW22U%:VA$8GIN;&)@
+M35I-8V]$4&\V-F]M&Q)$=4DR56!H5F-R>H)R6$DM/TU8-D5;3%!9<F-/9W)P
+M>WIK53P;(" _84 at Y8UMW>F)A52DM-F*%@VEB>G5V:UIC9G-P6VR":$0!80%C
+M /YB9FYR<EI821LY at X9K6&%J;FYB23Y:=WIR<GM$+55 at 7C\^8WIP6W)R8V-K
+M>W)B/UAA36IP36!;8&E;8&%>:V-A16!).4]R8U8^6&!%+45J=7)G6S\R1&MF
+M8F9N8E8^-D5]?79H14]C:WAR?7A)-FAP34E)1&EO.6IZ-QM)@EO^6D]-:DQ9
+M>S _:WQZ:S!>/B=:;7(M-FEM;VA)/$UZ@&D\/&%C24AK8UY$:6%)*V-G/D5O
+M24EK9AL8/W5;36EP;TA%.4=ZA7IO/TE6/S(Y6EMI36ML34]K:VYI8&)F/# ;
+M/W=-+3\V8W)-15E%*1 at P;H)L:WIN>G);6$QH:6%K at G V 3X!30"O6F-!8WQ-
+M-BD8,'* 9FMK=7)B9V%)65E<<FI at 7B5"8%A)/TQU;EIC8$1K8W)J;V #8_YZ
+M<V!C8VAC8W)[;FE-8FEA22TE<FM-)S]U6"TM3&YZ8EI-3S]96D5$;&)9/S8V
+M6FYF8EM,8FYN at GV":TUB<F):8%EB:#]/>U\G8')':F)%6#8^;CLP7(*";#!H
+M1!@E38!),&IR8EQ at 33]B<G!8/T1H8U!B:V at V26E>.5E;24G.:%IG<FM$-EIK
+M:6QC:G)B9BTB6FQ]>F!B6"TI-EIF>F-Q:V%A>'%%14U/8TM;54UZ838M)4E;
+M/S<_2S8.&$5B15MR9GIR:&);66%A8W)H 3X!- #^,#X8*W=C/" 8&U![8V)B
+M=7IK;VM-/S]86SXP8"DK/SX_-CEJ<TQ8:$]@86EI8F)H.4V">EE(8V]R<GIV
+M;7A:45E/640\@GI@)2=G8"T;-GJ"/SE)6%A::%8Y34QA6$1)1%A/1TDR3TQB
+M>F1N<EEC7&MC3&N#7"=)<G);<&A)_F)<35@^/V@[)41V?6-6=UD2&$1Z<EEB
+M;V-%658^.4QA8# =1&-J8G)W-CEC>&A)-DM at 6EIR>FQR84U83&=937)R<V0V
+M6$U0=75Z;SPR+45N<VAW8F)[BH-9/DE8845884QK:5 at V("U923]%13L8&$M:
+M+3!(15!K:UY;2&!N:X)R<P%W 3\ _C\@%2UK=UA$'0XG:W)I6F=W=75Z6CY)
+M:F]F6T\R*T1+539)8W)%/F9W85A;<FMB/Q at E;W(_)4UK:WR$<V!Z<EI)14E)
+M1&YZ;SDM1388%2E<?4DM25E?8V9C8UDP:7I:34UB:5M@/S\V3'-07%Q-13!-
+M.2=BBF(;-DQ-:VY-5OY-1V%;13]@/C);=6T^;WIJ*1(E4'J 8F-I/SE-8%E9
+M8FM$.S]::V)B<C\V2'MR21M$<%E%<GIF<FI/6$E56#E0>GIR:6):669Z>FI8
+M1"\V8W)@:&-1<HV*;U9<25II64Q88&QQ22 @/V!:8&8_&QM9:4D[24D^6V-:
+M6T5'9UR"<6<!>@%\ /YI/"LV86M95C(8)4]K:V%:8FY^@%I)2VY[>H!C539%
+M86%886EP539::V-816-S;S88)6%O/!4I/SEF at H-]<G-F6%A-3UAJ>GIK85@;
+M#B=<:W5H/EYB;W)K7F96&T1U=69/4%M%.4E-,$1J:4\Y22T5*1 at 5.75W-C\^
+M)T=01%;^34UC<%A)3SE88G);'6M];E at I("UGBG9L:38E-FA::')Z;V=86VUW
+M<F,_66%W<EXE-F-C8'J":V9H:6!-8&E/2')Z:WIR25MS>W5R6$E)-EMI6%MC
+M35F&CG),14M:=VE+14]I:VL[("5%6&)]62LE:7!+6#\V.6EH14Q%1$Q8 at FE;
+M 6,!@@"]>F at R+4U:6T]5-#0^:6]B6DU9<GM9,BUB at GJ#7F)M/C9%65MI:V!/
+M8VIB:3])8W-I141C<%XR&R4E.5IF>P-C UO^66!K?7UZ<V(I&"E::FI:1$5,
+M;H-Z:EM-1$]R?6YR83DP-CY6/E9::FA8654;'1T at +4QH24]A6%A)-D1-36)R
+M:VQH-CY845DM:GIF33]$/T5R<FMH)1 at E6$UB:F9R<F-O;7)R63]W<FIN8U4\
+M/DEB>HZ*=6MS<%M):'1,:&UCO7)[6REC at WIR63E>56!H6UM at 65ARBGI8/$E$
+M:X)C,$]Q8WII1"D;-FEW:3\E16!%:5D\-FAK5D5622]%:UH!1 %J /YJ;T5$
+M8%IB6&$\*R4V:&E;84]C:DD;$CER>HA!37IN+2TV16EI35AI:W-R251;7G-K
+M5DAP<6!!-#])3V)Q;%YC85M;8%ERA(1U<F(V+51P>G-B6EH^4(6*:D5;:F9N
+M=FZ#<F-)+2U+86QK<G!;2&DM(#M)8VAF3U9I8V-$-C;^/TMH<F9H@%4M/C9$
+M:&9U6F)K:V-;8%@_8"48'3MQ<FA/6F)K>FYJ:SXM<G)91S]8.QLE+6**AGUJ
+M<GIB+41R:VMJ8E%Z=!M)<G)J=45%6F]K:VE@/R!:;EM/6"4;/V%;6R=$>G9R
+M:3\8*6%H7F=$/S\M85M$-EQP;%H_.39%@G*  3D!/P#^:6M96VAL<6EO62TE
+M($5?8&-96F\V#@DE25Y[66UZ;5M$-C]H6"TM.5R"@TQ%845C:5A667=R6E19
+M65MR>GIP8V-;3UA/>HR$=6YC239@=7MZ6&-L:45J?6(Y3&IL9FMT:6"*>DLM
+M-D1N>G9R8$UH52DV36QR:FAK<F-B6CXM_B([<%YC6W9C,#8@&UAZ:VAI<G!K
+M6VEA26Y9/#(M;X)Z8DU)2'I[8%LE(%EK6$DR/C(.&!@M6W5U:FIS;U]A8F9Z
+M;6MC<G<M2UI/479P86!/:FIK8UX_86M%3VE$/V)R<F8V.U!R<X!L,B5 at 6EAH
+M:6E8/SE@;CY$8UYB124_18)F@@%9 4D _F!96F-/<HER=W)@-"5+16%L7T]P
+M6RDP/%5-8CERBF9;639I@%4;&"DV9HAH3UI%66!%3#E:;VM;6F%C:UY[<W)S
+M:%M826I]A'IF84LV3')Z;4]:9H!B8FM@/CY<;FMB<V9GA8!I/C8;27=U=6M9
+M6UA+541;8T50>G5K3&)M$OX2-E9(85MF=SXM*1 at M;FEV>WQW<FMW8$5R8DDM
+M+5E]@G)?6%9J>VM6.T19:%I-9F%+*30\,CYB;UE<>FIK:V!-:DQ9>W=Z36!A
+M345:;W)W86E:6F)B;&MH6%EP84]B>G6 <FE%8EYN:F8_6F-I:VYS<& ;/X!?
+M6&MC6S\@+3^"6X(!8@%; /X\'3EH6&.'>FYZ=6M88T5-:$U@:F(V54MI:E at B
+M6H-L338E8H-O/" R539KB&]A25]H6DU%/TUJ;TEC:V T8W)S@&];6%AB<G5Z
+M:UD_1#!$8D=I64=[<F-:7U0^,%MB2&IU?8: >F!)*3!;;FMJ8EM;8&-B84D;
+M'6AF9T1J at QNN$D1-1&)K7&MC6%0T+6!?7W)\@&MN<S\P;6))*R(_8F);65]U
+M=G5J5CY)/FEG.0-CSVEJ<75?3V$V,G=B3TQ815 at V/FYR<F9J=VI--C!B at H)S
+M64587G)R6DE-<FQ98W)C<FUU<G=C3UIF:W!;7FMK:EYF*3EK8EIK<VQ).RT=
+M.7 !8P%6 .T\(#9965MW>W)R=WIK:F-P:3]%6E at P/T199G%)8X-K638\6WIS
+M9CQ)6#8_>FY;86AP;VA8.3]<<FAJ;V G,$5C>FM at 6EA:6EQ]@VDY53\=+1MB
+M8CE>8UA:6$Q6-CY-+TUN?8J&?'=8-"4_4')C UO(7G9]<F$I&UEJ8DQND248
+M55M::')C46)B6EA+1#\M36Y_>FYC24MJ22TK&%Q8.38P6G)]@VA%,"TM8W=I
+M65M::GJ!>VM/6#(M W+#84U-6#X_:&-B;6MU>F(R%2]WB'I/.5I,;VMA13YI
+M8UAH>FM:2&IZ at W%%66MM<D]/6F]C3U%I8VIB:UMB<F!5630M8 %J 6  _DLP
+M7G!B3%QR:%MB>G):3'-Z8TM)6%E824M%>W%L at W):24D^6G5Z<G%@1#]F9D]A
+M8W-S;&E82V%X9UYC<%4T+4UP3UM at 6EI%,FN*=S8V+1 at K*T5O8$0^25I;6$DY
+M/TDM.6!J?8:#<EM5*24M6EI-6UM/8GIU<F$V87):,%J*6(,P6&,$:_EA6%!-
+M6&@I&R R3'*(?$])=W!$&S(E;5 at V*2D^669Z7SY$*2 ^8WI065M:;H)U8DU8
+M/TEZ>H-P36%;13]@6#EB9GN(8R4)$EJ#=V Y/CMB6F%-3T]%/EI[;&-C:'J*
+M;#!(>G5R13])8V)C8WJ#<V9R:UIR<DU at 85]I 5H!40#"/RE->G=,3'!@/SEO
+M<DDV6GIN6$5%8&]+2S]R>G)]<V-A.QLE:H)]@F-F;VM:7F%C<W5K8V$Y,&]W
+M6DAJ:$DV26E;!&"E22T_=8IQ2388.V%%;'-9-CE64&!924E8.SEA6V."@G-:
+M8$LR.P-)_EMC6%A09GIW35IS:#];;G)A36)R:E!C<G);24]?.1(G+3!;@WI6
+M67-H7CLV86A)-#0\6%EC:TDM-EL@$C9H6EA:6VY[<EM-6#98;G6(8C1C<%A)
+M3SXE,#^ D7 I#AA)<F9K83X_:%EC;&%+1#8W9V%@:WIU at V,V17N"=V _.9))
+M87)Z?7UN;&MK:G)S33]%<'<!;0%- /Y$&R5B>F)18VDM'5AA13\P8U M-C]A
+M=V9B6VE@:GI]<6E%*R5-=7US8VQS8UMB:6QV=6UC7RT;6H)H15],.3]%:6!C
+M:6!R;V Y6H*$:T at I/W!$37)P.3(O.5A at 6F%I23YB8VM[=69B6V%H9U9)15MB
+M:F-$4')U8VMR:UMC:GK^:U9B>FM:8W)V>&!-340@*38\6WIR3&ER:EA)5G)O
+M33(M/FEO8FA4*1)8*0X2+4E67VAS>G)H8F(V.45F at V P8G)K;&A), at D;:HEZ
+M230M6&M><G,^27!B8VMA/UE>/EE%6&)W7'IQ15AR>WIS1"U$67)]=69C:$5'
+M=W):7D4=@DQJ 7(!< #^;CLR6W5R:V-O-BU)6$54/E at V+4188VYR<F=W7$QZ
+M;FMI841@<6MZ<EYR:UM)3V%G<G5F86 _27>#:F9P22T[16%;6VMO;FMO:5!R
+M at VU8/SE:-CYK<DDV+39,8$E/>UY%14US=G)B8$E-<F]@36)C8VIL1$UF:F-[
+M>V-06EY\_G='8GIK6&)R=7IO6F!4*2DM.6-[<F!A64TV/DUW>FM4+2U,;TPW
+M5E =240P%1@[1&!P8W)R;VMK6"T;/WAP.6)U9')R3#()&%F"=UM>6$5),&!O
+M.5AA:6EB6#!::#]A24U97$UR>%M98VQZ:D0I-C9C?7%;3V V/WIN:&-9)8(M
+M20%> 8@ _GMH25IR:7> =V!%3UI)8FAF+2 V15IB9FYK<#XP8EIB8EDV6WIB
+M:W)R;F))14E$26-B36-C6&*"D7)L at 6@_1$5A9CE%=TA(:WIC8GYM3$DP-B4^
+M7')C6#8M.58G.7IN138I45I>:F V+6EO1$E at 8V)<9F-A7T11>GUF33M-=OY]
+M1$U:3#1@:FUS=5!B8SLR(!U(<H)Z<V8M*2U)<GIU<EY)3VM))3961%9I82 8
+M,CQ%:4U;<F)B7F @&"5-8UAN:D]F at ELV&"!@>G=$.7!I-A(V8#9/15AI340;
+M/UXG/T1,9T1);')C22<_<FLP+40V/W=R6EMF+4EW5'=Z9BV"("4!6@%[ .MK
+M:#E-:4A9?8)R;EE/3VAK8S0.#B4^34U<:G)955M865IA24QC/C!F at VYQ:4E%
+M1#9;9CE/:6%:8H)R5&YB:VE93UPE(&M)15IR65M[<UQ at 538\14QB8V-)-C8E
+M#B=B:DEB7C8=-EM>+1M@:P-:_F)B33(Y8G%)179]=7)816IZ:$E)+1@^54U[
+M@%!,<D4_/" M1&Z&@W<R(!LV:W9J9G)Z at H!J15M).4EH;S G*5A86F!C;6I8
+M6UM$;F8E)6**<C]JA7IP-BU9>GM@)3!P;1LV:&!9/UAR8EXI/G%5*2U)<%M9
+M3V)J:3PR3&))-I)$2S9)8V-B<$D^:4AR at GA>,"4!8P%M /YK8SPY86%::WJ#
+M>&A1<'=R9U50(!@V24]98F)-8VA923!9:%9 at -AM$:TUR@&EA8$58<%E-8F-;
+M/V:#:V-;:W-P83XE+6-H;VMN6EIZBG)K;%A@:6)B66IJ3&9%(")9839W>V$Y
+M/F!8/TEI<W%B6F)K3S8I/F9:1&IZ;GIZ:%O^9F\P-D4Y-D0Y<HIZ6FI68%A9
+M53Y-;H1S-BDI/$]N;&)D?8F*<EIC838P:5DB*1M)8$56:VUB7UXV.7MB&Q4W
+M=6]':&AU?4DM35QV at 2T2+5@I&U9B83Y9:VIP/S!S;24V640P8G%F;GIJ6TE'
+M:&%82TE/245$<F]/8&AK;G-;@CL^ 6,!<@#^<G!),%IC:6-K at H1N36)K<FMF
+M<D0I-DU/3'!O6%AF:UX\8& T168M)3\^8W-S;&)-:&-86&IF8"UJAG-C6G)Z
+M;&%+/#Q/<'* >F-:;HEZ7F-::')F8T]<:C!@8U at V.5@^7GIR6DE:/CE:8G)Z
+M:D1F at F-A1#EO9$U/;V1F;FUR_EM8)14M/#\^.7"&B&M:6F8^1&YZ:FER83\M
+M/V%%7&IK4&Z AGUF8V$_)5]<)2L at +3E536IR:UY)&RUC- at D.'6IZ8W!$68A+
+M/%M:<()A.2 E/!@E6'IZ231)<6$V&!M@;$0[%2MP;G6*;F-P:VQO:$U'64DV
+M,&MW6DUJ;&-97H(P-@%' 68 _F)Z6R4_:VMI8W.*?6M:3%MP8VI65#D_64U:
+M=W Y6G)A:7UP-C9O;3(E/UEC9G-<8V at Y1%AB;F at M3')W:E!R>F-/6&%527)J
+M=75N6UZ&@V)08F9J9G)K8W!)25!;/QM%84EC<UHP2RT_:#D_;GM<8G5<6DTY
+M:VI,1&MZ8EQ;:_YI5CX5("55-CQH?7UJ14M88$59B%M9=VD_+5AP34QB@&);
+M=7A]=G)K6UA-7#9$/S0;/V!B<F]@2QL_:$0G'1MBBGU[85B"/S9A<'J*>F M
+M+44@%2UU at T]97WMT1"DM6V at _530_6T]F at VM/>HA[;G)925E4)15-<F-A:V-K
+M8VN"64L!-@%% /Y9:U\M/V-<;W!F?8)U<EI)8FM at 66A@15E96FMP)TQN/V*"
+M at F)C at X=I+3]%6F9Z8F)G+2(V/W=R2T5B;G)C:G)K6$E%6&%R<VUJ;6I;=GUM
+M6UIN<F)R<FEL=VQA6#L2+3Y+8698,#(5,&$;$CEZ=VYR8D\_26-B125%@W)'
+M6G'^8V)A,A48-CX^9GUU:RTM-G!H37)B/S9K>C9,8EHP,G=R9E-<@GAZ;&YR
+M6EI)3%E>)2U536!W6TP^8(" :R4)-GI]=G)B at UDP.6IZ?8IK/EEA-"DK=WY;
+M83E;=VE+87!W66!@86M;6FYI,&.*AGIJ8#Y%/A at .+5]04&IF>')Q at G=S 6 !
+M50#^8%I-66%C6V-L:WUR>G9P6V)J6F%L:455/T]B:S]/7$1:<GIU;8I]<FE;
+M/TUC>GI<6%4R&R=K<EXY1&)K8DQB:VT[("U/369[=6MK9FYU;FM/<GUZ;FYK
+M9FII>&A+-RD;26]@/S [*S9$&Q4;6VYF;6I;26MP:CX;)WI[159C_D5:<F\_
+M.24M1&-U:'(I%1M8:UMZ8TL_8W5C:5HY-D5J:F9 at 375U<UMZ at V-;85A@:3PI
+M-E5$8EL^86QN?'<\("U:>GIF8G=Q351L>W*">V-F:5Y)4&N"8V8R-E9I:8%R
+M<FE96$UZ<EI:82=:?8=Z9F-823\5"1 at _2S8W6GI<8X)[;@%O 6  U6!;25IA
+M8W%;869R<G9U<G)O9&E;:F]-241%7VIB8%E>8FJ"<F:">F-J8U5;7FZ):%A;
+M9BD;16)Q24E(:%0G.5MI/"LK)1TV;W5R8VMP:FMC26)U?6<#:H%N W+^:V@\
+M$EE[8E at M)SEH8$0R&TEP8W)F/TUZ at FM>1%2 BFIC65II<F988"TM16M[<G(_
+M*1 at R2&AJ341A;W)J at 7<V8'=P3%MR>G9U:DQNBG=C6TQH:U4[+5AA34DB/V]R
+M9G=/-DM-;8-R9FUW33EI<FMC9G=O8F-96SES=W!++3E8I%MG<F-/-BTG:W)L
+M='!$:WV*>V-;:6%)*0D.+44@&U9J/UJ!<@%H 6( _DE915IQ<EYC8%IK8E!Z
+M>VI_=W!L<FI9-BTM/W!933E at 8$5K:FUU>V]/-EAA<&QR6C]):6E+/EIK8&!;
+M850M+4U82UE0+2DB-U%B3%!C<G)C35IK;F)945IF;G)Z>F at W%4UR6E!C84U1
+M8FMI)R]H6UYS6T]RAW!,:&I]BGUL4/YG<G)>63DV1#8Y<&YK:&E++3!P:TU%
+M8')R35I@/UIZ6#E6:GIZ9CX=6I*"<&%)6F)+/EEI6G)R1"5B>W)R338_+2]J
+M<EQ@<FQ@:%IC6#E::5 at _/EHE1X-R<$LV6'!C86%)51 at 826),8UQ:<X*%?6MH
+M:G!B7C 8("D@($EC+3""=V(!6@%- /Y$/"5+ at W]/.4UB:V P:W)B<GAR<HV*
+M;U9<25II64PM6&!86V-<4&IG/B4^3VQL8TDV-DUV=TU-:EIP8&-F6#]8.41@
+M85E)25\P24PO.6-R<F)B:&I:1#E%6G)B<F at _-FMZ6UH_9GQS8DU:8E4V8$U%
+M<G)B9G6"8TU-:X)\:F'^6FIZ<F at G-G1$&S9C6G-N:#\E6G)J8V ^/DE-238^
+M64(M6%IK:T0M)5:2BH)H6$D_+39P<#EHBF at 5/FI<;U@^52 @1%@Y6')Z>W)B
+M;&)@7V-:-BU))39S<WMP/U9R?&E-6G<I#B(^5#\G)UIZ=75F:VMN>FM>.R E
+M/#9;<3PE at EA- 6@!20#^.R 5+6][62 M8'-H2VAK7&9K:UR&CG),14M:=VE+
+M/TMC8VAI5C1-23 \1%IS<F)653Y%:X-K5F!%:VM/8VE;838G/T]/1V-R239)
+M)RU)6W)R:FMJ9U at _.4EP6WIC2T1C;EA:6W)U>G)@3UA)+59$.6AN:VMF?(-;
+M.45K>G5K_EM89()O)2UQ:3LE245K:FM@,E9N9F)J24EA5DD_/CDV'4E:6&8I
+M(%EJ?862>DDV(!@E<G=86XIW*3]A3&MM6&$\/%A8-C!<>HIZ>G=K<G=O8DL\
+M+24W:W)]<F%C6V9[8EN#830@)5A)&QA%=VYJ7D]H9G5O:50[-DD^87IH.X(V
+M20%W 6  O3(5&"UA;SP.&SYK<G=[<FIB8EM;<HIZ6#Q)1&N"8S8_6VMC:U]%
+M63\G,$E/;'- at -TE)2V.#@FIC86Y[6U #:?YA1#=%/TQF?7 Y1#8M/R]B>U!/
+M:FMQ9DE%;&AJ34Q@:&A)37*"?7IM8DU8.2U5/D5 at 9UIJ8E!Z:#8M6&YZ;6MB
+M3VUP/RUB@' ^/DE?6EE9-DUS;UQB3&-T3$1B62T\+4UB5DD2&$UR9G62BF<I
+M#@X54()R8G6 86-;+T]J:V/ .5AL8#P;,'.#;G9Z;')Z>G)I124V55YZ at FM;
+M<5M$<HAZ>G* 53)89EM$67IR:EDV6&]D9F]>7TM8.4UY<DDE1 %[ 6@ _E4\
+M-$EO9RD.%3)-8GJ)<FIB65HV6FY;3U at E&S]A6S(V16UI9F=)2U0V-#9%6%!)
+M&R4M/TQQ at FU<;&Y]<FMB8VQO<6-I1$Q->X):1#8M/"4Y9T0P1&-K;TTY8W!K
+M6$Q:8%@E17)@=8)F45A8/B=$,#Y@:$5$6DEB8RT;/W!N9J]U>F)08"T5,&J 
+M7TE41456/CY%8G)R6#!-<DQ8:6<[-CYB;U8I%2!$<EE:>GUW- ,.N39R<7)Z
+M:V)S8SXY6H!H+4QW8D08%45S<EE/6W)U=8)X5"5A<3]<>H!1<FX_2'J$8V:&
+M<DM/:W!J8 at -RDF$V,&IH3VMS:V%8-D1K:44;+0%A 5@ _EAI26-Z:2 )#B _
+M.6)]>EM-36!$86M%3VE$/V)R<C\R+5EK;&)664D_7DE88%]$+3(I-C9$;FI:
+M<W)F=7U at 1&-O9FYW6#\P:X)Z:#\M55Y$6U4V-DU;:D0V6GI]=6I))2 E56(O
+M67IB5EA<5BTV-D5:<%E)6%MK3S8@,%YH3?YN at FIB6" .*4EN9$UH138^)TE8
+M1%QR8C9)8EIH:V9A23EB<F,R(#E->F9A:6YS9C .#BUK;76"<EEJ:E at V87UK
+M1$2 ;TD=%25,>FQ965MK9G9W21M-<E at Y7(!W<G=))5!\/DR#@VE%:WIR:VMF
+M=VE5-F-C2V-[>G)L8%AK<&:"-"D!)0$G /XM3V%P?G R%0XE66!@4&]K6$5:
+M;&MH6%EP84]B>G5H64E$7UM)35HV+5IK<&)B6#!$6%Y56&)98(" 6VI]:CY,
+M6EYK;&%:14UR>G!A16)O6DUC63]-8FM%-D5:>GUK1" 8(%A:2W!R1TE816 M
+M*RTV3%MB2SY,@%LV/TM@;EO^3WUR46 \%2U69FI;;F [(!4W7S]);' _1$5C
+M;VI08V!86FIL3S9$27-S8EIL>FIF*Q(M8GIM>7IK:F\_(F!Z7%XP<FI6,D$\
+M)VIR;VA:6U%G8BT@,&);/SEB=UEI5"!$<B4E<H-R.4UZ=7)J;8)P7C!86$MK
+M<FU]B7)98GM[@F98 A@ Z#(M2W"">DDR(#9I>W<P3'IJ36%R<EI)37)L6V-R
+M8WIS>G!@6#Y-9C88-FMZ<F-?/CE@>'!T8#8M=8)S:VIB6EE:8VM:6G!)/V-F
+M8F-C<GM at -F-J6FEV>FD_.3E-<FDV&" R/S!(@'-8 TG^838B-B=)36%8*3!R
+M8S\Y.5EN>VIZ<F)C6C8W/D5R?79K6"LM6&)626IP23<V/EAG15AB<&]:34U:
+M:EIJ:D4V3WIZ:E4E,CEJ?V)B>F9O1"D_:F)6/VMZ82U$5C9@<WIM8DUH<E at I
+M+U5Q3S<I.6]H84D;+6,I#DQU:TE->GIRE69M at G-K/R4M8'-K6WJ2=T]C>H)U
+M;@%! 1@ _E]>24%<@#DV,CYR at W)96G)U:VIO:V%%/FEC6&-S:VMN?8)W:3\_
+M8EX;)6EK=7=L83]/:DU%)3(I375[;TU%6F-;:'IB6FYC54E%8V9::G)B/TUI
+M8W**AH)K:4D_6V _,D1>8#XM8X%R:5I-8&E<8$1%6%I8/S]K:U8I&SYO?/Z"
+M at GIF:UMA64E%8G6&=4\G/FMS:W!R<F-+/S8_23]+<&MN<TDG5GIJ5EM$)39R
+M?5I4+398:H!B6VY/6V)>26MR64]R>FM>)S V3WJ";F!%:GIH*25C>D]$/TEC
+M=W!5/SE/*1 at V<$PY6GI[<F-<;FQR82 5.6-K3VJ*@EA6:H."@W4!< %+ /Y,
+M8G(M-W G3&YR>G)>:W)S>VYC8EIA34]/145:<&Q;:WN#?7!86&IF(!MI.5IX
+M:6)-6FM@)1LV6%I<:FA)+3Y at 6VMZ<FIR<F-)17%S1TUB8UE-3U9;?7QU=H)O
+M5D589D5;:VI@/SEP;7)B5C]P:GIK5F!L85EA<FI$)2M9:WW^>WUM:&!$6EAC
+M8FERAGU-("U;=6YZ at H)S:E@[/U4M+6%-4()G)55W<$596#Q)<'MO7C(E.6*"
+M<&IF8E!>:TE:=6-B>FYC=S8\,CF >F,V-EIF=U4E/X)N(!M)@G!825IR<%@@
+M&S\E'6!Z=W)K;W5V=68T*RU/:45$>HII,$5\@GMV 7(!:0#^)T5P5#=K/T]<
+M<HI[8FIZ at X-Z:VA96V%92T0[/F)A3V-R>GMW:FIW7C(K625%<&M;8&ER<%D_
+M3WAS=VIH538M1&EL;FIB:G)B.3YL>D0G/V--6&%@669R=&YZ9E@^-D5A6F9N
+M9TD^7UEK;UM%8%!U:DQ%9F9K<'IR22 _:%N"Z'5]9EQ;3#XV3VE99GUZ6B(5
+M.6)?;H.&@FM-/SQ8&RT^15EZ=S8G8&,P2U]96G-U:EI%+24O9GUR;6M/8F)@
+M6FMP=W-<:W5$7D0R<H)H)1LM,FII64EL<V8E*6MM22!$=8-Z7BD;%14V UJ5
+M8FMZ=7IR2T1)2V%96'&"<#8^8V9U 6(!8 #^-EEI22UJ8F!9=86"8E!F=GU\
+M at X V&Q@@.V-L5D5$6FMO8DUJ=W)P1#]?:38V7&IB:&MB:W=H46-UA'IK6U at T
+M+6)Z>F]%6F90.6%S<TD;+40P36MR:E%B;75U7F!8.S]@6DUB8D1-:6MF8EI@
+M8T]K7EA:8F-P=W1R8"D_:T]NGGJ"<F-)/TP^,&-B:FYN3U @.SXV3WJ"AFM%
+M,#]Z/P,VS3=J>G)$23DV6&A:8W)R=VY9/RTM8VYN8F-;:%IC<&IJ=6M/6F@^
+M:7A:;WI6& X8/&-K;&!;:G=8-F!J-A(G8H:- at CP@'2DR/D0_/S1K WJ/6#!A
+M64E):7)M>F-I:4AF 7,!60#^8FMW/B!P64TY:WUZ<F ^8VM;:8!;*1@@26MZ
+M:UD_16N!62DY7')H/S]9=UDV-EY%34DM1&QR82M:A()N8EH_)TAU?7=9<ED^
+M*VR#?6@^550E3')[=F!;6VIU8E!I6%EB8D],34E8:W)F:TU68EIL8%A:3&-;
+M14=R8BTG8F-:_FMU?(-K/EA6'3E,<X)N6V%)5DDI-F=UBG)),C9U;TDM&Q(^
+M;H)_5!T\36)><FMF>GIR;$LV8FYR3$0V:6!$:&Z">E!922TG:8AN=8)I-!@;
+M2VMN<DU%3')H5F]W;C0;)V:%A6E57UY%3UY%1$1;;GJ 8B5,6D5)<')1:F)J
+M>()%10%R 6L _GIR?TD217!)+4QR>G%P-FEO/C!8<&,@)6AZ?6L_-C):@F]$
+M-D]O8$E))U]L/"T[254M&!TY:W E+6J"=FMC8#9%<H)]:HIC+2U@>H)O3&%)
+M&W!Z at X-R:V!98FA(6V-K<V))6$E)6FUZ<G)::5I,3$UF3T5:7SXG3$0M1&]R
+M6?Y,6VV*<D5)8#8V.6F">G=S<&I at 6%5;9GU[83X^4'QP5"D8)6*%?6 at M-DQ9
+M8VQW;&]Q=7IB6&AF8V _($5H.45:@H)$/CP8"41Z>GV#>F$E&#YC<G ^,$1R
+M=UI/8GV 7C9%=W5L:X!B65Y>.TEH;V9K>G)+8$4M/G>!;F)16G>"8U@!6@%Z
+M /Z#>H9P,#EO:#]%8WIF8C]H=V$\-EIC.2UB at GUK,ALE6()Z:%A;=W-B52 V
+M8$DR+6%U53PM-UMQ7C98:VMF8W!I8FU\@W-R8"D@/W)Z:C8V+0Y:<FEU>G)J
+M66)R6TU;9G5F.38V,%9R=7QR47!B23XI6%A$8UI:8&E>*3Y9:V_^8G)R>G)H
+M5DU>7#!$@7UN<G5R35MW<&)R=6I:1#]D9'AA,B5-;5-G:4E at 7&M<:W-R<FYN
+M6E!J6DEA7B4V:&!%6'N":#8;("LP8FMZ at GIC,B E,&!K9C8V:')F.41Z?8!I
+M3UIB<GJ"33E at 224;17)R9FYF:W!%&R5$>X9W8%IK at FA) 40!@ #^?VMN at V!;
+M<FI-6&IZ8DQ8:W=C:6EP61T;27)V:3P5%3]W>FI816)P6FEF-EM6/" Y;UM%
+M25AA<6@^-C!::%E/;W=F>GIR6C\8#BE::FI5&Q4526)/8FIM;4=,<GI087)P
+M;CDM*1LY<G)Z:T=B8V)@6$]C6F-J7VMX@%P_-C Y_C]R at G)K6TDY8V])+5EZ
+M9EYF:#8Y=8)C36]H6VEA13^ <C8V<&,G2&]):W)C7&->;G)K6DE98VE+8F]"
+M*3Y6-D1ZA')$&UA9)SE-9H)]<F->02 ^6V M(D1)/R4V:VYR:SP;)V:#@TDG
+M64DI#B5G<G)P8&MX9CL[66>"?7-C6H)@)0$; 6  [6)I7GII8H)R5FIZ>FM$
+M8W-K3&-W:V$I(%AR<FM9,ALV:GIM85AA6T59<UE;:%4[/$U$,"T^8W!K7S\V
+M7FA>-D=K7'I\<F,[#@X;6&IJ;3P at 06-I3&)B<G=81&):1%N =7)8-CPM/W-]
+M>G)C6ED#8OYH9EQ/:6MB9G5W6TDR$AM@>FQC8#8E6W)P246 :$U08" ;6H)K
+M3U9,37!W6%IK=SXV:&))6&!%8VMB6EM;;GIR:DD^3&E@:7MH.R<^+2UPBH))
+M(DU9)2U)87)U<W)K83])868V+3\_*14M8GI[;RT.#C!B>TDE25PR%2!8:7*0
+M=V-C:VE)3VEK=79\<%]5( $. 2T _EEO7G)B8WUS35QR<W))6G)?)T1C46!$
+M26MN:W%/1"DV:WIQ8F-N83]89FE>:UI+6&!)-B @/U9C<EM@:F)%+4EI8G)V
+M=W=I.RDV8&-::TDV27MR34U9>W5<:6,V+V-V at G5C/U5)6G5\9FMJ35MC24Q_
+M=VA%6&-J8F9[<FE$"?X5-EIB7%H\+4EK<G)B at G!,6F K$BUC=VA at -CY;8%A-
+M8F]K36!(8&1,8')<:FMC7F9Z<VI$(#EA.6)Z<F-)-B 83(^*<%E+84D_36-H
+M7')Z<F=;66)K6$M96#8I*45R?7<_(" M1&M>+3!>1"DO15M;>G)C8DE%6FMB
+M8F9[>&N"7B !%0$@ +-9<FMS;%MH=V V*45K:$QC7QL;,BTV,E: 8EYW1"T@
+M)6!R;FE:86%86V-C<GI at -CEI83\#(/XP6F _;WIK1#1):6IR=7)R@')98'=F
+M1%A)/TEO>DD^37)J6FMJ1#LY8H-M8D])36IL=5Q?1$1 at 3#8G6EYP:590;7)B
+M<G)K7B 8,CY%6EH^+5AC8FMR at WIA6FA9*1 at Y;UQ8/S]86DD^16)R:F ^5F Y
+M6H)J<VQF:T1C;&$^&S_*6"5,9&UR:4D@#BUY=79Q.45S<&)B9TUB;G)N<&QC
+M8UIA6VAP7C V8FYR:$M866AL:$DM,CL\25M;3&9U;UA%/EAP7T5@@()V9U@!
+M- $R /Y+241J<%I9<G I&"5-<EML<$DI)2D5($5Z:FJ 8"L8(%IZ>W!;14E%
+M6G%(:GIO+1L^6$LM*2LG640B8HIR6$188%EJ:VMB at WUC<H-R2$E%/T5Z>TE$
+M:&9C:EQC14DB6'IF65E%3&IF:VM8+3EB240V+1U-?V)-9H)F<6):8CNI&#M8
+M,$1)+25>:4],>X)Z:EIK<DD at +6]:-CY88%@_-B4O35I-86-I6DP$<M-<:TUC
+M:V!5,C9>+6!;8FIL21 at .(EI-6U4=/WI\9EIM8&!<8G!X>V-?26%A8GMR7#])
+M36-G:')C<G)J8E4@%2UH:6A8/W)J2UD_,&]H/TU[?7UZ<@%> 44 _F$_&S98
+M8EA9=SL.#B)B8G> :5Y5*0X at 26EC:G9I-B V:7N">FA:23\Y6V-A=W-5-#E8
+M638V539+,A)$?W);:'-K6EI;8SYS>F-F>G5C6E9)+7F*:UIB7&MK6EM).QM8
+M=7)-13]8;FEF<V ^/F!-87!+("5B:#8Y>W:!:%!A1* V25 at Y.S(8($E:.2=P
+MBGQB6')Z:38Y:%HM-F-P83Y).P,EU"UA:G![:4UB;GMR;%IJ>FM@/S]926!C
+M:VMJ21@),EXM+2 )/W)M5CEB8V)<.6EW>FM956EP<&I<>FA$.TM:8WIF7GIJ
+M;G Y#B5B;VA9/VM@,$DM$@-)AEEI:W9R>P%W 6( _EE$(!4M;FE;8EPG#@Y6
+M36=J4&MW51@@6&!:6G)C+255<G)U>GMP86%)16IP>XAO8%A at 7S8M7%I8& XE
+M8$E8:WI[:UI86$EC9VMN9D]G6F-)%5E\@G)<8V)J8F!)/C1);G9-)TEB:V=F
+M>FIB6D5$38!K5#)$7R(M8FV"<F(_6,-;:&MI62T8-#E;5B)@B8IO6G*(=F!:
+M6UDR+6%Z<DD^230=#AM)3%IZA5H_7H-Z<F)K>G5K3$551%A-8')Z:%XR/V at E
+M PFZ(F)_8#E?8FAH25AF<E\P26E at 8V]0;V\_-$M:;X-H6W)K<FL[&"!$8VE/
+M7F-8,#8@&"4;(CYC;&A/=0&) 7L _FE5( XE<(5P6V!>.Q at M66--,%AP7AL8
+M/%EB8'=+)2 [8V-B<GMK7G)I,&)B<H)Z<F9:;3(V9F%91$0\1!LP8FZ <EI6
+M24U;6F)U:FM$1&(P"3),9'%<8EQR<FM8)S9C:VY6)39$8%!9@&Y_<D14.VMW
+M6V%R:T0V25MK8W)?/\-->GQS8EA$6#!$;"(P;8)Z8&-]9D5K>E at M($EZ<FM9
+M/C(."2!")TENA8IK<GUZ9F)L>F9Z>F P)UM3'4UZ8')N@&@@ Q6Z(#9K@&-@
+M145N<$5@<5 at R26-83'>(>FM$("T_8WM at .5IK>F\\*3Y815AD8W!:8%A4,C0I
+M*1(P8TP^30&" 7\ _G!9)Q at M9H-W6EM88$$5/EE)66%A:%4\1$UJ8F Y6#PT
+M.45:<G-B37J 6$]:6EQ;;U\_6"TG6FMH8%A>7CP_8W)Z<EA83T]86VA91W=J
+M6F%5-$MK<FYB.45RBGU9+2)B:FMO7DE)6$QB<FU]<DQC6$UJ6EQZ=FE%,%M-
+M/D=C/OXY<G)C<&)@:$U5:#]$6V):6WN"8SE@>EHV-DER;FII5"L)%2<M%4E;
+M9H!UA7UQ9F9[<EEI at G<^(%9()4EJ2&-;>G R,#P[*R5->V)9/SYF;SY8<&A$
+M66!C:&:#@EH^.24[6&)8-BU%<G-5(#EO13]J;GI-8W!//C\_.RLM23N",CP!
+M;@%M /YH8$1$/FJ#@FQB8G)L/$M81$U;85IO:FE::&@V&TE>+2 V6')P6SEF
+M at VM@7&A%/UAH65A",C]C=T at Y3W!>16-K;G!)6%A)6%A at 33!><FQR<$1;<X*#
+M:!T;6HV*:C88)TU(:6);641-;V9[?6([644[6UMC9G5U<V!97BT;6&+^6W!-
+M,&!J8EI,;')-3'I-)S9K=6-$:')B6%E<<GIB6F8\&"<P( XI-CY-6XAZ8EQK
+M<G)O;G)K8%A/6VAB:F)P:FI<;3\_6#(@38!J35I-:VD[/V!U6#8P37!(<GLV
+M(C8E+4E:238M56R!;T1)<EDM26R*145M6C9%34Q>1#])@D0Y 6\!2 #^3#Y$
+M:%A-;GIW:VIS=VA-6TE58$U%8WIZ9F9I-BE)6$0R.UAW8V%H<GMN<FIJ6$U:
+M8FQC8#X_66I8+3!P83];<FMK3UI83UA-6& ^2%I(<GI66&9]A%\2#C9]DHI9
+M)2D_14U$65HP/T4W:X-G16D_/V)K9EY<47)R35@[$D1Z_F9B/C]$8&!,16-R
+M3"EZ83(W26-/2W!K8FIK:F]Z:D]H21 at 8(!@5(#9$&RU[>EE:<$]F?'59/V)[
+M6$US6EIJ<H1Z6FI5/EH\&S!R:FIG3%IC/B4Y=W ^+41C8X"#52DM,B(Y6#\E
+M&SEK@&)B>'IG+25)A5\Y8DDI25M'8F9568)93P%P 5@ _F!).6-W23(_:W)F
+M7FYW2$U9/F%)26%N at G!:63]A?4E+7C]H=V)@9G*#>G: <V!@7TQF<G!?66%J
+M9D0_8V!%87AQ:VE;84E95EEC:5D_)UAB5CE;@H)X01 at R:X:*=U at _66)@+3]A
+M23LB(E9U9S]B.3E6=VIL:45::EL_&PXM<OYR6D1 at 669@2S9$<F])>FI%24E9
+M-C!<:F-R:UIJ>FMC;5D\,D1$-#1)7!(@:G=(8GU:8GIR:#Y!9CY$?V at _35EM
+M at H)O7$E812 I8%IF;V!I:%0P,EIB55AA8%Z"B7(^)3PI-EA$%0X524TM1'IZ
+M9RT5)VIK6F]A)39 at 8%E:34F"34L!:@%W /YR6#]:>F\I%2U96#D_=E8^6"TV
+M-D]L:GIZ;3\I8G]-/VD^1')B35MN at W)C>H!G340I3'-Z:V!A6VEC:&->/UAO
+M:F-C65@^245837!P6UEH65 at E.6U]A' V2V-N at GIR3TQB6"=8<H!O23]F:FIC
+M9CX=)6MK<W=-3&MK6"D8)6+^7&)-66-Q;'!--EJ"@WUK23\V23DI.6)C<W-K
+M:GIF8G!C66!J;VE?:E8.)5]R2T=R8V-J;7-W6U@;+7=Z8EI?8FR,>EI87DE5
+M26-P;VMB<'!@55EF1&!J<EM,>I**;SP_1#]).Q at .#A45#C9Z>F)$-$1H65AC
+M<4D^3VE66F]8 at D5) 4<!;@#^:VEH:FUZ5"<@+5I3/F)-154M%1(M3%IR<V\V
+M*6)I+3!Q6"U)/C1?9G);25%M=V,_)3EL>GIK8&!?;W!?845:;FI at 6$D_35D_
+M2$E@>V=P;V9G13EH;7IR145-8G)U>G):;%@I16Z*@G5K:G5F at G5<+1LP47IS
+M33EK<F)>,C)8HS=913E86V!R<S\G8HI];EM/1#]$.S=A;FQR;6YR8DU8245R
+M X+::W)M&S],8FDY6#E/8&)U at W->"1A%;G=C8VA/>G-%3%A)5F!9<G)?67)R
+M8EMI:#9?>H)I37J2DGI;15A88#\T.2 ."0XV at H)J24MQ at UE88VIO;TAA.3EX
+M<$5% 6@!60#].6>)BGU[<&8V-EMK16AB6F V*RLT+3Y68F \26!@)25B9BTG
+M1$]F>H!R8$UB<G!9/$MB;7MR:EMA8F)I64U::W=L8%Y)8VEA/C9%6%IJ8DQ8
+M:4UI8EEB9F!)6$U;>X)::FA)5G*)>'5N<GID<F9I850V7V]:25AC=G5R:$0#
+M6OY5140;+5IR82(Y>GQN=W!C52TE(#YO6F!F:W)H22T5(%F">GUR9'(E+3]8
+M8DD[)24[1%N";E\Y*2U$5F!B:V)W<D1)/TMC:%M@<EE:<EI)/F%9,DUUA7)>
+M;HJ&=6!;6%AP6EIX52 5#BEP>F-%27*&:# R8&9Z:V<I)4Q?)R !=P%$ /X;
+M.7J%?'UZ=UAA;&H_8F]08T5)6%LV24DW/S8V3VE5-EA-+25):6YVB8)P3$UC
+M:$D^36EJ=W)C3W=K16- at .3!:>G!S<6%I<G);/UA,.4U%-C9 at 8&IK23E<8%E@
+M/CES<S]B8C Y<H5\<F)L at G5U<V]B24EJ<$D^35QR>GMW:&:L:EEA6"T2(#9)
+M8T0P8FUR>FYP8#05,DE834E9;G):6!4)"2=:669:/U]F/C8#6M$Y6G)J6"=9
+M86AK8VM-241K8EI)8#\_-DEK:#X_<&IB;VA>-C8\*39;=6YC9'V"<FMK:45K
+M;7)R<5E!,"U at 9EM)6W=]=TDV6V)U:VQ85DU8+2 !7 $V /X;)5IU=&YF>FQK
+M<FI$-W=R8V-K;UA88%@V+38M+4UI66$V+55A:F)0>H5[5CY96$E586)9:G]C
+M38%W36%C-A at G5DUR<E%W?79;66MP33E)23DP1%IK6CEB:%IC7S]@6C9P6"4E
+M375U;6AG=6UM?7I9,FEZ:C8V7V)G67=J9GK^:EMH8"T@*2 V8#XE/DQS?6YR
+M8#(5/F ^25A):VI%5!4.%2 ^66=B5D5U<TE8:&)-375S6S Y36J(>G):8W-I
+M84DV=UM8-CEK7QTE8FYJ9FMW7C9$/"5,:D=::FUB7G)R:$UK=7=C<'!87E]@
+M35L^3WI]<G!86VQZ<F9R<FE:@E4R E4 _CLV26AJ:V-S:V1U;SX=7W)C:G-[
+M6EIR:TDE*3(;-E at _3S(V6X%_:UIF at X!--DE8/TU at 838R:ELR at 7I,7V]5*1LM
+M.5QB37*%=5I;:VYH+4EH:3\_+45916)R:V-J8%\^'6A at -C8^66)'8W=F:VYU
+M<E K8WEG+2=9:E]%:V):<OYK:VAB6#Y)-DEC6DD^+6-[;7!-*2!)9#X_7EIC
+M7%9@/S)07F!C8FIW36MZ8EEK:F-C;G5R8%9,8FIN=3E, at F%8.5AR<G!9+6)@
+M*39J:F%I8G)8,%MH-D1:1$196SXT:VI at 8WMQ8EIL<$Q:8V!@6SM8=7M@<F-%
+M6H%R44QM>VN"6!L!-@%5 /Y%238Y6UEC=V)B<FI at -C9$3%IB<F-K=7)O53(I
+M(#QF138E-C]H@()B6GJ"5ATP7&!).5E5/UA5+7IZ1$UW<%X at +61B5DEF@G5:
+M6G-R6BT_:W)U9B4R6%]J;G)K9G)H/AMC8T5-340Y)3!8-V)N:FYH6F)D838M
+M35I$36-L6UO^8FIJ8VM at 24E8:FMJ5B4Y:V-K6T0V5F]<26%C:F)::W!,26]<
+M8UIA<DE;=V]:8FMB6FMK:FUR8F)-7')N8'5%-D5;8H!N<UA823Q)6DTP6%9C
+M6"U):G!C:&A at 65@^,%MC3V-]8$5-8FMB24D_6FI,6FUY8V)@.5E[:SDI17)_
+M at F8R 38!/@#^:7)F1#8E)4UL8TE)<&<^/V%81$U/:W-B;G=,86LP.6%8/T$@
+M)3Y@;VE-64DI(&!J:VIB47IT8VAR:3])>H!9-D1)/R4M8GV%:SE,:38;7GIR
+M:TTM1#Y:8EIQ at H)R4#XM<H**>F9I7S0@,C))6D]-9H*"=V at V'59@,BU:<G(P
+M_D5:8UIB8F ^-D5ZBG)9.5!B65M-2S]$8G!<8UM<8D]N<DD_/C]9<F];6%MB
+M:F!)+2E86$1$?8)C145@=GMB)2UH65IB169Z;V!+36!9-EMC6F!@6&J(<TQB
+M at H)Z:3LP6&=R=6E)-C!@:&A5+2]@6T5;:VEB6V-R>G)@/#Q/9X)R:@%N 6D 
+M_F9W=VM$(!@I3U]$6']P53Y823!)2SEB<G)C*3]O22=-6#]5,C1$/EIP8FA6
+M,B!89GIM:V-R=V)P8E\V+6J":DE)62T.$E9N?7I,/S\E&UB"=4U))38M24T^
+M66Z">TD;&UIMBGUR<G=I/RTM15!?.6*"A7UR/RD_8V%;:VIN8)5+6&IC;VIB
+M/S(E675Z<EA68F);14T#8+%B8FM/6')B:FY-5DDE,&AZ;W)K66IJ32 @7&%-
+M6W6%8#8G6G5R61LI5DEI8$EB;FMH V.T:TE-8E]/24URB' P.7-Z at H-I-CEF
+M at G5W22TR6$U-8%@^.4U%:FI61%IJ?VUU:F%-6%I;8@%P 6, _FIR>G=;1" =
+M+59::W]V:5MA13)%7C(V=W]9*2589DE86T5F1$EH35!L;VY at 1"U836I,67MW
+M>E!W;FI$-F. 9FA863PP)6-]A8EJ2S8@(#9R:C!-53L[8%M92T51>E at 5%3EK
+M>F9:3&)[:SX\/SDM,&*"A7%R8U4_/F>"?6I9:.U at 36IF=7IK8$E58%E9>FIH
+M8W!@-BUB<G5F8G)H26YR:VIB8385%41R;G5Z6F)W@"4 at 8VQF676*66 V3&YK
+M:3]>:$UP=7![6V)H1&)J:VEB:&!8/SE<>G)@2V!B>H-K/R4_=7)Y:#XM6#\M
+M/UM) RV08H M)U]@34^ <EM-:W=A.0%H 6\ _F)U>GIJ8308(#9-:X!R:6-L
+M224M/S(P:HIO.R P6FE]:3EJ;VEP6$]:6H)K23Q615 at V/FYR:UIK>GII8W)[
+M:& ^.6AM2X"*A'QR8U9454MJ8S\^6D]$8&)R:UDO658R,"=J=5Q$+3!P8CY@
+M=38)%3E[A6YB34D['3]U at G=:8_Y6-V)U;FYF;U]-8VQ/56IO6F-B-AM$:FUK
+M669Q5G!K:$]J<#L.#B5)16=W3$EFABT\8VMO1&J&;6IH8V)(;T58=&!R at 6]U
+M:%IG+3E::W!$34U)8&!,:UER?&=-:VMD:5H_3&9U=6I86UA+66AA1"42.6H_
+M(#E$&R=O<CDP7X."<#\!/P%; /Y:<GIZ<G)5(" ;+59K5C]+8UI)/C!;<FIP
+M35HT)2U);G V1&UO8#E)5D1R:UDT34U8/C]H8EHY7VMU?7I\=VA@/S9::V%F
+M?6YF:EIK:G)Z?6],7E9 at 24Q/?8-W7E98-UA)8GIW6C]58S ;.6I<(!L_<H-U
+M9S8E.U1):G5R8V"&14E:=75Z W+W1%AP:2(Y8TE9:UH[6%EJ?V)J<E9;:G!%
+M16MG*1T[.R4_7R<V:GHV5&MC63=,6UMR=H1J/V(_+4QR=8)O:VIK<E0M/FAX
+M23DM)4EH6G!B;8AZ8VM:/W)Z:38_37*#;V%@6V%C;V \("TR8D0Y/"LM6W<M
+M&#!R=5X!, %) /Y9=7)B6FYN1#PR*T5834U@:VMJ5BT_:V-K8V@^.2 E37=)
+M,%]B/BT^8$E:8#\T36%;13]@6#DR25IF?8*%@&),:'!I8F at Y241$3$1O:EQN
+M=7)(:')O:%A$:7AU8EIA2$5[@H9Z6T];<#LE*3)T7ALP66UU=TD;-F!'8FM/
+M9TG8,&EK9G)Z?7UF85MR>R5!21TY;6MH3T1/>GIY>FI6:GMI,%MR22TY23(@
+M23(;86]:5FIJ34E%)TQW?89[6UI8.SEJ=8-Z9EIK>F<V+5]X6CX@%2U?5DU@
+M8 at 1RI&$Y6W)J23(B2'IK8FMB2%IR=UM//SE at 8#]/7C9?@&Y$-F-R< $M 44 
+M_F)U:V%83V- at 5F$T-DU)/D]K@'UC-C!C:V-C<DTR&Q@^:EA6:&([&S]H8%A>
+M-BTT8W!824\^)41$:&-U=7=[:UIK>FIB8CDB+4E%-FA?6UI::TU;:D]J:TQJ
+M?75L8F-:.5YT?75L8UMC6#8M-FAR62M86F9Z<D]H:DQ$6D1:6/XM36IJ9&AU
+MBEQC9G*"14A8*2)8:FA8.4UZ?'5]@EM;@HA?66-B8%9C;B4^/!LY:G!B6VI;
+M6V V.7*"@G);8V-F6&E;:5E6/V-Z=UD_8'=A+2<5,F8_,$DY)SYO>G!)6%IJ
+M83\I-F9B:VMI35MR>FIJ:&-C6B(P1#!->GI at .5F":W !- $_ /YI?W)K:VEB
+M14UR7BDY82TM.7J*<EDY4&):8WIW1"D;26A?:H-J23Q@<FIQ83\\.5IK:VQH
+M23(R56ER=UI;<G!K;7IK65I)&QM<9UAI6FAH1%!J8F)%35HY7(IR;G)S8C]%
+M1T]R<FE>4$TY*5A6<G))6%E;<X1W>GIH1%A<+W?^8E]B:V)3>H9:/V)Z=58P
+M/BD8275:/CE%>GM(4'MB6G6"<F-/86)8:G(V2$0[/EF!<E!B:&!I6#!J=7I[
+M4%!C:&MK:FI$24E89X!8241B6!LI&"5D/CY8/!@;8'IZ<&A:8FE+9F!>6F%@
+M:&AR>WIR<G& :TD8&!L;.7)B.3 Y at E]P 5@!20##9GIK67&(=UI;:G!>2V@[
+M*259=7IR6%9B8EMKBF@[+4E at 8&9Z:DQ;<'IN at 7-)/S])8%@P23\M-EAI=X!-
+M1%EB:5!R;P-)_BTE+4UK;UI-8V!-:G!C65E)(D>";6YU9FI$/E9:8G5]<FA-
+M.2<^.4QH6&)H3%Z#>GJ"<D=621MR=6)97FM<9G5S5D5K?6I%5"4.-G)W.24P
+M:W]),&)C<G5Z<FI-345)8V,V6FAI:4^#?4Q::V-B<&!J8G)]9EIH3%MB9FY$
+M-J988$1H3UE96DDI(!@I5C9I:DD at +6=U>FYC:6%@-F!J8$Q)/DE9:@-ZD6UR
+MA8)C,AT.&#]K8D0^.3E? 6(!6P#^9VMB3&:"@G5R<F9F8FM at 255@65EZ:FAC
+M:V!8>F\Y,C9)7W=R8$QJ<VYF:7)A-CPY358M,BTV668V4()P33 P8$5(:$U-
+M6#LI)39I<EI:8V!86FIB6FEI*2=C>GUV8EY8*59K6V9]?6YB3$M97E8_.6N"
+M6S]B8UMK<F)@225:_FIJ6EAN=5I9:&))6X)R:FA/%1):>E\R&SEB/BU8:FUZ
+M=69U<DE%:7!;-D1R;7=6 at GPY25I?1W-K:$1B>FYL8F!-6DUC/R4Y84];:&MH
+M13]F:S8V6#9 at 8TDM17)Z at F=)8VIC/D1O32U)7#8P16YU;EEN>(IS7"D8(%9W
+M8FAB6X)96 %- 5L _FMR85E>;GUU;H5[9EYF;U]-8VE/5FIO8&- at 6X)R138E
+M.59J:U]-:FMC;UA9:$E$-EA8-B ;&U9?)2UK<G!5+45936MH6&!%)14E87!:
+M6&M964]J8UAJ<TDB.6N%@V]96#8^4&E;;H-B638Y;7)H(B5CBG59/D0^,&IZ
+M>G)8:/YJ8V9S>WI;35A-.6.'9EQ/;2T5.6)17"4R/B451&)1>GIB;&I,27!P
+M:D0R8FYW7WV%.24B/CEB86-)2&MC7D1I84DK8V!526%;6V->:38I8H!H/C])
+M544^5&AR;7)6/&)R>F at Y8C\;+5A$-C!:<6),8&Z)@&U0-BD_:&-C:$V"5EH!
+M-@%( (U$:FE866)Z:UQ]>F)J W+^1%MK8S8Y8TU:8BER@&I?.S!)6EI-:'!K
+M;VM83'!I6&%L8#L@%2 V53Q$64U;=V9)65MC:UAF21 at .%4E987%X6CER<EIC
+M>GUA&QM,=75J63D[*2=,6D=R8CD;&TEA2 X218)]<CXM)25-=8IU:F]R8V)U
+M?8=S8$DM%3EZ8FAB\&)@0DE-3VE)/CPI(#9H17*"@G5H24EK:FII-D1RAG)M
+MBDP2"14M:6I;6BTE,CY%8G)R:5IO:UIJ:W)K6V(V-&%Z@&IC8$@M+5AZ>G)C
+M-BE8;H6#6F!-/"TM-C9$8V-J6EIC>GQW:%Q)7EI at 8FM),#X!.P$M /X;/F=O
+M6EMF;V)M;F)B?7UF86!K;S8_22DY6C!;<G)W;C8P2S8V8FIB<G ^,%]U46MN
+M8D08)3PM-FY at 1"4Y:VA-85M$145J/R4K*40P.6N 1R>!:DQB;8)I*R5-<FIB
+M338I1$0M/DUI6TDI(#<V/AL8179M=CX;)3Q$37UZ66K^>W9N<F)M at W)H238_
+M6C)H at FMK<FQ;8W=P:$0V5#YJ25E\BG5 at 14EA8V9I,B)B?75UA7(\& XB:&YB
+M8#L2*UA51&I]>F-J<EIK;'MZ8TU526EW>W)R<#X;&$EZ?VY?)1 at E1X*":FA%
+M22TM-C]A=V9B6VE@:GI]<E at Y6F$_8W)9 at BTE 54!,@#^(!LY<G=;8GIR<FUI
+M:76"8F-F<GI)2%@M+44E/DQF>H- at +41+/%!P3&)H*14O:5IB:VM$($1N.QMP
+M;T0M/FEC3%]8238^8EA5-C9F+14^8AT.;V1;65%N339)<G5Z:E@\(&%K6#9@
+M=VE at 7DM$-DDW.UIM8F(M#B4M+1UB>41;_GUF7GIF66)F:VE86$4;27)R>H-R
+M6DUN>G5<.5I94$DO9X!:.5AI87!K6"TM6G)J=75]=#(.&$EJ:G=H(!M):&)Q
+M=6YB:FI%8&J"@F(^6%EK<FMK>H)8&!4R7G=B13 .%2UQ=7IP36 _+3]8:75]
+M<F=W7$QZ;G)?-C!)'3EO38(M-@%- 40 TRT8&TUJ6FEZ:F)J;&)H238V26EC
+M8G)O23E%-CLI8(*#<#<V2556:VAB:" @.UE96F)H,A at R8U4E:F])/DUK:$]8
+M8$E8240Y7S(;6DD;,C\M*5I0 W+^:SDB/WM\=6I,-"!I>FH_27IZ;V)A24MI
+M.4EJ>FY@*1TT/#(I7V(M7VQ:8GIZ9FY>8V!816Y at -C)8=VMJ34UK;75K36!J
+M7U at M27)?+4UP8V9X23(_8V-K:FIR;5\R(#9-8GQW)0X;-F)Y<FMF:&\V-EEU
+M at F))25A at 8TU9:W)BJD4T-DQB:4PT&#Q@<G)Z<F)@-ALV25IB9GIK<#XP8EI<
+M:V _.R4^8DDM, %; 68 _E4M-EY816)Z:$UB<FIW324 at -F!06G*(;UA)/S\E
+M27J":C\V.41C<'IR:$])7EI9:')I/" @241)=W!-/TUJ:UE)6$1-6TPV23XM
+M.6@[.4E98%A$<GR!>E4R+6MU=7)-(A5-;F@^+VN"<DQ))TEW,#9%>H9K23)%
+M:5Y)8#DE;^%J65IZ at U!-8%M-;EY:338M6W]B:TQ8:V)C:F9K:VIC539,6#Y%
+M<FI<>U at _16E:36)J9F9J5C8V/V)R;S8@&!56<FUJ:GJ&6C]B>G56,#XR*4EJ
+M7TE%6&%A64E-:&X[&TEB W*<9EQG.R 5)3E%1%QJ<EE56UA,7&MI6"U<:%A4
+M-@%% 6\ _E])6VYI86AR6D5J>G)_>#P5-&%>-C)J at F))+4EI6&Z#>G!A/CM@
+M46-C36)K=VI66G!W:$0M+2 ^@GIP8V-S:UM)2S\^8FIW8E at Y-EA8+45J8EE+
+M;H-]?4PV56IQ>GIB,#E64%DV%3YZ at FHM("UA1#85,GIK.4DP,$1H:#8;.?Y:
+M6$1:>F,Y26A%7GMZ<3\;-GIC;EI-3$U@:UQR<F)B.2 R)1 at M9DPT8FI@-UI<
+M-EIC9G)F645)6%IJ;UA$.S)C<G)F<69U<U9%:WUJ150M(#9R=TD^26)@63\V
+M6FM>/S]$6F-B7TUH9D0@(#9)3UEB8DUC:%E,6FIR6#!;33F"23X!+0%6 /Y9
+M6$5-8GI[=SX=27IF>H!$%1M$<#L2.7]W-A(G8F)L at WIR<%AA?6%6/RE/:WN(
+M;4E;<FMB840I+7)Z at G)F?8)H/S\V/V!:<FIH:%M;5ATE6EI%8&IQ=7UB,%9L
+M>G9V<DEB:$TW238Y9H6#8%5%66IH,BE-7S!$9CXM6FY@,BG6/UA)6G)A.4EO
+M63]:>H)M($AS9G9K238B.7!F>H-J;3XR,A4.$D1F14QR;T5);C\M/G)Z<F-?
+M239+8F)6)5IJ:EI8;G5:66AB25N"<FIH3R E5G5O:58#2:<_66MH1#Y):& _
+M*1LV:VM8/RDV5EA'<&]86&9K;VA>:V at V34DY.S\!50%/ /Y@:UDT1&]Z>DD[
+M-DQK;&M9,C)C>E4@)6)W/R!$2$UC>GIB8E at _:G)A52LE16:">W=?7&MC<&D_
+M+6)Z at GIF=HIW230M/V)B:V)J:F)Q:"(5,BT^:F)$8W-C/T1-<FYZ<V-U=6H^
+M22T^:X:)<F)56&)S8#P^8%IH>FA66G-K6BWH-F!B1%M:/DUR8C\V:WIK)25:
+M;W)R;UXM+4E';7AN?V P1$$8#BUH5DEM<FA)8G!4/V9<;8!K8#])>GIR6&AJ
+M8V9S=G5F35A-.6.'9EQ07S8E/F1F:V)<2U8Y37ER22U$>V$V&Q at M8&@#294Y
+M/UE-6G=P.5IR>W=H7G!@5EI5)S\!=0%; /Y/:FQ>26%<<E!I22E;<#\_/D]Z
+M>ED[,D]R;45H8$5/<GI,1V Y6F)?:F @)3EW?7IJ1UEB9FM)*4UM?7IR>GU[
+M:30T16)G;VIB;F]U>E\[/"5%<EE$:6%/6&!;:V9<8VYU at GI8+1(I:'V%@F))
+M16QK6U0Y33]B=6)0:'IK:27+)5AK.3]-15MZ8TL_8W5A030^35Q9;UQ%/U99
+M=VYB;7 P268I&"U,:%E::V--66IP3&)07'IU6CE6=8IU:F]R8V)N<GIK8$DM
+M%3EZ V*R6DD[/VA/:W-K8U at V1&MI11LM85A5/#1);V [5FA at 15E96FMP)TQN
+M?8)J7FMK8F!+)2<!@0%I /Y61&ML8VMI:%MC8#Y9<"T;+45W<EI6/S99:UMH
+M:%E8:GIF14P^23]$<(%<)2=B<EQB8#E0<&D_)4EH<G9N<GIZ<E at V26)J:VYF
+M9G**C()C:#]:@FM68TL_3W=N:F V26I]A85P*0X at 3&IUD'=$)6IB1&E88"\Y
+M2# G37=<83G^&S96.45$36AJ341A;W):5G!?/F),8F)814AK at W!B9G),36]+
+M-DAS9D5B<$4Y7UEH6FIO;6UZ:#!):VLY+6N(;TU86UA+66E>-B56<EY%240[
+M.V-+8WIV<FM at 6&MP9C0I)2=8:4EC>FD at 86QI154_3V)K/T]<<HI[8FIW;&-$
+M at AL; 6L!:@#^9#E-<5IBA7=O:G):8G<[&"5);VI:65Y$14U at 2%A)1&)M;V _
+M/#(@)6!U=U9"5EI%36 at Y.6):/!@R2VEO:VIN=7AB-C988&-N<EQJ?86"<EIH
+M4W5V<#XB("]J<FI$+55R<'J#?UX at .VE:/WJ/3!LE239,8G)J9%DE%25-6$18
+M_E0Y+256,D5P:SXV8GI[8$E[=V%R;V)06TD_:GQU:V)R;F)K;%XE6F];6VM8
+M.59@:VMR<G%B:G)9+7)L63YG?7IL6DE%-C]K:388(F)F/U5 at 8&%82VMR;7N#
+M<EEB>WMF6!@8+4]A<'YP,EMJ;TU)1$5?:F)@676%@F)0:F)B88(R)0%8 6@ 
+M_FM).6MG8(%N8FIR:U!K6#(M8VIC34UO;UMA:UI-/SE:8V)@/S8R$C)63W)R
+M:FM9/V%K63E)-CLR-D5C8VIF9G5W:#8V3TE;;X)-/W)R9G(_7&9B<H(^&!@M
+M6VI8+2)8?VYJ;7IW-C9@:"]- at G<\*4E),#]LB8-X64\_1%M84/YK6S9$6#M$
+M8F]$-DQR<FA88F9O:H)V8&!%/FMZ=&U;:G)Z>GMW-#Y-6VEO84E)36MW;FUR
+M6FMR?S]X8$]N<G5V?7)@6$0V<WA)*2M)33!C:FIP-F!S:UMZDG=/8WJ"=6Y!
+M&#(M2W"">DEL<FI9-BTM/W!933EK?7IR8$E$9W>":%4!+0%- +IA6#!6<GJ"
+M<FIK;G)C7TQ516)@6F!-<6U-37MZ:4(V8%Y?7UI>,A at I23Y::W)Z;TDY3&E@
+M63\R)4M: V+^:FYR>F(^/U at V-DU[:DUJ;U!S8F)R:FJ#<$E51#Y812TE17)]
+M:EE::TU).5E$)UEZ6C!-:F P1'J">FIK<&$Y6G)J6"=986AK8VM-241K8EI)
+M8#]:6X)R=8AK15IR<FQ8-C)B at GUZ<%\^375Z:EL[,&F(<F9O1$]B<FAW86)R
+MPW9N<G5R>G=)&V]W8#Y at 838E5F)B:RT^8VM/:HJ"6%9J at X-U<$M?7DE!8F],
+M:7IP8S\I,B5%<$DM3')Z<7 \)4QZ>G@!( $E /Y6.2T^:G5V<G=O6FQP6DU8
+M16EF14E;;VH^/W9U;U4V6FI02#E8,C(K+3984&)U>G!$/F)C:4\R&S]@:G)K
+M8FMN?W)A6EL_-C9J>F)N:C]J at FIZ<FIU<E!K6# [/C8I26Z"A6-::EI:24Q@
+M56%R6B(V66)?-EAZ=61F<G!$377^>ULG.4UMB'IR6F-S:6%)-G=;6SEK3V:%
+M<D]@>FYZ8" 2)UMR8GMW85MU>V-:&QM;@GUR<EE:;V9J8F9R=79C:6-:7'I4
+M$EE]6T5;<5E):V-:8C8T3VE%1'J*:3!%?'MV<FE,8G(_1%H^6E%C;V V1$0Y
+M;V at _16-Z9F)5+2UC at H)Z 38!,@#^:38M26-K7E-M8C!-8TT^245I8T5%7FQH
+M6$V#;EI8&SEK8TDV/BDM+1 at M8#Y%:WIR:%I?85%%22DV6&IN=6I>7FMR>V]B
+M:$DP3&)<8F1;:W5J<FYJ<FE-:V-)+2TI$B5-=(9U:V)(259$3$E(:F\V25A9
+M8D1$<FY>3UQN:V-U_GUZ8$\Y6VYP=3E, at F%8.5AR<F,P341J<DTP17=M?6L\
+M*RDY7SEB9F]C;GI[=PD2,&IZ=7IF8FIB8F%K?&YR:&QP:6MW7A at Y?W _27%I
+M3&IM36)$24MA65AQ at G V/F-F=6)@)T5P5#\\/%DP*V!H-C]I6W)J35AJ>F),
+M6#\M38)Z>@(^ --F5"5):&!,35Q at +3!,,B4[26-8/UAB:U]86GI]8E at I+6!P
+M65IF.3(@%2EB-CY?:'%O8FMI23 [*3!-6EMK?V]:3&)R:F)B53\V.TU$8G)R
+M9FIZ;P-K_F)B7E\R.R )"2UF<G6">C9%;UI$+2=/8D5K<F)K8D1K9F!-,$1B
+M6G=Z;'6 66M%6G5N8'5%-D5;8H!L23]$:VM$(")@;'IU<FA at 6$DV6#]:3411
+M;H,P("U6<G5U;7J :EA@;H)V;FQF>H!V@&,8&UAI-C!:@&]::$U:,&%92:I)
+M:7)M>F-I:4AF<UDV66E)+2D\:3XE6%HM+4UB at G)6:GIZ:T0Y24]B<G(!,@$M
+M /X^/AU%;VMC:&-I7D5F+2DR-DTM+6]_:$U,8GJ%>VA85EIJ65ER8$0T*TM@
+M-CY/87!P8G)_:#8E,#!I6SE:>G5J5EIB9G),6$DM)S8Y8&I:8EIR<EQ1:FYK
+M7E\V-CL."1LG-TA3@$4P66]A/BT^.3MS>F9C:EIP:FI,)38V,&+^6SEDC7]W
+M.41@@XEN-BUA8V)Z<EI))SY0;4LV7UE/<H)J;UH_-FA at 6UXV,$AF:&!@6FIB
+M3&2!BGI--W)Z<V--3WV&=GUP/"DY7U E+WJ 8V-;8B5,6D5)<')1:F)J>$5%
+M<FMB:W<^(!LV8E8_35 at R+3!C?7--9'V#<DD[37)O at F); 2D!&P#^/S(8-G=U
+M8VA;<F at _:"TM53LM$AM:BF]).5IK?8)Q9G1K<FI><FMI239M3S9%6VMJ;UI1
+M@'=8+3DI8V8V67IZ<F]B36N"3%@Y+3\_/EEB6%M-7&I),$1M;F)B24D^(# [
+M(!L;,')I2T1B6EM813(E8'IV:V),8X*":E9)*2!9_DDG1'V*:#E%8'9[8B4M
+M:%E:8E9-21L5/H%R:W)5(EEW4%EA-C9H<F-I7E5/6EIK<G!C.25,>H9[12UJ
+M;FM))S!WBGU]>VE>-D58+2=J:V!P9U!+8$4M/G>!;F)16G=C6%IZ>G)_21(.
+M&UII;G!?.QLB86YJ.T5U?7)).5AR at H)J3P%5 40 _F<V&!M9<DU)37%R7V([
+M)UA4)0D5,'IZ:$D^17)Z<F9R;&UN;FI/8V _<%0E+4EJ<FI6169B3S9$-V%?
+M.5ER9FYR8#EC>FEG*25)9EM;:DU::VIK5C8G8G)R:EIC8$EJ=V _1%]U>W!C
+M8TA-<DPI,$59;G]B+39W at FU_6" 58)EF35MUA6 V)UIU<ED;*59):6 ^630.
+M#B)B VKD21(M7T1,:$0;8H!L8V);8%LP16N :"4827)Z>F)%6VIK1!@I:X)U
+M?7MF:#8^:4E):4U$=8AI:W!%&R5$>X9W8%IK:$E$@(-ZAG P#A5$6WJ0;RT;
+M,DEW;S8P8%YB8$D^8(!J/P%; 7< _H!)*25)8DD_.4]Z@&9A14E?52 8(%9T
+M>FM$)4QL8EQ08V-F>FA$3UE-:%@I%1L_>G)F1$TY138V.TE8,%AB/UIJ7S9:
+M<EEL)0XE1%EB:CY- at GUW8D(I6&)R<DQ,2#!>?7IF:G)F:VIM=T0Y:EH@*41%
+M3'5C/REAAVUZ8"4 at 8_YL9EEUBEE@-DQN:VD_7FA-<'552QL)#B!)34P_7T19
+M7C8P6FTM8GIR6CY,6EHE%39J9QL88'I:6G)F8V-K2R M:WIV<'=L6"DO;W!:
+M:38B8HIZ:WAF.SM99X)]<V-:8"4;8']K;H-@(" V,FV2>C\I5$1:=SXM6#8P
+M65 at V,'*"<#X!+P%J /Y_<G1D:&)8-B4I:Y!R8V!)6FA6.Q@;/VUM7SPY8DP^
+M+4E82')I/S998&A8/"D at -FAR;&AH25]$)2DM1"TV238Y8&!B;VM)8$0@*TM-
+M8E8;+7J*>FI,+3LV8H!C2S V-D^&>F-U:59%6WI;36IP4 XM6&IJ7%I)6GIU
+M:U at M/&/^:V]$:H9M:FAC8DAO15AT8'*!6&8M&!A$<&]?8&MK>GI:.T5H5EIB
+M5CDI*4];.1LM6F at I(FN -B=Z:VMQ8#8E+4]K>V-(:ED8%4R"<UDE&U9U=F-K
+M:4E/:6MU=GMP7U4@#BUB:5YZ:3P\-C9K at G):.S8_.6E--EA$&SY?21L_ at F%>
+M 14!.0#^6EIW>G]R:#8;-%AU:UIW7TMH6TDI$BU:8E]F25]8-BU886)R;S8;
+M/VAP/S9>7D0Y6FMR=V!A6#PI(C8G*SPM-EA1:WIR<&E@,BE$8$PP#AMKC&UJ
+M8EA5-F"":TU-7B4=>FUQ>FU6-DUZ6T1F:V@@&$EZ<D\P6F)M<G!A-E1K_F-9
+M-TQ;6W)VA&H_8C\M3')U at F&(;SXE/VEH6FAK:G5Z<E],:&%H8#XE%2E)6& M
+M/FIB.R5R at S8.:VEC=W ^)39837)Q3&)%'1 at V<FI)*39:7%MC8DE%6FMB8F9S
+M<&A>(!4 at 66]><F),8#!><'MW8F=98&AK7SE$4S9):&@R+8(V80(M /XM-FEN
+M at GUO025$6$Q--G):/FAO669$/UMO:F-B8V \+5AC>X-Q5"LV8'$_.6)_:$59
+M469U>6M;8$M>6#\T*QLV6$QC>G5W8ED_/#=H1"TP(%B*;6)-8V=-8G)B/UI:
+M.RER=&UZ<%A%2']B/VIJ;S8;-GIZ6"U86V)06V%:5FK^:DU)12=,=WV&>UM:
+M6#LY:G6#>'5\8UE4/D5866I@:V=R8F)J;W!U51 at .&T1)8C\Y7S @&UIZ21M@
+M6EM[>F]/86%%8W=>:#8@(#]Q:U9-24AZ>V!;)2!9:UA)/D4[#A at 8-#)9<FMS
+M;%MH-F!O>GIK:$UA=7)R=%@M,%9R at F$_ at BM+ 4D!+0#^,BE):G6">E4M7FA$
+M33YB3S9:<$]B;TU-<H!K9F]J5C8Y37J%<FA95&AP24EC?'-8/SE(9GIR8F-:
+M:&M:9#P;+4E)3')W;6)$+T0Y:CXE,AL^<'-B16%L<'IU33!;6DL at 3'IU?7UP
+M3$EK;V-H6V@\)4EU=6A-7V)H13E(<&);HVI;6V V.7*"@G);8V-F6&E;:5]'
+M<FMF=TTB6$QB.5E-5D5: W+:@5 at 2#A4^8&]?24DE%1M8;UHV:C ^<FUM:GIK
+M.5IW8TT_*2D^8UI47UA6:GMK5CM$66A:369A2RDT/%Y%2TE$:G!:65568WUZ
+M:EI9/S]W=8J%22DV6GIO7CM6 5@!.P#^1!LT26-R<E at M66):7V-H-B5)8$Q/
+M8E at Y8H)Z:G!P7%LV,'*"<4UB6F)R63Y$=X%J6#LM8'!C:%I:2&-<<%0M+3Y)
+M269J:EI?/EE$6%8@%1M)/V- at +45B<GA]:TU?8V [,EER;GR'8$U96VMP3$PI
+M)6*#:V9B8FIJ33!9 at 7)0VF)H8&E8,&IU>GM04&-H:VMJ:E\P8G=B<$0V25I6
+M+3])+25):V-R>F$=#A at M27)R:V$_-%AK:U at E1"4M1450;8!O.4UR;%M9/RTV
+M1#\_65]U=G5J5CY)/FEG.0-CHVEJ<7=B83\;-EAB6$DV.7)Z8D4_)1M;?(:*
+M:$1$16EC8$E6 5\!60#8:6!8:TUK<$D=16A966-W6B _8$587T]+6GUZ9TUK
+M8V9$/UQU:CE%.5IK83\\37!P:%\_35M)869%7V!,6DT_-EAF3VEB;%!;6FJ 
+M>F=8,BU824U?-C]$9P-R_F-D<FI:23EA8VZ"<E5$16-S6C<I)4E]:F-B8G)Z
+M21U:@WU,6FMC8G!@:F)R?69::$Q;8F9N:#Y,9&IR7UE?6F!%1#L8#CYJ8&IK
+M9D$T1#8_>H6"<EE);WIJ52 ;138\-$EB9G)8;&YK86!A2V%)250P6G)]@VA%
+M,"TM8W=I6:5;6FIZ at 8E[640@%2UN:4D^,&)J:5XY(#!$:G6";EE-25MB8U9?
+M 4T!:P"O@&]98C]@:5HV16A98S]B:$E):6!86UAI7G)U6SE,8V),245J<F!$
+M/U9;5DEI:6,#8OYH;V!-6V I26]@23Y$&SEI4&-B=4]C15&*C()W239)6V-B
+M25Y916J">F)9<GI/22T_8DE;@G)J:$\5(DUO+4EF34E$.6^)8"U, at GPY25I?
+M1W-K:$1B>FYL8F!-6DUC;T1-6UM-4'!B6F--5C8@($5H:%8T,C(;*U1,:GA]
+M>EG.3'*"<F,_,EDV13]:;TQ98GIN4&IQ6V)Z8F-D*3Y99GI?/D0I(#YC>E!9
+M6UIN at H)_:54@#B5A<$E$25A$8&QI.RU6:&9]>V)).3E8:$U8 5D!=@#^BG=/
+M8R4V5E]%8&I,:U at V6$DY8&!8/T]R>GV":#\_8VI at 23];:F!6:&!8/S]K>G)K
+M3TUR at GIJ:VL_6FMK:$D\*3];8')R:E!B24EU?7V*;D])35IB3&%)&U"*BF9(
+M6GIW.2TV33ECAV9<3VTM%3Y?6%AB.2 8%3EP>F]F?84Y_B4B/CEB86-)2&MC
+M7D1I84DK8V=B:FMC+2UB8D]C8U at W24U:;W)H7E561#9)+3YJ;G):4&YZ=7IK
+M63\V56!I<F8_1'=K1%QI+3!W?%M8/%A98VM)+39;(!(V:%I86EMN>VYM<%DG
+A&#9B<$M)66 _,%I[<"TV6F)R at GIP5#!)6H)@6 %9 74 
+ 
+end
diff --git a/lib-python/2.2/test/output/test_MimeWriter b/lib-python/2.2/test/output/test_MimeWriter
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_MimeWriter
@@ -0,0 +1,110 @@
+test_MimeWriter
+From: bwarsaw at cnri.reston.va.us
+Date: Mon Feb 12 17:21:48 EST 1996
+To: kss-submit at cnri.reston.va.us
+MIME-Version: 1.0
+Content-Type: multipart/knowbot;
+    boundary="801spam999";
+    version="0.1"
+
+This is a multi-part message in MIME format.
+
+--801spam999
+Content-Type: multipart/knowbot-metadata;
+    boundary="802spam999"
+
+
+--802spam999
+Content-Type: message/rfc822
+KP-Metadata-Type: simple
+KP-Access: read-only
+
+KPMD-Interpreter: python
+KPMD-Interpreter-Version: 1.3
+KPMD-Owner-Name: Barry Warsaw
+KPMD-Owner-Rendezvous: bwarsaw at cnri.reston.va.us
+KPMD-Home-KSS: kss.cnri.reston.va.us
+KPMD-Identifier: hdl://cnri.kss/my_first_knowbot
+KPMD-Launch-Date: Mon Feb 12 16:39:03 EST 1996
+
+--802spam999
+Content-Type: text/isl
+KP-Metadata-Type: complex
+KP-Metadata-Key: connection
+KP-Access: read-only
+KP-Connection-Description: Barry's Big Bass Business
+KP-Connection-Id: B4
+KP-Connection-Direction: client
+
+INTERFACE Seller-1;
+
+TYPE Seller = OBJECT
+    DOCUMENTATION "A simple Seller interface to test ILU"
+    METHODS
+            price():INTEGER,
+    END;
+
+--802spam999
+Content-Type: message/external-body;
+    access-type="URL";
+    URL="hdl://cnri.kss/generic-knowbot"
+
+Content-Type: text/isl
+KP-Metadata-Type: complex
+KP-Metadata-Key: generic-interface
+KP-Access: read-only
+KP-Connection-Description: Generic Interface for All Knowbots
+KP-Connection-Id: generic-kp
+KP-Connection-Direction: client
+
+
+--802spam999--
+
+--801spam999
+Content-Type: multipart/knowbot-code;
+    boundary="803spam999"
+
+
+--803spam999
+Content-Type: text/plain
+KP-Module-Name: BuyerKP
+
+class Buyer:
+    def __setup__(self, maxprice):
+        self._maxprice = maxprice
+
+    def __main__(self, kos):
+        """Entry point upon arrival at a new KOS."""
+        broker = kos.broker()
+        # B4 == Barry's Big Bass Business :-)
+        seller = broker.lookup('Seller_1.Seller', 'B4')
+        if seller:
+            price = seller.price()
+            print 'Seller wants $', price, '... '
+            if price > self._maxprice:
+                print 'too much!'
+            else:
+                print "I'll take it!"
+        else:
+            print 'no seller found here'
+
+--803spam999--
+
+--801spam999
+Content-Type: multipart/knowbot-state;
+    boundary="804spam999"
+KP-Main-Module: main
+
+
+--804spam999
+Content-Type: text/plain
+KP-Module-Name: main
+
+# instantiate a buyer instance and put it in a magic place for the KOS
+# to find.
+__kp__ = Buyer()
+__kp__.__setup__(500)
+
+--804spam999--
+
+--801spam999--
diff --git a/lib-python/2.2/test/output/test_asynchat b/lib-python/2.2/test/output/test_asynchat
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_asynchat
@@ -0,0 +1,3 @@
+test_asynchat
+Connected
+Received: 'hello world'
diff --git a/lib-python/2.2/test/output/test_augassign b/lib-python/2.2/test/output/test_augassign
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_augassign
@@ -0,0 +1,51 @@
+test_augassign
+6
+[6]
+6
+[1, 2, 3, 4, 1, 2, 3, 4]
+[1, 2, 1, 2, 3]
+1
+1
+1
+11
+1
+12
+1
+1
+13
+__add__ called
+__radd__ called
+__iadd__ called
+__sub__ called
+__rsub__ called
+__isub__ called
+__mul__ called
+__rmul__ called
+__imul__ called
+__div__ called
+__rdiv__ called
+__idiv__ called
+__floordiv__ called
+__rfloordiv__ called
+__ifloordiv__ called
+__mod__ called
+__rmod__ called
+__imod__ called
+__pow__ called
+__rpow__ called
+__ipow__ called
+__or__ called
+__ror__ called
+__ior__ called
+__and__ called
+__rand__ called
+__iand__ called
+__xor__ called
+__rxor__ called
+__ixor__ called
+__rshift__ called
+__rrshift__ called
+__irshift__ called
+__lshift__ called
+__rlshift__ called
+__ilshift__ called
diff --git a/lib-python/2.2/test/output/test_binascii b/lib-python/2.2/test/output/test_binascii
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_binascii
@@ -0,0 +1,29 @@
+test_binascii
+Conversion between binary data and ASCII
+binascii.Error
+binascii.Incomplete
+a2b_base64     : (ascii) -> bin. Decode a line of base64 data
+b2a_base64     : (bin) -> ascii. Base64-code line of data
+a2b_hqx        : ascii -> bin, done. Decode .hqx coding
+b2a_hqx        : Encode .hqx data
+crc_hqx        : (data, oldcrc) -> newcrc. Compute hqx CRC incrementally
+rlecode_hqx    : Binhex RLE-code binary data
+rledecode_hqx  : Decode hexbin RLE-coded string
+a2b_uu         : (ascii) -> bin. Decode a line of uuencoded data
+b2a_uu         : (bin) -> ascii. Uuencode line of data
+base64 test
+VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZy4NCgABAgMEBQYHCAkK
+CwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJD
+REVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8
+fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmam5ydnp+goaKjpKWmp6ipqqusra6vsLGys7S1
+tre4ubq7vL2+v8DBwsPExcbHyMnKy8zNzs/Q0dLT1NXW19jZ2tvc3d7f4OHi4+Tl5ufo6err7O3u
+7/Dx8vP09fb3+Pn6+/z9/v8NCkhlbGxvIHdvcmxkLgo=
+uu test
+M5&AE('%U:6-K(&)R;W=N(&9O>"!J=6UP<R!O=F5R('1H92!L87IY(&1O9RX-
+M"@ ! @,$!08'" D*"PP-#@\0$1(3%!46%Q at 9&AL<'1X?("$B(R0E)B<H*2HK
+M+"TN+S Q,C,T-38W.#DZ.SP]/C] 04)#1$5&1TA)2DM,34Y/4%%24U155E=8
+M65I;7%U>7V!A8F-D969G:&EJ:VQM;F]P<7)S='5V=WAY>GM\?7Y_@(&"@X2%
+MAH>(B8J+C(V.CY"1DI.4E9:7F)F:FYR=GI^@H:*CI*6FIZBIJJNLK:ZOL+&R
+ML[2UMK>XN;J[O+V^O\#!PL/$Q<;'R,G*R\S-SL_0T=+3U-76U]C9VMO<W=[?
+MX.'BX^3EYN?HZ>KK[.WN[_#Q\O/T]?;W^/GZ^_S]_O\-"DAE;&QO('=O<FQD
+"+ at H 
diff --git a/lib-python/2.2/test/output/test_builtin b/lib-python/2.2/test/output/test_builtin
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_builtin
@@ -0,0 +1,53 @@
+test_builtin
+4. Built-in functions
+test_b1
+__import__
+abs
+apply
+callable
+chr
+cmp
+coerce
+compile
+complex
+delattr
+dir
+divmod
+eval
+execfile
+filter
+float
+getattr
+hasattr
+hash
+hex
+id
+int
+isinstance
+issubclass
+len
+list
+long
+map
+max
+min
+test_b2
+oct
+open
+ord
+pow
+range
+input and raw_input
+testing
+testing
+reduce
+reload
+repr
+round
+setattr
+str
+tuple
+type
+vars
+xrange
+zip
diff --git a/lib-python/2.2/test/output/test_cfgparser b/lib-python/2.2/test/output/test_cfgparser
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_cfgparser
@@ -0,0 +1,9 @@
+test_cfgparser
+Testing basic accessors...
+Testing writing of files...
+Testing case sensitivity...
+Testing interpretation of boolean Values...
+Testing value interpolation...
+Testing parse errors...
+Testing query interface...
+Testing miscellaneous error conditions...
diff --git a/lib-python/2.2/test/output/test_cgi b/lib-python/2.2/test/output/test_cgi
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_cgi
@@ -0,0 +1,29 @@
+test_cgi
+''
+'&'
+'&&'
+';'
+';&;'
+'='
+'=&='
+'=;='
+'=a'
+'&=a'
+'=a&'
+'=&a'
+'b=a'
+'b+=a'
+'a=b=a'
+'a=+b=a'
+'&b=a'
+'b&=a'
+'a=a+b&b=b+c'
+'a=a+b&a=b+a'
+'x=1&y=2.0&z=2-3.%2b0'
+'x=1;y=2.0&z=2-3.%2b0'
+'x=1;y=2.0;z=2-3.%2b0'
+'Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env'
+'group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse'
+Testing log
+Testing initlog 1
+Testing log 2
diff --git a/lib-python/2.2/test/output/test_charmapcodec b/lib-python/2.2/test/output/test_charmapcodec
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_charmapcodec
@@ -0,0 +1,16 @@
+test_charmapcodec
+abc == abc: OK
+abcdef == abcdef: OK
+defabc == defabc: OK
+dabcf == dabcf: OK
+dabcfabc == dabcfabc: OK
+abc == abc: OK
+abcdef == abcdef: OK
+defabc == defabc: OK
+dabcf == dabcf: OK
+dabcfabc == dabcfabc: OK
+def == def: OK
+def == def: OK
+df == df: OK
+df == df: OK
+\001 maps to undefined: OK
diff --git a/lib-python/2.2/test/output/test_class b/lib-python/2.2/test/output/test_class
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_class
@@ -0,0 +1,101 @@
+test_class
+__init__: ()
+__coerce__: (1,)
+__add__: (1,)
+__coerce__: (1,)
+__radd__: (1,)
+__coerce__: (1,)
+__sub__: (1,)
+__coerce__: (1,)
+__rsub__: (1,)
+__coerce__: (1,)
+__mul__: (1,)
+__coerce__: (1,)
+__rmul__: (1,)
+__coerce__: (1,)
+__div__: (1,)
+__coerce__: (1,)
+__rdiv__: (1,)
+__coerce__: (1,)
+__mod__: (1,)
+__coerce__: (1,)
+__rmod__: (1,)
+__coerce__: (1,)
+__divmod__: (1,)
+__coerce__: (1,)
+__rdivmod__: (1,)
+__coerce__: (1,)
+__pow__: (1,)
+__coerce__: (1,)
+__rpow__: (1,)
+__coerce__: (1,)
+__rshift__: (1,)
+__coerce__: (1,)
+__rrshift__: (1,)
+__coerce__: (1,)
+__lshift__: (1,)
+__coerce__: (1,)
+__rlshift__: (1,)
+__coerce__: (1,)
+__and__: (1,)
+__coerce__: (1,)
+__rand__: (1,)
+__coerce__: (1,)
+__or__: (1,)
+__coerce__: (1,)
+__ror__: (1,)
+__coerce__: (1,)
+__xor__: (1,)
+__coerce__: (1,)
+__rxor__: (1,)
+__contains__: (1,)
+__getitem__: (1,)
+__setitem__: (1, 1)
+__delitem__: (1,)
+__getslice__: (0, 42)
+__setslice__: (0, 42, 'The Answer')
+__delslice__: (0, 42)
+__getitem__: (slice(2, 1024, 10),)
+__setitem__: (slice(2, 1024, 10), 'A lot')
+__delitem__: (slice(2, 1024, 10),)
+__getitem__: ((slice(None, 42, None), Ellipsis, slice(None, 24, None), 24, 100),)
+__setitem__: ((slice(None, 42, None), Ellipsis, slice(None, 24, None), 24, 100), 'Strange')
+__delitem__: ((slice(None, 42, None), Ellipsis, slice(None, 24, None), 24, 100),)
+__getitem__: (slice(0, 42, None),)
+__setitem__: (slice(0, 42, None), 'The Answer')
+__delitem__: (slice(0, 42, None),)
+__neg__: ()
+__pos__: ()
+__abs__: ()
+__int__: ()
+__long__: ()
+__float__: ()
+__oct__: ()
+__hex__: ()
+__hash__: ()
+__repr__: ()
+__str__: ()
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__coerce__: (1,)
+__cmp__: (1,)
+__del__: ()
+__getattr__: ('spam',)
+__setattr__: ('eggs', 'spam, spam, spam and ham')
+__delattr__: ('cardinal',)
diff --git a/lib-python/2.2/test/output/test_coercion b/lib-python/2.2/test/output/test_coercion
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_coercion
@@ -0,0 +1,1054 @@
+test_coercion
+2 + 2 = 4
+2 += 2 => 4
+2 - 2 = 0
+2 -= 2 => 0
+2 * 2 = 4
+2 *= 2 => 4
+2 / 2 = 1
+2 /= 2 => 1
+2 ** 2 = 4
+2 **= 2 => 4
+2 % 2 = 0
+2 %= 2 => 0
+2 + 4.0 = 6.0
+2 += 4.0 => 6.0
+2 - 4.0 = -2.0
+2 -= 4.0 => -2.0
+2 * 4.0 = 8.0
+2 *= 4.0 => 8.0
+2 / 4.0 = 0.5
+2 /= 4.0 => 0.5
+2 ** 4.0 = 16.0
+2 **= 4.0 => 16.0
+2 % 4.0 = 2.0
+2 %= 4.0 => 2.0
+2 + 2 = 4
+2 += 2 => 4
+2 - 2 = 0
+2 -= 2 => 0
+2 * 2 = 4
+2 *= 2 => 4
+2 / 2 = 1
+2 /= 2 => 1
+2 ** 2 = 4
+2 **= 2 => 4
+2 % 2 = 0
+2 %= 2 => 0
+2 + (2+0j) = (4+0j)
+2 += (2+0j) => (4+0j)
+2 - (2+0j) = 0j
+2 -= (2+0j) => 0j
+2 * (2+0j) = (4+0j)
+2 *= (2+0j) => (4+0j)
+2 / (2+0j) = (1+0j)
+2 /= (2+0j) => (1+0j)
+2 ** (2+0j) = (4+0j)
+2 **= (2+0j) => (4+0j)
+2 % (2+0j) = 0j
+2 %= (2+0j) => 0j
+2 + [1] ... exceptions.TypeError
+2 += [1] ... exceptions.TypeError
+2 - [1] ... exceptions.TypeError
+2 -= [1] ... exceptions.TypeError
+2 * [1] = [1, 1]
+2 *= [1] => [1, 1]
+2 / [1] ... exceptions.TypeError
+2 /= [1] ... exceptions.TypeError
+2 ** [1] ... exceptions.TypeError
+2 **= [1] ... exceptions.TypeError
+2 % [1] ... exceptions.TypeError
+2 %= [1] ... exceptions.TypeError
+2 + (2,) ... exceptions.TypeError
+2 += (2,) ... exceptions.TypeError
+2 - (2,) ... exceptions.TypeError
+2 -= (2,) ... exceptions.TypeError
+2 * (2,) = (2, 2)
+2 *= (2,) => (2, 2)
+2 / (2,) ... exceptions.TypeError
+2 /= (2,) ... exceptions.TypeError
+2 ** (2,) ... exceptions.TypeError
+2 **= (2,) ... exceptions.TypeError
+2 % (2,) ... exceptions.TypeError
+2 %= (2,) ... exceptions.TypeError
+2 + None ... exceptions.TypeError
+2 += None ... exceptions.TypeError
+2 - None ... exceptions.TypeError
+2 -= None ... exceptions.TypeError
+2 * None ... exceptions.TypeError
+2 *= None ... exceptions.TypeError
+2 / None ... exceptions.TypeError
+2 /= None ... exceptions.TypeError
+2 ** None ... exceptions.TypeError
+2 **= None ... exceptions.TypeError
+2 % None ... exceptions.TypeError
+2 %= None ... exceptions.TypeError
+2 + <MethodNumber 1> = 3
+2 += <MethodNumber 1> => 3
+2 - <MethodNumber 1> = 1
+2 -= <MethodNumber 1> => 1
+2 * <MethodNumber 1> = 2
+2 *= <MethodNumber 1> => 2
+2 / <MethodNumber 1> = 2
+2 /= <MethodNumber 1> => 2
+2 ** <MethodNumber 1> = 2
+2 **= <MethodNumber 1> => 2
+2 % <MethodNumber 1> = 0
+2 %= <MethodNumber 1> => 0
+2 + <CoerceNumber 2> = 4
+2 += <CoerceNumber 2> => 4
+2 - <CoerceNumber 2> = 0
+2 -= <CoerceNumber 2> => 0
+2 * <CoerceNumber 2> = 4
+2 *= <CoerceNumber 2> => 4
+2 / <CoerceNumber 2> = 1
+2 /= <CoerceNumber 2> => 1
+2 ** <CoerceNumber 2> = 4
+2 **= <CoerceNumber 2> => 4
+2 % <CoerceNumber 2> = 0
+2 %= <CoerceNumber 2> => 0
+4.0 + 2 = 6.0
+4.0 += 2 => 6.0
+4.0 - 2 = 2.0
+4.0 -= 2 => 2.0
+4.0 * 2 = 8.0
+4.0 *= 2 => 8.0
+4.0 / 2 = 2.0
+4.0 /= 2 => 2.0
+4.0 ** 2 = 16.0
+4.0 **= 2 => 16.0
+4.0 % 2 = 0.0
+4.0 %= 2 => 0.0
+4.0 + 4.0 = 8.0
+4.0 += 4.0 => 8.0
+4.0 - 4.0 = 0.0
+4.0 -= 4.0 => 0.0
+4.0 * 4.0 = 16.0
+4.0 *= 4.0 => 16.0
+4.0 / 4.0 = 1.0
+4.0 /= 4.0 => 1.0
+4.0 ** 4.0 = 256.0
+4.0 **= 4.0 => 256.0
+4.0 % 4.0 = 0.0
+4.0 %= 4.0 => 0.0
+4.0 + 2 = 6.0
+4.0 += 2 => 6.0
+4.0 - 2 = 2.0
+4.0 -= 2 => 2.0
+4.0 * 2 = 8.0
+4.0 *= 2 => 8.0
+4.0 / 2 = 2.0
+4.0 /= 2 => 2.0
+4.0 ** 2 = 16.0
+4.0 **= 2 => 16.0
+4.0 % 2 = 0.0
+4.0 %= 2 => 0.0
+4.0 + (2+0j) = (6+0j)
+4.0 += (2+0j) => (6+0j)
+4.0 - (2+0j) = (2+0j)
+4.0 -= (2+0j) => (2+0j)
+4.0 * (2+0j) = (8+0j)
+4.0 *= (2+0j) => (8+0j)
+4.0 / (2+0j) = (2+0j)
+4.0 /= (2+0j) => (2+0j)
+4.0 ** (2+0j) = (16+0j)
+4.0 **= (2+0j) => (16+0j)
+4.0 % (2+0j) = 0j
+4.0 %= (2+0j) => 0j
+4.0 + [1] ... exceptions.TypeError
+4.0 += [1] ... exceptions.TypeError
+4.0 - [1] ... exceptions.TypeError
+4.0 -= [1] ... exceptions.TypeError
+4.0 * [1] ... exceptions.TypeError
+4.0 *= [1] ... exceptions.TypeError
+4.0 / [1] ... exceptions.TypeError
+4.0 /= [1] ... exceptions.TypeError
+4.0 ** [1] ... exceptions.TypeError
+4.0 **= [1] ... exceptions.TypeError
+4.0 % [1] ... exceptions.TypeError
+4.0 %= [1] ... exceptions.TypeError
+4.0 + (2,) ... exceptions.TypeError
+4.0 += (2,) ... exceptions.TypeError
+4.0 - (2,) ... exceptions.TypeError
+4.0 -= (2,) ... exceptions.TypeError
+4.0 * (2,) ... exceptions.TypeError
+4.0 *= (2,) ... exceptions.TypeError
+4.0 / (2,) ... exceptions.TypeError
+4.0 /= (2,) ... exceptions.TypeError
+4.0 ** (2,) ... exceptions.TypeError
+4.0 **= (2,) ... exceptions.TypeError
+4.0 % (2,) ... exceptions.TypeError
+4.0 %= (2,) ... exceptions.TypeError
+4.0 + None ... exceptions.TypeError
+4.0 += None ... exceptions.TypeError
+4.0 - None ... exceptions.TypeError
+4.0 -= None ... exceptions.TypeError
+4.0 * None ... exceptions.TypeError
+4.0 *= None ... exceptions.TypeError
+4.0 / None ... exceptions.TypeError
+4.0 /= None ... exceptions.TypeError
+4.0 ** None ... exceptions.TypeError
+4.0 **= None ... exceptions.TypeError
+4.0 % None ... exceptions.TypeError
+4.0 %= None ... exceptions.TypeError
+4.0 + <MethodNumber 1> = 5.0
+4.0 += <MethodNumber 1> => 5.0
+4.0 - <MethodNumber 1> = 3.0
+4.0 -= <MethodNumber 1> => 3.0
+4.0 * <MethodNumber 1> = 4.0
+4.0 *= <MethodNumber 1> => 4.0
+4.0 / <MethodNumber 1> = 4.0
+4.0 /= <MethodNumber 1> => 4.0
+4.0 ** <MethodNumber 1> = 4.0
+4.0 **= <MethodNumber 1> => 4.0
+4.0 % <MethodNumber 1> = 0.0
+4.0 %= <MethodNumber 1> => 0.0
+4.0 + <CoerceNumber 2> = 6.0
+4.0 += <CoerceNumber 2> => 6.0
+4.0 - <CoerceNumber 2> = 2.0
+4.0 -= <CoerceNumber 2> => 2.0
+4.0 * <CoerceNumber 2> = 8.0
+4.0 *= <CoerceNumber 2> => 8.0
+4.0 / <CoerceNumber 2> = 2.0
+4.0 /= <CoerceNumber 2> => 2.0
+4.0 ** <CoerceNumber 2> = 16.0
+4.0 **= <CoerceNumber 2> => 16.0
+4.0 % <CoerceNumber 2> = 0.0
+4.0 %= <CoerceNumber 2> => 0.0
+2 + 2 = 4
+2 += 2 => 4
+2 - 2 = 0
+2 -= 2 => 0
+2 * 2 = 4
+2 *= 2 => 4
+2 / 2 = 1
+2 /= 2 => 1
+2 ** 2 = 4
+2 **= 2 => 4
+2 % 2 = 0
+2 %= 2 => 0
+2 + 4.0 = 6.0
+2 += 4.0 => 6.0
+2 - 4.0 = -2.0
+2 -= 4.0 => -2.0
+2 * 4.0 = 8.0
+2 *= 4.0 => 8.0
+2 / 4.0 = 0.5
+2 /= 4.0 => 0.5
+2 ** 4.0 = 16.0
+2 **= 4.0 => 16.0
+2 % 4.0 = 2.0
+2 %= 4.0 => 2.0
+2 + 2 = 4
+2 += 2 => 4
+2 - 2 = 0
+2 -= 2 => 0
+2 * 2 = 4
+2 *= 2 => 4
+2 / 2 = 1
+2 /= 2 => 1
+2 ** 2 = 4
+2 **= 2 => 4
+2 % 2 = 0
+2 %= 2 => 0
+2 + (2+0j) = (4+0j)
+2 += (2+0j) => (4+0j)
+2 - (2+0j) = 0j
+2 -= (2+0j) => 0j
+2 * (2+0j) = (4+0j)
+2 *= (2+0j) => (4+0j)
+2 / (2+0j) = (1+0j)
+2 /= (2+0j) => (1+0j)
+2 ** (2+0j) = (4+0j)
+2 **= (2+0j) => (4+0j)
+2 % (2+0j) = 0j
+2 %= (2+0j) => 0j
+2 + [1] ... exceptions.TypeError
+2 += [1] ... exceptions.TypeError
+2 - [1] ... exceptions.TypeError
+2 -= [1] ... exceptions.TypeError
+2 * [1] = [1, 1]
+2 *= [1] => [1, 1]
+2 / [1] ... exceptions.TypeError
+2 /= [1] ... exceptions.TypeError
+2 ** [1] ... exceptions.TypeError
+2 **= [1] ... exceptions.TypeError
+2 % [1] ... exceptions.TypeError
+2 %= [1] ... exceptions.TypeError
+2 + (2,) ... exceptions.TypeError
+2 += (2,) ... exceptions.TypeError
+2 - (2,) ... exceptions.TypeError
+2 -= (2,) ... exceptions.TypeError
+2 * (2,) = (2, 2)
+2 *= (2,) => (2, 2)
+2 / (2,) ... exceptions.TypeError
+2 /= (2,) ... exceptions.TypeError
+2 ** (2,) ... exceptions.TypeError
+2 **= (2,) ... exceptions.TypeError
+2 % (2,) ... exceptions.TypeError
+2 %= (2,) ... exceptions.TypeError
+2 + None ... exceptions.TypeError
+2 += None ... exceptions.TypeError
+2 - None ... exceptions.TypeError
+2 -= None ... exceptions.TypeError
+2 * None ... exceptions.TypeError
+2 *= None ... exceptions.TypeError
+2 / None ... exceptions.TypeError
+2 /= None ... exceptions.TypeError
+2 ** None ... exceptions.TypeError
+2 **= None ... exceptions.TypeError
+2 % None ... exceptions.TypeError
+2 %= None ... exceptions.TypeError
+2 + <MethodNumber 1> = 3
+2 += <MethodNumber 1> => 3
+2 - <MethodNumber 1> = 1
+2 -= <MethodNumber 1> => 1
+2 * <MethodNumber 1> = 2
+2 *= <MethodNumber 1> => 2
+2 / <MethodNumber 1> = 2
+2 /= <MethodNumber 1> => 2
+2 ** <MethodNumber 1> = 2
+2 **= <MethodNumber 1> => 2
+2 % <MethodNumber 1> = 0
+2 %= <MethodNumber 1> => 0
+2 + <CoerceNumber 2> = 4
+2 += <CoerceNumber 2> => 4
+2 - <CoerceNumber 2> = 0
+2 -= <CoerceNumber 2> => 0
+2 * <CoerceNumber 2> = 4
+2 *= <CoerceNumber 2> => 4
+2 / <CoerceNumber 2> = 1
+2 /= <CoerceNumber 2> => 1
+2 ** <CoerceNumber 2> = 4
+2 **= <CoerceNumber 2> => 4
+2 % <CoerceNumber 2> = 0
+2 %= <CoerceNumber 2> => 0
+(2+0j) + 2 = (4+0j)
+(2+0j) += 2 => (4+0j)
+(2+0j) - 2 = 0j
+(2+0j) -= 2 => 0j
+(2+0j) * 2 = (4+0j)
+(2+0j) *= 2 => (4+0j)
+(2+0j) / 2 = (1+0j)
+(2+0j) /= 2 => (1+0j)
+(2+0j) ** 2 = (4+0j)
+(2+0j) **= 2 => (4+0j)
+(2+0j) % 2 = 0j
+(2+0j) %= 2 => 0j
+(2+0j) + 4.0 = (6+0j)
+(2+0j) += 4.0 => (6+0j)
+(2+0j) - 4.0 = (-2+0j)
+(2+0j) -= 4.0 => (-2+0j)
+(2+0j) * 4.0 = (8+0j)
+(2+0j) *= 4.0 => (8+0j)
+(2+0j) / 4.0 = (0.5+0j)
+(2+0j) /= 4.0 => (0.5+0j)
+(2+0j) ** 4.0 = (16+0j)
+(2+0j) **= 4.0 => (16+0j)
+(2+0j) % 4.0 = (2+0j)
+(2+0j) %= 4.0 => (2+0j)
+(2+0j) + 2 = (4+0j)
+(2+0j) += 2 => (4+0j)
+(2+0j) - 2 = 0j
+(2+0j) -= 2 => 0j
+(2+0j) * 2 = (4+0j)
+(2+0j) *= 2 => (4+0j)
+(2+0j) / 2 = (1+0j)
+(2+0j) /= 2 => (1+0j)
+(2+0j) ** 2 = (4+0j)
+(2+0j) **= 2 => (4+0j)
+(2+0j) % 2 = 0j
+(2+0j) %= 2 => 0j
+(2+0j) + (2+0j) = (4+0j)
+(2+0j) += (2+0j) => (4+0j)
+(2+0j) - (2+0j) = 0j
+(2+0j) -= (2+0j) => 0j
+(2+0j) * (2+0j) = (4+0j)
+(2+0j) *= (2+0j) => (4+0j)
+(2+0j) / (2+0j) = (1+0j)
+(2+0j) /= (2+0j) => (1+0j)
+(2+0j) ** (2+0j) = (4+0j)
+(2+0j) **= (2+0j) => (4+0j)
+(2+0j) % (2+0j) = 0j
+(2+0j) %= (2+0j) => 0j
+(2+0j) + [1] ... exceptions.TypeError
+(2+0j) += [1] ... exceptions.TypeError
+(2+0j) - [1] ... exceptions.TypeError
+(2+0j) -= [1] ... exceptions.TypeError
+(2+0j) * [1] ... exceptions.TypeError
+(2+0j) *= [1] ... exceptions.TypeError
+(2+0j) / [1] ... exceptions.TypeError
+(2+0j) /= [1] ... exceptions.TypeError
+(2+0j) ** [1] ... exceptions.TypeError
+(2+0j) **= [1] ... exceptions.TypeError
+(2+0j) % [1] ... exceptions.TypeError
+(2+0j) %= [1] ... exceptions.TypeError
+(2+0j) + (2,) ... exceptions.TypeError
+(2+0j) += (2,) ... exceptions.TypeError
+(2+0j) - (2,) ... exceptions.TypeError
+(2+0j) -= (2,) ... exceptions.TypeError
+(2+0j) * (2,) ... exceptions.TypeError
+(2+0j) *= (2,) ... exceptions.TypeError
+(2+0j) / (2,) ... exceptions.TypeError
+(2+0j) /= (2,) ... exceptions.TypeError
+(2+0j) ** (2,) ... exceptions.TypeError
+(2+0j) **= (2,) ... exceptions.TypeError
+(2+0j) % (2,) ... exceptions.TypeError
+(2+0j) %= (2,) ... exceptions.TypeError
+(2+0j) + None ... exceptions.TypeError
+(2+0j) += None ... exceptions.TypeError
+(2+0j) - None ... exceptions.TypeError
+(2+0j) -= None ... exceptions.TypeError
+(2+0j) * None ... exceptions.TypeError
+(2+0j) *= None ... exceptions.TypeError
+(2+0j) / None ... exceptions.TypeError
+(2+0j) /= None ... exceptions.TypeError
+(2+0j) ** None ... exceptions.TypeError
+(2+0j) **= None ... exceptions.TypeError
+(2+0j) % None ... exceptions.TypeError
+(2+0j) %= None ... exceptions.TypeError
+(2+0j) + <MethodNumber 1> = (3+0j)
+(2+0j) += <MethodNumber 1> => (3+0j)
+(2+0j) - <MethodNumber 1> = (1+0j)
+(2+0j) -= <MethodNumber 1> => (1+0j)
+(2+0j) * <MethodNumber 1> = (2+0j)
+(2+0j) *= <MethodNumber 1> => (2+0j)
+(2+0j) / <MethodNumber 1> = (2+0j)
+(2+0j) /= <MethodNumber 1> => (2+0j)
+(2+0j) ** <MethodNumber 1> = (2+0j)
+(2+0j) **= <MethodNumber 1> => (2+0j)
+(2+0j) % <MethodNumber 1> = 0j
+(2+0j) %= <MethodNumber 1> => 0j
+(2+0j) + <CoerceNumber 2> = (4+0j)
+(2+0j) += <CoerceNumber 2> => (4+0j)
+(2+0j) - <CoerceNumber 2> = 0j
+(2+0j) -= <CoerceNumber 2> => 0j
+(2+0j) * <CoerceNumber 2> = (4+0j)
+(2+0j) *= <CoerceNumber 2> => (4+0j)
+(2+0j) / <CoerceNumber 2> = (1+0j)
+(2+0j) /= <CoerceNumber 2> => (1+0j)
+(2+0j) ** <CoerceNumber 2> = (4+0j)
+(2+0j) **= <CoerceNumber 2> => (4+0j)
+(2+0j) % <CoerceNumber 2> = 0j
+(2+0j) %= <CoerceNumber 2> => 0j
+[1] + 2 ... exceptions.TypeError
+[1] += 2 ... exceptions.TypeError
+[1] - 2 ... exceptions.TypeError
+[1] -= 2 ... exceptions.TypeError
+[1] * 2 = [1, 1]
+[1] *= 2 => [1, 1]
+[1] / 2 ... exceptions.TypeError
+[1] /= 2 ... exceptions.TypeError
+[1] ** 2 ... exceptions.TypeError
+[1] **= 2 ... exceptions.TypeError
+[1] % 2 ... exceptions.TypeError
+[1] %= 2 ... exceptions.TypeError
+[1] + 4.0 ... exceptions.TypeError
+[1] += 4.0 ... exceptions.TypeError
+[1] - 4.0 ... exceptions.TypeError
+[1] -= 4.0 ... exceptions.TypeError
+[1] * 4.0 ... exceptions.TypeError
+[1] *= 4.0 ... exceptions.TypeError
+[1] / 4.0 ... exceptions.TypeError
+[1] /= 4.0 ... exceptions.TypeError
+[1] ** 4.0 ... exceptions.TypeError
+[1] **= 4.0 ... exceptions.TypeError
+[1] % 4.0 ... exceptions.TypeError
+[1] %= 4.0 ... exceptions.TypeError
+[1] + 2 ... exceptions.TypeError
+[1] += 2 ... exceptions.TypeError
+[1] - 2 ... exceptions.TypeError
+[1] -= 2 ... exceptions.TypeError
+[1] * 2 = [1, 1]
+[1] *= 2 => [1, 1]
+[1] / 2 ... exceptions.TypeError
+[1] /= 2 ... exceptions.TypeError
+[1] ** 2 ... exceptions.TypeError
+[1] **= 2 ... exceptions.TypeError
+[1] % 2 ... exceptions.TypeError
+[1] %= 2 ... exceptions.TypeError
+[1] + (2+0j) ... exceptions.TypeError
+[1] += (2+0j) ... exceptions.TypeError
+[1] - (2+0j) ... exceptions.TypeError
+[1] -= (2+0j) ... exceptions.TypeError
+[1] * (2+0j) ... exceptions.TypeError
+[1] *= (2+0j) ... exceptions.TypeError
+[1] / (2+0j) ... exceptions.TypeError
+[1] /= (2+0j) ... exceptions.TypeError
+[1] ** (2+0j) ... exceptions.TypeError
+[1] **= (2+0j) ... exceptions.TypeError
+[1] % (2+0j) ... exceptions.TypeError
+[1] %= (2+0j) ... exceptions.TypeError
+[1] + [1] = [1, 1]
+[1] += [1] => [1, 1]
+[1] - [1] ... exceptions.TypeError
+[1] -= [1] ... exceptions.TypeError
+[1] * [1] ... exceptions.TypeError
+[1] *= [1] ... exceptions.TypeError
+[1] / [1] ... exceptions.TypeError
+[1] /= [1] ... exceptions.TypeError
+[1] ** [1] ... exceptions.TypeError
+[1] **= [1] ... exceptions.TypeError
+[1] % [1] ... exceptions.TypeError
+[1] %= [1] ... exceptions.TypeError
+[1] + (2,) ... exceptions.TypeError
+[1] += (2,) => [1, 2]
+[1] - (2,) ... exceptions.TypeError
+[1] -= (2,) ... exceptions.TypeError
+[1] * (2,) ... exceptions.TypeError
+[1] *= (2,) ... exceptions.TypeError
+[1] / (2,) ... exceptions.TypeError
+[1] /= (2,) ... exceptions.TypeError
+[1] ** (2,) ... exceptions.TypeError
+[1] **= (2,) ... exceptions.TypeError
+[1] % (2,) ... exceptions.TypeError
+[1] %= (2,) ... exceptions.TypeError
+[1] + None ... exceptions.TypeError
+[1] += None ... exceptions.TypeError
+[1] - None ... exceptions.TypeError
+[1] -= None ... exceptions.TypeError
+[1] * None ... exceptions.TypeError
+[1] *= None ... exceptions.TypeError
+[1] / None ... exceptions.TypeError
+[1] /= None ... exceptions.TypeError
+[1] ** None ... exceptions.TypeError
+[1] **= None ... exceptions.TypeError
+[1] % None ... exceptions.TypeError
+[1] %= None ... exceptions.TypeError
+[1] + <MethodNumber 1> ... exceptions.TypeError
+[1] += <MethodNumber 1> ... exceptions.TypeError
+[1] - <MethodNumber 1> ... exceptions.TypeError
+[1] -= <MethodNumber 1> ... exceptions.TypeError
+[1] * <MethodNumber 1> = [1]
+[1] *= <MethodNumber 1> ... exceptions.TypeError
+[1] / <MethodNumber 1> ... exceptions.TypeError
+[1] /= <MethodNumber 1> ... exceptions.TypeError
+[1] ** <MethodNumber 1> ... exceptions.TypeError
+[1] **= <MethodNumber 1> ... exceptions.TypeError
+[1] % <MethodNumber 1> ... exceptions.TypeError
+[1] %= <MethodNumber 1> ... exceptions.TypeError
+[1] + <CoerceNumber 2> ... exceptions.TypeError
+[1] += <CoerceNumber 2> ... exceptions.TypeError
+[1] - <CoerceNumber 2> ... exceptions.TypeError
+[1] -= <CoerceNumber 2> ... exceptions.TypeError
+[1] * <CoerceNumber 2> = [1, 1]
+[1] *= <CoerceNumber 2> ... exceptions.TypeError
+[1] / <CoerceNumber 2> ... exceptions.TypeError
+[1] /= <CoerceNumber 2> ... exceptions.TypeError
+[1] ** <CoerceNumber 2> ... exceptions.TypeError
+[1] **= <CoerceNumber 2> ... exceptions.TypeError
+[1] % <CoerceNumber 2> ... exceptions.TypeError
+[1] %= <CoerceNumber 2> ... exceptions.TypeError
+(2,) + 2 ... exceptions.TypeError
+(2,) += 2 ... exceptions.TypeError
+(2,) - 2 ... exceptions.TypeError
+(2,) -= 2 ... exceptions.TypeError
+(2,) * 2 = (2, 2)
+(2,) *= 2 => (2, 2)
+(2,) / 2 ... exceptions.TypeError
+(2,) /= 2 ... exceptions.TypeError
+(2,) ** 2 ... exceptions.TypeError
+(2,) **= 2 ... exceptions.TypeError
+(2,) % 2 ... exceptions.TypeError
+(2,) %= 2 ... exceptions.TypeError
+(2,) + 4.0 ... exceptions.TypeError
+(2,) += 4.0 ... exceptions.TypeError
+(2,) - 4.0 ... exceptions.TypeError
+(2,) -= 4.0 ... exceptions.TypeError
+(2,) * 4.0 ... exceptions.TypeError
+(2,) *= 4.0 ... exceptions.TypeError
+(2,) / 4.0 ... exceptions.TypeError
+(2,) /= 4.0 ... exceptions.TypeError
+(2,) ** 4.0 ... exceptions.TypeError
+(2,) **= 4.0 ... exceptions.TypeError
+(2,) % 4.0 ... exceptions.TypeError
+(2,) %= 4.0 ... exceptions.TypeError
+(2,) + 2 ... exceptions.TypeError
+(2,) += 2 ... exceptions.TypeError
+(2,) - 2 ... exceptions.TypeError
+(2,) -= 2 ... exceptions.TypeError
+(2,) * 2 = (2, 2)
+(2,) *= 2 => (2, 2)
+(2,) / 2 ... exceptions.TypeError
+(2,) /= 2 ... exceptions.TypeError
+(2,) ** 2 ... exceptions.TypeError
+(2,) **= 2 ... exceptions.TypeError
+(2,) % 2 ... exceptions.TypeError
+(2,) %= 2 ... exceptions.TypeError
+(2,) + (2+0j) ... exceptions.TypeError
+(2,) += (2+0j) ... exceptions.TypeError
+(2,) - (2+0j) ... exceptions.TypeError
+(2,) -= (2+0j) ... exceptions.TypeError
+(2,) * (2+0j) ... exceptions.TypeError
+(2,) *= (2+0j) ... exceptions.TypeError
+(2,) / (2+0j) ... exceptions.TypeError
+(2,) /= (2+0j) ... exceptions.TypeError
+(2,) ** (2+0j) ... exceptions.TypeError
+(2,) **= (2+0j) ... exceptions.TypeError
+(2,) % (2+0j) ... exceptions.TypeError
+(2,) %= (2+0j) ... exceptions.TypeError
+(2,) + [1] ... exceptions.TypeError
+(2,) += [1] ... exceptions.TypeError
+(2,) - [1] ... exceptions.TypeError
+(2,) -= [1] ... exceptions.TypeError
+(2,) * [1] ... exceptions.TypeError
+(2,) *= [1] ... exceptions.TypeError
+(2,) / [1] ... exceptions.TypeError
+(2,) /= [1] ... exceptions.TypeError
+(2,) ** [1] ... exceptions.TypeError
+(2,) **= [1] ... exceptions.TypeError
+(2,) % [1] ... exceptions.TypeError
+(2,) %= [1] ... exceptions.TypeError
+(2,) + (2,) = (2, 2)
+(2,) += (2,) => (2, 2)
+(2,) - (2,) ... exceptions.TypeError
+(2,) -= (2,) ... exceptions.TypeError
+(2,) * (2,) ... exceptions.TypeError
+(2,) *= (2,) ... exceptions.TypeError
+(2,) / (2,) ... exceptions.TypeError
+(2,) /= (2,) ... exceptions.TypeError
+(2,) ** (2,) ... exceptions.TypeError
+(2,) **= (2,) ... exceptions.TypeError
+(2,) % (2,) ... exceptions.TypeError
+(2,) %= (2,) ... exceptions.TypeError
+(2,) + None ... exceptions.TypeError
+(2,) += None ... exceptions.TypeError
+(2,) - None ... exceptions.TypeError
+(2,) -= None ... exceptions.TypeError
+(2,) * None ... exceptions.TypeError
+(2,) *= None ... exceptions.TypeError
+(2,) / None ... exceptions.TypeError
+(2,) /= None ... exceptions.TypeError
+(2,) ** None ... exceptions.TypeError
+(2,) **= None ... exceptions.TypeError
+(2,) % None ... exceptions.TypeError
+(2,) %= None ... exceptions.TypeError
+(2,) + <MethodNumber 1> ... exceptions.TypeError
+(2,) += <MethodNumber 1> ... exceptions.TypeError
+(2,) - <MethodNumber 1> ... exceptions.TypeError
+(2,) -= <MethodNumber 1> ... exceptions.TypeError
+(2,) * <MethodNumber 1> = (2,)
+(2,) *= <MethodNumber 1> => (2,)
+(2,) / <MethodNumber 1> ... exceptions.TypeError
+(2,) /= <MethodNumber 1> ... exceptions.TypeError
+(2,) ** <MethodNumber 1> ... exceptions.TypeError
+(2,) **= <MethodNumber 1> ... exceptions.TypeError
+(2,) % <MethodNumber 1> ... exceptions.TypeError
+(2,) %= <MethodNumber 1> ... exceptions.TypeError
+(2,) + <CoerceNumber 2> ... exceptions.TypeError
+(2,) += <CoerceNumber 2> ... exceptions.TypeError
+(2,) - <CoerceNumber 2> ... exceptions.TypeError
+(2,) -= <CoerceNumber 2> ... exceptions.TypeError
+(2,) * <CoerceNumber 2> = (2, 2)
+(2,) *= <CoerceNumber 2> => (2, 2)
+(2,) / <CoerceNumber 2> ... exceptions.TypeError
+(2,) /= <CoerceNumber 2> ... exceptions.TypeError
+(2,) ** <CoerceNumber 2> ... exceptions.TypeError
+(2,) **= <CoerceNumber 2> ... exceptions.TypeError
+(2,) % <CoerceNumber 2> ... exceptions.TypeError
+(2,) %= <CoerceNumber 2> ... exceptions.TypeError
+None + 2 ... exceptions.TypeError
+None += 2 ... exceptions.TypeError
+None - 2 ... exceptions.TypeError
+None -= 2 ... exceptions.TypeError
+None * 2 ... exceptions.TypeError
+None *= 2 ... exceptions.TypeError
+None / 2 ... exceptions.TypeError
+None /= 2 ... exceptions.TypeError
+None ** 2 ... exceptions.TypeError
+None **= 2 ... exceptions.TypeError
+None % 2 ... exceptions.TypeError
+None %= 2 ... exceptions.TypeError
+None + 4.0 ... exceptions.TypeError
+None += 4.0 ... exceptions.TypeError
+None - 4.0 ... exceptions.TypeError
+None -= 4.0 ... exceptions.TypeError
+None * 4.0 ... exceptions.TypeError
+None *= 4.0 ... exceptions.TypeError
+None / 4.0 ... exceptions.TypeError
+None /= 4.0 ... exceptions.TypeError
+None ** 4.0 ... exceptions.TypeError
+None **= 4.0 ... exceptions.TypeError
+None % 4.0 ... exceptions.TypeError
+None %= 4.0 ... exceptions.TypeError
+None + 2 ... exceptions.TypeError
+None += 2 ... exceptions.TypeError
+None - 2 ... exceptions.TypeError
+None -= 2 ... exceptions.TypeError
+None * 2 ... exceptions.TypeError
+None *= 2 ... exceptions.TypeError
+None / 2 ... exceptions.TypeError
+None /= 2 ... exceptions.TypeError
+None ** 2 ... exceptions.TypeError
+None **= 2 ... exceptions.TypeError
+None % 2 ... exceptions.TypeError
+None %= 2 ... exceptions.TypeError
+None + (2+0j) ... exceptions.TypeError
+None += (2+0j) ... exceptions.TypeError
+None - (2+0j) ... exceptions.TypeError
+None -= (2+0j) ... exceptions.TypeError
+None * (2+0j) ... exceptions.TypeError
+None *= (2+0j) ... exceptions.TypeError
+None / (2+0j) ... exceptions.TypeError
+None /= (2+0j) ... exceptions.TypeError
+None ** (2+0j) ... exceptions.TypeError
+None **= (2+0j) ... exceptions.TypeError
+None % (2+0j) ... exceptions.TypeError
+None %= (2+0j) ... exceptions.TypeError
+None + [1] ... exceptions.TypeError
+None += [1] ... exceptions.TypeError
+None - [1] ... exceptions.TypeError
+None -= [1] ... exceptions.TypeError
+None * [1] ... exceptions.TypeError
+None *= [1] ... exceptions.TypeError
+None / [1] ... exceptions.TypeError
+None /= [1] ... exceptions.TypeError
+None ** [1] ... exceptions.TypeError
+None **= [1] ... exceptions.TypeError
+None % [1] ... exceptions.TypeError
+None %= [1] ... exceptions.TypeError
+None + (2,) ... exceptions.TypeError
+None += (2,) ... exceptions.TypeError
+None - (2,) ... exceptions.TypeError
+None -= (2,) ... exceptions.TypeError
+None * (2,) ... exceptions.TypeError
+None *= (2,) ... exceptions.TypeError
+None / (2,) ... exceptions.TypeError
+None /= (2,) ... exceptions.TypeError
+None ** (2,) ... exceptions.TypeError
+None **= (2,) ... exceptions.TypeError
+None % (2,) ... exceptions.TypeError
+None %= (2,) ... exceptions.TypeError
+None + None ... exceptions.TypeError
+None += None ... exceptions.TypeError
+None - None ... exceptions.TypeError
+None -= None ... exceptions.TypeError
+None * None ... exceptions.TypeError
+None *= None ... exceptions.TypeError
+None / None ... exceptions.TypeError
+None /= None ... exceptions.TypeError
+None ** None ... exceptions.TypeError
+None **= None ... exceptions.TypeError
+None % None ... exceptions.TypeError
+None %= None ... exceptions.TypeError
+None + <MethodNumber 1> ... exceptions.TypeError
+None += <MethodNumber 1> ... exceptions.TypeError
+None - <MethodNumber 1> ... exceptions.TypeError
+None -= <MethodNumber 1> ... exceptions.TypeError
+None * <MethodNumber 1> ... exceptions.TypeError
+None *= <MethodNumber 1> ... exceptions.TypeError
+None / <MethodNumber 1> ... exceptions.TypeError
+None /= <MethodNumber 1> ... exceptions.TypeError
+None ** <MethodNumber 1> ... exceptions.TypeError
+None **= <MethodNumber 1> ... exceptions.TypeError
+None % <MethodNumber 1> ... exceptions.TypeError
+None %= <MethodNumber 1> ... exceptions.TypeError
+None + <CoerceNumber 2> ... exceptions.TypeError
+None += <CoerceNumber 2> ... exceptions.TypeError
+None - <CoerceNumber 2> ... exceptions.TypeError
+None -= <CoerceNumber 2> ... exceptions.TypeError
+None * <CoerceNumber 2> ... exceptions.TypeError
+None *= <CoerceNumber 2> ... exceptions.TypeError
+None / <CoerceNumber 2> ... exceptions.TypeError
+None /= <CoerceNumber 2> ... exceptions.TypeError
+None ** <CoerceNumber 2> ... exceptions.TypeError
+None **= <CoerceNumber 2> ... exceptions.TypeError
+None % <CoerceNumber 2> ... exceptions.TypeError
+None %= <CoerceNumber 2> ... exceptions.TypeError
+<MethodNumber 1> + 2 = 3
+<MethodNumber 1> += 2 => 3
+<MethodNumber 1> - 2 = -1
+<MethodNumber 1> -= 2 => -1
+<MethodNumber 1> * 2 = 2
+<MethodNumber 1> *= 2 => 2
+<MethodNumber 1> / 2 = 0
+<MethodNumber 1> /= 2 => 0
+<MethodNumber 1> ** 2 = 1
+<MethodNumber 1> **= 2 => 1
+<MethodNumber 1> % 2 = 1
+<MethodNumber 1> %= 2 => 1
+<MethodNumber 1> + 4.0 = 5.0
+<MethodNumber 1> += 4.0 => 5.0
+<MethodNumber 1> - 4.0 = -3.0
+<MethodNumber 1> -= 4.0 => -3.0
+<MethodNumber 1> * 4.0 = 4.0
+<MethodNumber 1> *= 4.0 => 4.0
+<MethodNumber 1> / 4.0 = 0.25
+<MethodNumber 1> /= 4.0 => 0.25
+<MethodNumber 1> ** 4.0 = 1.0
+<MethodNumber 1> **= 4.0 => 1.0
+<MethodNumber 1> % 4.0 = 1.0
+<MethodNumber 1> %= 4.0 => 1.0
+<MethodNumber 1> + 2 = 3
+<MethodNumber 1> += 2 => 3
+<MethodNumber 1> - 2 = -1
+<MethodNumber 1> -= 2 => -1
+<MethodNumber 1> * 2 = 2
+<MethodNumber 1> *= 2 => 2
+<MethodNumber 1> / 2 = 0
+<MethodNumber 1> /= 2 => 0
+<MethodNumber 1> ** 2 = 1
+<MethodNumber 1> **= 2 => 1
+<MethodNumber 1> % 2 = 1
+<MethodNumber 1> %= 2 => 1
+<MethodNumber 1> + (2+0j) = (3+0j)
+<MethodNumber 1> += (2+0j) => (3+0j)
+<MethodNumber 1> - (2+0j) = (-1+0j)
+<MethodNumber 1> -= (2+0j) => (-1+0j)
+<MethodNumber 1> * (2+0j) = (2+0j)
+<MethodNumber 1> *= (2+0j) => (2+0j)
+<MethodNumber 1> / (2+0j) = (0.5+0j)
+<MethodNumber 1> /= (2+0j) => (0.5+0j)
+<MethodNumber 1> ** (2+0j) = (1+0j)
+<MethodNumber 1> **= (2+0j) => (1+0j)
+<MethodNumber 1> % (2+0j) = (1+0j)
+<MethodNumber 1> %= (2+0j) => (1+0j)
+<MethodNumber 1> + [1] ... exceptions.TypeError
+<MethodNumber 1> += [1] ... exceptions.TypeError
+<MethodNumber 1> - [1] ... exceptions.TypeError
+<MethodNumber 1> -= [1] ... exceptions.TypeError
+<MethodNumber 1> * [1] = [1]
+<MethodNumber 1> *= [1] => [1]
+<MethodNumber 1> / [1] ... exceptions.TypeError
+<MethodNumber 1> /= [1] ... exceptions.TypeError
+<MethodNumber 1> ** [1] ... exceptions.TypeError
+<MethodNumber 1> **= [1] ... exceptions.TypeError
+<MethodNumber 1> % [1] ... exceptions.TypeError
+<MethodNumber 1> %= [1] ... exceptions.TypeError
+<MethodNumber 1> + (2,) ... exceptions.TypeError
+<MethodNumber 1> += (2,) ... exceptions.TypeError
+<MethodNumber 1> - (2,) ... exceptions.TypeError
+<MethodNumber 1> -= (2,) ... exceptions.TypeError
+<MethodNumber 1> * (2,) = (2,)
+<MethodNumber 1> *= (2,) => (2,)
+<MethodNumber 1> / (2,) ... exceptions.TypeError
+<MethodNumber 1> /= (2,) ... exceptions.TypeError
+<MethodNumber 1> ** (2,) ... exceptions.TypeError
+<MethodNumber 1> **= (2,) ... exceptions.TypeError
+<MethodNumber 1> % (2,) ... exceptions.TypeError
+<MethodNumber 1> %= (2,) ... exceptions.TypeError
+<MethodNumber 1> + None ... exceptions.TypeError
+<MethodNumber 1> += None ... exceptions.TypeError
+<MethodNumber 1> - None ... exceptions.TypeError
+<MethodNumber 1> -= None ... exceptions.TypeError
+<MethodNumber 1> * None ... exceptions.TypeError
+<MethodNumber 1> *= None ... exceptions.TypeError
+<MethodNumber 1> / None ... exceptions.TypeError
+<MethodNumber 1> /= None ... exceptions.TypeError
+<MethodNumber 1> ** None ... exceptions.TypeError
+<MethodNumber 1> **= None ... exceptions.TypeError
+<MethodNumber 1> % None ... exceptions.TypeError
+<MethodNumber 1> %= None ... exceptions.TypeError
+<MethodNumber 1> + <MethodNumber 1> = 2
+<MethodNumber 1> += <MethodNumber 1> => 2
+<MethodNumber 1> - <MethodNumber 1> = 0
+<MethodNumber 1> -= <MethodNumber 1> => 0
+<MethodNumber 1> * <MethodNumber 1> = 1
+<MethodNumber 1> *= <MethodNumber 1> => 1
+<MethodNumber 1> / <MethodNumber 1> = 1
+<MethodNumber 1> /= <MethodNumber 1> => 1
+<MethodNumber 1> ** <MethodNumber 1> = 1
+<MethodNumber 1> **= <MethodNumber 1> => 1
+<MethodNumber 1> % <MethodNumber 1> = 0
+<MethodNumber 1> %= <MethodNumber 1> => 0
+<MethodNumber 1> + <CoerceNumber 2> = 3
+<MethodNumber 1> += <CoerceNumber 2> => 3
+<MethodNumber 1> - <CoerceNumber 2> = -1
+<MethodNumber 1> -= <CoerceNumber 2> => -1
+<MethodNumber 1> * <CoerceNumber 2> = 2
+<MethodNumber 1> *= <CoerceNumber 2> => 2
+<MethodNumber 1> / <CoerceNumber 2> = 0
+<MethodNumber 1> /= <CoerceNumber 2> => 0
+<MethodNumber 1> ** <CoerceNumber 2> = 1
+<MethodNumber 1> **= <CoerceNumber 2> => 1
+<MethodNumber 1> % <CoerceNumber 2> = 1
+<MethodNumber 1> %= <CoerceNumber 2> => 1
+<CoerceNumber 2> + 2 = 4
+<CoerceNumber 2> += 2 => 4
+<CoerceNumber 2> - 2 = 0
+<CoerceNumber 2> -= 2 => 0
+<CoerceNumber 2> * 2 = 4
+<CoerceNumber 2> *= 2 => 4
+<CoerceNumber 2> / 2 = 1
+<CoerceNumber 2> /= 2 => 1
+<CoerceNumber 2> ** 2 = 4
+<CoerceNumber 2> **= 2 => 4
+<CoerceNumber 2> % 2 = 0
+<CoerceNumber 2> %= 2 => 0
+<CoerceNumber 2> + 4.0 = 6.0
+<CoerceNumber 2> += 4.0 => 6.0
+<CoerceNumber 2> - 4.0 = -2.0
+<CoerceNumber 2> -= 4.0 => -2.0
+<CoerceNumber 2> * 4.0 = 8.0
+<CoerceNumber 2> *= 4.0 => 8.0
+<CoerceNumber 2> / 4.0 = 0.5
+<CoerceNumber 2> /= 4.0 => 0.5
+<CoerceNumber 2> ** 4.0 = 16.0
+<CoerceNumber 2> **= 4.0 => 16.0
+<CoerceNumber 2> % 4.0 = 2.0
+<CoerceNumber 2> %= 4.0 => 2.0
+<CoerceNumber 2> + 2 = 4
+<CoerceNumber 2> += 2 => 4
+<CoerceNumber 2> - 2 = 0
+<CoerceNumber 2> -= 2 => 0
+<CoerceNumber 2> * 2 = 4
+<CoerceNumber 2> *= 2 => 4
+<CoerceNumber 2> / 2 = 1
+<CoerceNumber 2> /= 2 => 1
+<CoerceNumber 2> ** 2 = 4
+<CoerceNumber 2> **= 2 => 4
+<CoerceNumber 2> % 2 = 0
+<CoerceNumber 2> %= 2 => 0
+<CoerceNumber 2> + (2+0j) = (4+0j)
+<CoerceNumber 2> += (2+0j) => (4+0j)
+<CoerceNumber 2> - (2+0j) = 0j
+<CoerceNumber 2> -= (2+0j) => 0j
+<CoerceNumber 2> * (2+0j) = (4+0j)
+<CoerceNumber 2> *= (2+0j) => (4+0j)
+<CoerceNumber 2> / (2+0j) = (1+0j)
+<CoerceNumber 2> /= (2+0j) => (1+0j)
+<CoerceNumber 2> ** (2+0j) = (4+0j)
+<CoerceNumber 2> **= (2+0j) => (4+0j)
+<CoerceNumber 2> % (2+0j) = 0j
+<CoerceNumber 2> %= (2+0j) => 0j
+<CoerceNumber 2> + [1] ... exceptions.TypeError
+<CoerceNumber 2> += [1] ... exceptions.TypeError
+<CoerceNumber 2> - [1] ... exceptions.TypeError
+<CoerceNumber 2> -= [1] ... exceptions.TypeError
+<CoerceNumber 2> * [1] = [1, 1]
+<CoerceNumber 2> *= [1] => [1, 1]
+<CoerceNumber 2> / [1] ... exceptions.TypeError
+<CoerceNumber 2> /= [1] ... exceptions.TypeError
+<CoerceNumber 2> ** [1] ... exceptions.TypeError
+<CoerceNumber 2> **= [1] ... exceptions.TypeError
+<CoerceNumber 2> % [1] ... exceptions.TypeError
+<CoerceNumber 2> %= [1] ... exceptions.TypeError
+<CoerceNumber 2> + (2,) ... exceptions.TypeError
+<CoerceNumber 2> += (2,) ... exceptions.TypeError
+<CoerceNumber 2> - (2,) ... exceptions.TypeError
+<CoerceNumber 2> -= (2,) ... exceptions.TypeError
+<CoerceNumber 2> * (2,) = (2, 2)
+<CoerceNumber 2> *= (2,) => (2, 2)
+<CoerceNumber 2> / (2,) ... exceptions.TypeError
+<CoerceNumber 2> /= (2,) ... exceptions.TypeError
+<CoerceNumber 2> ** (2,) ... exceptions.TypeError
+<CoerceNumber 2> **= (2,) ... exceptions.TypeError
+<CoerceNumber 2> % (2,) ... exceptions.TypeError
+<CoerceNumber 2> %= (2,) ... exceptions.TypeError
+<CoerceNumber 2> + None ... exceptions.TypeError
+<CoerceNumber 2> += None ... exceptions.TypeError
+<CoerceNumber 2> - None ... exceptions.TypeError
+<CoerceNumber 2> -= None ... exceptions.TypeError
+<CoerceNumber 2> * None ... exceptions.TypeError
+<CoerceNumber 2> *= None ... exceptions.TypeError
+<CoerceNumber 2> / None ... exceptions.TypeError
+<CoerceNumber 2> /= None ... exceptions.TypeError
+<CoerceNumber 2> ** None ... exceptions.TypeError
+<CoerceNumber 2> **= None ... exceptions.TypeError
+<CoerceNumber 2> % None ... exceptions.TypeError
+<CoerceNumber 2> %= None ... exceptions.TypeError
+<CoerceNumber 2> + <MethodNumber 1> = 3
+<CoerceNumber 2> += <MethodNumber 1> => 3
+<CoerceNumber 2> - <MethodNumber 1> = 1
+<CoerceNumber 2> -= <MethodNumber 1> => 1
+<CoerceNumber 2> * <MethodNumber 1> = 2
+<CoerceNumber 2> *= <MethodNumber 1> => 2
+<CoerceNumber 2> / <MethodNumber 1> = 2
+<CoerceNumber 2> /= <MethodNumber 1> => 2
+<CoerceNumber 2> ** <MethodNumber 1> = 2
+<CoerceNumber 2> **= <MethodNumber 1> => 2
+<CoerceNumber 2> % <MethodNumber 1> = 0
+<CoerceNumber 2> %= <MethodNumber 1> => 0
+<CoerceNumber 2> + <CoerceNumber 2> = 4
+<CoerceNumber 2> += <CoerceNumber 2> => 4
+<CoerceNumber 2> - <CoerceNumber 2> = 0
+<CoerceNumber 2> -= <CoerceNumber 2> => 0
+<CoerceNumber 2> * <CoerceNumber 2> = 4
+<CoerceNumber 2> *= <CoerceNumber 2> => 4
+<CoerceNumber 2> / <CoerceNumber 2> = 1
+<CoerceNumber 2> /= <CoerceNumber 2> => 1
+<CoerceNumber 2> ** <CoerceNumber 2> = 4
+<CoerceNumber 2> **= <CoerceNumber 2> => 4
+<CoerceNumber 2> % <CoerceNumber 2> = 0
+<CoerceNumber 2> %= <CoerceNumber 2> => 0
+divmod(2, 2) = (1, 0)
+divmod(2, 4.0) = (0.0, 2.0)
+divmod(2, 2) = (1L, 0L)
+divmod(2, (2+0j)) = ((1+0j), 0j)
+divmod(2, [1]) ... exceptions.TypeError
+divmod(2, (2,)) ... exceptions.TypeError
+divmod(2, None) ... exceptions.TypeError
+divmod(2, <MethodNumber 1>) ... exceptions.TypeError
+divmod(2, <CoerceNumber 2>) = (1, 0)
+divmod(4.0, 2) = (2.0, 0.0)
+divmod(4.0, 4.0) = (1.0, 0.0)
+divmod(4.0, 2) = (2.0, 0.0)
+divmod(4.0, (2+0j)) = ((2+0j), 0j)
+divmod(4.0, [1]) ... exceptions.TypeError
+divmod(4.0, (2,)) ... exceptions.TypeError
+divmod(4.0, None) ... exceptions.TypeError
+divmod(4.0, <MethodNumber 1>) ... exceptions.TypeError
+divmod(4.0, <CoerceNumber 2>) = (2.0, 0.0)
+divmod(2, 2) = (1L, 0L)
+divmod(2, 4.0) = (0.0, 2.0)
+divmod(2, 2) = (1L, 0L)
+divmod(2, (2+0j)) = ((1+0j), 0j)
+divmod(2, [1]) ... exceptions.TypeError
+divmod(2, (2,)) ... exceptions.TypeError
+divmod(2, None) ... exceptions.TypeError
+divmod(2, <MethodNumber 1>) ... exceptions.TypeError
+divmod(2, <CoerceNumber 2>) = (1L, 0L)
+divmod((2+0j), 2) = ((1+0j), 0j)
+divmod((2+0j), 4.0) = (0j, (2+0j))
+divmod((2+0j), 2) = ((1+0j), 0j)
+divmod((2+0j), (2+0j)) = ((1+0j), 0j)
+divmod((2+0j), [1]) ... exceptions.TypeError
+divmod((2+0j), (2,)) ... exceptions.TypeError
+divmod((2+0j), None) ... exceptions.TypeError
+divmod((2+0j), <MethodNumber 1>) ... exceptions.TypeError
+divmod((2+0j), <CoerceNumber 2>) = ((1+0j), 0j)
+divmod([1], 2) ... exceptions.TypeError
+divmod([1], 4.0) ... exceptions.TypeError
+divmod([1], 2) ... exceptions.TypeError
+divmod([1], (2+0j)) ... exceptions.TypeError
+divmod([1], [1]) ... exceptions.TypeError
+divmod([1], (2,)) ... exceptions.TypeError
+divmod([1], None) ... exceptions.TypeError
+divmod([1], <MethodNumber 1>) ... exceptions.TypeError
+divmod([1], <CoerceNumber 2>) ... exceptions.TypeError
+divmod((2,), 2) ... exceptions.TypeError
+divmod((2,), 4.0) ... exceptions.TypeError
+divmod((2,), 2) ... exceptions.TypeError
+divmod((2,), (2+0j)) ... exceptions.TypeError
+divmod((2,), [1]) ... exceptions.TypeError
+divmod((2,), (2,)) ... exceptions.TypeError
+divmod((2,), None) ... exceptions.TypeError
+divmod((2,), <MethodNumber 1>) ... exceptions.TypeError
+divmod((2,), <CoerceNumber 2>) ... exceptions.TypeError
+divmod(None, 2) ... exceptions.TypeError
+divmod(None, 4.0) ... exceptions.TypeError
+divmod(None, 2) ... exceptions.TypeError
+divmod(None, (2+0j)) ... exceptions.TypeError
+divmod(None, [1]) ... exceptions.TypeError
+divmod(None, (2,)) ... exceptions.TypeError
+divmod(None, None) ... exceptions.TypeError
+divmod(None, <MethodNumber 1>) ... exceptions.TypeError
+divmod(None, <CoerceNumber 2>) ... exceptions.TypeError
+divmod(<MethodNumber 1>, 2) ... exceptions.TypeError
+divmod(<MethodNumber 1>, 4.0) ... exceptions.TypeError
+divmod(<MethodNumber 1>, 2) ... exceptions.TypeError
+divmod(<MethodNumber 1>, (2+0j)) ... exceptions.TypeError
+divmod(<MethodNumber 1>, [1]) ... exceptions.TypeError
+divmod(<MethodNumber 1>, (2,)) ... exceptions.TypeError
+divmod(<MethodNumber 1>, None) ... exceptions.TypeError
+divmod(<MethodNumber 1>, <MethodNumber 1>) ... exceptions.TypeError
+divmod(<MethodNumber 1>, <CoerceNumber 2>) ... exceptions.TypeError
+divmod(<CoerceNumber 2>, 2) = (1, 0)
+divmod(<CoerceNumber 2>, 4.0) = (0.0, 2.0)
+divmod(<CoerceNumber 2>, 2) = (1L, 0L)
+divmod(<CoerceNumber 2>, (2+0j)) = ((1+0j), 0j)
+divmod(<CoerceNumber 2>, [1]) ... exceptions.TypeError
+divmod(<CoerceNumber 2>, (2,)) ... exceptions.TypeError
+divmod(<CoerceNumber 2>, None) ... exceptions.TypeError
+divmod(<CoerceNumber 2>, <MethodNumber 1>) ... exceptions.TypeError
+divmod(<CoerceNumber 2>, <CoerceNumber 2>) = (1, 0)
diff --git a/lib-python/2.2/test/output/test_compare b/lib-python/2.2/test/output/test_compare
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_compare
@@ -0,0 +1,101 @@
+test_compare
+2 == 2
+2 == 2.0
+2 == 2
+2 == (2+0j)
+2 != [1]
+2 != (3,)
+2 != None
+2 != <Empty>
+2 == <Coerce 2>
+2 == <Cmp 2.0>
+2.0 == 2
+2.0 == 2.0
+2.0 == 2
+2.0 == (2+0j)
+2.0 != [1]
+2.0 != (3,)
+2.0 != None
+2.0 != <Empty>
+2.0 == <Coerce 2>
+2.0 == <Cmp 2.0>
+2 == 2
+2 == 2.0
+2 == 2
+2 == (2+0j)
+2 != [1]
+2 != (3,)
+2 != None
+2 != <Empty>
+2 == <Coerce 2>
+2 == <Cmp 2.0>
+(2+0j) == 2
+(2+0j) == 2.0
+(2+0j) == 2
+(2+0j) == (2+0j)
+(2+0j) != [1]
+(2+0j) != (3,)
+(2+0j) != None
+(2+0j) != <Empty>
+(2+0j) == <Coerce 2>
+(2+0j) == <Cmp 2.0>
+[1] != 2
+[1] != 2.0
+[1] != 2
+[1] != (2+0j)
+[1] == [1]
+[1] != (3,)
+[1] != None
+[1] != <Empty>
+[1] != <Coerce 2>
+[1] != <Cmp 2.0>
+(3,) != 2
+(3,) != 2.0
+(3,) != 2
+(3,) != (2+0j)
+(3,) != [1]
+(3,) == (3,)
+(3,) != None
+(3,) != <Empty>
+(3,) != <Coerce 2>
+(3,) != <Cmp 2.0>
+None != 2
+None != 2.0
+None != 2
+None != (2+0j)
+None != [1]
+None != (3,)
+None == None
+None != <Empty>
+None != <Coerce 2>
+None != <Cmp 2.0>
+<Empty> != 2
+<Empty> != 2.0
+<Empty> != 2
+<Empty> != (2+0j)
+<Empty> != [1]
+<Empty> != (3,)
+<Empty> != None
+<Empty> == <Empty>
+<Empty> != <Coerce 2>
+<Empty> != <Cmp 2.0>
+<Coerce 2> == 2
+<Coerce 2> == 2.0
+<Coerce 2> == 2
+<Coerce 2> == (2+0j)
+<Coerce 2> != [1]
+<Coerce 2> != (3,)
+<Coerce 2> != None
+<Coerce 2> != <Empty>
+<Coerce 2> == <Coerce 2>
+<Coerce 2> == <Cmp 2.0>
+<Cmp 2.0> == 2
+<Cmp 2.0> == 2.0
+<Cmp 2.0> == 2
+<Cmp 2.0> == (2+0j)
+<Cmp 2.0> != [1]
+<Cmp 2.0> != (3,)
+<Cmp 2.0> != None
+<Cmp 2.0> != <Empty>
+<Cmp 2.0> == <Coerce 2>
+<Cmp 2.0> == <Cmp 2.0>
diff --git a/lib-python/2.2/test/output/test_compile b/lib-python/2.2/test/output/test_compile
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_compile
@@ -0,0 +1,7 @@
+test_compile
+1 2
+1 2
+3 4
+1 2 3
+1 2 3
+2 3 4
diff --git a/lib-python/2.2/test/output/test_cookie b/lib-python/2.2/test/output/test_cookie
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_cookie
@@ -0,0 +1,32 @@
+test_cookie
+<SimpleCookie: chips='ahoy' vienna='finger'>
+Set-Cookie: chips=ahoy;
+Set-Cookie: vienna=finger;
+  chips 'ahoy' 'ahoy'
+Set-Cookie: chips=ahoy;
+  vienna 'finger' 'finger'
+Set-Cookie: vienna=finger;
+<SimpleCookie: keebler='E=mc2; L="Loves"; fudge=\n;'>
+Set-Cookie: keebler="E=mc2; L=\"Loves\"; fudge=\012;";
+  keebler 'E=mc2; L="Loves"; fudge=\n;' 'E=mc2; L="Loves"; fudge=\n;'
+Set-Cookie: keebler="E=mc2; L=\"Loves\"; fudge=\012;";
+<SimpleCookie: keebler='E=mc2'>
+Set-Cookie: keebler=E=mc2;
+  keebler 'E=mc2' 'E=mc2'
+Set-Cookie: keebler=E=mc2;
+Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme;
+
+        <SCRIPT LANGUAGE="JavaScript">
+        <!-- begin hiding
+        document.cookie = "Customer="WILE_E_COYOTE"; Path=/acme; Version=1;"
+        // end hiding -->
+        </script>
+        
+
+        <SCRIPT LANGUAGE="JavaScript">
+        <!-- begin hiding
+        document.cookie = "Customer="WILE_E_COYOTE"; Path=/acme;"
+        // end hiding -->
+        </script>
+        
+If anything blows up after this line, it's from Cookie's doctest.
diff --git a/lib-python/2.2/test/output/test_exceptions b/lib-python/2.2/test/output/test_exceptions
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_exceptions
@@ -0,0 +1,52 @@
+test_exceptions
+5. Built-in exceptions
+spam
+AttributeError
+spam
+EOFError
+spam
+IOError
+spam
+ImportError
+spam
+IndexError
+spam
+KeyError
+spam
+KeyboardInterrupt
+(not testable in a script)
+spam
+MemoryError
+(not safe to test)
+spam
+NameError
+spam
+OverflowError
+spam
+RuntimeError
+(not used any more?)
+spam
+SyntaxError
+'continue' not supported inside 'finally' clause
+ok
+'continue' not properly in loop
+ok
+'continue' not properly in loop
+ok
+spam
+IndentationError
+spam
+TabError
+spam
+SystemError
+(hard to reproduce)
+spam
+SystemExit
+spam
+TypeError
+spam
+ValueError
+spam
+ZeroDivisionError
+spam
+Exception
diff --git a/lib-python/2.2/test/output/test_extcall b/lib-python/2.2/test/output/test_extcall
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_extcall
@@ -0,0 +1,112 @@
+test_extcall
+() {}
+(1,) {}
+(1, 2) {}
+(1, 2, 3) {}
+(1, 2, 3, 4, 5) {}
+(1, 2, 3, 4, 5) {}
+(1, 2, 3, 4, 5) {}
+(1, 2, 3) {'a': 4, 'b': 5}
+(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
+(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
+TypeError: g() takes at least 1 argument (0 given)
+TypeError: g() takes at least 1 argument (0 given)
+TypeError: g() takes at least 1 argument (0 given)
+1 () {}
+1 (2,) {}
+1 (2, 3) {}
+1 (2, 3, 4, 5) {}
+0 (1, 2) {}
+0 (1, 2, 3) {}
+1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
+{'a': 1, 'b': 2, 'c': 3}
+{'a': 1, 'b': 2, 'c': 3}
+g() got multiple values for keyword argument 'x'
+g() got multiple values for keyword argument 'b'
+f() keywords must be strings
+h() got an unexpected keyword argument 'e'
+h() argument after * must be a sequence
+dir() argument after * must be a sequence
+NoneType object argument after * must be a sequence
+h() argument after ** must be a dictionary
+dir() argument after ** must be a dictionary
+NoneType object argument after ** must be a dictionary
+dir() got multiple values for keyword argument 'b'
+3 512 1
+3
+3
+za () {} -> za() takes exactly 1 argument (0 given)
+za () {'a': 'aa'} -> ok za aa B D E V a
+za () {'d': 'dd'} -> za() got an unexpected keyword argument 'd'
+za () {'a': 'aa', 'd': 'dd'} -> za() got an unexpected keyword argument 'd'
+za () {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> za() got an unexpected keyword argument 'b'
+za (1, 2) {} -> za() takes exactly 1 argument (2 given)
+za (1, 2) {'a': 'aa'} -> za() takes exactly 1 non-keyword argument (2 given)
+za (1, 2) {'d': 'dd'} -> za() takes exactly 1 non-keyword argument (2 given)
+za (1, 2) {'a': 'aa', 'd': 'dd'} -> za() takes exactly 1 non-keyword argument (2 given)
+za (1, 2) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> za() takes exactly 1 non-keyword argument (2 given)
+za (1, 2, 3, 4, 5) {} -> za() takes exactly 1 argument (5 given)
+za (1, 2, 3, 4, 5) {'a': 'aa'} -> za() takes exactly 1 non-keyword argument (5 given)
+za (1, 2, 3, 4, 5) {'d': 'dd'} -> za() takes exactly 1 non-keyword argument (5 given)
+za (1, 2, 3, 4, 5) {'a': 'aa', 'd': 'dd'} -> za() takes exactly 1 non-keyword argument (5 given)
+za (1, 2, 3, 4, 5) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> za() takes exactly 1 non-keyword argument (5 given)
+zade () {} -> zade() takes at least 1 argument (0 given)
+zade () {'a': 'aa'} -> ok zade aa B d e V a
+zade () {'d': 'dd'} -> zade() takes at least 1 non-keyword argument (0 given)
+zade () {'a': 'aa', 'd': 'dd'} -> ok zade aa B dd e V d
+zade () {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zade() got an unexpected keyword argument 'b'
+zade (1, 2) {} -> ok zade 1 B 2 e V e
+zade (1, 2) {'a': 'aa'} -> zade() got multiple values for keyword argument 'a'
+zade (1, 2) {'d': 'dd'} -> zade() got multiple values for keyword argument 'd'
+zade (1, 2) {'a': 'aa', 'd': 'dd'} -> zade() got multiple values for keyword argument 'a'
+zade (1, 2) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zade() got multiple values for keyword argument 'a'
+zade (1, 2, 3, 4, 5) {} -> zade() takes at most 3 arguments (5 given)
+zade (1, 2, 3, 4, 5) {'a': 'aa'} -> zade() takes at most 3 non-keyword arguments (5 given)
+zade (1, 2, 3, 4, 5) {'d': 'dd'} -> zade() takes at most 3 non-keyword arguments (5 given)
+zade (1, 2, 3, 4, 5) {'a': 'aa', 'd': 'dd'} -> zade() takes at most 3 non-keyword arguments (5 given)
+zade (1, 2, 3, 4, 5) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zade() takes at most 3 non-keyword arguments (5 given)
+zabk () {} -> zabk() takes exactly 2 arguments (0 given)
+zabk () {'a': 'aa'} -> zabk() takes exactly 2 non-keyword arguments (1 given)
+zabk () {'d': 'dd'} -> zabk() takes exactly 2 non-keyword arguments (0 given)
+zabk () {'a': 'aa', 'd': 'dd'} -> zabk() takes exactly 2 non-keyword arguments (1 given)
+zabk () {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> ok zabk aa bb D E V {'d': 'dd', 'e': 'ee'}
+zabk (1, 2) {} -> ok zabk 1 2 D E V {}
+zabk (1, 2) {'a': 'aa'} -> zabk() got multiple values for keyword argument 'a'
+zabk (1, 2) {'d': 'dd'} -> ok zabk 1 2 D E V {'d': 'dd'}
+zabk (1, 2) {'a': 'aa', 'd': 'dd'} -> zabk() got multiple values for keyword argument 'a'
+zabk (1, 2) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabk() got multiple values for keyword argument 'a'
+zabk (1, 2, 3, 4, 5) {} -> zabk() takes exactly 2 arguments (5 given)
+zabk (1, 2, 3, 4, 5) {'a': 'aa'} -> zabk() takes exactly 2 non-keyword arguments (5 given)
+zabk (1, 2, 3, 4, 5) {'d': 'dd'} -> zabk() takes exactly 2 non-keyword arguments (5 given)
+zabk (1, 2, 3, 4, 5) {'a': 'aa', 'd': 'dd'} -> zabk() takes exactly 2 non-keyword arguments (5 given)
+zabk (1, 2, 3, 4, 5) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabk() takes exactly 2 non-keyword arguments (5 given)
+zabdv () {} -> zabdv() takes at least 2 arguments (0 given)
+zabdv () {'a': 'aa'} -> zabdv() takes at least 2 non-keyword arguments (1 given)
+zabdv () {'d': 'dd'} -> zabdv() takes at least 2 non-keyword arguments (0 given)
+zabdv () {'a': 'aa', 'd': 'dd'} -> zabdv() takes at least 2 non-keyword arguments (1 given)
+zabdv () {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabdv() got an unexpected keyword argument 'e'
+zabdv (1, 2) {} -> ok zabdv 1 2 d E () e
+zabdv (1, 2) {'a': 'aa'} -> zabdv() got multiple values for keyword argument 'a'
+zabdv (1, 2) {'d': 'dd'} -> ok zabdv 1 2 dd E () d
+zabdv (1, 2) {'a': 'aa', 'd': 'dd'} -> zabdv() got multiple values for keyword argument 'a'
+zabdv (1, 2) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabdv() got multiple values for keyword argument 'a'
+zabdv (1, 2, 3, 4, 5) {} -> ok zabdv 1 2 3 E (4, 5) e
+zabdv (1, 2, 3, 4, 5) {'a': 'aa'} -> zabdv() got multiple values for keyword argument 'a'
+zabdv (1, 2, 3, 4, 5) {'d': 'dd'} -> zabdv() got multiple values for keyword argument 'd'
+zabdv (1, 2, 3, 4, 5) {'a': 'aa', 'd': 'dd'} -> zabdv() got multiple values for keyword argument 'a'
+zabdv (1, 2, 3, 4, 5) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabdv() got multiple values for keyword argument 'a'
+zabdevk () {} -> zabdevk() takes at least 2 arguments (0 given)
+zabdevk () {'a': 'aa'} -> zabdevk() takes at least 2 non-keyword arguments (1 given)
+zabdevk () {'d': 'dd'} -> zabdevk() takes at least 2 non-keyword arguments (0 given)
+zabdevk () {'a': 'aa', 'd': 'dd'} -> zabdevk() takes at least 2 non-keyword arguments (1 given)
+zabdevk () {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> ok zabdevk aa bb dd ee () {}
+zabdevk (1, 2) {} -> ok zabdevk 1 2 d e () {}
+zabdevk (1, 2) {'a': 'aa'} -> zabdevk() got multiple values for keyword argument 'a'
+zabdevk (1, 2) {'d': 'dd'} -> ok zabdevk 1 2 dd e () {}
+zabdevk (1, 2) {'a': 'aa', 'd': 'dd'} -> zabdevk() got multiple values for keyword argument 'a'
+zabdevk (1, 2) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabdevk() got multiple values for keyword argument 'a'
+zabdevk (1, 2, 3, 4, 5) {} -> ok zabdevk 1 2 3 4 (5,) {}
+zabdevk (1, 2, 3, 4, 5) {'a': 'aa'} -> zabdevk() got multiple values for keyword argument 'a'
+zabdevk (1, 2, 3, 4, 5) {'d': 'dd'} -> zabdevk() got multiple values for keyword argument 'd'
+zabdevk (1, 2, 3, 4, 5) {'a': 'aa', 'd': 'dd'} -> zabdevk() got multiple values for keyword argument 'a'
+zabdevk (1, 2, 3, 4, 5) {'a': 'aa', 'b': 'bb', 'd': 'dd', 'e': 'ee'} -> zabdevk() got multiple values for keyword argument 'a'
diff --git a/lib-python/2.2/test/output/test_frozen b/lib-python/2.2/test/output/test_frozen
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_frozen
@@ -0,0 +1,4 @@
+test_frozen
+Hello world...
+Hello world...
+Hello world...
diff --git a/lib-python/2.2/test/output/test_future b/lib-python/2.2/test/output/test_future
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_future
@@ -0,0 +1,9 @@
+test_future
+6
+6
+2
+SyntaxError badsyntax_future3 3
+SyntaxError badsyntax_future4 3
+SyntaxError badsyntax_future5 4
+SyntaxError badsyntax_future6 3
+SyntaxError badsyntax_future7 3
diff --git a/lib-python/2.2/test/output/test_gettext b/lib-python/2.2/test/output/test_gettext
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_gettext
@@ -0,0 +1,46 @@
+test_gettext
+test api 1
+installing gettext
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
+fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
+trggrkg zrffntr pngnybt yvoenel.
+wink wink
+bacon
+test api 2
+1
+gettext
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+albatross
+bacon
+Throatwobbler Mangrove
+wink wink
+Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
+fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
+trggrkg zrffntr pngnybt yvoenel.
diff --git a/lib-python/2.2/test/output/test_global b/lib-python/2.2/test/output/test_global
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_global
@@ -0,0 +1,5 @@
+test_global
+got SyntaxError as expected
+got SyntaxError as expected
+got SyntaxError as expected
+as expected, no SyntaxError
diff --git a/lib-python/2.2/test/output/test_grammar b/lib-python/2.2/test/output/test_grammar
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_grammar
@@ -0,0 +1,66 @@
+test_grammar
+1. Parser
+1.1 Tokens
+1.1.1 Backslashes
+1.1.2 Numeric literals
+1.1.2.1 Plain integers
+1.1.2.2 Long integers
+1.1.2.3 Floating point
+1.1.3 String literals
+1.2 Grammar
+single_input
+file_input
+expr_input
+eval_input
+funcdef
+lambdef
+simple_stmt
+expr_stmt
+print_stmt
+1 2 3
+1 2 3
+1 1 1
+extended print_stmt
+1 2 3
+1 2 3
+1 1 1
+hello world
+del_stmt
+pass_stmt
+flow_stmt
+break_stmt
+continue_stmt
+continue + try/except ok
+continue + try/finally ok
+testing continue and break in try/except in loop
+return_stmt
+raise_stmt
+import_stmt
+global_stmt
+exec_stmt
+assert_stmt
+if_stmt
+while_stmt
+for_stmt
+try_stmt
+suite
+test
+comparison
+binary mask ops
+shift ops
+additive ops
+multiplicative ops
+unary ops
+selectors
+
+atoms
+classdef
+['Apple', 'Banana', 'Coco  nut']
+[3, 6, 9, 12, 15]
+[3, 4, 5]
+[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'), (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'), (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'), (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'), (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')]
+[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'), (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'), (5, 'Banana'), (5, 'Coconut')]
+[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]]
+[0, 0, 0]
+[[1, 2], [3, 4], [5, 6]]
+[('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'), ('Macdonalds', 'Cheeseburger')]
diff --git a/lib-python/2.2/test/output/test_httplib b/lib-python/2.2/test/output/test_httplib
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_httplib
@@ -0,0 +1,10 @@
+test_httplib
+reply: 'HTTP/1.1 200 Ok\n'
+Text
+reply: 'HTTP/1.1 400.100 Not Ok\n'
+BadStatusLine raised as expected
+InvalidURL raised as expected
+InvalidURL raised as expected
+reply: 'HTTP/1.1 200 OK\n'
+header: Set-Cookie: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
+header: Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"
diff --git a/lib-python/2.2/test/output/test_linuxaudiodev b/lib-python/2.2/test/output/test_linuxaudiodev
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_linuxaudiodev
@@ -0,0 +1,7 @@
+test_linuxaudiodev
+expected rate >= 0, not -1
+expected sample size >= 0, not -2
+nchannels must be 1 or 2, not 3
+unknown audio encoding: 177
+for linear unsigned 16-bit little-endian audio, expected sample size 16, not 8
+for linear unsigned 8-bit audio, expected sample size 8, not 16
diff --git a/lib-python/2.2/test/output/test_longexp b/lib-python/2.2/test/output/test_longexp
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_longexp
@@ -0,0 +1,2 @@
+test_longexp
+65580
diff --git a/lib-python/2.2/test/output/test_math b/lib-python/2.2/test/output/test_math
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_math
@@ -0,0 +1,26 @@
+test_math
+math module, testing with eps 1e-05
+constants
+acos
+asin
+atan
+atan2
+ceil
+cos
+cosh
+exp
+fabs
+floor
+fmod
+frexp
+hypot
+ldexp
+log
+log10
+modf
+pow
+sin
+sinh
+sqrt
+tan
+tanh
diff --git a/lib-python/2.2/test/output/test_md5 b/lib-python/2.2/test/output/test_md5
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_md5
@@ -0,0 +1,9 @@
+test_md5
+MD5 test suite:
+MD5 ("") = d41d8cd98f00b204e9800998ecf8427e
+MD5 ("a") = 0cc175b9c0f1b6a831c399e269772661
+MD5 ("abc") = 900150983cd24fb0d6963f7d28e17f72
+MD5 ("message digest") = f96b697d7cb7938d525a2f31aaf161d0
+MD5 ("abcdefghijklmnopqrstuvwxyz") = c3fcd3d76192e4007dfb496cca67e13b
+MD5 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") = d174ab98d277d9f5a5611c2c9f419d9f
+MD5 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") = 57edf4a22be3c955ac49da2e2107b67a
diff --git a/lib-python/2.2/test/output/test_mimetools b/lib-python/2.2/test/output/test_mimetools
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_mimetools
@@ -0,0 +1,5 @@
+test_mimetools
+7bit PASS
+8bit PASS
+base64 PASS
+quoted-printable PASS
diff --git a/lib-python/2.2/test/output/test_mmap b/lib-python/2.2/test/output/test_mmap
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_mmap
@@ -0,0 +1,34 @@
+test_mmap
+<type 'mmap.mmap'>
+  Position of foo: 1.0 pages
+  Length of file: 2.0 pages
+  Contents of byte 0: '\x00'
+  Contents of first 3 bytes: '\x00\x00\x00'
+
+  Modifying file's content...
+  Contents of byte 0: '3'
+  Contents of first 3 bytes: '3\x00\x00'
+  Contents of second page: '\x00foobar\x00'
+  Regex match on mmap (page start, length of match): 1.0 6
+  Seek to zeroth byte
+  Seek to 42nd byte
+  Seek to last byte
+  Try to seek to negative position...
+  Try to seek beyond end of mmap...
+  Try to seek to negative position...
+  Attempting resize()
+  Creating 10 byte test data file.
+  Opening mmap with access=ACCESS_READ
+  Ensuring that readonly mmap can't be slice assigned.
+  Ensuring that readonly mmap can't be item assigned.
+  Ensuring that readonly mmap can't be write() to.
+  Ensuring that readonly mmap can't be write_byte() to.
+  Ensuring that readonly mmap can't be resized.
+  Opening mmap with size too big
+  Opening mmap with access=ACCESS_WRITE
+  Modifying write-through memory map.
+  Opening mmap with access=ACCESS_COPY
+  Modifying copy-on-write memory map.
+  Ensuring copy-on-write maps cannot be resized.
+  Ensuring invalid access parameter raises exception.
+ Test passed
diff --git a/lib-python/2.2/test/output/test_new b/lib-python/2.2/test/output/test_new
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_new
@@ -0,0 +1,7 @@
+test_new
+new.module()
+new.classobj()
+new.instance()
+new.instancemethod()
+new.function()
+new.code()
diff --git a/lib-python/2.2/test/output/test_nis b/lib-python/2.2/test/output/test_nis
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_nis
@@ -0,0 +1,2 @@
+test_nis
+nis.maps()
diff --git a/lib-python/2.2/test/output/test_opcodes b/lib-python/2.2/test/output/test_opcodes
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_opcodes
@@ -0,0 +1,6 @@
+test_opcodes
+2. Opcodes
+XXX Not yet fully implemented
+2.1 try inside for loop
+2.2 raise class exceptions
+2.3 comparing function objects
diff --git a/lib-python/2.2/test/output/test_openpty b/lib-python/2.2/test/output/test_openpty
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_openpty
@@ -0,0 +1,2 @@
+test_openpty
+Ping!
diff --git a/lib-python/2.2/test/output/test_operations b/lib-python/2.2/test/output/test_operations
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_operations
@@ -0,0 +1,6 @@
+test_operations
+3. Operations
+XXX Mostly not yet implemented
+3.1 Dictionary lookups succeed even if __cmp__() raises an exception
+raising error
+No exception passed through.
diff --git a/lib-python/2.2/test/output/test_pkg b/lib-python/2.2/test/output/test_pkg
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_pkg
@@ -0,0 +1,45 @@
+test_pkg
+running test t1
+running test t2
+t2 loading
+doc for t2
+t2.sub.subsub loading
+t2 t2.sub t2.sub.subsub
+['sub', 't2']
+t2.sub t2.sub.subsub
+t2.sub.subsub
+['spam', 'sub', 'subsub', 't2']
+t2 t2.sub t2.sub.subsub
+['spam', 'sub', 'subsub', 't2']
+running test t3
+t3 loading
+t3.sub.subsub loading
+t3 t3.sub t3.sub.subsub
+t3 loading
+t3.sub.subsub loading
+running test t4
+t4 loading
+t4.sub.subsub loading
+t4.sub.subsub.spam = 1
+running test t5
+t5.foo loading
+t5.string loading
+1
+['foo', 'string', 't5']
+['__doc__', '__file__', '__name__', '__path__', 'foo', 'string', 't5']
+['__doc__', '__file__', '__name__', 'string']
+['__doc__', '__file__', '__name__', 'spam']
+running test t6
+['__all__', '__doc__', '__file__', '__name__', '__path__']
+t6.spam loading
+t6.ham loading
+t6.eggs loading
+['__all__', '__doc__', '__file__', '__name__', '__path__', 'eggs', 'ham', 'spam']
+['eggs', 'ham', 'spam', 't6']
+running test t7
+t7 loading
+['__doc__', '__file__', '__name__', '__path__']
+['__doc__', '__file__', '__name__', '__path__']
+t7.sub.subsub loading
+['__doc__', '__file__', '__name__', '__path__', 'spam']
+t7.sub.subsub.spam = 1
diff --git a/lib-python/2.2/test/output/test_poll b/lib-python/2.2/test/output/test_poll
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_poll
@@ -0,0 +1,17 @@
+test_poll
+Running poll test 1
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+ This is a test.
+Poll test 1 complete
+Running poll test 2
+Poll test 2 complete
diff --git a/lib-python/2.2/test/output/test_popen2 b/lib-python/2.2/test/output/test_popen2
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_popen2
@@ -0,0 +1,9 @@
+test_popen2
+Test popen2 module:
+testing popen2...
+testing popen3...
+All OK
+Testing os module:
+testing popen2...
+testing popen3...
+All OK
diff --git a/lib-python/2.2/test/output/test_posixpath b/lib-python/2.2/test/output/test_posixpath
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_posixpath
@@ -0,0 +1,2 @@
+test_posixpath
+No errors.  Thank your lucky stars.
diff --git a/lib-python/2.2/test/output/test_pow b/lib-python/2.2/test/output/test_pow
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_pow
@@ -0,0 +1,25 @@
+test_pow
+Testing integer mode...
+    Testing 2-argument pow() function...
+    Testing 3-argument pow() function...
+Testing long integer mode...
+    Testing 2-argument pow() function...
+    Testing 3-argument pow() function...
+Testing floating point mode...
+    Testing 3-argument pow() function...
+The number in both columns should match.
+3 3
+-5 -5
+-1 -1
+5 5
+-3 -3
+-7 -7
+
+3L 3L
+-5L -5L
+-1L -1L
+5L 5L
+-3L -3L
+-7L -7L
+
+
diff --git a/lib-python/2.2/test/output/test_profile b/lib-python/2.2/test/output/test_profile
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_profile
@@ -0,0 +1,17 @@
+test_profile
+         53 function calls in 1.000 CPU seconds
+
+   Ordered by: standard name
+
+   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
+        1    0.000    0.000    1.000    1.000 <string>:1(?)
+        0    0.000             0.000          profile:0(profiler)
+        1    0.000    0.000    1.000    1.000 profile:0(testfunc())
+        1    0.400    0.400    1.000    1.000 test_profile.py:21(testfunc)
+        2    0.080    0.040    0.600    0.300 test_profile.py:30(helper)
+        4    0.116    0.029    0.120    0.030 test_profile.py:48(helper1)
+        8    0.312    0.039    0.400    0.050 test_profile.py:56(helper2)
+        8    0.064    0.008    0.080    0.010 test_profile.py:66(subhelper)
+       28    0.028    0.001    0.028    0.001 test_profile.py:78(__getattr__)
+
+
diff --git a/lib-python/2.2/test/output/test_pty b/lib-python/2.2/test/output/test_pty
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_pty
@@ -0,0 +1,3 @@
+test_pty
+I wish to buy a fish license.
+For my pet fish, Eric.
diff --git a/lib-python/2.2/test/output/test_pwd b/lib-python/2.2/test/output/test_pwd
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_pwd
@@ -0,0 +1,7 @@
+test_pwd
+pwd.getpwall()
+pwd.getpwuid()
+pwd.getpwnam()
+name matches uid
+caught expected exception
+caught expected exception
diff --git a/lib-python/2.2/test/output/test_pyexpat b/lib-python/2.2/test/output/test_pyexpat
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_pyexpat
@@ -0,0 +1,110 @@
+test_pyexpat
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+OK.
+PI:
+	'xml-stylesheet' 'href="stylesheet.css"'
+Comment:
+	' comment data '
+Notation declared: ('notation', None, 'notation.jpeg', None)
+Unparsed entity decl:
+	('unparsed_entity', None, 'entity.file', None, 'notation')
+Start element:
+	'root' {'attr1': 'value1', 'attr2': 'value2\xe1\xbd\x80'}
+NS decl:
+	'myns' 'http://www.python.org/namespace'
+Start element:
+	'http://www.python.org/namespace!subelement' {}
+Character data:
+	'Contents of subelements'
+End element:
+	'http://www.python.org/namespace!subelement'
+End of NS decl:
+	'myns'
+Start element:
+	'sub2' {}
+Start of CDATA section
+Character data:
+	'contents of CDATA section'
+End of CDATA section
+End element:
+	'sub2'
+External entity ref: (None, 'entity.file', None)
+End element:
+	'root'
+PI:
+	u'xml-stylesheet' u'href="stylesheet.css"'
+Comment:
+	u' comment data '
+Notation declared: (u'notation', None, u'notation.jpeg', None)
+Unparsed entity decl:
+	(u'unparsed_entity', None, u'entity.file', None, u'notation')
+Start element:
+	u'root' {u'attr1': u'value1', u'attr2': u'value2\u1f40'}
+NS decl:
+	u'myns' u'http://www.python.org/namespace'
+Start element:
+	u'http://www.python.org/namespace!subelement' {}
+Character data:
+	u'Contents of subelements'
+End element:
+	u'http://www.python.org/namespace!subelement'
+End of NS decl:
+	u'myns'
+Start element:
+	u'sub2' {}
+Start of CDATA section
+Character data:
+	u'contents of CDATA section'
+End of CDATA section
+End element:
+	u'sub2'
+External entity ref: (None, u'entity.file', None)
+End element:
+	u'root'
+PI:
+	u'xml-stylesheet' u'href="stylesheet.css"'
+Comment:
+	u' comment data '
+Notation declared: (u'notation', None, u'notation.jpeg', None)
+Unparsed entity decl:
+	(u'unparsed_entity', None, u'entity.file', None, u'notation')
+Start element:
+	u'root' {u'attr1': u'value1', u'attr2': u'value2\u1f40'}
+NS decl:
+	u'myns' u'http://www.python.org/namespace'
+Start element:
+	u'http://www.python.org/namespace!subelement' {}
+Character data:
+	u'Contents of subelements'
+End element:
+	u'http://www.python.org/namespace!subelement'
+End of NS decl:
+	u'myns'
+Start element:
+	u'sub2' {}
+Start of CDATA section
+Character data:
+	u'contents of CDATA section'
+End of CDATA section
+End element:
+	u'sub2'
+External entity ref: (None, u'entity.file', None)
+End element:
+	u'root'
+
+Testing constructor for proper handling of namespace_separator values:
+Legal values tested o.k.
+Caught expected TypeError:
+ParserCreate() argument 2 must be string or None, not int
+Caught expected ValueError:
+namespace_separator must be at most one character, omitted, or None
diff --git a/lib-python/2.2/test/output/test_re b/lib-python/2.2/test/output/test_re
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_re
@@ -0,0 +1,2 @@
+test_re
+maximum recursion limit exceeded
diff --git a/lib-python/2.2/test/output/test_regex b/lib-python/2.2/test/output/test_regex
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_regex
@@ -0,0 +1,29 @@
+test_regex
+no match: -1
+successful search: 6
+caught expected exception
+failed awk syntax: -1
+successful awk syntax: 2
+failed awk syntax: -1
+matching with group names and compile()
+-1
+caught expected exception
+matching with group names and symcomp()
+7
+801 999
+801
+('801', '999')
+('801', '999')
+realpat: \([0-9]+\) *\([0-9]+\)
+groupindex: {'one': 1, 'two': 2}
+not case folded search: -1
+case folded search: 6
+__members__: ['last', 'regs', 'translate', 'groupindex', 'realpat', 'givenpat']
+regs: ((6, 11), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1))
+last: HELLO WORLD
+translate: 256
+givenpat: world
+match with pos: -1
+search with pos: 18
+bogus group: ('world', None, None)
+no name: caught expected exception
diff --git a/lib-python/2.2/test/output/test_rgbimg b/lib-python/2.2/test/output/test_rgbimg
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_rgbimg
@@ -0,0 +1,2 @@
+test_rgbimg
+RGBimg test suite:
diff --git a/lib-python/2.2/test/output/test_richcmp b/lib-python/2.2/test/output/test_richcmp
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_richcmp
@@ -0,0 +1,187 @@
+test_richcmp
+
+operator: <
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         1 |         1 |
+Number(1) |         0 |         0 |         1 |
+Number(2) |         0 |         0 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: <=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         1 |         1 |
+Number(1) |         0 |         1 |         1 |
+Number(2) |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: ==
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         0 |         0 |
+Number(1) |         0 |         1 |         0 |
+Number(2) |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: !=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         1 |         1 |
+Number(1) |         1 |         0 |         1 |
+Number(2) |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         0 |         0 |
+Number(1) |         1 |         0 |         0 |
+Number(2) |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         0 |         0 |
+Number(1) |         1 |         1 |         0 |
+Number(2) |         1 |         1 |         1 |
+----------+-----------+-----------+-----------+-
+
+**************************************************
+
+operator: <
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         0 |         1 |         1 |
+        1 |         0 |         0 |         1 |
+        2 |         0 |         0 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: <=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         1 |         1 |         1 |
+        1 |         0 |         1 |         1 |
+        2 |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: ==
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         1 |         0 |         0 |
+        1 |         0 |         1 |         0 |
+        2 |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: !=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         0 |         1 |         1 |
+        1 |         1 |         0 |         1 |
+        2 |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         0 |         0 |         0 |
+        1 |         1 |         0 |         0 |
+        2 |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >=
+
+          | Number(0) | Number(1) | Number(2) |
+----------+-----------+-----------+-----------+-
+        0 |         1 |         0 |         0 |
+        1 |         1 |         1 |         0 |
+        2 |         1 |         1 |         1 |
+----------+-----------+-----------+-----------+-
+
+**************************************************
+
+operator: <
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         1 |         1 |
+Number(1) |         0 |         0 |         1 |
+Number(2) |         0 |         0 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: <=
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         1 |         1 |
+Number(1) |         0 |         1 |         1 |
+Number(2) |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: ==
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         0 |         0 |
+Number(1) |         0 |         1 |         0 |
+Number(2) |         0 |         0 |         1 |
+----------+-----------+-----------+-----------+-
+
+operator: !=
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         1 |         1 |
+Number(1) |         1 |         0 |         1 |
+Number(2) |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         0 |         0 |         0 |
+Number(1) |         1 |         0 |         0 |
+Number(2) |         1 |         1 |         0 |
+----------+-----------+-----------+-----------+-
+
+operator: >=
+
+          |         0 |         1 |         2 |
+----------+-----------+-----------+-----------+-
+Number(0) |         1 |         0 |         0 |
+Number(1) |         1 |         1 |         0 |
+Number(2) |         1 |         1 |         1 |
+----------+-----------+-----------+-----------+-
+
+**************************************************
+Vector([0, 1, 2, 3, 4]) <  Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 0, 0, 0])
+Vector([0, 1, 2, 3, 4]) <  [2, 2, 2, 2, 2]         -> Vector([1, 1, 0, 0, 0])
+        [0, 1, 2, 3, 4] <  Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 0, 0, 0])
+Vector([0, 1, 2, 3, 4]) <= Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 1, 0, 0])
+Vector([0, 1, 2, 3, 4]) <= [2, 2, 2, 2, 2]         -> Vector([1, 1, 1, 0, 0])
+        [0, 1, 2, 3, 4] <= Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 1, 0, 0])
+Vector([0, 1, 2, 3, 4]) == Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 1, 0, 0])
+Vector([0, 1, 2, 3, 4]) == [2, 2, 2, 2, 2]         -> Vector([0, 0, 1, 0, 0])
+        [0, 1, 2, 3, 4] == Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 1, 0, 0])
+Vector([0, 1, 2, 3, 4]) != Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 0, 1, 1])
+Vector([0, 1, 2, 3, 4]) != [2, 2, 2, 2, 2]         -> Vector([1, 1, 0, 1, 1])
+        [0, 1, 2, 3, 4] != Vector([2, 2, 2, 2, 2]) -> Vector([1, 1, 0, 1, 1])
+Vector([0, 1, 2, 3, 4]) >  Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 0, 1, 1])
+Vector([0, 1, 2, 3, 4]) >  [2, 2, 2, 2, 2]         -> Vector([0, 0, 0, 1, 1])
+        [0, 1, 2, 3, 4] >  Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 0, 1, 1])
+Vector([0, 1, 2, 3, 4]) >= Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 1, 1, 1])
+Vector([0, 1, 2, 3, 4]) >= [2, 2, 2, 2, 2]         -> Vector([0, 0, 1, 1, 1])
+        [0, 1, 2, 3, 4] >= Vector([2, 2, 2, 2, 2]) -> Vector([0, 0, 1, 1, 1])
diff --git a/lib-python/2.2/test/output/test_rotor b/lib-python/2.2/test/output/test_rotor
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_rotor
@@ -0,0 +1,5 @@
+test_rotor
+'\xb5q\x90\xa3\xe8IT\xd1\x07\xda\x1d\xdc\xdf'
+'\x1b>k(\x1f\x8a,\x81\xcb\x9f\xe2'
+spam and eggs
+cheese shop
diff --git a/lib-python/2.2/test/output/test_sax b/lib-python/2.2/test/output/test_sax
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_sax
@@ -0,0 +1,42 @@
+test_sax
+Passed test_attrs_empty
+Passed test_attrs_wattr
+Passed test_double_quoteattr
+Passed test_escape_all
+Passed test_escape_basic
+Passed test_escape_extra
+Passed test_expat_attrs_empty
+Passed test_expat_attrs_wattr
+Passed test_expat_dtdhandler
+Passed test_expat_entityresolver
+Passed test_expat_file
+Passed test_expat_incomplete
+Passed test_expat_incremental
+Passed test_expat_incremental_reset
+Passed test_expat_inpsource_filename
+Passed test_expat_inpsource_location
+Passed test_expat_inpsource_stream
+Passed test_expat_inpsource_sysid
+Passed test_expat_locator_noinfo
+Passed test_expat_locator_withinfo
+Passed test_expat_nsattrs_empty
+Passed test_expat_nsattrs_wattr
+Passed test_expat_nsdecl_pair_diff
+Passed test_expat_nsdecl_pair_same
+Passed test_expat_nsdecl_single
+Passed test_filter_basic
+Passed test_make_parser
+Passed test_make_parser2
+Passed test_nsattrs_empty
+Passed test_nsattrs_wattr
+Passed test_quoteattr_basic
+Passed test_single_double_quoteattr
+Passed test_single_quoteattr
+Passed test_xmlgen_attr_escape
+Passed test_xmlgen_basic
+Passed test_xmlgen_content
+Passed test_xmlgen_content_escape
+Passed test_xmlgen_ignorable
+Passed test_xmlgen_ns
+Passed test_xmlgen_pi
+40 tests, 0 failures
diff --git a/lib-python/2.2/test/output/test_scope b/lib-python/2.2/test/output/test_scope
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_scope
@@ -0,0 +1,24 @@
+test_scope
+1. simple nesting
+2. extra nesting
+3. simple nesting + rebinding
+4. nesting with global but no free
+5. nesting through class
+6. nesting plus free ref to global
+7. nearest enclosing scope
+8. mixed freevars and cellvars
+9. free variable in method
+10. recursion
+11. unoptimized namespaces
+12. lambdas
+13. UnboundLocal
+14. complex definitions
+15. scope of global statements
+16. check leaks
+17. class and global
+18. verify that locals() works
+19. var is bound and free in class
+20. interaction with trace function
+20. eval and exec with free variables
+21. list comprehension with local variables
+22. eval with free variables
diff --git a/lib-python/2.2/test/output/test_signal b/lib-python/2.2/test/output/test_signal
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_signal
@@ -0,0 +1,2 @@
+test_signal
+starting pause() loop...
diff --git a/lib-python/2.2/test/output/test_socket b/lib-python/2.2/test/output/test_socket
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_socket
@@ -0,0 +1,2 @@
+test_socket
+socket.error
diff --git a/lib-python/2.2/test/output/test_string b/lib-python/2.2/test/output/test_string
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_string
@@ -0,0 +1,3 @@
+test_string
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
diff --git a/lib-python/2.2/test/output/test_thread b/lib-python/2.2/test/output/test_thread
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_thread
@@ -0,0 +1,6 @@
+test_thread
+waiting for all tasks to complete
+all tasks done
+
+*** Barrier Test ***
+all tasks done
diff --git a/lib-python/2.2/test/output/test_threadedtempfile b/lib-python/2.2/test/output/test_threadedtempfile
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_threadedtempfile
@@ -0,0 +1,5 @@
+test_threadedtempfile
+Creating
+Starting
+Reaping
+Done: errors 0 ok 1000
diff --git a/lib-python/2.2/test/output/test_tokenize b/lib-python/2.2/test/output/test_tokenize
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_tokenize
@@ -0,0 +1,648 @@
+test_tokenize
+1,0-1,35:	COMMENT	"# Tests for the 'tokenize' module.\n"
+2,0-2,43:	COMMENT	'# Large bits stolen from test_grammar.py. \n'
+3,0-3,1:	NL	'\n'
+4,0-4,11:	COMMENT	'# Comments\n'
+5,0-5,3:	STRING	'"#"'
+5,3-5,4:	NEWLINE	'\n'
+6,0-6,3:	COMMENT	"#'\n"
+7,0-7,3:	COMMENT	'#"\n'
+8,0-8,3:	COMMENT	'#\\\n'
+9,7-9,9:	COMMENT	'#\n'
+10,4-10,10:	COMMENT	'# abc\n'
+11,0-12,4:	STRING	"'''#\n#'''"
+12,4-12,5:	NEWLINE	'\n'
+13,0-13,1:	NL	'\n'
+14,0-14,1:	NAME	'x'
+14,2-14,3:	OP	'='
+14,4-14,5:	NUMBER	'1'
+14,7-14,8:	COMMENT	'#'
+14,8-14,9:	NEWLINE	'\n'
+15,0-15,1:	NL	'\n'
+16,0-16,25:	COMMENT	'# Balancing continuation\n'
+17,0-17,1:	NL	'\n'
+18,0-18,1:	NAME	'a'
+18,2-18,3:	OP	'='
+18,4-18,5:	OP	'('
+18,5-18,6:	NUMBER	'3'
+18,6-18,7:	OP	','
+18,8-18,9:	NUMBER	'4'
+18,9-18,10:	OP	','
+18,10-18,11:	NL	'\n'
+19,2-19,3:	NUMBER	'5'
+19,3-19,4:	OP	','
+19,5-19,6:	NUMBER	'6'
+19,6-19,7:	OP	')'
+19,7-19,8:	NEWLINE	'\n'
+20,0-20,1:	NAME	'y'
+20,2-20,3:	OP	'='
+20,4-20,5:	OP	'['
+20,5-20,6:	NUMBER	'3'
+20,6-20,7:	OP	','
+20,8-20,9:	NUMBER	'4'
+20,9-20,10:	OP	','
+20,10-20,11:	NL	'\n'
+21,2-21,3:	NUMBER	'5'
+21,3-21,4:	OP	']'
+21,4-21,5:	NEWLINE	'\n'
+22,0-22,1:	NAME	'z'
+22,2-22,3:	OP	'='
+22,4-22,5:	OP	'{'
+22,5-22,8:	STRING	"'a'"
+22,8-22,9:	OP	':'
+22,9-22,10:	NUMBER	'5'
+22,10-22,11:	OP	','
+22,11-22,12:	NL	'\n'
+23,2-23,5:	STRING	"'b'"
+23,5-23,6:	OP	':'
+23,6-23,7:	NUMBER	'6'
+23,7-23,8:	OP	'}'
+23,8-23,9:	NEWLINE	'\n'
+24,0-24,1:	NAME	'x'
+24,2-24,3:	OP	'='
+24,4-24,5:	OP	'('
+24,5-24,8:	NAME	'len'
+24,8-24,9:	OP	'('
+24,9-24,10:	OP	'`'
+24,10-24,11:	NAME	'y'
+24,11-24,12:	OP	'`'
+24,12-24,13:	OP	')'
+24,14-24,15:	OP	'+'
+24,16-24,17:	NUMBER	'5'
+24,17-24,18:	OP	'*'
+24,18-24,19:	NAME	'x'
+24,20-24,21:	OP	'-'
+24,22-24,23:	NAME	'a'
+24,23-24,24:	OP	'['
+24,24-24,25:	NL	'\n'
+25,3-25,4:	NUMBER	'3'
+25,5-25,6:	OP	']'
+25,6-25,7:	NL	'\n'
+26,3-26,4:	OP	'-'
+26,5-26,6:	NAME	'x'
+26,7-26,8:	OP	'+'
+26,9-26,12:	NAME	'len'
+26,12-26,13:	OP	'('
+26,13-26,14:	OP	'{'
+26,14-26,15:	NL	'\n'
+27,3-27,4:	OP	'}'
+27,4-27,5:	NL	'\n'
+28,4-28,5:	OP	')'
+28,5-28,6:	NL	'\n'
+29,2-29,3:	OP	')'
+29,3-29,4:	NEWLINE	'\n'
+30,0-30,1:	NL	'\n'
+31,0-31,37:	COMMENT	'# Backslash means line continuation:\n'
+32,0-32,1:	NAME	'x'
+32,2-32,3:	OP	'='
+32,4-32,5:	NUMBER	'1'
+33,0-33,1:	OP	'+'
+33,2-33,3:	NUMBER	'1'
+33,3-33,4:	NEWLINE	'\n'
+34,0-34,1:	NL	'\n'
+35,0-35,55:	COMMENT	'# Backslash does not means continuation in comments :\\\n'
+36,0-36,1:	NAME	'x'
+36,2-36,3:	OP	'='
+36,4-36,5:	NUMBER	'0'
+36,5-36,6:	NEWLINE	'\n'
+37,0-37,1:	NL	'\n'
+38,0-38,20:	COMMENT	'# Ordinary integers\n'
+39,0-39,4:	NUMBER	'0xff'
+39,5-39,7:	OP	'<>'
+39,8-39,11:	NUMBER	'255'
+39,11-39,12:	NEWLINE	'\n'
+40,0-40,4:	NUMBER	'0377'
+40,5-40,7:	OP	'<>'
+40,8-40,11:	NUMBER	'255'
+40,11-40,12:	NEWLINE	'\n'
+41,0-41,10:	NUMBER	'2147483647'
+41,13-41,15:	OP	'!='
+41,16-41,28:	NUMBER	'017777777777'
+41,28-41,29:	NEWLINE	'\n'
+42,0-42,1:	OP	'-'
+42,1-42,11:	NUMBER	'2147483647'
+42,11-42,12:	OP	'-'
+42,12-42,13:	NUMBER	'1'
+42,14-42,16:	OP	'!='
+42,17-42,29:	NUMBER	'020000000000'
+42,29-42,30:	NEWLINE	'\n'
+43,0-43,12:	NUMBER	'037777777777'
+43,13-43,15:	OP	'!='
+43,16-43,17:	OP	'-'
+43,17-43,18:	NUMBER	'1'
+43,18-43,19:	NEWLINE	'\n'
+44,0-44,10:	NUMBER	'0xffffffff'
+44,11-44,13:	OP	'!='
+44,14-44,15:	OP	'-'
+44,15-44,16:	NUMBER	'1'
+44,16-44,17:	NEWLINE	'\n'
+45,0-45,1:	NL	'\n'
+46,0-46,16:	COMMENT	'# Long integers\n'
+47,0-47,1:	NAME	'x'
+47,2-47,3:	OP	'='
+47,4-47,6:	NUMBER	'0L'
+47,6-47,7:	NEWLINE	'\n'
+48,0-48,1:	NAME	'x'
+48,2-48,3:	OP	'='
+48,4-48,6:	NUMBER	'0l'
+48,6-48,7:	NEWLINE	'\n'
+49,0-49,1:	NAME	'x'
+49,2-49,3:	OP	'='
+49,4-49,23:	NUMBER	'0xffffffffffffffffL'
+49,23-49,24:	NEWLINE	'\n'
+50,0-50,1:	NAME	'x'
+50,2-50,3:	OP	'='
+50,4-50,23:	NUMBER	'0xffffffffffffffffl'
+50,23-50,24:	NEWLINE	'\n'
+51,0-51,1:	NAME	'x'
+51,2-51,3:	OP	'='
+51,4-51,23:	NUMBER	'077777777777777777L'
+51,23-51,24:	NEWLINE	'\n'
+52,0-52,1:	NAME	'x'
+52,2-52,3:	OP	'='
+52,4-52,23:	NUMBER	'077777777777777777l'
+52,23-52,24:	NEWLINE	'\n'
+53,0-53,1:	NAME	'x'
+53,2-53,3:	OP	'='
+53,4-53,35:	NUMBER	'123456789012345678901234567890L'
+53,35-53,36:	NEWLINE	'\n'
+54,0-54,1:	NAME	'x'
+54,2-54,3:	OP	'='
+54,4-54,35:	NUMBER	'123456789012345678901234567890l'
+54,35-54,36:	NEWLINE	'\n'
+55,0-55,1:	NL	'\n'
+56,0-56,25:	COMMENT	'# Floating-point numbers\n'
+57,0-57,1:	NAME	'x'
+57,2-57,3:	OP	'='
+57,4-57,8:	NUMBER	'3.14'
+57,8-57,9:	NEWLINE	'\n'
+58,0-58,1:	NAME	'x'
+58,2-58,3:	OP	'='
+58,4-58,8:	NUMBER	'314.'
+58,8-58,9:	NEWLINE	'\n'
+59,0-59,1:	NAME	'x'
+59,2-59,3:	OP	'='
+59,4-59,9:	NUMBER	'0.314'
+59,9-59,10:	NEWLINE	'\n'
+60,0-60,18:	COMMENT	'# XXX x = 000.314\n'
+61,0-61,1:	NAME	'x'
+61,2-61,3:	OP	'='
+61,4-61,8:	NUMBER	'.314'
+61,8-61,9:	NEWLINE	'\n'
+62,0-62,1:	NAME	'x'
+62,2-62,3:	OP	'='
+62,4-62,8:	NUMBER	'3e14'
+62,8-62,9:	NEWLINE	'\n'
+63,0-63,1:	NAME	'x'
+63,2-63,3:	OP	'='
+63,4-63,8:	NUMBER	'3E14'
+63,8-63,9:	NEWLINE	'\n'
+64,0-64,1:	NAME	'x'
+64,2-64,3:	OP	'='
+64,4-64,9:	NUMBER	'3e-14'
+64,9-64,10:	NEWLINE	'\n'
+65,0-65,1:	NAME	'x'
+65,2-65,3:	OP	'='
+65,4-65,9:	NUMBER	'3e+14'
+65,9-65,10:	NEWLINE	'\n'
+66,0-66,1:	NAME	'x'
+66,2-66,3:	OP	'='
+66,4-66,9:	NUMBER	'3.e14'
+66,9-66,10:	NEWLINE	'\n'
+67,0-67,1:	NAME	'x'
+67,2-67,3:	OP	'='
+67,4-67,9:	NUMBER	'.3e14'
+67,9-67,10:	NEWLINE	'\n'
+68,0-68,1:	NAME	'x'
+68,2-68,3:	OP	'='
+68,4-68,9:	NUMBER	'3.1e4'
+68,9-68,10:	NEWLINE	'\n'
+69,0-69,1:	NL	'\n'
+70,0-70,18:	COMMENT	'# String literals\n'
+71,0-71,1:	NAME	'x'
+71,2-71,3:	OP	'='
+71,4-71,6:	STRING	"''"
+71,6-71,7:	OP	';'
+71,8-71,9:	NAME	'y'
+71,10-71,11:	OP	'='
+71,12-71,14:	STRING	'""'
+71,14-71,15:	OP	';'
+71,15-71,16:	NEWLINE	'\n'
+72,0-72,1:	NAME	'x'
+72,2-72,3:	OP	'='
+72,4-72,8:	STRING	"'\\''"
+72,8-72,9:	OP	';'
+72,10-72,11:	NAME	'y'
+72,12-72,13:	OP	'='
+72,14-72,17:	STRING	'"\'"'
+72,17-72,18:	OP	';'
+72,18-72,19:	NEWLINE	'\n'
+73,0-73,1:	NAME	'x'
+73,2-73,3:	OP	'='
+73,4-73,7:	STRING	'\'"\''
+73,7-73,8:	OP	';'
+73,9-73,10:	NAME	'y'
+73,11-73,12:	OP	'='
+73,13-73,17:	STRING	'"\\""'
+73,17-73,18:	OP	';'
+73,18-73,19:	NEWLINE	'\n'
+74,0-74,1:	NAME	'x'
+74,2-74,3:	OP	'='
+74,4-74,32:	STRING	'"doesn\'t \\"shrink\\" does it"'
+74,32-74,33:	NEWLINE	'\n'
+75,0-75,1:	NAME	'y'
+75,2-75,3:	OP	'='
+75,4-75,31:	STRING	'\'doesn\\\'t "shrink" does it\''
+75,31-75,32:	NEWLINE	'\n'
+76,0-76,1:	NAME	'x'
+76,2-76,3:	OP	'='
+76,4-76,32:	STRING	'"does \\"shrink\\" doesn\'t it"'
+76,32-76,33:	NEWLINE	'\n'
+77,0-77,1:	NAME	'y'
+77,2-77,3:	OP	'='
+77,4-77,31:	STRING	'\'does "shrink" doesn\\\'t it\''
+77,31-77,32:	NEWLINE	'\n'
+78,0-78,1:	NAME	'x'
+78,2-78,3:	OP	'='
+78,4-83,3:	STRING	'"""\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n"""'
+83,3-83,4:	NEWLINE	'\n'
+84,0-84,1:	NAME	'y'
+84,2-84,3:	OP	'='
+84,4-84,63:	STRING	'\'\\nThe "quick"\\nbrown fox\\njumps over\\nthe \\\'lazy\\\' dog.\\n\''
+84,63-84,64:	NEWLINE	'\n'
+85,0-85,1:	NAME	'y'
+85,2-85,3:	OP	'='
+85,4-90,3:	STRING	'\'\'\'\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n\'\'\''
+90,3-90,4:	OP	';'
+90,4-90,5:	NEWLINE	'\n'
+91,0-91,1:	NAME	'y'
+91,2-91,3:	OP	'='
+91,4-96,1:	STRING	'"\\n\\\nThe \\"quick\\"\\n\\\nbrown fox\\n\\\njumps over\\n\\\nthe \'lazy\' dog.\\n\\\n"'
+96,1-96,2:	OP	';'
+96,2-96,3:	NEWLINE	'\n'
+97,0-97,1:	NAME	'y'
+97,2-97,3:	OP	'='
+97,4-102,1:	STRING	'\'\\n\\\nThe \\"quick\\"\\n\\\nbrown fox\\n\\\njumps over\\n\\\nthe \\\'lazy\\\' dog.\\n\\\n\''
+102,1-102,2:	OP	';'
+102,2-102,3:	NEWLINE	'\n'
+103,0-103,1:	NAME	'x'
+103,2-103,3:	OP	'='
+103,4-103,9:	STRING	"r'\\\\'"
+103,10-103,11:	OP	'+'
+103,12-103,17:	STRING	"R'\\\\'"
+103,17-103,18:	NEWLINE	'\n'
+104,0-104,1:	NAME	'x'
+104,2-104,3:	OP	'='
+104,4-104,9:	STRING	"r'\\''"
+104,10-104,11:	OP	'+'
+104,12-104,14:	STRING	"''"
+104,14-104,15:	NEWLINE	'\n'
+105,0-105,1:	NAME	'y'
+105,2-105,3:	OP	'='
+105,4-107,6:	STRING	"r'''\nfoo bar \\\\\nbaz'''"
+107,7-107,8:	OP	'+'
+107,9-108,6:	STRING	"R'''\nfoo'''"
+108,6-108,7:	NEWLINE	'\n'
+109,0-109,1:	NAME	'y'
+109,2-109,3:	OP	'='
+109,4-111,3:	STRING	'r"""foo\nbar \\\\ baz\n"""'
+111,4-111,5:	OP	'+'
+111,6-112,3:	STRING	"R'''spam\n'''"
+112,3-112,4:	NEWLINE	'\n'
+113,0-113,1:	NAME	'x'
+113,2-113,3:	OP	'='
+113,4-113,10:	STRING	"u'abc'"
+113,11-113,12:	OP	'+'
+113,13-113,19:	STRING	"U'ABC'"
+113,19-113,20:	NEWLINE	'\n'
+114,0-114,1:	NAME	'y'
+114,2-114,3:	OP	'='
+114,4-114,10:	STRING	'u"abc"'
+114,11-114,12:	OP	'+'
+114,13-114,19:	STRING	'U"ABC"'
+114,19-114,20:	NEWLINE	'\n'
+115,0-115,1:	NAME	'x'
+115,2-115,3:	OP	'='
+115,4-115,11:	STRING	"ur'abc'"
+115,12-115,13:	OP	'+'
+115,14-115,21:	STRING	"Ur'ABC'"
+115,22-115,23:	OP	'+'
+115,24-115,31:	STRING	"uR'ABC'"
+115,32-115,33:	OP	'+'
+115,34-115,41:	STRING	"UR'ABC'"
+115,41-115,42:	NEWLINE	'\n'
+116,0-116,1:	NAME	'y'
+116,2-116,3:	OP	'='
+116,4-116,11:	STRING	'ur"abc"'
+116,12-116,13:	OP	'+'
+116,14-116,21:	STRING	'Ur"ABC"'
+116,22-116,23:	OP	'+'
+116,24-116,31:	STRING	'uR"ABC"'
+116,32-116,33:	OP	'+'
+116,34-116,41:	STRING	'UR"ABC"'
+116,41-116,42:	NEWLINE	'\n'
+117,0-117,1:	NAME	'x'
+117,2-117,3:	OP	'='
+117,4-117,10:	STRING	"ur'\\\\'"
+117,11-117,12:	OP	'+'
+117,13-117,19:	STRING	"UR'\\\\'"
+117,19-117,20:	NEWLINE	'\n'
+118,0-118,1:	NAME	'x'
+118,2-118,3:	OP	'='
+118,4-118,10:	STRING	"ur'\\''"
+118,11-118,12:	OP	'+'
+118,13-118,15:	STRING	"''"
+118,15-118,16:	NEWLINE	'\n'
+119,0-119,1:	NAME	'y'
+119,2-119,3:	OP	'='
+119,4-121,6:	STRING	"ur'''\nfoo bar \\\\\nbaz'''"
+121,7-121,8:	OP	'+'
+121,9-122,6:	STRING	"UR'''\nfoo'''"
+122,6-122,7:	NEWLINE	'\n'
+123,0-123,1:	NAME	'y'
+123,2-123,3:	OP	'='
+123,4-125,3:	STRING	'Ur"""foo\nbar \\\\ baz\n"""'
+125,4-125,5:	OP	'+'
+125,6-126,3:	STRING	"uR'''spam\n'''"
+126,3-126,4:	NEWLINE	'\n'
+127,0-127,1:	NL	'\n'
+128,0-128,14:	COMMENT	'# Indentation\n'
+129,0-129,2:	NAME	'if'
+129,3-129,4:	NUMBER	'1'
+129,4-129,5:	OP	':'
+129,5-129,6:	NEWLINE	'\n'
+130,0-130,4:	INDENT	'    '
+130,4-130,5:	NAME	'x'
+130,6-130,7:	OP	'='
+130,8-130,9:	NUMBER	'2'
+130,9-130,10:	NEWLINE	'\n'
+131,0-131,0:	DEDENT	''
+131,0-131,2:	NAME	'if'
+131,3-131,4:	NUMBER	'1'
+131,4-131,5:	OP	':'
+131,5-131,6:	NEWLINE	'\n'
+132,0-132,8:	INDENT	'        '
+132,8-132,9:	NAME	'x'
+132,10-132,11:	OP	'='
+132,12-132,13:	NUMBER	'2'
+132,13-132,14:	NEWLINE	'\n'
+133,0-133,0:	DEDENT	''
+133,0-133,2:	NAME	'if'
+133,3-133,4:	NUMBER	'1'
+133,4-133,5:	OP	':'
+133,5-133,6:	NEWLINE	'\n'
+134,0-134,4:	INDENT	'    '
+134,4-134,9:	NAME	'while'
+134,10-134,11:	NUMBER	'0'
+134,11-134,12:	OP	':'
+134,12-134,13:	NEWLINE	'\n'
+135,0-135,5:	INDENT	'     '
+135,5-135,7:	NAME	'if'
+135,8-135,9:	NUMBER	'0'
+135,9-135,10:	OP	':'
+135,10-135,11:	NEWLINE	'\n'
+136,0-136,11:	INDENT	'           '
+136,11-136,12:	NAME	'x'
+136,13-136,14:	OP	'='
+136,15-136,16:	NUMBER	'2'
+136,16-136,17:	NEWLINE	'\n'
+137,5-137,5:	DEDENT	''
+137,5-137,6:	NAME	'x'
+137,7-137,8:	OP	'='
+137,9-137,10:	NUMBER	'2'
+137,10-137,11:	NEWLINE	'\n'
+138,0-138,0:	DEDENT	''
+138,0-138,0:	DEDENT	''
+138,0-138,2:	NAME	'if'
+138,3-138,4:	NUMBER	'0'
+138,4-138,5:	OP	':'
+138,5-138,6:	NEWLINE	'\n'
+139,0-139,2:	INDENT	'  '
+139,2-139,4:	NAME	'if'
+139,5-139,6:	NUMBER	'2'
+139,6-139,7:	OP	':'
+139,7-139,8:	NEWLINE	'\n'
+140,0-140,3:	INDENT	'   '
+140,3-140,8:	NAME	'while'
+140,9-140,10:	NUMBER	'0'
+140,10-140,11:	OP	':'
+140,11-140,12:	NEWLINE	'\n'
+141,0-141,8:	INDENT	'        '
+141,8-141,10:	NAME	'if'
+141,11-141,12:	NUMBER	'1'
+141,12-141,13:	OP	':'
+141,13-141,14:	NEWLINE	'\n'
+142,0-142,10:	INDENT	'          '
+142,10-142,11:	NAME	'x'
+142,12-142,13:	OP	'='
+142,14-142,15:	NUMBER	'2'
+142,15-142,16:	NEWLINE	'\n'
+143,0-143,1:	NL	'\n'
+144,0-144,12:	COMMENT	'# Operators\n'
+145,0-145,1:	NL	'\n'
+146,0-146,0:	DEDENT	''
+146,0-146,0:	DEDENT	''
+146,0-146,0:	DEDENT	''
+146,0-146,0:	DEDENT	''
+146,0-146,3:	NAME	'def'
+146,4-146,7:	NAME	'd22'
+146,7-146,8:	OP	'('
+146,8-146,9:	NAME	'a'
+146,9-146,10:	OP	','
+146,11-146,12:	NAME	'b'
+146,12-146,13:	OP	','
+146,14-146,15:	NAME	'c'
+146,15-146,16:	OP	'='
+146,16-146,17:	NUMBER	'1'
+146,17-146,18:	OP	','
+146,19-146,20:	NAME	'd'
+146,20-146,21:	OP	'='
+146,21-146,22:	NUMBER	'2'
+146,22-146,23:	OP	')'
+146,23-146,24:	OP	':'
+146,25-146,29:	NAME	'pass'
+146,29-146,30:	NEWLINE	'\n'
+147,0-147,3:	NAME	'def'
+147,4-147,8:	NAME	'd01v'
+147,8-147,9:	OP	'('
+147,9-147,10:	NAME	'a'
+147,10-147,11:	OP	'='
+147,11-147,12:	NUMBER	'1'
+147,12-147,13:	OP	','
+147,14-147,15:	OP	'*'
+147,15-147,20:	NAME	'restt'
+147,20-147,21:	OP	','
+147,22-147,24:	OP	'**'
+147,24-147,29:	NAME	'restd'
+147,29-147,30:	OP	')'
+147,30-147,31:	OP	':'
+147,32-147,36:	NAME	'pass'
+147,36-147,37:	NEWLINE	'\n'
+148,0-148,1:	NL	'\n'
+149,0-149,1:	OP	'('
+149,1-149,2:	NAME	'x'
+149,2-149,3:	OP	','
+149,4-149,5:	NAME	'y'
+149,5-149,6:	OP	')'
+149,7-149,9:	OP	'<>'
+149,10-149,11:	OP	'('
+149,11-149,12:	OP	'{'
+149,12-149,15:	STRING	"'a'"
+149,15-149,16:	OP	':'
+149,16-149,17:	NUMBER	'1'
+149,17-149,18:	OP	'}'
+149,18-149,19:	OP	','
+149,20-149,21:	OP	'{'
+149,21-149,24:	STRING	"'b'"
+149,24-149,25:	OP	':'
+149,25-149,26:	NUMBER	'2'
+149,26-149,27:	OP	'}'
+149,27-149,28:	OP	')'
+149,28-149,29:	NEWLINE	'\n'
+150,0-150,1:	NL	'\n'
+151,0-151,13:	COMMENT	'# comparison\n'
+152,0-152,2:	NAME	'if'
+152,3-152,4:	NUMBER	'1'
+152,5-152,6:	OP	'<'
+152,7-152,8:	NUMBER	'1'
+152,9-152,10:	OP	'>'
+152,11-152,12:	NUMBER	'1'
+152,13-152,15:	OP	'=='
+152,16-152,17:	NUMBER	'1'
+152,18-152,20:	OP	'>='
+152,21-152,22:	NUMBER	'1'
+152,23-152,25:	OP	'<='
+152,26-152,27:	NUMBER	'1'
+152,28-152,30:	OP	'<>'
+152,31-152,32:	NUMBER	'1'
+152,33-152,35:	OP	'!='
+152,36-152,37:	NUMBER	'1'
+152,38-152,40:	NAME	'in'
+152,41-152,42:	NUMBER	'1'
+152,43-152,46:	NAME	'not'
+152,47-152,49:	NAME	'in'
+152,50-152,51:	NUMBER	'1'
+152,52-152,54:	NAME	'is'
+152,55-152,56:	NUMBER	'1'
+152,57-152,59:	NAME	'is'
+152,60-152,63:	NAME	'not'
+152,64-152,65:	NUMBER	'1'
+152,65-152,66:	OP	':'
+152,67-152,71:	NAME	'pass'
+152,71-152,72:	NEWLINE	'\n'
+153,0-153,1:	NL	'\n'
+154,0-154,9:	COMMENT	'# binary\n'
+155,0-155,1:	NAME	'x'
+155,2-155,3:	OP	'='
+155,4-155,5:	NUMBER	'1'
+155,6-155,7:	OP	'&'
+155,8-155,9:	NUMBER	'1'
+155,9-155,10:	NEWLINE	'\n'
+156,0-156,1:	NAME	'x'
+156,2-156,3:	OP	'='
+156,4-156,5:	NUMBER	'1'
+156,6-156,7:	OP	'^'
+156,8-156,9:	NUMBER	'1'
+156,9-156,10:	NEWLINE	'\n'
+157,0-157,1:	NAME	'x'
+157,2-157,3:	OP	'='
+157,4-157,5:	NUMBER	'1'
+157,6-157,7:	OP	'|'
+157,8-157,9:	NUMBER	'1'
+157,9-157,10:	NEWLINE	'\n'
+158,0-158,1:	NL	'\n'
+159,0-159,8:	COMMENT	'# shift\n'
+160,0-160,1:	NAME	'x'
+160,2-160,3:	OP	'='
+160,4-160,5:	NUMBER	'1'
+160,6-160,8:	OP	'<<'
+160,9-160,10:	NUMBER	'1'
+160,11-160,13:	OP	'>>'
+160,14-160,15:	NUMBER	'1'
+160,15-160,16:	NEWLINE	'\n'
+161,0-161,1:	NL	'\n'
+162,0-162,11:	COMMENT	'# additive\n'
+163,0-163,1:	NAME	'x'
+163,2-163,3:	OP	'='
+163,4-163,5:	NUMBER	'1'
+163,6-163,7:	OP	'-'
+163,8-163,9:	NUMBER	'1'
+163,10-163,11:	OP	'+'
+163,12-163,13:	NUMBER	'1'
+163,14-163,15:	OP	'-'
+163,16-163,17:	NUMBER	'1'
+163,18-163,19:	OP	'+'
+163,20-163,21:	NUMBER	'1'
+163,21-163,22:	NEWLINE	'\n'
+164,0-164,1:	NL	'\n'
+165,0-165,17:	COMMENT	'# multiplicative\n'
+166,0-166,1:	NAME	'x'
+166,2-166,3:	OP	'='
+166,4-166,5:	NUMBER	'1'
+166,6-166,7:	OP	'/'
+166,8-166,9:	NUMBER	'1'
+166,10-166,11:	OP	'*'
+166,12-166,13:	NUMBER	'1'
+166,14-166,15:	OP	'%'
+166,16-166,17:	NUMBER	'1'
+166,17-166,18:	NEWLINE	'\n'
+167,0-167,1:	NL	'\n'
+168,0-168,8:	COMMENT	'# unary\n'
+169,0-169,1:	NAME	'x'
+169,2-169,3:	OP	'='
+169,4-169,5:	OP	'~'
+169,5-169,6:	NUMBER	'1'
+169,7-169,8:	OP	'^'
+169,9-169,10:	NUMBER	'1'
+169,11-169,12:	OP	'&'
+169,13-169,14:	NUMBER	'1'
+169,15-169,16:	OP	'|'
+169,17-169,18:	NUMBER	'1'
+169,19-169,20:	OP	'&'
+169,21-169,22:	NUMBER	'1'
+169,23-169,24:	OP	'^'
+169,25-169,26:	OP	'-'
+169,26-169,27:	NUMBER	'1'
+169,27-169,28:	NEWLINE	'\n'
+170,0-170,1:	NAME	'x'
+170,2-170,3:	OP	'='
+170,4-170,5:	OP	'-'
+170,5-170,6:	NUMBER	'1'
+170,6-170,7:	OP	'*'
+170,7-170,8:	NUMBER	'1'
+170,8-170,9:	OP	'/'
+170,9-170,10:	NUMBER	'1'
+170,11-170,12:	OP	'+'
+170,13-170,14:	NUMBER	'1'
+170,14-170,15:	OP	'*'
+170,15-170,16:	NUMBER	'1'
+170,17-170,18:	OP	'-'
+170,19-170,20:	OP	'-'
+170,20-170,21:	OP	'-'
+170,21-170,22:	OP	'-'
+170,22-170,23:	NUMBER	'1'
+170,23-170,24:	OP	'*'
+170,24-170,25:	NUMBER	'1'
+170,25-170,26:	NEWLINE	'\n'
+171,0-171,1:	NL	'\n'
+172,0-172,11:	COMMENT	'# selector\n'
+173,0-173,6:	NAME	'import'
+173,7-173,10:	NAME	'sys'
+173,10-173,11:	OP	','
+173,12-173,16:	NAME	'time'
+173,16-173,17:	NEWLINE	'\n'
+174,0-174,1:	NAME	'x'
+174,2-174,3:	OP	'='
+174,4-174,7:	NAME	'sys'
+174,7-174,8:	OP	'.'
+174,8-174,15:	NAME	'modules'
+174,15-174,16:	OP	'['
+174,16-174,22:	STRING	"'time'"
+174,22-174,23:	OP	']'
+174,23-174,24:	OP	'.'
+174,24-174,28:	NAME	'time'
+174,28-174,29:	OP	'('
+174,29-174,30:	OP	')'
+174,30-174,31:	NEWLINE	'\n'
+175,0-175,1:	NL	'\n'
+176,0-176,0:	ENDMARKER	''
diff --git a/lib-python/2.2/test/output/test_types b/lib-python/2.2/test/output/test_types
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_types
@@ -0,0 +1,16 @@
+test_types
+6. Built-in types
+6.1 Truth value testing
+6.2 Boolean operations
+6.3 Comparisons
+6.4 Numeric types (mostly conversions)
+6.4.1 32-bit integers
+6.4.2 Long integers
+6.4.3 Floating point numbers
+6.5 Sequence types
+6.5.1 Strings
+6.5.2 Tuples
+6.5.3 Lists
+6.5.3a Additional list operations
+6.6 Mappings == Dictionaries
+6.7 Buffers
diff --git a/lib-python/2.2/test/output/test_ucn b/lib-python/2.2/test/output/test_ucn
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_ucn
@@ -0,0 +1,7 @@
+test_ucn
+Testing General Unicode Character Name, and case insensitivity... done.
+Testing name to code mapping.... done.
+Testing code to name mapping for all characters.... done.
+Found 10538 characters in the unicode name database
+Testing misc. symbols for unicode character name expansion.... done.
+Testing unicode character name expansion strict error handling.... done.
diff --git a/lib-python/2.2/test/output/test_unicode b/lib-python/2.2/test/output/test_unicode
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_unicode
@@ -0,0 +1,21 @@
+test_unicode
+Testing Unicode comparisons... done.
+Testing Unicode contains method... done.
+Testing Unicode formatting strings... done.
+Testing builtin unicode()... done.
+Testing builtin codecs... done.
+Testing standard mapping codecs... 0-127... 128-255... done.
+Testing Unicode string concatenation... done.
+Testing Unicode printing... abc
+abc def
+abc def
+abc def
+abc
+
+abc
+abc
+def
+
+def
+
+done.
diff --git a/lib-python/2.2/test/output/test_unicode_file b/lib-python/2.2/test/output/test_unicode_file
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_unicode_file
@@ -0,0 +1,2 @@
+test_unicode_file
+All the Unicode tests appeared to work
diff --git a/lib-python/2.2/test/output/test_unicodedata b/lib-python/2.2/test/output/test_unicodedata
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_unicodedata
@@ -0,0 +1,5 @@
+test_unicodedata
+Testing Unicode Database...
+Methods: 84b72943b1d4320bc1e64a4888f7cdf62eea219a
+Functions: 41e1d4792185d6474a43c83ce4f593b1bdb01f8a
+API: ok
diff --git a/lib-python/2.2/test/output/test_urlparse b/lib-python/2.2/test/output/test_urlparse
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_urlparse
@@ -0,0 +1,47 @@
+test_urlparse
+http://www.python.org = ('http', 'www.python.org', '', '', '', '')
+http://www.python.org#abc = ('http', 'www.python.org', '', '', '', 'abc')
+http://www.python.org/#abc = ('http', 'www.python.org', '/', '', '', 'abc')
+http://a/b/c/d;p?q#f = ('http', 'a', '/b/c/d', 'p', 'q', 'f')
+file:///tmp/junk.txt = ('file', '', '/tmp/junk.txt', '', '', '')
+
+urlparse.urljoin() tests
+
+g:h           = 'g:h'
+g             = 'http://a/b/c/g'
+./g           = 'http://a/b/c/g'
+g/            = 'http://a/b/c/g/'
+/g            = 'http://a/g'
+//g           = 'http://g'
+?y            = 'http://a/b/c/d;p?y'
+g?y           = 'http://a/b/c/g?y'
+g?y/./x       = 'http://a/b/c/g?y/./x'
+#s            = 'http://a/b/c/d;p?q#s'
+g#s           = 'http://a/b/c/g#s'
+g#s/./x       = 'http://a/b/c/g#s/./x'
+g?y#s         = 'http://a/b/c/g?y#s'
+;x            = 'http://a/b/c/d;x'
+g;x           = 'http://a/b/c/g;x'
+g;x?y#s       = 'http://a/b/c/g;x?y#s'
+.             = 'http://a/b/c/'
+./            = 'http://a/b/c/'
+..            = 'http://a/b/'
+../           = 'http://a/b/'
+../g          = 'http://a/b/g'
+../..         = 'http://a/'
+../../        = 'http://a/'
+../../g       = 'http://a/g'
+              = 'http://a/b/c/d;p?q#f'
+../../../g    = 'http://a/../g'
+../../../../g = 'http://a/../../g'
+/./g          = 'http://a/./g'
+/../g         = 'http://a/../g'
+g.            = 'http://a/b/c/g.'
+.g            = 'http://a/b/c/.g'
+g..           = 'http://a/b/c/g..'
+..g           = 'http://a/b/c/..g'
+./../g        = 'http://a/b/g'
+./g/.         = 'http://a/b/c/g/'
+g/./h         = 'http://a/b/c/g/h'
+g/../h        = 'http://a/b/c/h'
+0 errors
diff --git a/lib-python/2.2/test/output/test_winreg b/lib-python/2.2/test/output/test_winreg
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_winreg
@@ -0,0 +1,3 @@
+test_winreg
+Local registry tests worked
+Remote registry calls can be tested using 'test_winreg.py --remote \\machine_name'
diff --git a/lib-python/2.2/test/output/test_winsound b/lib-python/2.2/test/output/test_winsound
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_winsound
@@ -0,0 +1,2 @@
+test_winsound
+Hopefully you heard some sounds increasing in frequency!
diff --git a/lib-python/2.2/test/output/test_xreadline b/lib-python/2.2/test/output/test_xreadline
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_xreadline
@@ -0,0 +1,4 @@
+test_xreadline
+AttributeError (expected)
+TypeError (expected)
+RuntimeError (expected): xreadlines object accessed out of order
diff --git a/lib-python/2.2/test/output/test_zlib b/lib-python/2.2/test/output/test_zlib
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/output/test_zlib
@@ -0,0 +1,14 @@
+test_zlib
+0xe5c1a120 0x43b6aa94
+0xbd602f7 0xbd602f7
+expecting Bad compression level
+expecting Invalid initialization option
+expecting Invalid initialization option
+normal compression/decompression succeeded
+compress/decompression obj succeeded
+decompress with init options succeeded
+decompressobj with init options succeeded
+should be '': ''
+max_length decompressobj succeeded
+unconsumed_tail should be '': ''
+Testing on 17K of random data
diff --git a/lib-python/2.2/test/pickletester.py b/lib-python/2.2/test/pickletester.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/pickletester.py
@@ -0,0 +1,285 @@
+import unittest
+from test_support import TestFailed, have_unicode
+
+class C:
+    def __cmp__(self, other):
+        return cmp(self.__dict__, other.__dict__)
+
+import __main__
+__main__.C = C
+C.__module__ = "__main__"
+
+class myint(int):
+    def __init__(self, x):
+        self.str = str(x)
+
+class initarg(C):
+
+    __safe_for_unpickling__ = 1
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+    def __getinitargs__(self):
+        return self.a, self.b
+
+class metaclass(type):
+    pass
+
+class use_metaclass(object):
+    __metaclass__ = metaclass
+
+# break into multiple strings to avoid confusing font-lock-mode
+DATA = """(lp1
+I0
+aL1L
+aF2
+ac__builtin__
+complex
+p2
+""" + \
+"""(F3
+F0
+tRp3
+aI1
+aI-1
+aI255
+aI-255
+aI-256
+aI65535
+aI-65535
+aI-65536
+aI2147483647
+aI-2147483647
+aI-2147483648
+a""" + \
+"""(S'abc'
+p4
+g4
+""" + \
+"""(i__main__
+C
+p5
+""" + \
+"""(dp6
+S'foo'
+p7
+I1
+sS'bar'
+p8
+I2
+sbg5
+tp9
+ag9
+aI5
+a.
+"""
+
+BINDATA = ']q\x01(K\x00L1L\nG@\x00\x00\x00\x00\x00\x00\x00' + \
+          'c__builtin__\ncomplex\nq\x02(G@\x08\x00\x00\x00\x00\x00' + \
+          '\x00G\x00\x00\x00\x00\x00\x00\x00\x00tRq\x03K\x01J\xff\xff' + \
+          '\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xff' + \
+          'J\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00' + \
+          '\x00\x80J\x00\x00\x00\x80(U\x03abcq\x04h\x04(c__main__\n' + \
+          'C\nq\x05oq\x06}q\x07(U\x03fooq\x08K\x01U\x03barq\tK\x02ubh' + \
+          '\x06tq\nh\nK\x05e.'
+
+def create_data():
+    c = C()
+    c.foo = 1
+    c.bar = 2
+    x = [0, 1L, 2.0, 3.0+0j]
+    # Append some integer test cases at cPickle.c's internal size
+    # cutoffs.
+    uint1max = 0xff
+    uint2max = 0xffff
+    int4max = 0x7fffffff
+    x.extend([1, -1,
+              uint1max, -uint1max, -uint1max-1,
+              uint2max, -uint2max, -uint2max-1,
+               int4max,  -int4max,  -int4max-1])
+    y = ('abc', 'abc', c, c)
+    x.append(y)
+    x.append(y)
+    x.append(5)
+    return x
+
+class AbstractPickleTests(unittest.TestCase):
+
+    _testdata = create_data()
+
+    def setUp(self):
+        # subclass must define self.dumps, self.loads, self.error
+        pass
+
+    def test_misc(self):
+        # test various datatypes not tested by testdata
+        x = myint(4)
+        s = self.dumps(x)
+        y = self.loads(s)
+        self.assertEqual(x, y)
+
+        x = (1, ())
+        s = self.dumps(x)
+        y = self.loads(s)
+        self.assertEqual(x, y)
+
+        x = initarg(1, x)
+        s = self.dumps(x)
+        y = self.loads(s)
+        self.assertEqual(x, y)
+
+        # XXX test __reduce__ protocol?
+
+    def test_identity(self):
+        s = self.dumps(self._testdata)
+        x = self.loads(s)
+        self.assertEqual(x, self._testdata)
+
+    def test_constant(self):
+        x = self.loads(DATA)
+        self.assertEqual(x, self._testdata)
+        x = self.loads(BINDATA)
+        self.assertEqual(x, self._testdata)
+
+    def test_binary(self):
+        s = self.dumps(self._testdata, 1)
+        x = self.loads(s)
+        self.assertEqual(x, self._testdata)
+
+    def test_recursive_list(self):
+        l = []
+        l.append(l)
+        s = self.dumps(l)
+        x = self.loads(s)
+        self.assertEqual(x, l)
+        self.assertEqual(x, x[0])
+        self.assertEqual(id(x), id(x[0]))
+
+    def test_recursive_dict(self):
+        d = {}
+        d[1] = d
+        s = self.dumps(d)
+        x = self.loads(s)
+        self.assertEqual(x, d)
+        self.assertEqual(x[1], x)
+        self.assertEqual(id(x[1]), id(x))
+
+    def test_recursive_inst(self):
+        i = C()
+        i.attr = i
+        s = self.dumps(i)
+        x = self.loads(s)
+        self.assertEqual(x, i)
+        self.assertEqual(x.attr, x)
+        self.assertEqual(id(x.attr), id(x))
+
+    def test_recursive_multi(self):
+        l = []
+        d = {1:l}
+        i = C()
+        i.attr = d
+        l.append(i)
+        s = self.dumps(l)
+        x = self.loads(s)
+        self.assertEqual(x, l)
+        self.assertEqual(x[0], i)
+        self.assertEqual(x[0].attr, d)
+        self.assertEqual(x[0].attr[1], x)
+        self.assertEqual(x[0].attr[1][0], i)
+        self.assertEqual(x[0].attr[1][0].attr, d)
+
+    def test_garyp(self):
+        self.assertRaises(self.error, self.loads, 'garyp')
+
+    def test_insecure_strings(self):
+        insecure = ["abc", "2 + 2", # not quoted
+                    "'abc' + 'def'", # not a single quoted string
+                    "'abc", # quote is not closed
+                    "'abc\"", # open quote and close quote don't match
+                    "'abc'   ?", # junk after close quote
+                    # some tests of the quoting rules
+                    "'abc\"\''",
+                    "'\\\\a\'\'\'\\\'\\\\\''",
+                    ]
+        for s in insecure:
+            buf = "S" + s + "\012p0\012."
+            self.assertRaises(ValueError, self.loads, buf)
+
+    if have_unicode:
+        def test_unicode(self):
+            endcases = [unicode(''), unicode('<\\u>'), unicode('<\\\u1234>'),
+                        unicode('<\n>'),  unicode('<\\>')]
+            for u in endcases:
+                p = self.dumps(u)
+                u2 = self.loads(p)
+                self.assertEqual(u2, u)
+
+    def test_ints(self):
+        import sys
+        n = sys.maxint
+        while n:
+            for expected in (-n, n):
+                s = self.dumps(expected)
+                n2 = self.loads(s)
+                self.assertEqual(expected, n2)
+            n = n >> 1
+
+    def test_maxint64(self):
+        maxint64 = (1L << 63) - 1
+        data = 'I' + str(maxint64) + '\n.'
+        got = self.loads(data)
+        self.assertEqual(got, maxint64)
+
+        # Try too with a bogus literal.
+        data = 'I' + str(maxint64) + 'JUNK\n.'
+        self.assertRaises(ValueError, self.loads, data)
+
+    def test_reduce(self):
+        pass
+
+    def test_getinitargs(self):
+        pass
+
+    def test_metaclass(self):
+        a = use_metaclass()
+        s = self.dumps(a)
+        b = self.loads(s)
+        self.assertEqual(a.__class__, b.__class__)
+
+    def test_structseq(self):
+        import time
+        t = time.localtime()
+        s = self.dumps(t)
+        u = self.loads(s)
+        self.assertEqual(t, u)
+        import os
+        if hasattr(os, "stat"):
+            t = os.stat(os.curdir)
+            s = self.dumps(t)
+            u = self.loads(s)
+            self.assertEqual(t, u)
+        if hasattr(os, "statvfs"):
+            t = os.statvfs(os.curdir)
+            s = self.dumps(t)
+            u = self.loads(s)
+            self.assertEqual(t, u)
+
+class AbstractPickleModuleTests(unittest.TestCase):
+
+    def test_dump_closed_file(self):
+        import tempfile, os
+        fn = tempfile.mktemp()
+        f = open(fn, "w")
+        f.close()
+        self.assertRaises(ValueError, self.module.dump, 123, f)
+        os.remove(fn)
+
+    def test_load_closed_file(self):
+        import tempfile, os
+        fn = tempfile.mktemp()
+        f = open(fn, "w")
+        f.close()
+        self.assertRaises(ValueError, self.module.dump, 123, f)
+        os.remove(fn)
diff --git a/lib-python/2.2/test/pydocfodder.py b/lib-python/2.2/test/pydocfodder.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/pydocfodder.py
@@ -0,0 +1,210 @@
+"""Something just to look at via pydoc."""
+
+class A_classic:
+    "A classic class."
+    def A_method(self):
+        "Method defined in A."
+    def AB_method(self):
+        "Method defined in A and B."
+    def AC_method(self):
+        "Method defined in A and C."
+    def AD_method(self):
+        "Method defined in A and D."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+
+
+class B_classic(A_classic):
+    "A classic class, derived from A_classic."
+    def AB_method(self):
+        "Method defined in A and B."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def B_method(self):
+        "Method defined in B."
+    def BC_method(self):
+        "Method defined in B and C."
+    def BD_method(self):
+        "Method defined in B and D."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+
+class C_classic(A_classic):
+    "A classic class, derived from A_classic."
+    def AC_method(self):
+        "Method defined in A and C."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def BC_method(self):
+        "Method defined in B and C."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+    def C_method(self):
+        "Method defined in C."
+    def CD_method(self):
+        "Method defined in C and D."
+
+class D_classic(B_classic, C_classic):
+    "A classic class, derived from B_classic and C_classic."
+    def AD_method(self):
+        "Method defined in A and D."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def BD_method(self):
+        "Method defined in B and D."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+    def CD_method(self):
+        "Method defined in C and D."
+    def D_method(self):
+        "Method defined in D."
+
+
+class A_new(object):
+    "A new-style class."
+
+    def A_method(self):
+        "Method defined in A."
+    def AB_method(self):
+        "Method defined in A and B."
+    def AC_method(self):
+        "Method defined in A and C."
+    def AD_method(self):
+        "Method defined in A and D."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+
+    def A_classmethod(cls, x):
+        "A class method defined in A."
+    A_classmethod = classmethod(A_classmethod)
+
+    def A_staticmethod():
+        "A static method defined in A."
+    A_staticmethod = staticmethod(A_staticmethod)
+
+    def _getx(self):
+        "A property getter function."
+    def _setx(self, value):
+        "A property setter function."
+    def _delx(self):
+        "A property deleter function."
+    A_property = property(fdel=_delx, fget=_getx, fset=_setx,
+                          doc="A sample property defined in A.")
+
+    A_int_alias = int
+
+class B_new(A_new):
+    "A new-style class, derived from A_new."
+
+    def AB_method(self):
+        "Method defined in A and B."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def B_method(self):
+        "Method defined in B."
+    def BC_method(self):
+        "Method defined in B and C."
+    def BD_method(self):
+        "Method defined in B and D."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+
+class C_new(A_new):
+    "A new-style class, derived from A_new."
+
+    def AC_method(self):
+        "Method defined in A and C."
+    def ABC_method(self):
+        "Method defined in A, B and C."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def BC_method(self):
+        "Method defined in B and C."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+    def C_method(self):
+        "Method defined in C."
+    def CD_method(self):
+        "Method defined in C and D."
+
+class D_new(B_new, C_new):
+    """A new-style class, derived from B_new and C_new.
+    """
+
+    def AD_method(self):
+        "Method defined in A and D."
+    def ABD_method(self):
+        "Method defined in A, B and D."
+    def ACD_method(self):
+        "Method defined in A, C and D."
+    def ABCD_method(self):
+        "Method defined in A, B, C and D."
+    def BD_method(self):
+        "Method defined in B and D."
+    def BCD_method(self):
+        "Method defined in B, C and D."
+    def CD_method(self):
+        "Method defined in C and D."
+    def D_method(self):
+        "Method defined in D."
+
+class FunkyProperties(object):
+    """From SF bug 472347, by Roeland Rengelink.
+
+    Property getters etc may not be vanilla functions or methods,
+    and this used to make GUI pydoc blow up.
+    """
+
+    def __init__(self):
+        self.desc = {'x':0}
+
+    class get_desc:
+        def __init__(self, attr):
+            self.attr = attr
+        def __call__(self, inst):
+            print 'Get called', self, inst
+            return inst.desc[self.attr]
+    class set_desc:
+        def __init__(self, attr):
+            self.attr = attr
+        def __call__(self, inst, val):
+            print 'Set called', self, inst, val
+            inst.desc[self.attr] = val
+    class del_desc:
+        def __init__(self, attr):
+            self.attr = attr
+        def __call__(self, inst):
+            print 'Del called', self, inst
+            del inst.desc[self.attr]
+
+    x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
diff --git a/lib-python/2.2/test/pystone.py b/lib-python/2.2/test/pystone.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/pystone.py
@@ -0,0 +1,252 @@
+#! /usr/bin/env python
+
+"""
+"PYSTONE" Benchmark Program
+
+Version:        Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
+
+Author:         Reinhold P. Weicker,  CACM Vol 27, No 10, 10/84 pg. 1013.
+
+                Translated from ADA to C by Rick Richardson.
+                Every method to preserve ADA-likeness has been used,
+                at the expense of C-ness.
+
+                Translated from C to Python by Guido van Rossum.
+
+Version History:
+
+                Version 1.1 corrects two bugs in version 1.0:
+
+                First, it leaked memory: in Proc1(), NextRecord ends
+                up having a pointer to itself.  I have corrected this
+                by zapping NextRecord.PtrComp at the end of Proc1().
+
+                Second, Proc3() used the operator != to compare a
+                record to None.  This is rather inefficient and not
+                true to the intention of the original benchmark (where
+                a pointer comparison to None is intended; the !=
+                operator attempts to find a method __cmp__ to do value
+                comparison of the record).  Version 1.1 runs 5-10
+                percent faster than version 1.0, so benchmark figures
+                of different versions can't be compared directly.
+
+"""
+
+LOOPS = 10000
+
+from time import clock
+
+__version__ = "1.1"
+
+[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
+
+class Record:
+
+    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
+                       IntComp = 0, StringComp = 0):
+        self.PtrComp = PtrComp
+        self.Discr = Discr
+        self.EnumComp = EnumComp
+        self.IntComp = IntComp
+        self.StringComp = StringComp
+
+    def copy(self):
+        return Record(self.PtrComp, self.Discr, self.EnumComp,
+                      self.IntComp, self.StringComp)
+
+TRUE = 1
+FALSE = 0
+
+def main():
+    benchtime, stones = pystones()
+    print "Pystone(%s) time for %d passes = %g" % \
+          (__version__, LOOPS, benchtime)
+    print "This machine benchmarks at %g pystones/second" % stones
+
+
+def pystones(loops=LOOPS):
+    return Proc0(loops)
+
+IntGlob = 0
+BoolGlob = FALSE
+Char1Glob = '\0'
+Char2Glob = '\0'
+Array1Glob = [0]*51
+Array2Glob = map(lambda x: x[:], [Array1Glob]*51)
+PtrGlb = None
+PtrGlbNext = None
+
+def Proc0(loops=LOOPS):
+    global IntGlob
+    global BoolGlob
+    global Char1Glob
+    global Char2Glob
+    global Array1Glob
+    global Array2Glob
+    global PtrGlb
+    global PtrGlbNext
+
+    starttime = clock()
+    for i in range(loops):
+        pass
+    nulltime = clock() - starttime
+
+    PtrGlbNext = Record()
+    PtrGlb = Record()
+    PtrGlb.PtrComp = PtrGlbNext
+    PtrGlb.Discr = Ident1
+    PtrGlb.EnumComp = Ident3
+    PtrGlb.IntComp = 40
+    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
+    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
+    Array2Glob[8][7] = 10
+
+    starttime = clock()
+
+    for i in range(loops):
+        Proc5()
+        Proc4()
+        IntLoc1 = 2
+        IntLoc2 = 3
+        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
+        EnumLoc = Ident2
+        BoolGlob = not Func2(String1Loc, String2Loc)
+        while IntLoc1 < IntLoc2:
+            IntLoc3 = 5 * IntLoc1 - IntLoc2
+            IntLoc3 = Proc7(IntLoc1, IntLoc2)
+            IntLoc1 = IntLoc1 + 1
+        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
+        PtrGlb = Proc1(PtrGlb)
+        CharIndex = 'A'
+        while CharIndex <= Char2Glob:
+            if EnumLoc == Func1(CharIndex, 'C'):
+                EnumLoc = Proc6(Ident1)
+            CharIndex = chr(ord(CharIndex)+1)
+        IntLoc3 = IntLoc2 * IntLoc1
+        IntLoc2 = IntLoc3 / IntLoc1
+        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
+        IntLoc1 = Proc2(IntLoc1)
+
+    benchtime = clock() - starttime - nulltime
+    return benchtime, (loops / benchtime)
+
+def Proc1(PtrParIn):
+    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
+    PtrParIn.IntComp = 5
+    NextRecord.IntComp = PtrParIn.IntComp
+    NextRecord.PtrComp = PtrParIn.PtrComp
+    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
+    if NextRecord.Discr == Ident1:
+        NextRecord.IntComp = 6
+        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
+        NextRecord.PtrComp = PtrGlb.PtrComp
+        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
+    else:
+        PtrParIn = NextRecord.copy()
+    NextRecord.PtrComp = None
+    return PtrParIn
+
+def Proc2(IntParIO):
+    IntLoc = IntParIO + 10
+    while 1:
+        if Char1Glob == 'A':
+            IntLoc = IntLoc - 1
+            IntParIO = IntLoc - IntGlob
+            EnumLoc = Ident1
+        if EnumLoc == Ident1:
+            break
+    return IntParIO
+
+def Proc3(PtrParOut):
+    global IntGlob
+
+    if PtrGlb is not None:
+        PtrParOut = PtrGlb.PtrComp
+    else:
+        IntGlob = 100
+    PtrGlb.IntComp = Proc7(10, IntGlob)
+    return PtrParOut
+
+def Proc4():
+    global Char2Glob
+
+    BoolLoc = Char1Glob == 'A'
+    BoolLoc = BoolLoc or BoolGlob
+    Char2Glob = 'B'
+
+def Proc5():
+    global Char1Glob
+    global BoolGlob
+
+    Char1Glob = 'A'
+    BoolGlob = FALSE
+
+def Proc6(EnumParIn):
+    EnumParOut = EnumParIn
+    if not Func3(EnumParIn):
+        EnumParOut = Ident4
+    if EnumParIn == Ident1:
+        EnumParOut = Ident1
+    elif EnumParIn == Ident2:
+        if IntGlob > 100:
+            EnumParOut = Ident1
+        else:
+            EnumParOut = Ident4
+    elif EnumParIn == Ident3:
+        EnumParOut = Ident2
+    elif EnumParIn == Ident4:
+        pass
+    elif EnumParIn == Ident5:
+        EnumParOut = Ident3
+    return EnumParOut
+
+def Proc7(IntParI1, IntParI2):
+    IntLoc = IntParI1 + 2
+    IntParOut = IntParI2 + IntLoc
+    return IntParOut
+
+def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
+    global IntGlob
+
+    IntLoc = IntParI1 + 5
+    Array1Par[IntLoc] = IntParI2
+    Array1Par[IntLoc+1] = Array1Par[IntLoc]
+    Array1Par[IntLoc+30] = IntLoc
+    for IntIndex in range(IntLoc, IntLoc+2):
+        Array2Par[IntLoc][IntIndex] = IntLoc
+    Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
+    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
+    IntGlob = 5
+
+def Func1(CharPar1, CharPar2):
+    CharLoc1 = CharPar1
+    CharLoc2 = CharLoc1
+    if CharLoc2 != CharPar2:
+        return Ident1
+    else:
+        return Ident2
+
+def Func2(StrParI1, StrParI2):
+    IntLoc = 1
+    while IntLoc <= 1:
+        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
+            CharLoc = 'A'
+            IntLoc = IntLoc + 1
+    if CharLoc >= 'W' and CharLoc <= 'Z':
+        IntLoc = 7
+    if CharLoc == 'X':
+        return TRUE
+    else:
+        if StrParI1 > StrParI2:
+            IntLoc = IntLoc + 7
+            return TRUE
+        else:
+            return FALSE
+
+def Func3(EnumParIn):
+    EnumLoc = EnumParIn
+    if EnumLoc == Ident3: return TRUE
+    return FALSE
+
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/2.2/test/re_tests.py b/lib-python/2.2/test/re_tests.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/re_tests.py
@@ -0,0 +1,661 @@
+#!/usr/bin/env python
+# -*- mode: python -*-
+
+# Re test suite and benchmark suite v1.5
+
+# The 3 possible outcomes for each pattern
+[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
+
+# Benchmark suite (needs expansion)
+#
+# The benchmark suite does not test correctness, just speed.  The
+# first element of each tuple is the regex pattern; the second is a
+# string to match it against.  The benchmarking code will embed the
+# second string inside several sizes of padding, to test how regex
+# matching performs on large strings.
+
+benchmarks = [
+
+    # test common prefix
+    ('Python|Perl', 'Perl'),    # Alternation
+    ('(Python|Perl)', 'Perl'),  # Grouped alternation
+
+    ('Python|Perl|Tcl', 'Perl'),        # Alternation
+    ('(Python|Perl|Tcl)', 'Perl'),      # Grouped alternation
+
+    ('(Python)\\1', 'PythonPython'),    # Backreference
+    ('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
+    ('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
+
+    ('Python', 'Python'),               # Simple text literal
+    ('.*Python', 'Python'),             # Bad text literal
+    ('.*Python.*', 'Python'),           # Worse text literal
+    ('.*(Python)', 'Python'),           # Bad text literal with grouping
+
+]
+
+# Test suite (for verifying correctness)
+#
+# The test suite is a list of 5- or 3-tuples.  The 5 parts of a
+# complete tuple are:
+# element 0: a string containing the pattern
+#         1: the string to match against the pattern
+#         2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
+#         3: a string that will be eval()'ed to produce a test string.
+#            This is an arbitrary Python expression; the available
+#            variables are "found" (the whole match), and "g1", "g2", ...
+#            up to "g99" contain the contents of each group, or the
+#            string 'None' if the group wasn't given a value, or the
+#            string 'Error' if the group index was out of range;
+#            also "groups", the return value of m.group() (a tuple).
+#         4: The expected result of evaluating the expression.
+#            If the two don't match, an error is reported.
+#
+# If the regex isn't expected to work, the latter two elements can be omitted.
+
+tests = [
+    # Test ?P< and ?P= extensions
+    ('(?P<foo_123', '', SYNTAX_ERROR),      # Unterminated group identifier
+    ('(?P<1>a)', '', SYNTAX_ERROR),         # Begins with a digit
+    ('(?P<!>a)', '', SYNTAX_ERROR),         # Begins with an illegal char
+    ('(?P<foo!>a)', '', SYNTAX_ERROR),      # Begins with an illegal char
+
+    # Same tests, for the ?P= form
+    ('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
+    ('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
+    ('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
+    ('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR),  # Backref to undefined group
+
+    ('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
+    ('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
+
+    # Test octal escapes
+    ('\\1', 'a', SYNTAX_ERROR),    # Backreference
+    ('[\\1]', '\1', SUCCEED, 'found', '\1'),  # Character
+    ('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
+    ('\\141', 'a', SUCCEED, 'found', 'a'),
+    ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
+
+    # Test \0 is handled everywhere
+    (r'\0', '\0', SUCCEED, 'found', '\0'),
+    (r'[\0a]', '\0', SUCCEED, 'found', '\0'),
+    (r'[a\0]', '\0', SUCCEED, 'found', '\0'),
+    (r'[^a\0]', '\0', FAIL),
+
+    # Test various letter escapes
+    (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
+    (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
+    # NOTE: not an error under PCRE/PRE:
+    # (r'\u', '', SYNTAX_ERROR),    # A Perl escape
+    (r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
+    (r'\xff', '\377', SUCCEED, 'found', chr(255)),
+    # new \x semantics
+    (r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
+    (r'\x00f', '\017', FAIL, 'found', chr(15)),
+    (r'\x00fe', '\376', FAIL, 'found', chr(254)),
+    # (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
+    # (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
+    # (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
+
+    (r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
+     SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
+
+    # Test that . only matches \n in DOTALL mode
+    ('a.b', 'acb', SUCCEED, 'found', 'acb'),
+    ('a.b', 'a\nb', FAIL),
+    ('a.*b', 'acc\nccb', FAIL),
+    ('a.{4,5}b', 'acc\nccb', FAIL),
+    ('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
+    ('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
+    ('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
+    ('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
+    ('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
+
+    (')', '', SYNTAX_ERROR),           # Unmatched right bracket
+    ('', '', SUCCEED, 'found', ''),    # Empty pattern
+    ('abc', 'abc', SUCCEED, 'found', 'abc'),
+    ('abc', 'xbc', FAIL),
+    ('abc', 'axc', FAIL),
+    ('abc', 'abx', FAIL),
+    ('abc', 'xabcy', SUCCEED, 'found', 'abc'),
+    ('abc', 'ababc', SUCCEED, 'found', 'abc'),
+    ('ab*c', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab+bc', 'abc', FAIL),
+    ('ab+bc', 'abq', FAIL),
+    ('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab?bc', 'abbbbc', FAIL),
+    ('ab?c', 'abc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'abc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'abcc', FAIL),
+    ('^abc', 'abcc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'aabc', FAIL),
+    ('abc$', 'aabc', SUCCEED, 'found', 'abc'),
+    ('^', 'abc', SUCCEED, 'found+"-"', '-'),
+    ('$', 'abc', SUCCEED, 'found+"-"', '-'),
+    ('a.c', 'abc', SUCCEED, 'found', 'abc'),
+    ('a.c', 'axc', SUCCEED, 'found', 'axc'),
+    ('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
+    ('a.*c', 'axyzd', FAIL),
+    ('a[bc]d', 'abc', FAIL),
+    ('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
+    ('a[b-d]e', 'abd', FAIL),
+    ('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
+    ('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
+    ('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
+    ('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
+    # NOTE: not an error under PCRE/PRE:
+    # ('a[b-]', 'a-', SYNTAX_ERROR),
+    ('a[]b', '-', SYNTAX_ERROR),
+    ('a[', '-', SYNTAX_ERROR),
+    ('a\\', '-', SYNTAX_ERROR),
+    ('abc)', '-', SYNTAX_ERROR),
+    ('(abc', '-', SYNTAX_ERROR),
+    ('a]', 'a]', SUCCEED, 'found', 'a]'),
+    ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
+    ('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
+    ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
+    ('a[^bc]d', 'abd', FAIL),
+    ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
+    ('a[^-b]c', 'a-c', FAIL),
+    ('a[^]b]c', 'a]c', FAIL),
+    ('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
+    ('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
+    ('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
+    ('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
+    ('\\by\\b', 'xy', FAIL),
+    ('\\by\\b', 'yz', FAIL),
+    ('\\by\\b', 'xyz', FAIL),
+    ('x\\b', 'xyz', FAIL),
+    ('x\\B', 'xyz', SUCCEED, '"-"', '-'),
+    ('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
+    ('z\\B', 'xyz', FAIL),
+    ('\\Bx', 'xyz', FAIL),
+    ('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
+    ('\\Ba\\B', '-a', FAIL, '"-"', '-'),
+    ('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
+    ('\\By\\B', 'xy', FAIL),
+    ('\\By\\B', 'yz', FAIL),
+    ('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
+    ('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
+    ('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
+    ('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
+    ('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
+    ('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
+    ('$b', 'b', FAIL),
+    ('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
+    ('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
+    ('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
+    ('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
+    ('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
+    ('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
+    ('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
+    ('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
+    (')(', '-', SYNTAX_ERROR),
+    ('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
+    ('abc', '', FAIL),
+    ('a*', '', SUCCEED, 'found', ''),
+    ('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
+    ('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
+    ('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
+    ('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
+    ('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
+    ('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
+    ('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
+    ('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
+    ('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
+    ('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
+    ('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
+    ('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
+    ('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+    ('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+    ('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
+    ('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
+    ('a[bcd]+dcdcde', 'adcdcde', FAIL),
+    ('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
+    ('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
+    ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
+    ('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+    ('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
+    ('multiple words of text', 'uh-uh', FAIL),
+    ('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
+    ('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
+    ('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
+    ('[k]', 'ab', FAIL),
+    ('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
+    ('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
+    ('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
+    ('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
+    ('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
+    ('^(a+).\\1$', 'aaaa', FAIL),
+    ('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
+    ('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
+    ('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
+    ('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
+    ('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
+    ('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
+    ('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
+    ('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
+    ('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
+    ('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
+    ('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
+    ('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
+    ('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
+    ('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
+    ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
+    ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
+    ('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
+    ('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
+    ('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
+    ('([abc]*)x', 'abc', FAIL),
+    ('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
+    ('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
+
+    # Test symbolic groups
+
+    ('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
+    ('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
+    ('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
+    ('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
+
+    # Test octal escapes/memory references
+
+    ('\\1', 'a', SYNTAX_ERROR),
+    ('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
+    ('\\141', 'a', SUCCEED, 'found', 'a'),
+    ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
+
+    # All tests from Perl
+
+    ('abc', 'abc', SUCCEED, 'found', 'abc'),
+    ('abc', 'xbc', FAIL),
+    ('abc', 'axc', FAIL),
+    ('abc', 'abx', FAIL),
+    ('abc', 'xabcy', SUCCEED, 'found', 'abc'),
+    ('abc', 'ababc', SUCCEED, 'found', 'abc'),
+    ('ab*c', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab+bc', 'abc', FAIL),
+    ('ab+bc', 'abq', FAIL),
+    ('ab{1,}bc', 'abq', FAIL),
+    ('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
+    ('ab{4,5}bc', 'abbbbc', FAIL),
+    ('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
+    ('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab?bc', 'abbbbc', FAIL),
+    ('ab?c', 'abc', SUCCEED, 'found', 'abc'),
+    ('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'abc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'abcc', FAIL),
+    ('^abc', 'abcc', SUCCEED, 'found', 'abc'),
+    ('^abc$', 'aabc', FAIL),
+    ('abc$', 'aabc', SUCCEED, 'found', 'abc'),
+    ('^', 'abc', SUCCEED, 'found', ''),
+    ('$', 'abc', SUCCEED, 'found', ''),
+    ('a.c', 'abc', SUCCEED, 'found', 'abc'),
+    ('a.c', 'axc', SUCCEED, 'found', 'axc'),
+    ('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
+    ('a.*c', 'axyzd', FAIL),
+    ('a[bc]d', 'abc', FAIL),
+    ('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
+    ('a[b-d]e', 'abd', FAIL),
+    ('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
+    ('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
+    ('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
+    ('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
+    ('a[b-a]', '-', SYNTAX_ERROR),
+    ('a[]b', '-', SYNTAX_ERROR),
+    ('a[', '-', SYNTAX_ERROR),
+    ('a]', 'a]', SUCCEED, 'found', 'a]'),
+    ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
+    ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
+    ('a[^bc]d', 'abd', FAIL),
+    ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
+    ('a[^-b]c', 'a-c', FAIL),
+    ('a[^]b]c', 'a]c', FAIL),
+    ('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
+    ('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
+    ('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
+    ('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
+    ('*a', '-', SYNTAX_ERROR),
+    ('(*)b', '-', SYNTAX_ERROR),
+    ('$b', 'b', FAIL),
+    ('a\\', '-', SYNTAX_ERROR),
+    ('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
+    ('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
+    ('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
+    ('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
+    ('abc)', '-', SYNTAX_ERROR),
+    ('(abc', '-', SYNTAX_ERROR),
+    ('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
+    ('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
+    ('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
+    ('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
+    ('a**', '-', SYNTAX_ERROR),
+    ('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
+    ('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
+    ('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
+    ('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
+    (')(', '-', SYNTAX_ERROR),
+    ('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
+    ('abc', '', FAIL),
+    ('a*', '', SUCCEED, 'found', ''),
+    ('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
+    ('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
+    ('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
+    ('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
+    ('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
+    ('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
+    ('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
+    ('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
+    ('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
+    ('^(ab|cd)e', 'abcde', FAIL),
+    ('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
+    ('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
+    ('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
+    ('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
+    ('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+    ('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+    ('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
+    ('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
+    ('a[bcd]+dcdcde', 'adcdcde', FAIL),
+    ('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
+    ('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
+    ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
+    ('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
+    ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+    ('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
+    ('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
+# Python does not have the same rules for \\41 so this is a syntax error
+#    ('((((((((((a))))))))))\\41', 'aa', FAIL),
+#    ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
+    ('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
+    ('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
+    ('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
+    ('multiple words of text', 'uh-uh', FAIL),
+    ('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
+    ('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
+    ('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
+    ('[k]', 'ab', FAIL),
+    ('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
+    ('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
+    ('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
+    ('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)abc', 'XBC', FAIL),
+    ('(?i)abc', 'AXC', FAIL),
+    ('(?i)abc', 'ABX', FAIL),
+    ('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
+    ('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
+    ('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
+    ('(?i)ab+bc', 'ABC', FAIL),
+    ('(?i)ab+bc', 'ABQ', FAIL),
+    ('(?i)ab{1,}bc', 'ABQ', FAIL),
+    ('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
+    ('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
+    ('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
+    ('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab??bc', 'ABBBBC', FAIL),
+    ('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)^abc$', 'ABCC', FAIL),
+    ('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
+    ('(?i)^abc$', 'AABC', FAIL),
+    ('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)^', 'ABC', SUCCEED, 'found', ''),
+    ('(?i)$', 'ABC', SUCCEED, 'found', ''),
+    ('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
+    ('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
+    ('(?i)a.*c', 'AXYZD', FAIL),
+    ('(?i)a[bc]d', 'ABC', FAIL),
+    ('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
+    ('(?i)a[b-d]e', 'ABD', FAIL),
+    ('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
+    ('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
+    ('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
+    ('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
+    ('(?i)a[b-a]', '-', SYNTAX_ERROR),
+    ('(?i)a[]b', '-', SYNTAX_ERROR),
+    ('(?i)a[', '-', SYNTAX_ERROR),
+    ('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
+    ('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
+    ('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
+    ('(?i)a[^bc]d', 'ABD', FAIL),
+    ('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
+    ('(?i)a[^-b]c', 'A-C', FAIL),
+    ('(?i)a[^]b]c', 'A]C', FAIL),
+    ('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
+    ('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
+    ('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
+    ('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
+    ('(?i)*a', '-', SYNTAX_ERROR),
+    ('(?i)(*)b', '-', SYNTAX_ERROR),
+    ('(?i)$b', 'B', FAIL),
+    ('(?i)a\\', '-', SYNTAX_ERROR),
+    ('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
+    ('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
+    ('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
+    ('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
+    ('(?i)abc)', '-', SYNTAX_ERROR),
+    ('(?i)(abc', '-', SYNTAX_ERROR),
+    ('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
+    ('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
+    ('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)a**', '-', SYNTAX_ERROR),
+    ('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
+    ('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
+    ('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
+    ('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
+    ('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
+    ('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
+    ('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
+    ('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
+    ('(?i))(', '-', SYNTAX_ERROR),
+    ('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
+    ('(?i)abc', '', FAIL),
+    ('(?i)a*', '', SUCCEED, 'found', ''),
+    ('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
+    ('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
+    ('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
+    ('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
+    ('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
+    ('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
+    ('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
+    ('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
+    ('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
+    ('(?i)^(ab|cd)e', 'ABCDE', FAIL),
+    ('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
+    ('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
+    ('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
+    ('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
+    ('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
+    ('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
+    ('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
+    ('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
+    ('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
+    ('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
+    ('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
+    ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
+    ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
+    ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
+    ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
+    ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
+    ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
+    ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
+    ('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
+    ('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
+    #('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
+    #('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
+    ('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
+    ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
+    ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
+    ('(?i)multiple words of text', 'UH-UH', FAIL),
+    ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
+    ('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
+    ('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
+    ('(?i)[k]', 'AB', FAIL),
+#    ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
+#    ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
+    ('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
+    ('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
+    ('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
+    ('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
+    ('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
+    ('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
+    ('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
+    ('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
+    ('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
+    ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
+    ('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
+
+    # Comments using the (?#...) syntax
+
+    ('w(?# comment', 'w', SYNTAX_ERROR),
+    ('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
+
+    # Check odd placement of embedded pattern modifiers
+
+    # not an error under PCRE/PRE:
+    ('w(?i)', 'W', SUCCEED, 'found', 'W'),
+    # ('w(?i)', 'W', SYNTAX_ERROR),
+
+    # Comments using the x embedded pattern modifier
+
+    ("""(?x)w# comment 1
+        x y
+        # comment 2
+        z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
+
+    # using the m embedded pattern modifier
+
+    ('^abc', """jkl
+abc
+xyz""", FAIL),
+    ('(?m)^abc', """jkl
+abc
+xyz""", SUCCEED, 'found', 'abc'),
+
+    ('(?m)abc$', """jkl
+xyzabc
+123""", SUCCEED, 'found', 'abc'),
+
+    # using the s embedded pattern modifier
+
+    ('a.b', 'a\nb', FAIL),
+    ('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
+
+    # test \w, etc. both inside and outside character classes
+
+    ('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
+    ('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
+    ('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
+    ('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
+    ('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
+    # not an error under PCRE/PRE:
+    # ('[\\d-x]', '-', SYNTAX_ERROR),
+    (r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
+    (r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
+
+    (r'\xff', '\377', SUCCEED, 'found', chr(255)),
+    # new \x semantics
+    (r'\x00ff', '\377', FAIL),
+    # (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
+    (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
+    ('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
+    (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
+    (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
+
+    #
+    # post-1.5.2 additions
+
+    # xmllib problem
+    (r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
+    # bug 110866: reference to undefined group
+    (r'((.)\1+)', '', SYNTAX_ERROR),
+    # bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
+    (r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
+    # bug 112468: various expected syntax errors
+    (r'(', '', SYNTAX_ERROR),
+    (r'[\41]', '!', SUCCEED, 'found', '!'),
+    # bug 114033: nothing to repeat
+    (r'(x?)?', 'x', SUCCEED, 'found', 'x'),
+    # bug 115040: rescan if flags are modified inside pattern
+    (r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
+    # bug 115618: negative lookahead
+    (r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
+    # bug 116251: character class bug
+    (r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
+    # bug 123769+127259: non-greedy backtracking bug
+    (r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
+    (r'a[ ]*?\ (\d+).*', 'a   10', SUCCEED, 'found', 'a   10'),
+    (r'a[ ]*?\ (\d+).*', 'a    10', SUCCEED, 'found', 'a    10'),
+    # bug 127259: \Z shouldn't depend on multiline mode
+    (r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
+    # bug 128899: uppercase literals under the ignorecase flag
+    (r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
+    (r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
+    (r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
+    (r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
+    # bug 130748: ^* should be an error (nothing to repeat)
+    (r'^*', '', SYNTAX_ERROR),
+    # bug 133283: minimizing repeat problem
+    (r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
+    # bug 477728: minimizing repeat problem
+    (r'^.*?$', 'one\ntwo\nthree\n', FAIL),
+    # bug 483789: minimizing repeat problem
+    (r'a[^>]*?b', 'a>b', FAIL),
+    # bug 490573: minimizing repeat problem
+    (r'^a*?$', 'foo', FAIL),
+]
+
+try:
+    u = eval("u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'")
+except SyntaxError:
+    pass
+else:
+    tests.extend([
+    # bug 410271: \b broken under locales
+    (r'\b.\b', 'a', SUCCEED, 'found', 'a'),
+    (r'(?u)\b.\b', u, SUCCEED, 'found', u),
+    (r'(?u)\w', u, SUCCEED, 'found', u),
+    ])
diff --git a/lib-python/2.2/test/regex_tests.py b/lib-python/2.2/test/regex_tests.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/regex_tests.py
@@ -0,0 +1,287 @@
+# Regex test suite and benchmark suite v1.5a2
+# Due to the use of r"aw" strings, this file will
+# only work with Python 1.5 or higher.
+
+# The 3 possible outcomes for each pattern
+[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
+
+# Benchmark suite (needs expansion)
+#
+# The benchmark suite does not test correctness, just speed.  The
+# first element of each tuple is the regex pattern; the second is a
+# string to match it against.  The benchmarking code will embed the
+# second string inside several sizes of padding, to test how regex
+# matching performs on large strings.
+
+benchmarks = [
+        ('Python', 'Python'),                     # Simple text literal
+        ('.*Python', 'Python'),                   # Bad text literal
+        ('.*Python.*', 'Python'),                 # Worse text literal
+        ('.*\\(Python\\)', 'Python'),             # Bad text literal with grouping
+
+        ('(Python\\|Perl\\|Tcl', 'Perl'),          # Alternation
+        ('\\(Python\\|Perl\\|Tcl\\)', 'Perl'),     # Grouped alternation
+        ('\\(Python\\)\\1', 'PythonPython'),       # Backreference
+#       ('\\([0a-z][a-z]*,\\)+', 'a5,b7,c9,'),     # Disable the fastmap optimization
+        ('\\([a-z][a-z0-9]*,\\)+', 'a5,b7,c9,')    # A few sets
+]
+
+# Test suite (for verifying correctness)
+#
+# The test suite is a list of 5- or 3-tuples.  The 5 parts of a
+# complete tuple are:
+# element 0: a string containing the pattern
+#         1: the string to match against the pattern
+#         2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
+#         3: a string that will be eval()'ed to produce a test string.
+#            This is an arbitrary Python expression; the available
+#            variables are "found" (the whole match), and "g1", "g2", ...
+#            up to "g10" contain the contents of each group, or the
+#            string 'None' if the group wasn't given a value.
+#         4: The expected result of evaluating the expression.
+#            If the two don't match, an error is reported.
+#
+# If the regex isn't expected to work, the latter two elements can be omitted.
+
+tests = [
+('abc', 'abc', SUCCEED,
+ 'found', 'abc'),
+('abc', 'xbc', FAIL),
+('abc', 'axc', FAIL),
+('abc', 'abx', FAIL),
+('abc', 'xabcy', SUCCEED,
+ 'found', 'abc'),
+('abc', 'ababc', SUCCEED,
+ 'found', 'abc'),
+('ab*c', 'abc', SUCCEED,
+ 'found', 'abc'),
+('ab*bc', 'abc', SUCCEED,
+ 'found', 'abc'),
+('ab*bc', 'abbc', SUCCEED,
+ 'found', 'abbc'),
+('ab*bc', 'abbbbc', SUCCEED,
+ 'found', 'abbbbc'),
+('ab+bc', 'abbc', SUCCEED,
+ 'found', 'abbc'),
+('ab+bc', 'abc', FAIL),
+('ab+bc', 'abq', FAIL),
+('ab+bc', 'abbbbc', SUCCEED,
+ 'found', 'abbbbc'),
+('ab?bc', 'abbc', SUCCEED,
+ 'found', 'abbc'),
+('ab?bc', 'abc', SUCCEED,
+ 'found', 'abc'),
+('ab?bc', 'abbbbc', FAIL),
+('ab?c', 'abc', SUCCEED,
+ 'found', 'abc'),
+('^abc$', 'abc', SUCCEED,
+ 'found', 'abc'),
+('^abc$', 'abcc', FAIL),
+('^abc', 'abcc', SUCCEED,
+ 'found', 'abc'),
+('^abc$', 'aabc', FAIL),
+('abc$', 'aabc', SUCCEED,
+ 'found', 'abc'),
+('^', 'abc', SUCCEED,
+ 'found+"-"', '-'),
+('$', 'abc', SUCCEED,
+ 'found+"-"', '-'),
+('a.c', 'abc', SUCCEED,
+ 'found', 'abc'),
+('a.c', 'axc', SUCCEED,
+ 'found', 'axc'),
+('a.*c', 'axyzc', SUCCEED,
+ 'found', 'axyzc'),
+('a.*c', 'axyzd', FAIL),
+('a[bc]d', 'abc', FAIL),
+('a[bc]d', 'abd', SUCCEED,
+ 'found', 'abd'),
+('a[b-d]e', 'abd', FAIL),
+('a[b-d]e', 'ace', SUCCEED,
+ 'found', 'ace'),
+('a[b-d]', 'aac', SUCCEED,
+ 'found', 'ac'),
+('a[-b]', 'a-', SUCCEED,
+ 'found', 'a-'),
+('a[b-]', 'a-', SUCCEED,
+ 'found', 'a-'),
+('a[]b', '-', SYNTAX_ERROR),
+('a[', '-', SYNTAX_ERROR),
+('a\\', '-', SYNTAX_ERROR),
+('abc\\)', '-', SYNTAX_ERROR),
+('\\(abc', '-', SYNTAX_ERROR),
+('a]', 'a]', SUCCEED,
+ 'found', 'a]'),
+('a[]]b', 'a]b', SUCCEED,
+ 'found', 'a]b'),
+('a[^bc]d', 'aed', SUCCEED,
+ 'found', 'aed'),
+('a[^bc]d', 'abd', FAIL),
+('a[^-b]c', 'adc', SUCCEED,
+ 'found', 'adc'),
+('a[^-b]c', 'a-c', FAIL),
+('a[^]b]c', 'a]c', FAIL),
+('a[^]b]c', 'adc', SUCCEED,
+ 'found', 'adc'),
+('\\ba\\b', 'a-', SUCCEED,
+ '"-"', '-'),
+('\\ba\\b', '-a', SUCCEED,
+ '"-"', '-'),
+('\\ba\\b', '-a-', SUCCEED,
+ '"-"', '-'),
+('\\by\\b', 'xy', FAIL),
+('\\by\\b', 'yz', FAIL),
+('\\by\\b', 'xyz', FAIL),
+('ab\\|cd', 'abc', SUCCEED,
+ 'found', 'ab'),
+('ab\\|cd', 'abcd', SUCCEED,
+ 'found', 'ab'),
+('\\(\\)ef', 'def', SUCCEED,
+ 'found+"-"+g1', 'ef-'),
+('$b', 'b', FAIL),
+('a(b', 'a(b', SUCCEED,
+ 'found+"-"+g1', 'a(b-None'),
+('a(*b', 'ab', SUCCEED,
+ 'found', 'ab'),
+('a(*b', 'a((b', SUCCEED,
+ 'found', 'a((b'),
+('a\\\\b', 'a\\b', SUCCEED,
+ 'found', 'a\\b'),
+('\\(\\(a\\)\\)', 'abc', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'a-a-a'),
+('\\(a\\)b\\(c\\)', 'abc', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'abc-a-c'),
+('a+b+c', 'aabbabc', SUCCEED,
+ 'found', 'abc'),
+('\\(a+\\|b\\)*', 'ab', SUCCEED,
+ 'found+"-"+g1', 'ab-b'),
+('\\(a+\\|b\\)+', 'ab', SUCCEED,
+ 'found+"-"+g1', 'ab-b'),
+('\\(a+\\|b\\)?', 'ab', SUCCEED,
+ 'found+"-"+g1', 'a-a'),
+('\\)\\(', '-', SYNTAX_ERROR),
+('[^ab]*', 'cde', SUCCEED,
+ 'found', 'cde'),
+('abc', '', FAIL),
+('a*', '', SUCCEED,
+ 'found', ''),
+('a\\|b\\|c\\|d\\|e', 'e', SUCCEED,
+ 'found', 'e'),
+('\\(a\\|b\\|c\\|d\\|e\\)f', 'ef', SUCCEED,
+ 'found+"-"+g1', 'ef-e'),
+('abcd*efg', 'abcdefg', SUCCEED,
+ 'found', 'abcdefg'),
+('ab*', 'xabyabbbz', SUCCEED,
+ 'found', 'ab'),
+('ab*', 'xayabbbz', SUCCEED,
+ 'found', 'a'),
+('\\(ab\\|cd\\)e', 'abcde', SUCCEED,
+ 'found+"-"+g1', 'cde-cd'),
+('[abhgefdc]ij', 'hij', SUCCEED,
+ 'found', 'hij'),
+('^\\(ab\\|cd\\)e', 'abcde', FAIL,
+ 'xg1y', 'xy'),
+('\\(abc\\|\\)ef', 'abcdef', SUCCEED,
+ 'found+"-"+g1', 'ef-'),
+('\\(a\\|b\\)c*d', 'abcd', SUCCEED,
+ 'found+"-"+g1', 'bcd-b'),
+('\\(ab\\|ab*\\)bc', 'abc', SUCCEED,
+ 'found+"-"+g1', 'abc-a'),
+('a\\([bc]*\\)c*', 'abc', SUCCEED,
+ 'found+"-"+g1', 'abc-bc'),
+('a\\([bc]*\\)\\(c*d\\)', 'abcd', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+('a\\([bc]+\\)\\(c*d\\)', 'abcd', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
+('a\\([bc]*\\)\\(c+d\\)', 'abcd', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
+('a[bcd]*dcdcde', 'adcdcde', SUCCEED,
+ 'found', 'adcdcde'),
+('a[bcd]+dcdcde', 'adcdcde', FAIL),
+('\\(ab\\|a\\)b*c', 'abc', SUCCEED,
+ 'found+"-"+g1', 'abc-ab'),
+('\\(\\(a\\)\\(b\\)c\\)\\(d\\)', 'abcd', SUCCEED,
+ 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
+('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED,
+ 'found', 'alpha'),
+('^a\\(bc+\\|b[eh]\\)g\\|.h$', 'abh', SUCCEED,
+ 'found+"-"+g1', 'bh-None'),
+('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effgz', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'ij', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
+('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effg', FAIL),
+('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'bcdd', FAIL),
+('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'reffgz', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
+('\\(\\(\\(\\(\\(\\(\\(\\(\\(a\\)\\)\\)\\)\\)\\)\\)\\)\\)', 'a', SUCCEED,
+ 'found', 'a'),
+('multiple words of text', 'uh-uh', FAIL),
+('multiple words', 'multiple words, yeah', SUCCEED,
+ 'found', 'multiple words'),
+('\\(.*\\)c\\(.*\\)', 'abcde', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
+('(\\(.*\\), \\(.*\\))', '(a, b)', SUCCEED,
+ 'g2+"-"+g1', 'b-a'),
+('[k]', 'ab', FAIL),
+('a[-]?c', 'ac', SUCCEED,
+ 'found', 'ac'),
+('\\(abc\\)\\1', 'abcabc', SUCCEED,
+ 'g1', 'abc'),
+('\\([a-c]*\\)\\1', 'abcabc', SUCCEED,
+ 'g1', 'abc'),
+('^\\(.+\\)?B', 'AB', SUCCEED,
+ 'g1', 'A'),
+('\\(a+\\).\\1$', 'aaaaa', SUCCEED,
+ 'found+"-"+g1', 'aaaaa-aa'),
+('^\\(a+\\).\\1$', 'aaaa', FAIL),
+('\\(abc\\)\\1', 'abcabc', SUCCEED,
+ 'found+"-"+g1', 'abcabc-abc'),
+('\\([a-c]+\\)\\1', 'abcabc', SUCCEED,
+ 'found+"-"+g1', 'abcabc-abc'),
+('\\(a\\)\\1', 'aa', SUCCEED,
+ 'found+"-"+g1', 'aa-a'),
+('\\(a+\\)\\1', 'aa', SUCCEED,
+ 'found+"-"+g1', 'aa-a'),
+('\\(a+\\)+\\1', 'aa', SUCCEED,
+ 'found+"-"+g1', 'aa-a'),
+('\\(a\\).+\\1', 'aba', SUCCEED,
+ 'found+"-"+g1', 'aba-a'),
+('\\(a\\)ba*\\1', 'aba', SUCCEED,
+ 'found+"-"+g1', 'aba-a'),
+('\\(aa\\|a\\)a\\1$', 'aaa', SUCCEED,
+ 'found+"-"+g1', 'aaa-a'),
+('\\(a\\|aa\\)a\\1$', 'aaa', SUCCEED,
+ 'found+"-"+g1', 'aaa-a'),
+('\\(a+\\)a\\1$', 'aaa', SUCCEED,
+ 'found+"-"+g1', 'aaa-a'),
+('\\([abc]*\\)\\1', 'abcabc', SUCCEED,
+ 'found+"-"+g1', 'abcabc-abc'),
+('\\(a\\)\\(b\\)c\\|ab', 'ab', SUCCEED,
+ 'found+"-"+g1+"-"+g2', 'ab-None-None'),
+('\\(a\\)+x', 'aaax', SUCCEED,
+ 'found+"-"+g1', 'aaax-a'),
+('\\([ac]\\)+x', 'aacx', SUCCEED,
+ 'found+"-"+g1', 'aacx-c'),
+('\\([^/]*/\\)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED,
+ 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
+('\\([^.]*\\)\\.\\([^:]*\\):[T ]+\\(.*\\)', 'track1.title:TBlah blah blah', SUCCEED,
+ 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
+('\\([^N]*N\\)+', 'abNNxyzN', SUCCEED,
+ 'found+"-"+g1', 'abNNxyzN-xyzN'),
+('\\([^N]*N\\)+', 'abNNxyz', SUCCEED,
+ 'found+"-"+g1', 'abNN-N'),
+('\\([abc]*\\)x', 'abcx', SUCCEED,
+ 'found+"-"+g1', 'abcx-abc'),
+('\\([abc]*\\)x', 'abc', FAIL),
+('\\([xyz]*\\)x', 'abcx', SUCCEED,
+ 'found+"-"+g1', 'x-'),
+('\\(a\\)+b\\|aac', 'aac', SUCCEED,
+ 'found+"-"+g1', 'aac-None'),
+('\<a', 'a', SUCCEED, 'found', 'a'),
+('\<a', '!', FAIL),
+('a\<b', 'ab', FAIL),
+('a\>', 'ab', FAIL),
+('a\>', 'a!', SUCCEED, 'found', 'a'),
+('a\>', 'a', SUCCEED, 'found', 'a'),
+]
diff --git a/lib-python/2.2/test/regrtest.py b/lib-python/2.2/test/regrtest.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/regrtest.py
@@ -0,0 +1,832 @@
+#! /usr/bin/env python
+
+"""Regression test.
+
+This will find all modules whose name is "test_*" in the test
+directory, and run them.  Various command line options provide
+additional facilities.
+
+Command line options:
+
+-v: verbose   -- run tests in verbose mode with output to stdout
+-q: quiet     -- don't print anything except if a test fails
+-g: generate  -- write the output file for a test instead of comparing it
+-x: exclude   -- arguments are tests to *exclude*
+-s: single    -- run only a single test (see below)
+-r: random    -- randomize test execution order
+-l: findleaks -- if GC is available detect tests that leak memory
+-u: use       -- specify which special resource intensive tests to run
+-h: help      -- print this text and exit
+
+If non-option arguments are present, they are names for tests to run,
+unless -x is given, in which case they are names for tests not to run.
+If no test names are given, all tests are run.
+
+-v is incompatible with -g and does not compare test output files.
+
+-s means to run only a single test and exit.  This is useful when
+doing memory analysis on the Python interpreter (which tend to consume
+too many resources to run the full regression test non-stop).  The
+file /tmp/pynexttest is read to find the next test to run.  If this
+file is missing, the first test_*.py file in testdir or on the command
+line is used.  (actually tempfile.gettempdir() is used instead of
+/tmp).
+
+-u is used to specify which special resource intensive tests to run,
+such as those requiring large file support or network connectivity.
+The argument is a comma-separated list of words indicating the
+resources to test.  Currently only the following are defined:
+
+    all -       Enable all special resources.
+
+    curses -    Tests that use curses and will modify the terminal's
+                state and output modes.
+
+    largefile - It is okay to run some test that may create huge
+                files.  These tests can take a long time and may
+                consume >2GB of disk space temporarily.
+
+    network -   It is okay to run tests that use external network
+                resource, e.g. testing SSL support for sockets.
+
+To enable all resources except one, use '-uall,-<resource>'.  For
+example, to run all the tests except for the network tests, give the
+option '-uall,-network'.
+"""
+
+import sys
+import os
+import getopt
+import traceback
+import random
+import StringIO
+
+import test_support
+
+RESOURCE_NAMES = ['curses', 'largefile', 'network']
+
+
+def usage(code, msg=''):
+    """Print the module docstring (the usage text), an optional extra
+    message, and exit the process with the given status code."""
+    print __doc__
+    if msg: print msg
+    sys.exit(code)
+
+
+def main(tests=None, testdir=None, verbose=0, quiet=0, generate=0,
+         exclude=0, single=0, randomize=0, findleaks=0,
+         use_resources=None):
+    """Execute a test suite.
+
+    This also parses command-line options and modifies its behavior
+    accordingly.
+
+    tests -- a list of strings containing test names (optional)
+    testdir -- the directory in which to look for tests (optional)
+
+    Users other than the Python test suite will certainly want to
+    specify testdir; if it's omitted, the directory containing the
+    Python test suite is searched for.
+
+    If the tests argument is omitted, the tests listed on the
+    command-line will be used.  If that's empty, too, then all *.py
+    files beginning with test_ will be used.
+
+    The other default arguments (verbose, quiet, generate, exclude,
+    single, randomize, findleaks, and use_resources) allow programmers
+    calling main() directly to set the values that would normally be
+    set by flags on the command line.
+
+    Exits via sys.exit(); the exit status is 1 iff any test failed.
+    """
+
+    test_support.record_original_stdout(sys.stdout)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrlu:',
+                                   ['help', 'verbose', 'quiet', 'generate',
+                                    'exclude', 'single', 'random',
+                                    'findleaks', 'use='])
+    except getopt.error, msg:
+        usage(2, msg)
+
+    # Defaults
+    if use_resources is None:
+        use_resources = []
+    # Command-line flags override the keyword-argument defaults above.
+    for o, a in opts:
+        if o in ('-h', '--help'):
+            usage(0)
+        elif o in ('-v', '--verbose'):
+            verbose += 1
+        elif o in ('-q', '--quiet'):
+            quiet = 1;
+            verbose = 0
+        elif o in ('-g', '--generate'):
+            generate = 1
+        elif o in ('-x', '--exclude'):
+            exclude = 1
+        elif o in ('-s', '--single'):
+            single = 1
+        elif o in ('-r', '--randomize'):
+            randomize = 1
+        elif o in ('-l', '--findleaks'):
+            findleaks = 1
+        elif o in ('-u', '--use'):
+            # Comma-separated resource words; 'all' enables everything,
+            # and a leading '-' disables a previously enabled resource.
+            u = [x.lower() for x in a.split(',')]
+            for r in u:
+                if r == 'all':
+                    use_resources[:] = RESOURCE_NAMES
+                    continue
+                remove = False
+                if r[0] == '-':
+                    remove = True
+                    r = r[1:]
+                if r not in RESOURCE_NAMES:
+                    usage(1, 'Invalid -u/--use option: ' + a)
+                if remove:
+                    if r in use_resources:
+                        use_resources.remove(r)
+                elif r not in use_resources:
+                    use_resources.append(r)
+    if generate and verbose:
+        usage(2, "-g and -v don't go together!")
+
+    # Buckets for the three possible per-test outcomes.
+    good = []
+    bad = []
+    skipped = []
+
+    if findleaks:
+        try:
+            import gc
+        except ImportError:
+            print 'No GC available, disabling findleaks.'
+            findleaks = 0
+        else:
+            # Uncomment the line below to report garbage that is not
+            # freeable by reference counting alone.  By default only
+            # garbage that is not collectable by the GC is reported.
+            #gc.set_debug(gc.DEBUG_SAVEALL)
+            found_garbage = []
+
+    if single:
+        # -s mode: the name of the next test to run is kept in a small
+        # state file (<tempdir>/pynexttest) between invocations.
+        from tempfile import gettempdir
+        filename = os.path.join(gettempdir(), 'pynexttest')
+        try:
+            fp = open(filename, 'r')
+            next = fp.read().strip()
+            tests = [next]
+            fp.close()
+        except IOError:
+            pass
+    for i in range(len(args)):
+        # Strip trailing ".py" from arguments
+        if args[i][-3:] == os.extsep+'py':
+            args[i] = args[i][:-3]
+    stdtests = STDTESTS[:]
+    nottests = NOTTESTS[:]
+    if exclude:
+        # -x mode: the named tests become exclusions instead of selections.
+        for arg in args:
+            if arg in stdtests:
+                stdtests.remove(arg)
+        nottests[:0] = args
+        args = []
+    # Precedence: explicit tests argument, then command-line names,
+    # then everything discovered in the test directory.
+    tests = tests or args or findtests(testdir, stdtests, nottests)
+    if single:
+        tests = tests[:1]
+    if randomize:
+        random.shuffle(tests)
+    test_support.verbose = verbose      # Tell tests to be moderately quiet
+    test_support.use_resources = use_resources
+    save_modules = sys.modules.keys()
+    # Run each test and sort its result into good/bad/skipped.
+    for test in tests:
+        if not quiet:
+            print test
+            sys.stdout.flush()
+        ok = runtest(test, generate, verbose, quiet, testdir)
+        if ok > 0:
+            good.append(test)
+        elif ok == 0:
+            bad.append(test)
+        else:
+            skipped.append(test)
+        if findleaks:
+            gc.collect()
+            if gc.garbage:
+                print "Warning: test created", len(gc.garbage),
+                print "uncollectable object(s)."
+                # move the uncollectable objects somewhere so we don't see
+                # them again
+                found_garbage.extend(gc.garbage)
+                del gc.garbage[:]
+        # Unload the newly imported modules (best effort finalization)
+        for module in sys.modules.keys():
+            if module not in save_modules and module.startswith("test."):
+                test_support.unload(module)
+
+    # The lists won't be sorted if running with -r
+    good.sort()
+    bad.sort()
+    skipped.sort()
+
+    if good and not quiet:
+        if not bad and not skipped and len(good) > 1:
+            print "All",
+        print count(len(good), "test"), "OK."
+        if verbose:
+            print "CAUTION:  stdout isn't compared in verbose mode:  a test"
+            print "that passes in verbose mode may fail without it."
+    if bad:
+        print count(len(bad), "test"), "failed:"
+        printlist(bad)
+    if skipped and not quiet:
+        print count(len(skipped), "test"), "skipped:"
+        printlist(skipped)
+
+        # Compare the actual skips against the per-platform expected set
+        # and call out any surprises.
+        e = _ExpectedSkips()
+        plat = sys.platform
+        if e.isvalid():
+            surprise = _Set(skipped) - e.getexpected()
+            if surprise:
+                print count(len(surprise), "skip"), \
+                      "unexpected on", plat + ":"
+                printlist(surprise)
+            else:
+                print "Those skips are all expected on", plat + "."
+        else:
+            print "Ask someone to teach regrtest.py about which tests are"
+            print "expected to get skipped on", plat + "."
+
+    if single:
+        # Record the next test to run in the state file, or remove the
+        # file once the last test has been reached.
+        alltests = findtests(testdir, stdtests, nottests)
+        for i in range(len(alltests)):
+            if tests[0] == alltests[i]:
+                if i == len(alltests) - 1:
+                    os.unlink(filename)
+                else:
+                    fp = open(filename, 'w')
+                    fp.write(alltests[i+1] + '\n')
+                    fp.close()
+                break
+        else:
+            os.unlink(filename)
+
+    # Exit status 1 iff at least one test failed.
+    sys.exit(len(bad) > 0)
+
+
+# Tests that are always run first, in this fixed order.
+STDTESTS = [
+    'test_grammar',
+    'test_opcodes',
+    'test_operations',
+    'test_builtin',
+    'test_exceptions',
+    'test_types',
+   ]
+
+# Modules matching test_* that must never be run as stand-alone tests.
+NOTTESTS = [
+    'test_support',
+    'test_b1',
+    'test_b2',
+    'test_future1',
+    'test_future2',
+    'test_future3',
+    ]
+
+def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
+    """Return a list of all applicable test modules."""
+    if not testdir: testdir = findtestdir()
+    names = os.listdir(testdir)
+    tests = []
+    for name in names:
+        # Collect test_*.py modules, minus the standard and excluded ones.
+        if name[:5] == "test_" and name[-3:] == os.extsep+"py":
+            modname = name[:-3]
+            if modname not in stdtests and modname not in nottests:
+                tests.append(modname)
+    # Standard tests come first (in their fixed order), then the rest sorted.
+    tests.sort()
+    return stdtests + tests
+
+def runtest(test, generate, verbose, quiet, testdir = None):
+    """Run a single test.
+    test -- the name of the test
+    generate -- if true, generate output, instead of running the test
+    and comparing it to a previously created output file
+    verbose -- if true, print more messages
+    quiet -- if true, don't print 'skipped' messages (probably redundant)
+    testdir -- test directory
+
+    Returns 1 if the test passed, 0 if it failed or crashed, and -1 if
+    it was skipped (raised ImportError or test_support.TestSkipped).
+    """
+    test_support.unload(test)
+    if not testdir: testdir = findtestdir()
+    outputdir = os.path.join(testdir, "output")
+    outputfile = os.path.join(outputdir, test)
+    # In verbose mode output goes straight to stdout and is not compared;
+    # otherwise it is captured in a StringIO for comparison/generation.
+    if verbose:
+        cfp = None
+    else:
+        cfp = StringIO.StringIO()
+    try:
+        save_stdout = sys.stdout
+        try:
+            if cfp:
+                sys.stdout = cfp
+                print test              # Output file starts with test name
+            the_module = __import__(test, globals(), locals(), [])
+            # Most tests run to completion simply as a side-effect of
+            # being imported.  For the benefit of tests that can't run
+            # that way (like test_threaded_import), explicitly invoke
+            # their test_main() function (if it exists).
+            indirect_test = getattr(the_module, "test_main", None)
+            if indirect_test is not None:
+                indirect_test()
+        finally:
+            sys.stdout = save_stdout
+    except (ImportError, test_support.TestSkipped), msg:
+        if not quiet:
+            print test, "skipped --", msg
+            sys.stdout.flush()
+        return -1
+    except KeyboardInterrupt:
+        raise
+    except test_support.TestFailed, msg:
+        print "test", test, "failed --", msg
+        sys.stdout.flush()
+        return 0
+    except:
+        # Any other exception counts as a crash; report it as a failure.
+        type, value = sys.exc_info()[:2]
+        print "test", test, "crashed --", str(type) + ":", value
+        sys.stdout.flush()
+        if verbose:
+            traceback.print_exc(file=sys.stdout)
+            sys.stdout.flush()
+        return 0
+    else:
+        if not cfp:
+            return 1
+        output = cfp.getvalue()
+        if generate:
+            # -g mode: write the captured output as the expected-output file,
+            # unless the output is just the test name (then no file is needed).
+            if output == test + "\n":
+                if os.path.exists(outputfile):
+                    # Write it since it already exists (and the contents
+                    # may have changed), but let the user know it isn't
+                    # needed:
+                    print "output file", outputfile, \
+                          "is no longer needed; consider removing it"
+                else:
+                    # We don't need it, so don't create it.
+                    return 1
+            fp = open(outputfile, "w")
+            fp.write(output)
+            fp.close()
+            return 1
+        # Normal mode: compare captured output against the expected file,
+        # defaulting to just the test name when no file exists.
+        if os.path.exists(outputfile):
+            fp = open(outputfile, "r")
+            expected = fp.read()
+            fp.close()
+        else:
+            expected = test + "\n"
+        if output == expected:
+            return 1
+        print "test", test, "produced unexpected output:"
+        sys.stdout.flush()
+        reportdiff(expected, output)
+        sys.stdout.flush()
+        return 0
+
+def reportdiff(expected, output):
+    """Print a human-readable line-by-line diff between the expected
+    and the actual test output, framed by rows of asterisks."""
+    import difflib
+    print "*" * 70
+    a = expected.splitlines(1)
+    b = output.splitlines(1)
+    sm = difflib.SequenceMatcher(a=a, b=b)
+    tuples = sm.get_opcodes()
+
+    def pair(x0, x1):
+        # x0:x1 are 0-based slice indices; convert to 1-based line indices.
+        x0 += 1
+        if x0 >= x1:
+            return "line " + str(x0)
+        else:
+            return "lines %d-%d" % (x0, x1)
+
+    # Walk the SequenceMatcher opcodes and describe each non-equal span.
+    for op, a0, a1, b0, b1 in tuples:
+        if op == 'equal':
+            pass
+
+        elif op == 'delete':
+            print "***", pair(a0, a1), "of expected output missing:"
+            for line in a[a0:a1]:
+                print "-", line,
+
+        elif op == 'replace':
+            print "*** mismatch between", pair(a0, a1), "of expected", \
+                  "output and", pair(b0, b1), "of actual output:"
+            for line in difflib.ndiff(a[a0:a1], b[b0:b1]):
+                print line,
+
+        elif op == 'insert':
+            print "***", pair(b0, b1), "of actual output doesn't appear", \
+                  "in expected output after line", str(a1)+":"
+            for line in b[b0:b1]:
+                print "+", line,
+
+        else:
+            # get_opcodes() only yields the four tags above; anything else
+            # indicates a difflib bug.
+            print "get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1)
+
+    print "*" * 70
+
+def findtestdir():
+    """Return the directory containing this script (and hence the tests),
+    falling back to the current directory."""
+    if __name__ == '__main__':
+        file = sys.argv[0]
+    else:
+        file = __file__
+    testdir = os.path.dirname(file) or os.curdir
+    return testdir
+
+def count(n, word):
+    """Return 'n word' with a naive English plural ('1 test', '2 tests')."""
+    if n == 1:
+        return "%d %s" % (n, word)
+    else:
+        return "%d %ss" % (n, word)
+
+def printlist(x, width=70, indent=4):
+    """Print the elements of a sequence to stdout.
+
+    Optional arg width (default 70) is the maximum line length.
+    Optional arg indent (default 4) is the number of blanks with which to
+    begin each line.
+    """
+
+    # Greedy word-wrap: append items to the current line until it would
+    # exceed `width`, then start a fresh indented line.
+    line = ' ' * indent
+    for one in map(str, x):
+        w = len(line) + len(one)
+        if line[-1:] == ' ':
+            pad = ''
+        else:
+            pad = ' '
+            w += 1
+        if w > width:
+            print line
+            line = ' ' * indent + one
+        else:
+            line += pad + one
+    # Flush the final partial line, if it holds anything beyond the indent.
+    if len(line) > indent:
+        print line
+
+class _Set:
+    """Minimal set type implemented on top of a dict (this code pre-dates
+    the builtin set type); supports len(), iteration, and difference."""
+
+    def __init__(self, seq=[]):
+        # NOTE(review): the mutable default is harmless here because seq
+        # is only iterated, never stored or mutated.
+        data = self.data = {}
+        for x in seq:
+            data[x] = 1
+
+    def __len__(self):
+        return len(self.data)
+
+    def __sub__(self, other):
+        "Return set of all elements in self not in other."
+        result = _Set()
+        data = result.data = self.data.copy()
+        for x in other.data:
+            if x in data:
+                del data[x]
+        return result
+
+    def __iter__(self):
+        return iter(self.data)
+
+    def tolist(self, sorted=1):
+        "Return _Set elements as a list."
+        data = self.data.keys()
+        if sorted:
+            data.sort()
+        return data
+
+_expectations = {
+    'win32':
+        """
+        test_al
+        test_cd
+        test_cl
+        test_commands
+        test_crypt
+        test_curses
+        test_dbm
+        test_dl
+        test_email_codecs
+        test_fcntl
+        test_fork1
+        test_gdbm
+        test_gl
+        test_grp
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_mhlib
+        test_nis
+        test_openpty
+        test_poll
+        test_pty
+        test_pwd
+        test_signal
+        test_socket_ssl
+        test_socketserver
+        test_sunaudiodev
+        test_timing
+        """,
+    'linux2':
+        """
+        test_al
+        test_cd
+        test_cl
+        test_curses
+        test_dl
+        test_email_codecs
+        test_gl
+        test_imgfile
+        test_largefile
+        test_nis
+        test_ntpath
+        test_socket_ssl
+        test_socketserver
+        test_sunaudiodev
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'mac':
+        """
+        test_al
+        test_bsddb
+        test_cd
+        test_cl
+        test_commands
+        test_crypt
+        test_curses
+        test_dbm
+        test_dl
+        test_email_codecs
+        test_fcntl
+        test_fork1
+        test_gl
+        test_grp
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_locale
+        test_mmap
+        test_nis
+        test_ntpath
+        test_openpty
+        test_poll
+        test_popen2
+        test_pty
+        test_pwd
+        test_signal
+        test_socket_ssl
+        test_socketserver
+        test_sunaudiodev
+        test_sundry
+        test_timing
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'unixware7':
+        """
+        test_al
+        test_bsddb
+        test_cd
+        test_cl
+        test_dl
+        test_email_codecs
+        test_gl
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_minidom
+        test_nis
+        test_ntpath
+        test_openpty
+        test_pyexpat
+        test_sax
+        test_socketserver
+        test_sunaudiodev
+        test_sundry
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'openunix8':
+        """
+        test_al
+        test_bsddb
+        test_cd
+        test_cl
+        test_dl
+        test_email_codecs
+        test_gl
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_minidom
+        test_nis
+        test_ntpath
+        test_openpty
+        test_pyexpat
+        test_sax
+        test_socketserver
+        test_sunaudiodev
+        test_sundry
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'sco_sv3':
+        """
+        test_al
+        test_asynchat
+        test_bsddb
+        test_cd
+        test_cl
+        test_dl
+        test_email_codecs
+        test_fork1
+        test_gettext
+        test_gl
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_locale
+        test_minidom
+        test_nis
+        test_ntpath
+        test_openpty
+        test_pyexpat
+        test_queue
+        test_sax
+        test_socketserver
+        test_sunaudiodev
+        test_sundry
+        test_thread
+        test_threaded_import
+        test_threadedtempfile
+        test_threading
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'riscos':
+        """
+        test_al
+        test_asynchat
+        test_bsddb
+        test_cd
+        test_cl
+        test_commands
+        test_crypt
+        test_dbm
+        test_dl
+        test_email_codecs
+        test_fcntl
+        test_fork1
+        test_gdbm
+        test_gl
+        test_grp
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_locale
+        test_mmap
+        test_nis
+        test_ntpath
+        test_openpty
+        test_poll
+        test_popen2
+        test_pty
+        test_pwd
+        test_socket_ssl
+        test_socketserver
+        test_strop
+        test_sunaudiodev
+        test_sundry
+        test_thread
+        test_threaded_import
+        test_threadedtempfile
+        test_threading
+        test_timing
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'darwin':
+        """
+        test_al
+        test_cd
+        test_cl
+        test_curses
+        test_dl
+        test_email_codecs
+        test_gdbm
+        test_gl
+        test_imgfile
+        test_largefile
+        test_locale
+        test_linuxaudiodev
+        test_minidom
+        test_nis
+        test_ntpath
+        test_poll
+        test_socket_ssl
+        test_socketserver
+        test_sunaudiodev
+        test_unicode_file
+        test_winreg
+        test_winsound
+        """,
+    'sunos5':
+        """
+        test_al
+        test_bsddb
+        test_cd
+        test_cl
+        test_curses
+        test_dbm
+        test_email_codecs
+        test_gdbm
+        test_gl
+        test_gzip
+        test_imgfile
+        test_linuxaudiodev
+        test_mpz
+        test_openpty
+        test_socketserver
+        test_zipfile
+        test_zlib
+        """,
+    'hp-ux11':
+        """
+        test_al
+        test_bsddb
+        test_cd
+        test_cl
+        test_curses
+        test_dl
+        test_gdbm
+        test_gl
+        test_gzip
+        test_imgfile
+        test_largefile
+        test_linuxaudiodev
+        test_locale
+        test_minidom
+        test_nis
+        test_ntpath
+        test_openpty
+        test_pyexpat
+        test_sax
+        test_socketserver
+        test_sunaudiodev
+        test_zipfile
+        test_zlib
+        """,
+    'freebsd4':
+        """
+	test_al
+	test_cd
+	test_cl
+	test_curses
+	test_email_codecs
+	test_gdbm
+	test_gl
+	test_imgfile
+	test_linuxaudiodev
+	test_locale
+	test_minidom
+	test_nis
+	test_pyexpat
+	test_sax
+	test_socket_ssl
+	test_socketserver
+	test_sunaudiodev
+	test_unicode_file
+	test_winreg
+	test_winsound
+	""",
+}
+
+class _ExpectedSkips:
+    """Wrap the per-platform _expectations table: which test skips are
+    considered normal on the current sys.platform."""
+
+    def __init__(self):
+        # valid stays 0 when _expectations has no entry for this platform.
+        self.valid = 0
+        if _expectations.has_key(sys.platform):
+            s = _expectations[sys.platform]
+            self.expected = _Set(s.split())
+            self.valid = 1
+
+    def isvalid(self):
+        "Return true iff _ExpectedSkips knows about the current platform."
+        return self.valid
+
+    def getexpected(self):
+        """Return set of test names we expect to skip on current platform.
+
+        self.isvalid() must be true.
+        """
+
+        assert self.isvalid()
+        return self.expected
+
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/2.2/test/reperf.py b/lib-python/2.2/test/reperf.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/reperf.py
@@ -0,0 +1,23 @@
+import re
+import time
+
+def main():
+    s = "\13hello\14 \13world\14 " * 1000
+    p = re.compile(r"([\13\14])")
+    timefunc(10, p.sub, "", s)
+    timefunc(10, p.split, s)
+    timefunc(10, p.findall, s)
+
+def timefunc(n, func, *args, **kw):
+    t0 = time.clock()
+    try:
+        for i in range(n):
+            result = apply(func, args, kw)
+        return result
+    finally:
+        t1 = time.clock()
+        if n > 1:
+            print n, "times",
+        print func.__name__, "%.3f" % (t1-t0), "CPU seconds"
+
+main()
diff --git a/lib-python/2.2/test/sortperf.py b/lib-python/2.2/test/sortperf.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/sortperf.py
@@ -0,0 +1,141 @@
+"""Sort performance test.
+
+See main() for command line syntax.
+See tabulate() for output format.
+
+"""
+
+import sys
+import time
+import random
+import marshal
+import tempfile
+import operator
+import os
+
+td = tempfile.gettempdir()
+
+def randrange(n):
+    """Return a random shuffle of range(n)."""
+    fn = os.path.join(td, "rr%06d" % n)
+    try:
+        fp = open(fn, "rb")
+    except IOError:
+        result = []
+        for i in range(n):
+            result.append(random.random())
+        try:
+            try:
+                fp = open(fn, "wb")
+                marshal.dump(result, fp)
+                fp.close()
+                fp = None
+            finally:
+                if fp:
+                    try:
+                        os.unlink(fn)
+                    except os.error:
+                        pass
+        except IOError, msg:
+            print "can't write", fn, ":", msg
+    else:
+        result = marshal.load(fp)
+        fp.close()
+        ##assert len(result) == n
+        # Shuffle it a bit...
+        for i in range(10):
+            i = random.randrange(0, n)
+            temp = result[:i]
+            del result[:i]
+            temp.reverse()
+            result[len(result):] = temp
+            del temp
+    return result
+
+def fl():
+    sys.stdout.flush()
+
+def doit(L):
+    t0 = time.clock()
+    L.sort()
+    t1 = time.clock()
+    print "%6.2f" % (t1-t0),
+    fl()
+
+def tabulate(r):
+    """Tabulate sort speed for lists of various sizes.
+
+    The sizes are 2**i for i in r (the argument, a list).
+
+    The output displays i, 2**i, and the time to sort arrays of 2**i
+    floating point numbers with the following properties:
+
+    *sort: random data
+    \sort: descending data
+    /sort: ascending data
+    ~sort: many duplicates
+    -sort: all equal
+    !sort: worst case scenario
+
+    """
+    cases = ("*sort", "\\sort", "/sort", "~sort", "-sort", "!sort")
+    fmt = ("%2s %6s" + " %6s"*len(cases))
+    print fmt % (("i", "2**i") + cases)
+    for i in r:
+        n = 1<<i
+        L = randrange(n)
+        ##assert len(L) == n
+        print "%2d %6d" % (i, n),
+        fl()
+        doit(L) # *sort
+        L.reverse()
+        doit(L) # \sort
+        doit(L) # /sort
+        if n > 4:
+            del L[4:]
+            L = L*(n/4)
+            L = map(lambda x: --x, L)
+        doit(L) # ~sort
+        del L
+        L = map(abs, [-0.5]*n)
+        doit(L) # -sort
+        L = range(n/2-1, -1, -1)
+        L[len(L):] = range(n/2)
+        doit(L) # !sort
+        print
+
+def main():
+    """Main program when invoked as a script.
+
+    One argument: tabulate a single row.
+    Two arguments: tabulate a range (inclusive).
+    Extra arguments are used to seed the random generator.
+
+    """
+    # default range (inclusive)
+    k1 = 15
+    k2 = 19
+    if sys.argv[1:]:
+        # one argument: single point
+        k1 = k2 = int(sys.argv[1])
+        if sys.argv[2:]:
+            # two arguments: specify range
+            k2 = int(sys.argv[2])
+            if sys.argv[3:]:
+                # derive random seed from remaining arguments
+                x, y, z = 0, 0, 0
+                for a in sys.argv[3:]:
+                    h = hash(a)
+                    h, d = divmod(h, 256)
+                    h = h & 0xffffff
+                    x = (x^h^d) & 255
+                    h = h>>8
+                    y = (y^h^d) & 255
+                    h = h>>8
+                    z = (z^h^d) & 255
+                whrandom.seed(x, y, z)
+    r = range(k1, k2+1)                 # include the end point
+    tabulate(r)
+
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/2.2/test/string_tests.py b/lib-python/2.2/test/string_tests.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/string_tests.py
@@ -0,0 +1,265 @@
+"""Common tests shared by test_string and test_userstring"""
+
+import string
+from test_support import verify, verbose, TestFailed, have_unicode
+
+transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
+
+from UserList import UserList
+
+class Sequence:
+    def __init__(self): self.seq = 'wxyz'
+    def __len__(self): return len(self.seq)
+    def __getitem__(self, i): return self.seq[i]
+
+class BadSeq1(Sequence):
+    def __init__(self): self.seq = [7, 'hello', 123L]
+
+class BadSeq2(Sequence):
+    def __init__(self): self.seq = ['a', 'b', 'c']
+    def __len__(self): return 8
+
+def run_module_tests(test):
+    """Run all tests that exercise a function in the string module"""
+
+    test('atoi', " 1 ", 1)
+    test('atoi', " 1x", ValueError)
+    test('atoi', " x1 ", ValueError)
+    test('atol', "  1  ", 1L)
+    test('atol', "  1x ", ValueError)
+    test('atol', "  x1 ", ValueError)
+    test('atof', "  1  ", 1.0)
+    test('atof', "  1x ", ValueError)
+    test('atof', "  x1 ", ValueError)
+
+    test('maketrans', 'abc', transtable, 'xyz')
+    test('maketrans', 'abc', ValueError, 'xyzq')
+
+    # join now works with any sequence type
+    test('join', ['a', 'b', 'c', 'd'], 'a b c d')
+    test('join', ('a', 'b', 'c', 'd'), 'abcd', '')
+    test('join', Sequence(), 'w x y z')
+    test('join', 7, TypeError)
+
+    test('join', BadSeq1(), TypeError)
+    test('join', BadSeq2(), 'a b c')
+
+    # try a few long ones
+    print ":".join(['x' * 100] * 100)
+    print ":".join(('x' * 100,) * 100)
+
+
+def run_method_tests(test):
+    """Run all tests that exercise a method of a string object"""
+
+    test('capitalize', ' hello ', ' hello ')
+    test('capitalize', 'hello ', 'Hello ')
+    test('capitalize', 'aaaa', 'Aaaa')
+    test('capitalize', 'AaAa', 'Aaaa')
+
+    test('count', 'aaa', 3, 'a')
+    test('count', 'aaa', 0, 'b')
+
+    test('find', 'abcdefghiabc', 0, 'abc')
+    test('find', 'abcdefghiabc', 9, 'abc', 1)
+    test('find', 'abcdefghiabc', -1, 'def', 4)
+    test('rfind', 'abcdefghiabc', 9, 'abc')
+    test('lower', 'HeLLo', 'hello')
+    test('lower', 'hello', 'hello')
+    test('upper', 'HeLLo', 'HELLO')
+    test('upper', 'HELLO', 'HELLO')
+
+    test('title', ' hello ', ' Hello ')
+    test('title', 'hello ', 'Hello ')
+    test('title', "fOrMaT thIs aS titLe String", 'Format This As Title String')
+    test('title', "fOrMaT,thIs-aS*titLe;String", 'Format,This-As*Title;String')
+    test('title', "getInt", 'Getint')
+
+    test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab      def\ng       hi')
+    test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab      def\ng       hi', 8)
+    test('expandtabs', 'abc\rab\tdef\ng\thi', 'abc\rab  def\ng   hi', 4)
+    test('expandtabs', 'abc\r\nab\tdef\ng\thi', 'abc\r\nab  def\ng   hi', 4)
+
+    test('islower', 'a', 1)
+    test('islower', 'A', 0)
+    test('islower', '\n', 0)
+    test('islower', 'abc', 1)
+    test('islower', 'aBc', 0)
+    test('islower', 'abc\n', 1)
+
+    test('isupper', 'a', 0)
+    test('isupper', 'A', 1)
+    test('isupper', '\n', 0)
+    test('isupper', 'ABC', 1)
+    test('isupper', 'AbC', 0)
+    test('isupper', 'ABC\n', 1)
+
+    test('istitle', 'a', 0)
+    test('istitle', 'A', 1)
+    test('istitle', '\n', 0)
+    test('istitle', 'A Titlecased Line', 1)
+    test('istitle', 'A\nTitlecased Line', 1)
+    test('istitle', 'A Titlecased, Line', 1)
+    test('istitle', 'Not a capitalized String', 0)
+    test('istitle', 'Not\ta Titlecase String', 0)
+    test('istitle', 'Not--a Titlecase String', 0)
+
+    test('isalpha', 'a', 1)
+    test('isalpha', 'A', 1)
+    test('isalpha', '\n', 0)
+    test('isalpha', 'abc', 1)
+    test('isalpha', 'aBc123', 0)
+    test('isalpha', 'abc\n', 0)
+
+    test('isalnum', 'a', 1)
+    test('isalnum', 'A', 1)
+    test('isalnum', '\n', 0)
+    test('isalnum', '123abc456', 1)
+    test('isalnum', 'a1b3c', 1)
+    test('isalnum', 'aBc000 ', 0)
+    test('isalnum', 'abc\n', 0)
+
+    # join now works with any sequence type
+    test('join', ' ', 'a b c d', ['a', 'b', 'c', 'd'])
+    test('join', '', 'abcd', ('a', 'b', 'c', 'd'))
+    test('join', ' ', 'w x y z', Sequence())
+    test('join', 'a', 'abc', ('abc',))
+    test('join', 'a', 'z', UserList(['z']))
+    if have_unicode:
+        test('join', unicode('.'), unicode('a.b.c'), ['a', 'b', 'c'])
+        test('join', '.', unicode('a.b.c'), [unicode('a'), 'b', 'c'])
+        test('join', '.', unicode('a.b.c'), ['a', unicode('b'), 'c'])
+        test('join', '.', unicode('a.b.c'), ['a', 'b', unicode('c')])
+        test('join', '.', TypeError, ['a', unicode('b'), 3])
+    for i in [5, 25, 125]:
+        test('join', '-', ((('a' * i) + '-') * i)[:-1],
+             ['a' * i] * i)
+
+    test('join', ' ', TypeError, BadSeq1())
+    test('join', ' ', 'a b c', BadSeq2())
+
+    test('splitlines', "abc\ndef\n\rghi", ['abc', 'def', '', 'ghi'])
+    test('splitlines', "abc\ndef\n\r\nghi", ['abc', 'def', '', 'ghi'])
+    test('splitlines', "abc\ndef\r\nghi", ['abc', 'def', 'ghi'])
+    test('splitlines', "abc\ndef\r\nghi\n", ['abc', 'def', 'ghi'])
+    test('splitlines', "abc\ndef\r\nghi\n\r", ['abc', 'def', 'ghi', ''])
+    test('splitlines', "\nabc\ndef\r\nghi\n\r", ['', 'abc', 'def', 'ghi', ''])
+    test('splitlines', "\nabc\ndef\r\nghi\n\r", ['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], 1)
+
+    test('split', 'this is the split function',
+         ['this', 'is', 'the', 'split', 'function'])
+    test('split', 'a|b|c|d', ['a', 'b', 'c', 'd'], '|')
+    test('split', 'a|b|c|d', ['a', 'b', 'c|d'], '|', 2)
+    test('split', 'a b c d', ['a', 'b c d'], None, 1)
+    test('split', 'a b c d', ['a', 'b', 'c d'], None, 2)
+    test('split', 'a b c d', ['a', 'b', 'c', 'd'], None, 3)
+    test('split', 'a b c d', ['a', 'b', 'c', 'd'], None, 4)
+    test('split', 'a b c d', ['a b c d'], None, 0)
+    test('split', 'a  b  c  d', ['a', 'b', 'c  d'], None, 2)
+    test('split', 'a b c d ', ['a', 'b', 'c', 'd'])
+
+    test('strip', '   hello   ', 'hello')
+    test('lstrip', '   hello   ', 'hello   ')
+    test('rstrip', '   hello   ', '   hello')
+    test('strip', 'hello', 'hello')
+
+    # strip/lstrip/rstrip with None arg
+    test('strip', '   hello   ', 'hello', None)
+    test('lstrip', '   hello   ', 'hello   ', None)
+    test('rstrip', '   hello   ', '   hello', None)
+    test('strip', 'hello', 'hello', None)
+
+    # strip/lstrip/rstrip with str arg
+    test('strip', 'xyzzyhelloxyzzy', 'hello', 'xyz')
+    test('lstrip', 'xyzzyhelloxyzzy', 'helloxyzzy', 'xyz')
+    test('rstrip', 'xyzzyhelloxyzzy', 'xyzzyhello', 'xyz')
+    test('strip', 'hello', 'hello', 'xyz')
+
+    # strip/lstrip/rstrip with unicode arg
+    test('strip', 'xyzzyhelloxyzzy', u'hello', u'xyz')
+    test('lstrip', 'xyzzyhelloxyzzy', u'helloxyzzy', u'xyz')
+    test('rstrip', 'xyzzyhelloxyzzy', u'xyzzyhello', u'xyz')
+    test('strip', 'hello', u'hello', u'xyz')
+
+    test('swapcase', 'HeLLo cOmpUteRs', 'hEllO CoMPuTErS')
+    test('translate', 'xyzabcdef', 'xyzxyz', transtable, 'def')
+
+    table = string.maketrans('a', 'A')
+    test('translate', 'abc', 'Abc', table)
+    test('translate', 'xyz', 'xyz', table)
+
+    test('replace', 'one!two!three!', 'one at two!three!', '!', '@', 1)
+    test('replace', 'one!two!three!', 'onetwothree', '!', '')
+    test('replace', 'one!two!three!', 'one at two@three!', '!', '@', 2)
+    test('replace', 'one!two!three!', 'one at two@three@', '!', '@', 3)
+    test('replace', 'one!two!three!', 'one at two@three@', '!', '@', 4)
+    test('replace', 'one!two!three!', 'one!two!three!', '!', '@', 0)
+    test('replace', 'one!two!three!', 'one at two@three@', '!', '@')
+    test('replace', 'one!two!three!', 'one!two!three!', 'x', '@')
+    test('replace', 'one!two!three!', 'one!two!three!', 'x', '@', 2)
+    # Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
+    # MemoryError due to empty result (platform malloc issue when requesting
+    # 0 bytes).
+    test('replace', '123', '', '123', '')
+    test('replace', '123123', '', '123', '')
+    test('replace', '123x123', 'x', '123', '')
+
+    test('startswith', 'hello', 1, 'he')
+    test('startswith', 'hello', 1, 'hello')
+    test('startswith', 'hello', 0, 'hello world')
+    test('startswith', 'hello', 1, '')
+    test('startswith', 'hello', 0, 'ello')
+    test('startswith', 'hello', 1, 'ello', 1)
+    test('startswith', 'hello', 1, 'o', 4)
+    test('startswith', 'hello', 0, 'o', 5)
+    test('startswith', 'hello', 1, '', 5)
+    test('startswith', 'hello', 0, 'lo', 6)
+    test('startswith', 'helloworld', 1, 'lowo', 3)
+    test('startswith', 'helloworld', 1, 'lowo', 3, 7)
+    test('startswith', 'helloworld', 0, 'lowo', 3, 6)
+
+    test('endswith', 'hello', 1, 'lo')
+    test('endswith', 'hello', 0, 'he')
+    test('endswith', 'hello', 1, '')
+    test('endswith', 'hello', 0, 'hello world')
+    test('endswith', 'helloworld', 0, 'worl')
+    test('endswith', 'helloworld', 1, 'worl', 3, 9)
+    test('endswith', 'helloworld', 1, 'world', 3, 12)
+    test('endswith', 'helloworld', 1, 'lowo', 1, 7)
+    test('endswith', 'helloworld', 1, 'lowo', 2, 7)
+    test('endswith', 'helloworld', 1, 'lowo', 3, 7)
+    test('endswith', 'helloworld', 0, 'lowo', 4, 7)
+    test('endswith', 'helloworld', 0, 'lowo', 3, 8)
+    test('endswith', 'ab', 0, 'ab', 0, 1)
+    test('endswith', 'ab', 0, 'ab', 0, 0)
+
+    test('zfill', '123', '123', 2)
+    test('zfill', '123', '123', 3)
+    test('zfill', '123', '0123', 4)
+    test('zfill', '+123', '+123', 3)
+    test('zfill', '+123', '+123', 4)
+    test('zfill', '+123', '+0123', 5)
+    test('zfill', '-123', '-123', 3)
+    test('zfill', '-123', '-123', 4)
+    test('zfill', '-123', '-0123', 5)
+    test('zfill', '', '000', 3)
+    test('zfill', '34', '34', 1)
+    test('zfill', '34', '0034', 4)
+
+    # Encoding/decoding
+    codecs = [('rot13', 'uryyb jbeyq'),
+              ('base64', 'aGVsbG8gd29ybGQ=\n'),
+              ('hex', '68656c6c6f20776f726c64'),
+              ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
+    for encoding, data in codecs:
+        test('encode', 'hello world', data, encoding)
+        test('decode', data, 'hello world', encoding)
+    # zlib is optional, so we make the test optional too...
+    try:
+        import zlib
+    except ImportError:
+        pass
+    else:
+        data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
+        verify('hello world'.encode('zlib') == data)
+        verify(data.decode('zlib') == 'hello world')
diff --git a/lib-python/2.2/test/test.xml b/lib-python/2.2/test/test.xml
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<HTML xmlns:pp="http://www.isogen.com/paul/post-processor">
+<TITLE>Introduction to XSL</TITLE>
+<H1>Introduction to XSL</H1>
+	
+
+	
+		<HR/>
+		<H2>Overview
+</H2>
+		<UL>
+		
+	<LI>1.Intro</LI>
+
+	<LI>2.History</LI>
+
+	<LI>3.XSL Basics</LI>
+
+	<LI>Lunch</LI>
+
+	<LI>4.An XML Data Model</LI>
+
+	<LI>5.XSL Patterns</LI>
+
+	<LI>6.XSL Templates</LI>
+
+	<LI>7.XSL Formatting Model
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR/>
+		<H2>Intro</H2>
+		<UL>
+		
+	<LI>Who am I?</LI>
+
+	<LI>Who are you?</LI>
+
+	<LI>Why are we here?
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR/>
+		<H2>History: XML and SGML</H2>
+		<UL>
+		
+	<LI>XML is a subset of SGML.</LI>
+
+	<LI>SGML allows the separation of abstract content from formatting.</LI>
+
+	<LI>Also one of XML's primary virtues (in the doc publishing domain).
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR/>
+		<H2>History: What are stylesheets?</H2>
+		<UL>
+		
+	<LI>Stylesheets specify the formatting of SGML/XML documents.</LI>
+
+	<LI>Stylesheets put the &quot;style&quot; back into documents.</LI>
+
+	<LI>New York Times content+NYT Stylesheet = NYT paper
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR/>
+		<H2>History: FOSI</H2>
+		<UL>
+		
+	<LI>FOSI: &quot;Formatted Output Specification Instance&quot;
+<UL>
+	<LI>MIL-STD-28001
+	</LI>
+
+	<LI>FOSI's are SGML documents
+	</LI>
+
+	<LI>A stylesheet for another document
+	</LI>
+</UL></LI>
+
+	<LI>Obsolete but implemented...
+</LI>
+
+		</UL>
+	
+
+
+	
+</HTML>
diff --git a/lib-python/2.2/test/test.xml.out b/lib-python/2.2/test/test.xml.out
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test.xml.out
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<HTML xmlns:pp="http://www.isogen.com/paul/post-processor">
+<TITLE>Introduction to XSL</TITLE>
+<H1>Introduction to XSL</H1>
+	
+
+	
+		<HR></HR>
+		<H2>Overview
+</H2>
+		<UL>
+		
+	<LI>1.Intro</LI>
+
+	<LI>2.History</LI>
+
+	<LI>3.XSL Basics</LI>
+
+	<LI>Lunch</LI>
+
+	<LI>4.An XML Data Model</LI>
+
+	<LI>5.XSL Patterns</LI>
+
+	<LI>6.XSL Templates</LI>
+
+	<LI>7.XSL Formatting Model
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR></HR>
+		<H2>Intro</H2>
+		<UL>
+		
+	<LI>Who am I?</LI>
+
+	<LI>Who are you?</LI>
+
+	<LI>Why are we here?
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR></HR>
+		<H2>History: XML and SGML</H2>
+		<UL>
+		
+	<LI>XML is a subset of SGML.</LI>
+
+	<LI>SGML allows the separation of abstract content from formatting.</LI>
+
+	<LI>Also one of XML's primary virtues (in the doc publishing domain).
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR></HR>
+		<H2>History: What are stylesheets?</H2>
+		<UL>
+		
+	<LI>Stylesheets specify the formatting of SGML/XML documents.</LI>
+
+	<LI>Stylesheets put the "style" back into documents.</LI>
+
+	<LI>New York Times content+NYT Stylesheet = NYT paper
+</LI>
+
+		</UL>
+	
+
+
+	
+
+	
+		<HR></HR>
+		<H2>History: FOSI</H2>
+		<UL>
+		
+	<LI>FOSI: "Formatted Output Specification Instance"
+<UL>
+	<LI>MIL-STD-28001
+	</LI>
+
+	<LI>FOSI's are SGML documents
+	</LI>
+
+	<LI>A stylesheet for another document
+	</LI>
+</UL></LI>
+
+	<LI>Obsolete but implemented...
+</LI>
+
+		</UL>
+	
+
+
+	
+</HTML>
\ No newline at end of file
diff --git a/lib-python/2.2/test/test_MimeWriter.py b/lib-python/2.2/test/test_MimeWriter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_MimeWriter.py
@@ -0,0 +1,170 @@
+"""Test program for MimeWriter module.
+
+The test program was too big to comfortably fit in the MimeWriter
+class, so it's here in its own file.
+
+This should generate Barry's example, modulo some quotes and newlines.
+
+"""
+
+
+from MimeWriter import MimeWriter
+
+SELLER = '''\
+INTERFACE Seller-1;
+
+TYPE Seller = OBJECT
+    DOCUMENTATION "A simple Seller interface to test ILU"
+    METHODS
+            price():INTEGER,
+    END;
+'''
+
+BUYER = '''\
+class Buyer:
+    def __setup__(self, maxprice):
+        self._maxprice = maxprice
+
+    def __main__(self, kos):
+        """Entry point upon arrival at a new KOS."""
+        broker = kos.broker()
+        # B4 == Barry's Big Bass Business :-)
+        seller = broker.lookup('Seller_1.Seller', 'B4')
+        if seller:
+            price = seller.price()
+            print 'Seller wants $', price, '... '
+            if price > self._maxprice:
+                print 'too much!'
+            else:
+                print "I'll take it!"
+        else:
+            print 'no seller found here'
+'''                                     # Don't ask why this comment is here
+
+STATE = '''\
+# instantiate a buyer instance and put it in a magic place for the KOS
+# to find.
+__kp__ = Buyer()
+__kp__.__setup__(500)
+'''
+
+SIMPLE_METADATA = [
+        ("Interpreter", "python"),
+        ("Interpreter-Version", "1.3"),
+        ("Owner-Name", "Barry Warsaw"),
+        ("Owner-Rendezvous", "bwarsaw at cnri.reston.va.us"),
+        ("Home-KSS", "kss.cnri.reston.va.us"),
+        ("Identifier", "hdl://cnri.kss/my_first_knowbot"),
+        ("Launch-Date", "Mon Feb 12 16:39:03 EST 1996"),
+        ]
+
+COMPLEX_METADATA = [
+        ("Metadata-Type", "complex"),
+        ("Metadata-Key", "connection"),
+        ("Access", "read-only"),
+        ("Connection-Description", "Barry's Big Bass Business"),
+        ("Connection-Id", "B4"),
+        ("Connection-Direction", "client"),
+        ]
+
+EXTERNAL_METADATA = [
+        ("Metadata-Type", "complex"),
+        ("Metadata-Key", "generic-interface"),
+        ("Access", "read-only"),
+        ("Connection-Description", "Generic Interface for All Knowbots"),
+        ("Connection-Id", "generic-kp"),
+        ("Connection-Direction", "client"),
+        ]
+
+
+def main():
+    import sys
+
+    # Toplevel headers
+
+    toplevel = MimeWriter(sys.stdout)
+    toplevel.addheader("From", "bwarsaw at cnri.reston.va.us")
+    toplevel.addheader("Date", "Mon Feb 12 17:21:48 EST 1996")
+    toplevel.addheader("To", "kss-submit at cnri.reston.va.us")
+    toplevel.addheader("MIME-Version", "1.0")
+
+    # Toplevel body parts
+
+    f = toplevel.startmultipartbody("knowbot", "801spam999",
+                                    [("version", "0.1")], prefix=0)
+    f.write("This is a multi-part message in MIME format.\n")
+
+    # First toplevel body part: metadata
+
+    md = toplevel.nextpart()
+    md.startmultipartbody("knowbot-metadata", "802spam999")
+
+    # Metadata part 1
+
+    md1 = md.nextpart()
+    md1.addheader("KP-Metadata-Type", "simple")
+    md1.addheader("KP-Access", "read-only")
+    m = MimeWriter(md1.startbody("message/rfc822"))
+    for key, value in SIMPLE_METADATA:
+        m.addheader("KPMD-" + key, value)
+    m.flushheaders()
+    del md1
+
+    # Metadata part 2
+
+    md2 = md.nextpart()
+    for key, value in COMPLEX_METADATA:
+        md2.addheader("KP-" + key, value)
+    f = md2.startbody("text/isl")
+    f.write(SELLER)
+    del md2
+
+    # Metadata part 3
+
+    md3 = md.nextpart()
+    f = md3.startbody("message/external-body",
+                      [("access-type", "URL"),
+                       ("URL", "hdl://cnri.kss/generic-knowbot")])
+    m = MimeWriter(f)
+    for key, value in EXTERNAL_METADATA:
+        md3.addheader("KP-" + key, value)
+    md3.startbody("text/isl")
+    # Phantom body doesn't need to be written
+
+    md.lastpart()
+
+    # Second toplevel body part: code
+
+    code = toplevel.nextpart()
+    code.startmultipartbody("knowbot-code", "803spam999")
+
+    # Code: buyer program source
+
+    buyer = code.nextpart()
+    buyer.addheader("KP-Module-Name", "BuyerKP")
+    f = buyer.startbody("text/plain")
+    f.write(BUYER)
+
+    code.lastpart()
+
+    # Third toplevel body part: state
+
+    state = toplevel.nextpart()
+    state.addheader("KP-Main-Module", "main")
+    state.startmultipartbody("knowbot-state", "804spam999")
+
+    # State: a bunch of assignments
+
+    st = state.nextpart()
+    st.addheader("KP-Module-Name", "main")
+    f = st.startbody("text/plain")
+    f.write(STATE)
+
+    state.lastpart()
+
+    # End toplevel body parts
+
+    toplevel.lastpart()
+
+
+main()
diff --git a/lib-python/2.2/test/test_StringIO.py b/lib-python/2.2/test/test_StringIO.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_StringIO.py
@@ -0,0 +1,113 @@
+# Tests StringIO and cStringIO
+
+import unittest
+import StringIO
+import cStringIO
+import types
+import test_support
+
+
+class TestGenericStringIO(unittest.TestCase):
+    # use a class variable MODULE to define which module is being tested
+
+    # Line of data to test as string
+    _line = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!'
+
+    # Constructor to use for the test data (._line is passed to this
+    # constructor)
+    constructor = str
+
+    def setUp(self):
+        self._line = self.constructor(self._line)
+        self._lines = self.constructor((self._line + '\n') * 5)
+        self._fp = self.MODULE.StringIO(self._lines)
+
+    def test_reads(self):
+        eq = self.assertEqual
+        eq(self._fp.read(10), self._line[:10])
+        eq(self._fp.readline(), self._line[10:] + '\n')
+        eq(len(self._fp.readlines(60)), 2)
+
+    def test_writes(self):
+        f = self.MODULE.StringIO()
+        f.write(self._line[:6])
+        f.seek(3)
+        f.write(self._line[20:26])
+        f.write(self._line[52])
+        self.assertEqual(f.getvalue(), 'abcuvwxyz!')
+
+    def test_writelines(self):
+        f = self.MODULE.StringIO()
+        f.writelines([self._line[0], self._line[1], self._line[2]])
+        f.seek(0)
+        self.assertEqual(f.getvalue(), 'abc')
+
+    def test_truncate(self):
+        eq = self.assertEqual
+        f = self.MODULE.StringIO()
+        f.write(self._lines)
+        f.seek(10)
+        f.truncate()
+        eq(f.getvalue(), 'abcdefghij')
+        f.seek(0)
+        f.truncate(5)
+        eq(f.getvalue(), 'abcde')
+        f.close()
+        self.assertRaises(ValueError, f.write, 'frobnitz')
+
+    def test_iterator(self):
+        eq = self.assertEqual
+        unless = self.failUnless
+        it = iter(self._fp)
+        # Does this object support the iteration protocol?
+        unless(hasattr(it, '__iter__'))
+        unless(hasattr(it, 'next'))
+        i = 0
+        for line in self._fp:
+            eq(line, self._line + '\n')
+            i += 1
+        eq(i, 5)
+
+class TestStringIO(TestGenericStringIO):
+    MODULE = StringIO
+
+    if test_support.have_unicode:
+        def test_unicode(self):
+
+            # The StringIO module also supports concatenating Unicode
+            # snippets to larger Unicode strings. This is tested by this
+            # method. Note that cStringIO does not support this extension.
+
+            f = self.MODULE.StringIO()
+            f.write(self._line[:6])
+            f.seek(3)
+            f.write(unicode(self._line[20:26]))
+            f.write(unicode(self._line[52]))
+            s = f.getvalue()
+            self.assertEqual(s, unicode('abcuvwxyz!'))
+            self.assertEqual(type(s), types.UnicodeType)
+
+class TestcStringIO(TestGenericStringIO):
+    MODULE = cStringIO
+
+import sys
+if sys.platform.startswith('java'):
+    # Jython doesn't have a buffer object, so we just do a useless
+    # fake of the buffer tests.
+    buffer = str
+
+class TestBufferStringIO(TestStringIO):
+    constructor = buffer
+
+class TestBuffercStringIO(TestcStringIO):
+    constructor = buffer
+
+
+def test_main():
+    test_support.run_unittest(TestStringIO)
+    test_support.run_unittest(TestcStringIO)
+    test_support.run_unittest(TestBufferStringIO)
+    test_support.run_unittest(TestBuffercStringIO)
+
+if __name__ == '__main__':
+    test_main()
diff --git a/lib-python/2.2/test/test___all__.py b/lib-python/2.2/test/test___all__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test___all__.py
@@ -0,0 +1,158 @@
+from test_support import verify, verbose
+import sys
+
+def check_all(modname):
+    names = {}
+    try:
+        exec "import %s" % modname in names
+    except ImportError:
+        # Silent fail here seems the best route since some modules
+        # may not be available in all environments.
+        # Since an ImportError may leave a partial module object in
+        # sys.modules, get rid of that first.  Here's what happens if
+        # you don't:  importing pty fails on Windows because pty tries to
+        # import FCNTL, which doesn't exist.  That raises an ImportError,
+        # caught here.  It also leaves a partial pty module in sys.modules.
+        # So when test_pty is called later, the import of pty succeeds,
+        # but shouldn't.  As a result, test_pty crashes with an
+        # AttributeError instead of an ImportError, and regrtest interprets
+        # the latter as a test failure (ImportError is treated as "test
+        # skipped" -- which is what test_pty should say on Windows).
+        try:
+            del sys.modules[modname]
+        except KeyError:
+            pass
+        return
+    verify(hasattr(sys.modules[modname], "__all__"),
+           "%s has no __all__ attribute" % modname)
+    names = {}
+    exec "from %s import *" % modname in names
+    if names.has_key("__builtins__"):
+        del names["__builtins__"]
+    keys = names.keys()
+    keys.sort()
+    all = list(sys.modules[modname].__all__) # in case it's a tuple
+    all.sort()
+    verify(keys==all, "%s != %s" % (keys, all))
+
+if not sys.platform.startswith('java'):
+    # In case _socket fails to build, make this test fail more gracefully
+    # than an AttributeError somewhere deep in CGIHTTPServer.
+    import _socket
+
+check_all("BaseHTTPServer")
+check_all("CGIHTTPServer")
+check_all("ConfigParser")
+check_all("Cookie")
+check_all("MimeWriter")
+check_all("SimpleHTTPServer")
+check_all("SocketServer")
+check_all("StringIO")
+check_all("UserString")
+check_all("aifc")
+check_all("atexit")
+check_all("audiodev")
+check_all("base64")
+check_all("bdb")
+check_all("binhex")
+check_all("calendar")
+check_all("cgi")
+check_all("cmd")
+check_all("code")
+check_all("codecs")
+check_all("codeop")
+check_all("colorsys")
+check_all("commands")
+check_all("compileall")
+check_all("copy")
+check_all("copy_reg")
+check_all("dbhash")
+check_all("dircache")
+check_all("dis")
+check_all("doctest")
+check_all("dospath")
+check_all("filecmp")
+check_all("fileinput")
+check_all("fnmatch")
+check_all("fpformat")
+check_all("ftplib")
+check_all("getopt")
+check_all("getpass")
+check_all("gettext")
+check_all("glob")
+check_all("gopherlib")
+check_all("gzip")
+check_all("htmllib")
+check_all("httplib")
+check_all("ihooks")
+check_all("imaplib")
+check_all("imghdr")
+check_all("imputil")
+check_all("keyword")
+check_all("linecache")
+check_all("locale")
+check_all("macpath")
+check_all("macurl2path")
+check_all("mailbox")
+check_all("mhlib")
+check_all("mimetools")
+check_all("mimetypes")
+check_all("mimify")
+check_all("multifile")
+check_all("netrc")
+check_all("nntplib")
+check_all("ntpath")
+check_all("os")
+check_all("pdb")
+check_all("pickle")
+check_all("pipes")
+check_all("popen2")
+check_all("poplib")
+check_all("posixpath")
+check_all("pprint")
+check_all("pre")
+check_all("profile")
+check_all("pstats")
+check_all("pty")
+check_all("py_compile")
+check_all("pyclbr")
+check_all("quopri")
+check_all("random")
+check_all("re")
+check_all("reconvert")
+import warnings
+warnings.filterwarnings("ignore", ".* regsub .*", DeprecationWarning, "regsub",
+                        append=1)
+check_all("regsub")
+check_all("repr")
+check_all("rexec")
+check_all("rfc822")
+check_all("rlcompleter")
+check_all("robotparser")
+check_all("sched")
+check_all("sgmllib")
+check_all("shelve")
+check_all("shlex")
+check_all("shutil")
+check_all("smtpd")
+check_all("smtplib")
+check_all("sndhdr")
+check_all("socket")
+check_all("sre")
+check_all("stat_cache")
+check_all("tabnanny")
+check_all("telnetlib")
+check_all("tempfile")
+check_all("toaiff")
+check_all("tokenize")
+check_all("traceback")
+check_all("tty")
+check_all("urllib")
+check_all("urlparse")
+check_all("uu")
+check_all("warnings")
+check_all("wave")
+check_all("weakref")
+check_all("webbrowser")
+check_all("xdrlib")
+check_all("zipfile")
diff --git a/lib-python/2.2/test/test___future__.py b/lib-python/2.2/test/test___future__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test___future__.py
@@ -0,0 +1,59 @@
+#! /usr/bin/env python
+from test_support import verbose, verify
+from types import TupleType, StringType, IntType
+import __future__
+
+GOOD_SERIALS = ("alpha", "beta", "candidate", "final")
+
+features = __future__.all_feature_names
+
+# Verify that all_feature_names appears correct.
+given_feature_names = features[:]
+for name in dir(__future__):
+    obj = getattr(__future__, name, None)
+    if obj is not None and isinstance(obj, __future__._Feature):
+        verify(name in given_feature_names,
+               "%r should have been in all_feature_names" % name)
+        given_feature_names.remove(name)
+verify(len(given_feature_names) == 0,
+       "all_feature_names has too much: %r" % given_feature_names)
+del given_feature_names
+
+for feature in features:
+    value = getattr(__future__, feature)
+    if verbose:
+        print "Checking __future__ ", feature, "value", value
+
+    optional = value.getOptionalRelease()
+    mandatory = value.getMandatoryRelease()
+
+    verify(type(optional) is TupleType, "optional isn't tuple")
+    verify(len(optional) == 5, "optional isn't 5-tuple")
+    major, minor, micro, level, serial = optional
+    verify(type(major) is IntType, "optional major isn't int")
+    verify(type(minor) is IntType, "optional minor isn't int")
+    verify(type(micro) is IntType, "optional micro isn't int")
+    verify(type(level) is StringType, "optional level isn't string")
+    verify(level in GOOD_SERIALS,
+           "optional level string has unknown value")
+    verify(type(serial) is IntType, "optional serial isn't int")
+
+    verify(type(mandatory) is TupleType or
+           mandatory is None, "mandatory isn't tuple or None")
+    if mandatory is not None:
+        verify(len(mandatory) == 5, "mandatory isn't 5-tuple")
+        major, minor, micro, level, serial = mandatory
+        verify(type(major) is IntType, "mandatory major isn't int")
+        verify(type(minor) is IntType, "mandatory minor isn't int")
+        verify(type(micro) is IntType, "mandatory micro isn't int")
+        verify(type(level) is StringType, "mandatory level isn't string")
+        verify(level in GOOD_SERIALS,
+               "mandatory serial string has unknown value")
+        verify(type(serial) is IntType, "mandatory serial isn't int")
+        verify(optional < mandatory,
+               "optional not less than mandatory, and mandatory not None")
+
+    verify(hasattr(value, "compiler_flag"),
+           "feature is missing a .compiler_flag attr")
+    verify(type(getattr(value, "compiler_flag")) is IntType,
+           ".compiler_flag isn't int")
diff --git a/lib-python/2.2/test/test_al.py b/lib-python/2.2/test/test_al.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_al.py
@@ -0,0 +1,23 @@
+#! /usr/bin/env python
+"""Whimpy test script for the al module
+   Roger E. Masse
+"""
+import al
+from test_support import verbose
+
+alattrs = ['__doc__', '__name__', 'getdefault', 'getminmax', 'getname', 'getparams',
+           'newconfig', 'openport', 'queryparams', 'setparams']
+
+# This is a very unobtrusive test for the existence of the al module and all it's
+# attributes.  More comprehensive examples can be found in Demo/al
+
+def main():
+    # touch all the attributes of al without doing anything
+    if verbose:
+        print 'Touching al module attributes...'
+    for attr in alattrs:
+        if verbose:
+            print 'touching: ', attr
+        getattr(al, attr)
+
+main()
diff --git a/lib-python/2.2/test/test_array.py b/lib-python/2.2/test/test_array.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_array.py
@@ -0,0 +1,192 @@
+#! /usr/bin/env python
+"""Test the arraymodule.
+   Roger E. Masse
+"""
+import array
+from test_support import verbose, TESTFN, unlink, TestFailed
+
+def main():
+
+    testtype('c', 'c')
+
+    for type in (['b', 'h', 'i', 'l', 'f', 'd']):
+        testtype(type, 1)
+
+    unlink(TESTFN)
+
+
+def testoverflow(type, lowerLimit, upperLimit):
+        # should not overflow assigning lower limit
+    if verbose:
+        print "overflow test: array(%s, [%s])" % (`type`, `lowerLimit`)
+    try:
+        a = array.array(type, [lowerLimit])
+    except:
+        raise TestFailed, "array(%s) overflowed assigning %s" %\
+                (`type`, `lowerLimit`)
+    # should overflow assigning less than lower limit
+    if verbose:
+        print "overflow test: array(%s, [%s])" % (`type`, `lowerLimit-1`)
+    try:
+        a = array.array(type, [lowerLimit-1])
+        raise TestFailed, "array(%s) did not overflow assigning %s" %\
+                (`type`, `lowerLimit-1`)
+    except OverflowError:
+        pass
+    # should not overflow assigning upper limit
+    if verbose:
+        print "overflow test: array(%s, [%s])" % (`type`, `upperLimit`)
+    try:
+        a = array.array(type, [upperLimit])
+    except:
+        raise TestFailed, "array(%s) overflowed assigning %s" %\
+                (`type`, `upperLimit`)
+    # should overflow assigning more than upper limit
+    if verbose:
+        print "overflow test: array(%s, [%s])" % (`type`, `upperLimit+1`)
+    try:
+        a = array.array(type, [upperLimit+1])
+        raise TestFailed, "array(%s) did not overflow assigning %s" %\
+                (`type`, `upperLimit+1`)
+    except OverflowError:
+        pass
+
+
+
+def testtype(type, example):
+
+    a = array.array(type)
+    a.append(example)
+    if verbose:
+        print 40*'*'
+        print 'array after append: ', a
+    a.typecode
+    a.itemsize
+    if a.typecode in ('i', 'b', 'h', 'l'):
+        a.byteswap()
+
+    if a.typecode == 'c':
+        f = open(TESTFN, "w")
+        f.write("The quick brown fox jumps over the lazy dog.\n")
+        f.close()
+        f = open(TESTFN, 'r')
+        a.fromfile(f, 10)
+        f.close()
+        if verbose:
+            print 'char array with 10 bytes of TESTFN appended: ', a
+        a.fromlist(['a', 'b', 'c'])
+        if verbose:
+            print 'char array with list appended: ', a
+
+    a.insert(0, example)
+    if verbose:
+        print 'array of %s after inserting another:' % a.typecode, a
+    f = open(TESTFN, 'w')
+    a.tofile(f)
+    f.close()
+
+    # This block is just to verify that the operations don't blow up.
+    a.tolist()
+    a.tostring()
+    repr(a)
+    str(a)
+
+    if verbose:
+        print 'array of %s converted to a list: ' % a.typecode, a.tolist()
+    if verbose:
+        print 'array of %s converted to a string: ' \
+               % a.typecode, `a.tostring()`
+
+    if type == 'c':
+        a = array.array(type, "abcde")
+        a[:-1] = a
+        if a != array.array(type, "abcdee"):
+            raise TestFailed, "array(%s) self-slice-assign (head)" % `type`
+        a = array.array(type, "abcde")
+        a[1:] = a
+        if a != array.array(type, "aabcde"):
+            raise TestFailed, "array(%s) self-slice-assign (tail)" % `type`
+        a = array.array(type, "abcde")
+        a[1:-1] = a
+        if a != array.array(type, "aabcdee"):
+            raise TestFailed, "array(%s) self-slice-assign (cntr)" % `type`
+        if a.index("e") != 5:
+            raise TestFailed, "array(%s) index-test" % `type`
+        if a.count("a") != 2:
+            raise TestFailed, "array(%s) count-test" % `type`
+        a.remove("e")
+        if a != array.array(type, "aabcde"):
+            raise TestFailed, "array(%s) remove-test" % `type`
+        if a.pop(0) != "a":
+            raise TestFailed, "array(%s) pop-test" % `type`
+        if a.pop(1) != "b":
+            raise TestFailed, "array(%s) pop-test" % `type`
+        a.extend(array.array(type, "xyz"))
+        if a != array.array(type, "acdexyz"):
+            raise TestFailed, "array(%s) extend-test" % `type`
+        a.pop()
+        a.pop()
+        a.pop()
+        x = a.pop()
+        if x != 'e':
+            raise TestFailed, "array(%s) pop-test" % `type`
+        if a != array.array(type, "acd"):
+            raise TestFailed, "array(%s) pop-test" % `type`
+        a.reverse()
+        if a != array.array(type, "dca"):
+            raise TestFailed, "array(%s) reverse-test" % `type`
+    else:
+        a = array.array(type, [1, 2, 3, 4, 5])
+        a[:-1] = a
+        if a != array.array(type, [1, 2, 3, 4, 5, 5]):
+            raise TestFailed, "array(%s) self-slice-assign (head)" % `type`
+        a = array.array(type, [1, 2, 3, 4, 5])
+        a[1:] = a
+        if a != array.array(type, [1, 1, 2, 3, 4, 5]):
+            raise TestFailed, "array(%s) self-slice-assign (tail)" % `type`
+        a = array.array(type, [1, 2, 3, 4, 5])
+        a[1:-1] = a
+        if a != array.array(type, [1, 1, 2, 3, 4, 5, 5]):
+            raise TestFailed, "array(%s) self-slice-assign (cntr)" % `type`
+        if a.index(5) != 5:
+            raise TestFailed, "array(%s) index-test" % `type`
+        if a.count(1) != 2:
+            raise TestFailed, "array(%s) count-test" % `type`
+        a.remove(5)
+        if a != array.array(type, [1, 1, 2, 3, 4, 5]):
+            raise TestFailed, "array(%s) remove-test" % `type`
+        if a.pop(0) != 1:
+            raise TestFailed, "array(%s) pop-test" % `type`
+        if a.pop(1) != 2:
+            raise TestFailed, "array(%s) pop-test" % `type`
+        a.extend(array.array(type, [7, 8, 9]))
+        if a != array.array(type, [1, 3, 4, 5, 7, 8, 9]):
+            raise TestFailed, "array(%s) extend-test" % `type`
+        a.pop()
+        a.pop()
+        a.pop()
+        x = a.pop()
+        if x != 5:
+            raise TestFailed, "array(%s) pop-test" % `type`
+        if a != array.array(type, [1, 3, 4]):
+            raise TestFailed, "array(%s) pop-test" % `type`
+        a.reverse()
+        if a != array.array(type, [4, 3, 1]):
+            raise TestFailed, "array(%s) reverse-test" % `type`
+
+    # test that overflow exceptions are raised as expected for assignment
+    # to array of specific integral types
+    from math import pow
+    if type in ('b', 'h', 'i', 'l'):
+        # check signed and unsigned versions
+        a = array.array(type)
+        signedLowerLimit = -1 * long(pow(2, a.itemsize * 8 - 1))
+        signedUpperLimit = long(pow(2, a.itemsize * 8 - 1)) - 1L
+        unsignedLowerLimit = 0
+        unsignedUpperLimit = long(pow(2, a.itemsize * 8)) - 1L
+        testoverflow(type, signedLowerLimit, signedUpperLimit)
+        testoverflow(type.upper(), unsignedLowerLimit, unsignedUpperLimit)
+
+
+
+main()
diff --git a/lib-python/2.2/test/test_asynchat.py b/lib-python/2.2/test/test_asynchat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_asynchat.py
@@ -0,0 +1,58 @@
+# test asynchat -- requires threading
+
+import thread # If this fails, we can't test this module
+import asyncore, asynchat, socket, threading, time
+
+HOST = "127.0.0.1"
+PORT = 54321
+
+class echo_server(threading.Thread):
+
+    def run(self):
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        sock.bind((HOST, PORT))
+        sock.listen(1)
+        conn, client = sock.accept()
+        buffer = ""
+        while "\n" not in buffer:
+            data = conn.recv(10)
+            if not data:
+                break
+            buffer = buffer + data
+        while buffer:
+            n = conn.send(buffer)
+            buffer = buffer[n:]
+        conn.close()
+        sock.close()
+
+class echo_client(asynchat.async_chat):
+
+    def __init__(self):
+        asynchat.async_chat.__init__(self)
+        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.connect((HOST, PORT))
+        self.set_terminator("\n")
+        self.buffer = ""
+
+    def handle_connect(self):
+        print "Connected"
+
+    def collect_incoming_data(self, data):
+        self.buffer = self.buffer + data
+
+    def found_terminator(self):
+        print "Received:", `self.buffer`
+        self.buffer = ""
+        self.close()
+
+def main():
+    s = echo_server()
+    s.start()
+    time.sleep(1) # Give server time to initialize
+    c = echo_client()
+    c.push("hello ")
+    c.push("world\n")
+    asyncore.loop()
+
+main()
diff --git a/lib-python/2.2/test/test_atexit.py b/lib-python/2.2/test/test_atexit.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_atexit.py
@@ -0,0 +1,66 @@
+# Test the atexit module.
+from test_support import TESTFN, vereq, is_jython
+import atexit
+from os import popen, unlink
+import sys
+
+executable = sys.executable
+if is_jython:
+    executable = "jython"
+
+input = """\
+import atexit
+
+def handler1():
+    print "handler1"
+
+def handler2(*args, **kargs):
+    print "handler2", args, kargs
+
+atexit.register(handler1)
+atexit.register(handler2)
+atexit.register(handler2, 7, kw="abc")
+"""
+
+fname = TESTFN + ".py"
+f = file(fname, "w")
+f.write(input)
+f.close()
+
+p = popen("%s %s" % (executable, fname))
+output = p.read()
+p.close()
+vereq(output, """\
+handler2 (7,) {'kw': 'abc'}
+handler2 () {}
+handler1
+""")
+
+input = """\
+def direct():
+    print "direct exit"
+
+import sys
+sys.exitfunc = direct
+
+# Make sure atexit doesn't drop
+def indirect():
+    print "indirect exit"
+
+import atexit
+atexit.register(indirect)
+"""
+
+f = file(fname, "w")
+f.write(input)
+f.close()
+
+p = popen("%s %s" % (executable, fname))
+output = p.read()
+p.close()
+vereq(output, """\
+indirect exit
+direct exit
+""")
+
+unlink(fname)
diff --git a/lib-python/2.2/test/test_audioop.py b/lib-python/2.2/test/test_audioop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_audioop.py
@@ -0,0 +1,264 @@
+# Test audioop.
+import audioop
+from test_support import verbose
+
+def gendata1():
+    return '\0\1\2'
+
+def gendata2():
+    if verbose:
+        print 'getsample'
+    if audioop.getsample('\0\1', 2, 0) == 1:
+        return '\0\0\0\1\0\2'
+    else:
+        return '\0\0\1\0\2\0'
+
+def gendata4():
+    if verbose:
+        print 'getsample'
+    if audioop.getsample('\0\0\0\1', 4, 0) == 1:
+        return '\0\0\0\0\0\0\0\1\0\0\0\2'
+    else:
+        return '\0\0\0\0\1\0\0\0\2\0\0\0'
+
+def testmax(data):
+    if verbose:
+        print 'max'
+    if audioop.max(data[0], 1) != 2 or \
+              audioop.max(data[1], 2) != 2 or \
+              audioop.max(data[2], 4) != 2:
+        return 0
+    return 1
+
+def testminmax(data):
+    if verbose:
+        print 'minmax'
+    if audioop.minmax(data[0], 1) != (0, 2) or \
+              audioop.minmax(data[1], 2) != (0, 2) or \
+              audioop.minmax(data[2], 4) != (0, 2):
+        return 0
+    return 1
+
+def testmaxpp(data):
+    if verbose:
+        print 'maxpp'
+    if audioop.maxpp(data[0], 1) != 0 or \
+              audioop.maxpp(data[1], 2) != 0 or \
+              audioop.maxpp(data[2], 4) != 0:
+        return 0
+    return 1
+
+def testavg(data):
+    if verbose:
+        print 'avg'
+    if audioop.avg(data[0], 1) != 1 or \
+              audioop.avg(data[1], 2) != 1 or \
+              audioop.avg(data[2], 4) != 1:
+        return 0
+    return 1
+
+def testavgpp(data):
+    if verbose:
+        print 'avgpp'
+    if audioop.avgpp(data[0], 1) != 0 or \
+              audioop.avgpp(data[1], 2) != 0 or \
+              audioop.avgpp(data[2], 4) != 0:
+        return 0
+    return 1
+
+def testrms(data):
+    if audioop.rms(data[0], 1) != 1 or \
+              audioop.rms(data[1], 2) != 1 or \
+              audioop.rms(data[2], 4) != 1:
+        return 0
+    return 1
+
+def testcross(data):
+    if verbose:
+        print 'cross'
+    if audioop.cross(data[0], 1) != 0 or \
+              audioop.cross(data[1], 2) != 0 or \
+              audioop.cross(data[2], 4) != 0:
+        return 0
+    return 1
+
+def testadd(data):
+    if verbose:
+        print 'add'
+    data2 = []
+    for d in data:
+        str = ''
+        for s in d:
+            str = str + chr(ord(s)*2)
+        data2.append(str)
+    if audioop.add(data[0], data[0], 1) != data2[0] or \
+              audioop.add(data[1], data[1], 2) != data2[1] or \
+              audioop.add(data[2], data[2], 4) != data2[2]:
+        return 0
+    return 1
+
+def testbias(data):
+    if verbose:
+        print 'bias'
+    # Note: this test assumes that avg() works
+    d1 = audioop.bias(data[0], 1, 100)
+    d2 = audioop.bias(data[1], 2, 100)
+    d4 = audioop.bias(data[2], 4, 100)
+    if audioop.avg(d1, 1) != 101 or \
+              audioop.avg(d2, 2) != 101 or \
+              audioop.avg(d4, 4) != 101:
+        return 0
+    return 1
+
+def testlin2lin(data):
+    if verbose:
+        print 'lin2lin'
+    # too simple: we test only the size
+    for d1 in data:
+        for d2 in data:
+            got = len(d1)//3
+            wtd = len(d2)//3
+            if len(audioop.lin2lin(d1, got, wtd)) != len(d2):
+                return 0
+    return 1
+
+def testadpcm2lin(data):
+    # Very cursory test
+    if audioop.adpcm2lin('\0\0', 1, None) != ('\0\0\0\0', (0,0)):
+        return 0
+    return 1
+
+def testlin2adpcm(data):
+    if verbose:
+        print 'lin2adpcm'
+    # Very cursory test
+    if audioop.lin2adpcm('\0\0\0\0', 1, None) != ('\0\0', (0,0)):
+        return 0
+    return 1
+
+def testlin2ulaw(data):
+    if verbose:
+        print 'lin2ulaw'
+    if audioop.lin2ulaw(data[0], 1) != '\377\347\333' or \
+              audioop.lin2ulaw(data[1], 2) != '\377\377\377' or \
+              audioop.lin2ulaw(data[2], 4) != '\377\377\377':
+        return 0
+    return 1
+
+def testulaw2lin(data):
+    if verbose:
+        print 'ulaw2lin'
+    # Cursory
+    d = audioop.lin2ulaw(data[0], 1)
+    if audioop.ulaw2lin(d, 1) != data[0]:
+        return 0
+    return 1
+
+def testmul(data):
+    if verbose:
+        print 'mul'
+    data2 = []
+    for d in data:
+        str = ''
+        for s in d:
+            str = str + chr(ord(s)*2)
+        data2.append(str)
+    if audioop.mul(data[0], 1, 2) != data2[0] or \
+              audioop.mul(data[1],2, 2) != data2[1] or \
+              audioop.mul(data[2], 4, 2) != data2[2]:
+        return 0
+    return 1
+
+def testratecv(data):
+    if verbose:
+        print 'ratecv'
+    state = None
+    d1, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
+    d2, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
+    if d1 + d2 != '\000\000\001\001\002\001\000\000\001\001\002':
+        return 0
+    return 1
+
+def testreverse(data):
+    if verbose:
+        print 'reverse'
+    if audioop.reverse(data[0], 1) != '\2\1\0':
+        return 0
+    return 1
+
+def testtomono(data):
+    if verbose:
+        print 'tomono'
+    data2 = ''
+    for d in data[0]:
+        data2 = data2 + d + d
+    if audioop.tomono(data2, 1, 0.5, 0.5) != data[0]:
+        return 0
+    return 1
+
+def testtostereo(data):
+    if verbose:
+        print 'tostereo'
+    data2 = ''
+    for d in data[0]:
+        data2 = data2 + d + d
+    if audioop.tostereo(data[0], 1, 1, 1) != data2:
+        return 0
+    return 1
+
+def testfindfactor(data):
+    if verbose:
+        print 'findfactor'
+    if audioop.findfactor(data[1], data[1]) != 1.0:
+        return 0
+    return 1
+
+def testfindfit(data):
+    if verbose:
+        print 'findfit'
+    if audioop.findfit(data[1], data[1]) != (0, 1.0):
+        return 0
+    return 1
+
+def testfindmax(data):
+    if verbose:
+        print 'findmax'
+    if audioop.findmax(data[1], 1) != 2:
+        return 0
+    return 1
+
+def testgetsample(data):
+    if verbose:
+        print 'getsample'
+    for i in range(3):
+        if audioop.getsample(data[0], 1, i) != i or \
+                  audioop.getsample(data[1], 2, i) != i or \
+                  audioop.getsample(data[2], 4, i) != i:
+            return 0
+    return 1
+
+def testone(name, data):
+    try:
+        func = eval('test'+name)
+    except NameError:
+        print 'No test found for audioop.'+name+'()'
+        return
+    try:
+        rv = func(data)
+    except 'xx':
+        print 'Test FAILED for audioop.'+name+'() (with an exception)'
+        return
+    if not rv:
+        print 'Test FAILED for audioop.'+name+'()'
+
+def testall():
+    data = [gendata1(), gendata2(), gendata4()]
+    names = dir(audioop)
+    # We know there is a routine 'add'
+    routines = []
+    for n in names:
+        if type(eval('audioop.'+n)) == type(audioop.add):
+            routines.append(n)
+    for n in routines:
+        testone(n, data)
+testall()
diff --git a/lib-python/2.2/test/test_augassign.py b/lib-python/2.2/test/test_augassign.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_augassign.py
@@ -0,0 +1,261 @@
+# Augmented assignment test.
+
+x = 2
+x += 1
+x *= 2
+x **= 2
+x -= 8
+x //= 2
+x //= 1
+x %= 12
+x &= 2
+x |= 5
+x ^= 1
+
+print x
+
+x = [2]
+x[0] += 1
+x[0] *= 2
+x[0] **= 2
+x[0] -= 8
+x[0] //= 2
+x[0] //= 2
+x[0] %= 12
+x[0] &= 2
+x[0] |= 5
+x[0] ^= 1
+
+print x
+
+x = {0: 2}
+x[0] += 1
+x[0] *= 2
+x[0] **= 2
+x[0] -= 8
+x[0] //= 2
+x[0] //= 1
+x[0] %= 12
+x[0] &= 2
+x[0] |= 5
+x[0] ^= 1
+
+print x[0]
+
+x = [1,2]
+x += [3,4]
+x *= 2
+
+print x
+
+x = [1, 2, 3]
+y = x
+x[1:2] *= 2
+y[1:2] += [1]
+
+print x
+print x is y
+
+class aug_test:
+    def __init__(self, value):
+        self.val = value
+    def __radd__(self, val):
+        return self.val + val
+    def __add__(self, val):
+        return aug_test(self.val + val)
+
+
+class aug_test2(aug_test):
+    def __iadd__(self, val):
+        self.val = self.val + val
+        return self
+
+class aug_test3(aug_test):
+    def __iadd__(self, val):
+        return aug_test3(self.val + val)
+
+x = aug_test(1)
+y = x
+x += 10
+
+print isinstance(x, aug_test)
+print y is not x
+print x.val
+
+x = aug_test2(2)
+y = x
+x += 10
+
+print y is x
+print x.val
+
+x = aug_test3(3)
+y = x
+x += 10
+
+print isinstance(x, aug_test3)
+print y is not x
+print x.val
+
+class testall:
+
+    def __add__(self, val):
+        print "__add__ called"
+    def __radd__(self, val):
+        print "__radd__ called"
+    def __iadd__(self, val):
+        print "__iadd__ called"
+        return self
+
+    def __sub__(self, val):
+        print "__sub__ called"
+    def __rsub__(self, val):
+        print "__rsub__ called"
+    def __isub__(self, val):
+        print "__isub__ called"
+        return self
+
+    def __mul__(self, val):
+        print "__mul__ called"
+    def __rmul__(self, val):
+        print "__rmul__ called"
+    def __imul__(self, val):
+        print "__imul__ called"
+        return self
+
+    def __div__(self, val):
+        print "__div__ called"
+    def __rdiv__(self, val):
+        print "__rdiv__ called"
+    def __idiv__(self, val):
+        print "__idiv__ called"
+        return self
+
+    def __floordiv__(self, val):
+        print "__floordiv__ called"
+        return self
+    def __ifloordiv__(self, val):
+        print "__ifloordiv__ called"
+        return self
+    def __rfloordiv__(self, val):
+        print "__rfloordiv__ called"
+        return self
+
+    def __truediv__(self, val):
+        print "__truediv__ called"
+        return self
+    def __itruediv__(self, val):
+        print "__itruediv__ called"
+        return self
+
+    def __mod__(self, val):
+        print "__mod__ called"
+    def __rmod__(self, val):
+        print "__rmod__ called"
+    def __imod__(self, val):
+        print "__imod__ called"
+        return self
+
+    def __pow__(self, val):
+        print "__pow__ called"
+    def __rpow__(self, val):
+        print "__rpow__ called"
+    def __ipow__(self, val):
+        print "__ipow__ called"
+        return self
+
+    def __or__(self, val):
+        print "__or__ called"
+    def __ror__(self, val):
+        print "__ror__ called"
+    def __ior__(self, val):
+        print "__ior__ called"
+        return self
+
+    def __and__(self, val):
+        print "__and__ called"
+    def __rand__(self, val):
+        print "__rand__ called"
+    def __iand__(self, val):
+        print "__iand__ called"
+        return self
+
+    def __xor__(self, val):
+        print "__xor__ called"
+    def __rxor__(self, val):
+        print "__rxor__ called"
+    def __ixor__(self, val):
+        print "__ixor__ called"
+        return self
+
+    def __rshift__(self, val):
+        print "__rshift__ called"
+    def __rrshift__(self, val):
+        print "__rrshift__ called"
+    def __irshift__(self, val):
+        print "__irshift__ called"
+        return self
+
+    def __lshift__(self, val):
+        print "__lshift__ called"
+    def __rlshift__(self, val):
+        print "__rlshift__ called"
+    def __ilshift__(self, val):
+        print "__ilshift__ called"
+        return self
+
+x = testall()
+x + 1
+1 + x
+x += 1
+
+x - 1
+1 - x
+x -= 1
+
+x * 1
+1 * x
+x *= 1
+
+if 1/2 == 0:
+    x / 1
+    1 / x
+    x /= 1
+else:
+    # True division is in effect, so "/" doesn't map to __div__ etc;
+    # but the canned expected-output file requires that those get called.
+    x.__div__(1)
+    x.__rdiv__(1)
+    x.__idiv__(1)
+
+x // 1
+1 // x
+x //= 1
+
+x % 1
+1 % x
+x %= 1
+
+x ** 1
+1 ** x
+x **= 1
+
+x | 1
+1 | x
+x |= 1
+
+x & 1
+1 & x
+x &= 1
+
+x ^ 1
+1 ^ x
+x ^= 1
+
+x >> 1
+1 >> x
+x >>= 1
+
+x << 1
+1 << x
+x <<= 1
diff --git a/lib-python/2.2/test/test_b1.py b/lib-python/2.2/test/test_b1.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_b1.py
@@ -0,0 +1,632 @@
+# Python test set -- part 4a, built-in functions a-m
+
+from test_support import *
+
+print '__import__'
+__import__('sys')
+__import__('time')
+__import__('string')
+try: __import__('spamspam')
+except ImportError: pass
+else: raise TestFailed, "__import__('spamspam') should fail"
+
+print 'abs'
+if abs(0) != 0: raise TestFailed, 'abs(0)'
+if abs(1234) != 1234: raise TestFailed, 'abs(1234)'
+if abs(-1234) != 1234: raise TestFailed, 'abs(-1234)'
+#
+if abs(0.0) != 0.0: raise TestFailed, 'abs(0.0)'
+if abs(3.14) != 3.14: raise TestFailed, 'abs(3.14)'
+if abs(-3.14) != 3.14: raise TestFailed, 'abs(-3.14)'
+#
+if abs(0L) != 0L: raise TestFailed, 'abs(0L)'
+if abs(1234L) != 1234L: raise TestFailed, 'abs(1234L)'
+if abs(-1234L) != 1234L: raise TestFailed, 'abs(-1234L)'
+
+try: abs('a')
+except TypeError: pass
+else: raise TestFailed, 'abs("a")'
+
+print 'apply'
def f0(*args):
    """apply() helper: must receive no arguments at all."""
    if args != (): raise TestFailed, 'f0 called with ' + `args`
def f1(a1):
    """apply() helper: must receive exactly the single argument 1."""
    if a1 != 1: raise TestFailed, 'f1 called with ' + `a1`
def f2(a1, a2):
    """apply() helper: must receive exactly the arguments (1, 2)."""
    if a1 != 1 or a2 != 2:
        raise TestFailed, 'f2 called with ' + `a1, a2`
def f3(a1, a2, a3):
    """apply() helper: must receive exactly the arguments (1, 2, 3)."""
    if a1 != 1 or a2 != 2 or a3 != 3:
        raise TestFailed, 'f3 called with ' + `a1, a2, a3`
+apply(f0, ())
+apply(f1, (1,))
+apply(f2, (1, 2))
+apply(f3, (1, 2, 3))
+
+# A PyCFunction that takes only positional parameters should allow an
+# empty keyword dictionary to pass without a complaint, but raise a
+# TypeError if the dictionary is non-empty.
+apply(id, (1,), {})
+try:
+    apply(id, (1,), {"foo": 1})
+except TypeError:
+    pass
+else:
+    raise TestFailed, 'expected TypeError; no exception raised'
+
+print 'callable'
+if not callable(len):raise TestFailed, 'callable(len)'
+def f(): pass
+if not callable(f): raise TestFailed, 'callable(f)'
class C:
    """callable() fixture: the class is callable, its instances are not."""
    def meth(self): pass
+if not callable(C): raise TestFailed, 'callable(C)'
+x = C()
+if not callable(x.meth): raise TestFailed, 'callable(x.meth)'
+if callable(x): raise TestFailed, 'callable(x)'
class D(C):
    """callable() fixture: defines __call__, so instances are callable."""
    def __call__(self): pass
+y = D()
+if not callable(y): raise TestFailed, 'callable(y)'
+y()
+
+print 'chr'
+if chr(32) != ' ': raise TestFailed, 'chr(32)'
+if chr(65) != 'A': raise TestFailed, 'chr(65)'
+if chr(97) != 'a': raise TestFailed, 'chr(97)'
+
+# cmp
+print 'cmp'
+if cmp(-1, 1) != -1: raise TestFailed, 'cmp(-1, 1)'
+if cmp(1, -1) != 1: raise TestFailed, 'cmp(1, -1)'
+if cmp(1, 1) != 0: raise TestFailed, 'cmp(1, 1)'
+# verify that circular objects are handled
+a = []; a.append(a)
+b = []; b.append(b)
+from UserList import UserList
+c = UserList(); c.append(c)
+if cmp(a, b) != 0: raise TestFailed, "cmp(%s, %s)" % (a, b)
+if cmp(b, c) != 0: raise TestFailed, "cmp(%s, %s)" % (b, c)
+if cmp(c, a) != 0: raise TestFailed, "cmp(%s, %s)" % (c, a)
+if cmp(a, c) != 0: raise TestFailed, "cmp(%s, %s)" % (a, c)
+# okay, now break the cycles
+a.pop(); b.pop(); c.pop()
+
+print 'coerce'
+if fcmp(coerce(1, 1.1), (1.0, 1.1)): raise TestFailed, 'coerce(1, 1.1)'
+if coerce(1, 1L) != (1L, 1L): raise TestFailed, 'coerce(1, 1L)'
+if fcmp(coerce(1L, 1.1), (1.0, 1.1)): raise TestFailed, 'coerce(1L, 1.1)'
+
+try: coerce(0.5, long("12345" * 1000))
+except OverflowError: pass
+else: raise TestFailed, 'coerce(0.5, long("12345" * 1000))'
+
+print 'compile'
+compile('print 1\n', '', 'exec')
+
+print 'complex'
+if complex(1,10) != 1+10j: raise TestFailed, 'complex(1,10)'
+if complex(1,10L) != 1+10j: raise TestFailed, 'complex(1,10L)'
+if complex(1,10.0) != 1+10j: raise TestFailed, 'complex(1,10.0)'
+if complex(1L,10) != 1+10j: raise TestFailed, 'complex(1L,10)'
+if complex(1L,10L) != 1+10j: raise TestFailed, 'complex(1L,10L)'
+if complex(1L,10.0) != 1+10j: raise TestFailed, 'complex(1L,10.0)'
+if complex(1.0,10) != 1+10j: raise TestFailed, 'complex(1.0,10)'
+if complex(1.0,10L) != 1+10j: raise TestFailed, 'complex(1.0,10L)'
+if complex(1.0,10.0) != 1+10j: raise TestFailed, 'complex(1.0,10.0)'
+if complex(3.14+0j) != 3.14+0j: raise TestFailed, 'complex(3.14)'
+if complex(3.14) != 3.14+0j: raise TestFailed, 'complex(3.14)'
+if complex(314) != 314.0+0j: raise TestFailed, 'complex(314)'
+if complex(314L) != 314.0+0j: raise TestFailed, 'complex(314L)'
+if complex(3.14+0j, 0j) != 3.14+0j: raise TestFailed, 'complex(3.14, 0j)'
+if complex(3.14, 0.0) != 3.14+0j: raise TestFailed, 'complex(3.14, 0.0)'
+if complex(314, 0) != 314.0+0j: raise TestFailed, 'complex(314, 0)'
+if complex(314L, 0L) != 314.0+0j: raise TestFailed, 'complex(314L, 0L)'
+if complex(0j, 3.14j) != -3.14+0j: raise TestFailed, 'complex(0j, 3.14j)'
+if complex(0.0, 3.14j) != -3.14+0j: raise TestFailed, 'complex(0.0, 3.14j)'
+if complex(0j, 3.14) != 3.14j: raise TestFailed, 'complex(0j, 3.14)'
+if complex(0.0, 3.14) != 3.14j: raise TestFailed, 'complex(0.0, 3.14)'
+if complex("1") != 1+0j: raise TestFailed, 'complex("1")'
+if complex("1j") != 1j: raise TestFailed, 'complex("1j")'
+
+try: complex("1", "1")
+except TypeError: pass
+else: raise TestFailed, 'complex("1", "1")'
+
+try: complex(1, "1")
+except TypeError: pass
+else: raise TestFailed, 'complex(1, "1")'
+
+if complex("  3.14+J  ") != 3.14+1j:  raise TestFailed, 'complex("  3.14+J  )"'
+if have_unicode:
+    if complex(unicode("  3.14+J  ")) != 3.14+1j:
+        raise TestFailed, 'complex(u"  3.14+J  )"'
+
+# SF bug 543840:  complex(string) accepts strings with \0
+# Fixed in 2.3.
+try:
+    complex('1+1j\0j')
+except ValueError:
+    pass
+else:
+    raise TestFailed("complex('1+1j\0j') should have raised ValueError")
+
class Z:
    """complex() fixture: converted via __complex__, which yields 3.14j."""
    def __complex__(self): return 3.14j
+z = Z()
+if complex(z) != 3.14j: raise TestFailed, 'complex(classinstance)'
+
+print 'delattr'
+import sys
+sys.spam = 1
+delattr(sys, 'spam')
+
+print 'dir'
+x = 1
+if 'x' not in dir(): raise TestFailed, 'dir()'
+import sys
+if 'modules' not in dir(sys): raise TestFailed, 'dir(sys)'
+
+print 'divmod'
+if divmod(12, 7) != (1, 5): raise TestFailed, 'divmod(12, 7)'
+if divmod(-12, 7) != (-2, 2): raise TestFailed, 'divmod(-12, 7)'
+if divmod(12, -7) != (-2, -2): raise TestFailed, 'divmod(12, -7)'
+if divmod(-12, -7) != (1, -5): raise TestFailed, 'divmod(-12, -7)'
+#
+if divmod(12L, 7L) != (1L, 5L): raise TestFailed, 'divmod(12L, 7L)'
+if divmod(-12L, 7L) != (-2L, 2L): raise TestFailed, 'divmod(-12L, 7L)'
+if divmod(12L, -7L) != (-2L, -2L): raise TestFailed, 'divmod(12L, -7L)'
+if divmod(-12L, -7L) != (1L, -5L): raise TestFailed, 'divmod(-12L, -7L)'
+#
+if divmod(12, 7L) != (1, 5L): raise TestFailed, 'divmod(12, 7L)'
+if divmod(-12, 7L) != (-2, 2L): raise TestFailed, 'divmod(-12, 7L)'
+if divmod(12L, -7) != (-2L, -2): raise TestFailed, 'divmod(12L, -7)'
+if divmod(-12L, -7) != (1L, -5): raise TestFailed, 'divmod(-12L, -7)'
+#
+if fcmp(divmod(3.25, 1.0), (3.0, 0.25)):
+    raise TestFailed, 'divmod(3.25, 1.0)'
+if fcmp(divmod(-3.25, 1.0), (-4.0, 0.75)):
+    raise TestFailed, 'divmod(-3.25, 1.0)'
+if fcmp(divmod(3.25, -1.0), (-4.0, -0.75)):
+    raise TestFailed, 'divmod(3.25, -1.0)'
+if fcmp(divmod(-3.25, -1.0), (3.0, -0.25)):
+    raise TestFailed, 'divmod(-3.25, -1.0)'
+
+print 'eval'
+if eval('1+1') != 2: raise TestFailed, 'eval(\'1+1\')'
+if eval(' 1+1\n') != 2: raise TestFailed, 'eval(\' 1+1\\n\')'
+globals = {'a': 1, 'b': 2}
+locals = {'b': 200, 'c': 300}
+if eval('a', globals) != 1:
+    raise TestFailed, "eval(1) == %s" % eval('a', globals)
+if eval('a', globals, locals) != 1:
+    raise TestFailed, "eval(2)"
+if eval('b', globals, locals) != 200:
+    raise TestFailed, "eval(3)"
+if eval('c', globals, locals) != 300:
+    raise TestFailed, "eval(4)"
+if have_unicode:
+    if eval(unicode('1+1')) != 2: raise TestFailed, 'eval(u\'1+1\')'
+    if eval(unicode(' 1+1\n')) != 2: raise TestFailed, 'eval(u\' 1+1\\n\')'
+globals = {'a': 1, 'b': 2}
+locals = {'b': 200, 'c': 300}
+if have_unicode:
+    if eval(unicode('a'), globals) != 1:
+        raise TestFailed, "eval(1) == %s" % eval(unicode('a'), globals)
+    if eval(unicode('a'), globals, locals) != 1:
+        raise TestFailed, "eval(2)"
+    if eval(unicode('b'), globals, locals) != 200:
+        raise TestFailed, "eval(3)"
+    if eval(unicode('c'), globals, locals) != 300:
+        raise TestFailed, "eval(4)"
+
+print 'execfile'
+z = 0
+f = open(TESTFN, 'w')
+f.write('z = z+1\n')
+f.write('z = z*2\n')
+f.close()
+execfile(TESTFN)
+if z != 2: raise TestFailed, "execfile(1)"
+globals['z'] = 0
+execfile(TESTFN, globals)
+if globals['z'] != 2: raise TestFailed, "execfile(1)"
+locals['z'] = 0
+execfile(TESTFN, globals, locals)
+if locals['z'] != 2: raise TestFailed, "execfile(1)"
+unlink(TESTFN)
+
+print 'filter'
+if filter(lambda c: 'a' <= c <= 'z', 'Hello World') != 'elloorld':
+    raise TestFailed, 'filter (filter a string)'
+if filter(None, [1, 'hello', [], [3], '', None, 9, 0]) != [1, 'hello', [3], 9]:
+    raise TestFailed, 'filter (remove false values)'
+if filter(lambda x: x > 0, [1, -3, 9, 0, 2]) != [1, 9, 2]:
+    raise TestFailed, 'filter (keep positives)'
class Squares:
    """Lazily computed, memoized sequence of squares 0**2 .. (max-1)**2.

    Implements the old sequence protocol (__len__/__getitem__), so it can
    be iterated by filter()/map()/zip().  len() reflects only the number of
    squares computed so far, not max.
    """
    def __init__(self, max):
        # `max` shadows the builtin but is kept: it is part of the interface.
        self.max = max
        self.sofar = []          # cache of squares computed so far

    def __len__(self):
        return len(self.sofar)

    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Grow the cache up to index i, then serve from it.
        for k in range(len(self.sofar), i + 1):
            self.sofar.append(k * k)
        return self.sofar[i]
+if filter(None, Squares(10)) != [1, 4, 9, 16, 25, 36, 49, 64, 81]:
+    raise TestFailed, 'filter(None, Squares(10))'
+if filter(lambda x: x%2, Squares(10)) != [1, 9, 25, 49, 81]:
+    raise TestFailed, 'filter(oddp, Squares(10))'
class StrSquares:
    """Like Squares, but each item is the decimal string of the square.

    Old sequence protocol (__len__/__getitem__); len() counts only the
    items computed so far.
    """
    def __init__(self, max):
        self.max = max           # exclusive upper bound on valid indices
        self.sofar = []          # cache of string squares computed so far

    def __len__(self):
        return len(self.sofar)

    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Grow the cache up to index i, then serve from it.
        for k in range(len(self.sofar), i + 1):
            self.sofar.append(str(k * k))
        return self.sofar[i]
def identity(item):
    """filter() predicate that accepts every item (always returns true)."""
    return 1
+filter(identity, Squares(5))
+
+print 'float'
+if float(3.14) != 3.14: raise TestFailed, 'float(3.14)'
+if float(314) != 314.0: raise TestFailed, 'float(314)'
+if float(314L) != 314.0: raise TestFailed, 'float(314L)'
+if float("  3.14  ") != 3.14:  raise TestFailed, 'float("  3.14  ")'
+if have_unicode:
+    if float(unicode("  3.14  ")) != 3.14:
+        raise TestFailed, 'float(u"  3.14  ")'
+    if float(unicode("  \u0663.\u0661\u0664  ",'raw-unicode-escape')) != 3.14:
+        raise TestFailed, 'float(u"  \u0663.\u0661\u0664  ")'
+
+print 'getattr'
+import sys
+if getattr(sys, 'stdout') is not sys.stdout: raise TestFailed, 'getattr'
+try:
+    getattr(sys, 1)
+except TypeError:
+    pass
+else:
+    raise TestFailed, "getattr(sys, 1) should raise an exception"
+try:
+    getattr(sys, 1, "foo")
+except TypeError:
+    pass
+else:
+    raise TestFailed, 'getattr(sys, 1, "foo") should raise an exception'
+
+print 'hasattr'
+import sys
+if not hasattr(sys, 'stdout'): raise TestFailed, 'hasattr'
+try:
+    hasattr(sys, 1)
+except TypeError:
+    pass
+else:
+    raise TestFailed, "hasattr(sys, 1) should raise an exception"
+
+print 'hash'
+hash(None)
+if not hash(1) == hash(1L) == hash(1.0): raise TestFailed, 'numeric hash()'
+hash('spam')
+hash((0,1,2,3))
+def f(): pass
+try: hash([])
+except TypeError: pass
+else: raise TestFailed, "hash([]) should raise an exception"
+try: hash({})
+except TypeError: pass
+else: raise TestFailed, "hash({}) should raise an exception"
+
+print 'hex'
+if hex(16) != '0x10': raise TestFailed, 'hex(16)'
+if hex(16L) != '0x10L': raise TestFailed, 'hex(16L)'
+if len(hex(-1)) != len(hex(sys.maxint)): raise TestFailed, 'len(hex(-1))'
+if hex(-16) not in ('0xfffffff0', '0xfffffffffffffff0'):
+    raise TestFailed, 'hex(-16)'
+if hex(-16L) != '-0x10L': raise TestFailed, 'hex(-16L)'
+
+print 'id'
+id(None)
+id(1)
+id(1L)
+id(1.0)
+id('spam')
+id((0,1,2,3))
+id([0,1,2,3])
+id({'spam': 1, 'eggs': 2, 'ham': 3})
+
+# Test input() later, together with raw_input
+
+print 'int'
+if int(314) != 314: raise TestFailed, 'int(314)'
+if int(3.14) != 3: raise TestFailed, 'int(3.14)'
+if int(314L) != 314: raise TestFailed, 'int(314L)'
+# Check that conversion from float truncates towards zero
+if int(-3.14) != -3: raise TestFailed, 'int(-3.14)'
+if int(3.9) != 3: raise TestFailed, 'int(3.9)'
+if int(-3.9) != -3: raise TestFailed, 'int(-3.9)'
+if int(3.5) != 3: raise TestFailed, 'int(3.5)'
+if int(-3.5) != -3: raise TestFailed, 'int(-3.5)'
+# Different base:
+if int("10",16) != 16L: raise TestFailed, 'int("10",16)'
+if have_unicode:
+    if int(unicode("10"),16) != 16L:
+        raise TestFailed, 'int(u"10",16)'
+# Test conversion from strings and various anomalies
+L = [
+        ('0', 0),
+        ('1', 1),
+        ('9', 9),
+        ('10', 10),
+        ('99', 99),
+        ('100', 100),
+        ('314', 314),
+        (' 314', 314),
+        ('314 ', 314),
+        ('  \t\t  314  \t\t  ', 314),
+        (`sys.maxint`, sys.maxint),
+        ('  1x', ValueError),
+        ('  1  ', 1),
+        ('  1\02  ', ValueError),
+        ('', ValueError),
+        (' ', ValueError),
+        ('  \t\t  ', ValueError)
+]
+if have_unicode:
+    L += [
+        (unicode('0'), 0),
+        (unicode('1'), 1),
+        (unicode('9'), 9),
+        (unicode('10'), 10),
+        (unicode('99'), 99),
+        (unicode('100'), 100),
+        (unicode('314'), 314),
+        (unicode(' 314'), 314),
+        (unicode('\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
+        (unicode('  \t\t  314  \t\t  '), 314),
+        (unicode('  1x'), ValueError),
+        (unicode('  1  '), 1),
+        (unicode('  1\02  '), ValueError),
+        (unicode(''), ValueError),
+        (unicode(' '), ValueError),
+        (unicode('  \t\t  '), ValueError),
+]
+for s, v in L:
+    for sign in "", "+", "-":
+        for prefix in "", " ", "\t", "  \t\t  ":
+            ss = prefix + sign + s
+            vv = v
+            if sign == "-" and v is not ValueError:
+                vv = -v
+            try:
+                if int(ss) != vv:
+                    raise TestFailed, "int(%s)" % `ss`
+            except v:
+                pass
+            except ValueError, e:
+                raise TestFailed, "int(%s) raised ValueError: %s" % (`ss`, e)
+s = `-1-sys.maxint`
+if int(s)+1 != -sys.maxint:
+    raise TestFailed, "int(%s)" % `s`
+try:
+    int(s[1:])
+except ValueError:
+    pass
+else:
+    raise TestFailed, "int(%s)" % `s[1:]` + " should raise ValueError"
+try:
+    int(1e100)
+except OverflowError:
+    pass
+else:
+    raise TestFailed("int(1e100) expected OverflowError")
+try:
+    int(-1e100)
+except OverflowError:
+    pass
+else:
+    raise TestFailed("int(-1e100) expected OverflowError")
+
+
+# SF bug 434186:  0x80000000/2 != 0x80000000>>1.
+# Worked by accident in Windows release build, but failed in debug build.
+# Failed in all Linux builds.
+x = -1-sys.maxint
+if x >> 1 != x//2:
+    raise TestFailed("x >> 1 != x/2 when x == -1-sys.maxint")
+
+try: int('123\0')
+except ValueError: pass
+else: raise TestFailed("int('123\0') didn't raise exception")
+
+print 'isinstance'
class C:
    """Base class for the isinstance()/issubclass() tests."""
    pass
class D(C):
    """Subclass of C: isinstance(d, C) and issubclass(D, C) must hold."""
    pass
class E:
    """Unrelated class: its instances must not count as C instances."""
    pass
+c = C()
+d = D()
+e = E()
+if not isinstance(c, C): raise TestFailed, 'isinstance(c, C)'
+if not isinstance(d, C): raise TestFailed, 'isinstance(d, C)'
+if isinstance(e, C): raise TestFailed, 'isinstance(e, C)'
+if isinstance(c, D): raise TestFailed, 'isinstance(c, D)'
+if isinstance('foo', E): raise TestFailed, 'isinstance("Foo", E)'
+try:
+    isinstance(E, 'foo')
+    raise TestFailed, 'isinstance(E, "foo")'
+except TypeError:
+    pass
+
+print 'issubclass'
+if not issubclass(D, C): raise TestFailed, 'issubclass(D, C)'
+if not issubclass(C, C): raise TestFailed, 'issubclass(C, C)'
+if issubclass(C, D): raise TestFailed, 'issubclass(C, D)'
+try:
+    issubclass('foo', E)
+    raise TestFailed, 'issubclass("foo", E)'
+except TypeError:
+    pass
+try:
+    issubclass(E, 'foo')
+    raise TestFailed, 'issubclass(E, "foo")'
+except TypeError:
+    pass
+
+print 'len'
+if len('123') != 3: raise TestFailed, 'len(\'123\')'
+if len(()) != 0: raise TestFailed, 'len(())'
+if len((1, 2, 3, 4)) != 4: raise TestFailed, 'len((1, 2, 3, 4))'
+if len([1, 2, 3, 4]) != 4: raise TestFailed, 'len([1, 2, 3, 4])'
+if len({}) != 0: raise TestFailed, 'len({})'
+if len({'a':1, 'b': 2}) != 2: raise TestFailed, 'len({\'a\':1, \'b\': 2})'
+
+print 'list'
+if list([]) != []: raise TestFailed, 'list([])'
+l0_3 = [0, 1, 2, 3]
+l0_3_bis = list(l0_3)
+if l0_3 != l0_3_bis or l0_3 is l0_3_bis: raise TestFailed, 'list([0, 1, 2, 3])'
+if list(()) != []: raise TestFailed, 'list(())'
+if list((0, 1, 2, 3)) != [0, 1, 2, 3]: raise TestFailed, 'list((0, 1, 2, 3))'
+if list('') != []: raise TestFailed, 'list('')'
+if list('spam') != ['s', 'p', 'a', 'm']: raise TestFailed, "list('spam')"
+
+if sys.maxint == 0x7fffffff:
+    # This test can currently only work on 32-bit machines.
+    # XXX If/when PySequence_Length() returns a ssize_t, it should be
+    # XXX re-enabled.
+    try:
+        # Verify clearing of bug #556025.
+        # This assumes that the max data size (sys.maxint) == max
+        # address size this also assumes that the address size is at
+        # least 4 bytes with 8 byte addresses, the bug is not well
+        # tested
+        #
+        # Note: This test is expected to SEGV under Cygwin 1.3.12 or
+        # earlier due to a newlib bug.  See the following mailing list
+        # thread for the details:
+
+        #     http://sources.redhat.com/ml/newlib/2002/msg00369.html
+        list(xrange(sys.maxint // 2))
+    except MemoryError:
+        pass
+    else:
+        raise TestFailed, 'list(xrange(sys.maxint / 4))'
+
+print 'long'
+if long(314) != 314L: raise TestFailed, 'long(314)'
+if long(3.14) != 3L: raise TestFailed, 'long(3.14)'
+if long(314L) != 314L: raise TestFailed, 'long(314L)'
+# Check that conversion from float truncates towards zero
+if long(-3.14) != -3L: raise TestFailed, 'long(-3.14)'
+if long(3.9) != 3L: raise TestFailed, 'long(3.9)'
+if long(-3.9) != -3L: raise TestFailed, 'long(-3.9)'
+if long(3.5) != 3L: raise TestFailed, 'long(3.5)'
+if long(-3.5) != -3L: raise TestFailed, 'long(-3.5)'
+if long("-3") != -3L: raise TestFailed, 'long("-3")'
+if have_unicode:
+    if long(unicode("-3")) != -3L:
+        raise TestFailed, 'long(u"-3")'
+# Different base:
+if long("10",16) != 16L: raise TestFailed, 'long("10",16)'
+if have_unicode:
+    if long(unicode("10"),16) != 16L:
+        raise TestFailed, 'long(u"10",16)'
+# Check conversions from string (same test set as for int(), and then some)
+LL = [
+        ('1' + '0'*20, 10L**20),
+        ('1' + '0'*100, 10L**100)
+]
+if have_unicode:
+    L+=[
+        (unicode('1') + unicode('0')*20, 10L**20),
+        (unicode('1') + unicode('0')*100, 10L**100),
+]
+for s, v in L + LL:
+    for sign in "", "+", "-":
+        for prefix in "", " ", "\t", "  \t\t  ":
+            ss = prefix + sign + s
+            vv = v
+            if sign == "-" and v is not ValueError:
+                vv = -v
+            try:
+                if long(ss) != long(vv):
+                    raise TestFailed, "long(%s)" % `ss`
+            except v:
+                pass
+            except ValueError, e:
+                raise TestFailed, "long(%s) raised ValueError: %s" % (`ss`, e)
+
+try: long('123\0')
+except ValueError: pass
+else: raise TestFailed("long('123\0') didn't raise exception")
+
+print 'map'
+if map(None, 'hello world') != ['h','e','l','l','o',' ','w','o','r','l','d']:
+    raise TestFailed, 'map(None, \'hello world\')'
+if map(None, 'abcd', 'efg') != \
+   [('a', 'e'), ('b', 'f'), ('c', 'g'), ('d', None)]:
+    raise TestFailed, 'map(None, \'abcd\', \'efg\')'
+if map(None, range(10)) != [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
+    raise TestFailed, 'map(None, range(10))'
+if map(lambda x: x*x, range(1,4)) != [1, 4, 9]:
+    raise TestFailed, 'map(lambda x: x*x, range(1,4))'
+try:
+    from math import sqrt
+except ImportError:
+    def sqrt(x):
+        return pow(x, 0.5)
+if map(lambda x: map(sqrt,x), [[16, 4], [81, 9]]) != [[4.0, 2.0], [9.0, 3.0]]:
+    raise TestFailed, 'map(lambda x: map(sqrt,x), [[16, 4], [81, 9]])'
+if map(lambda x, y: x+y, [1,3,2], [9,1,4]) != [10, 4, 6]:
+    raise TestFailed, 'map(lambda x,y: x+y, [1,3,2], [9,1,4])'
def plus(*v):
    """Return the sum of all positional arguments; 0 when called with none."""
    # Explicit accumulator kept (the builtin sum() does not exist in 2.2).
    total = 0
    for term in v:
        total = total + term
    return total
+if map(plus, [1, 3, 7]) != [1, 3, 7]:
+    raise TestFailed, 'map(plus, [1, 3, 7])'
+if map(plus, [1, 3, 7], [4, 9, 2]) != [1+4, 3+9, 7+2]:
+    raise TestFailed, 'map(plus, [1, 3, 7], [4, 9, 2])'
+if map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0]) != [1+4+1, 3+9+1, 7+2+0]:
+    raise TestFailed, 'map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])'
+if map(None, Squares(10)) != [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]:
+    raise TestFailed, 'map(None, Squares(10))'
+if map(int, Squares(10)) != [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]:
+    raise TestFailed, 'map(int, Squares(10))'
+if map(None, Squares(3), Squares(2)) != [(0,0), (1,1), (4,None)]:
+    raise TestFailed, 'map(None, Squares(3), Squares(2))'
+if map(max, Squares(3), Squares(2)) != [0, 1, 4]:
+    raise TestFailed, 'map(max, Squares(3), Squares(2))'
+
+print 'max'
+if max('123123') != '3': raise TestFailed, 'max(\'123123\')'
+if max(1, 2, 3) != 3: raise TestFailed, 'max(1, 2, 3)'
+if max((1, 2, 3, 1, 2, 3)) != 3: raise TestFailed, 'max((1, 2, 3, 1, 2, 3))'
+if max([1, 2, 3, 1, 2, 3]) != 3: raise TestFailed, 'max([1, 2, 3, 1, 2, 3])'
+#
+if max(1, 2L, 3.0) != 3.0: raise TestFailed, 'max(1, 2L, 3.0)'
+if max(1L, 2.0, 3) != 3: raise TestFailed, 'max(1L, 2.0, 3)'
+if max(1.0, 2, 3L) != 3L: raise TestFailed, 'max(1.0, 2, 3L)'
+
+print 'min'
+if min('123123') != '1': raise TestFailed, 'min(\'123123\')'
+if min(1, 2, 3) != 1: raise TestFailed, 'min(1, 2, 3)'
+if min((1, 2, 3, 1, 2, 3)) != 1: raise TestFailed, 'min((1, 2, 3, 1, 2, 3))'
+if min([1, 2, 3, 1, 2, 3]) != 1: raise TestFailed, 'min([1, 2, 3, 1, 2, 3])'
+#
+if min(1, 2L, 3.0) != 1: raise TestFailed, 'min(1, 2L, 3.0)'
+if min(1L, 2.0, 3) != 1L: raise TestFailed, 'min(1L, 2.0, 3)'
+if min(1.0, 2, 3L) != 1.0: raise TestFailed, 'min(1.0, 2, 3L)'
diff --git a/lib-python/2.2/test/test_b2.py b/lib-python/2.2/test/test_b2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_b2.py
@@ -0,0 +1,365 @@
+# Python test set -- part 4b, built-in functions n-z
+
+from test_support import *
+
+print 'oct'
+if oct(100) != '0144': raise TestFailed, 'oct(100)'
+if oct(100L) != '0144L': raise TestFailed, 'oct(100L)'
+if oct(-100) not in ('037777777634', '01777777777777777777634'):
+    raise TestFailed, 'oct(-100)'
+if oct(-100L) != '-0144L': raise TestFailed, 'oct(-100L)'
+
+print 'open'
+# NB the first 4 lines are also used to test input and raw_input, below
+fp = open(TESTFN, 'w')
+try:
+    fp.write('1+1\n')
+    fp.write('1+1\n')
+    fp.write('The quick brown fox jumps over the lazy dog')
+    fp.write('.\n')
+    fp.write('Dear John\n')
+    fp.write('XXX'*100)
+    fp.write('YYY'*100)
+finally:
+    fp.close()
+#
+fp = open(TESTFN, 'r')
+try:
+    if fp.readline(4) != '1+1\n': raise TestFailed, 'readline(4) # exact'
+    if fp.readline(4) != '1+1\n': raise TestFailed, 'readline(4) # exact'
+    if fp.readline() != 'The quick brown fox jumps over the lazy dog.\n':
+        raise TestFailed, 'readline() # default'
+    if fp.readline(4) != 'Dear': raise TestFailed, 'readline(4) # short'
+    if fp.readline(100) != ' John\n': raise TestFailed, 'readline(100)'
+    if fp.read(300) != 'XXX'*100: raise TestFailed, 'read(300)'
+    if fp.read(1000) != 'YYY'*100: raise TestFailed, 'read(1000) # truncate'
+finally:
+    fp.close()
+
+print 'ord'
+if ord(' ') != 32: raise TestFailed, 'ord(\' \')'
+if ord('A') != 65: raise TestFailed, 'ord(\'A\')'
+if ord('a') != 97: raise TestFailed, 'ord(\'a\')'
+
+print 'pow'
+if pow(0,0) != 1: raise TestFailed, 'pow(0,0)'
+if pow(0,1) != 0: raise TestFailed, 'pow(0,1)'
+if pow(1,0) != 1: raise TestFailed, 'pow(1,0)'
+if pow(1,1) != 1: raise TestFailed, 'pow(1,1)'
+#
+if pow(2,0) != 1: raise TestFailed, 'pow(2,0)'
+if pow(2,10) != 1024: raise TestFailed, 'pow(2,10)'
+if pow(2,20) != 1024*1024: raise TestFailed, 'pow(2,20)'
+if pow(2,30) != 1024*1024*1024: raise TestFailed, 'pow(2,30)'
+#
+if pow(-2,0) != 1: raise TestFailed, 'pow(-2,0)'
+if pow(-2,1) != -2: raise TestFailed, 'pow(-2,1)'
+if pow(-2,2) != 4: raise TestFailed, 'pow(-2,2)'
+if pow(-2,3) != -8: raise TestFailed, 'pow(-2,3)'
+#
+if pow(0L,0) != 1: raise TestFailed, 'pow(0L,0)'
+if pow(0L,1) != 0: raise TestFailed, 'pow(0L,1)'
+if pow(1L,0) != 1: raise TestFailed, 'pow(1L,0)'
+if pow(1L,1) != 1: raise TestFailed, 'pow(1L,1)'
+#
+if pow(2L,0) != 1: raise TestFailed, 'pow(2L,0)'
+if pow(2L,10) != 1024: raise TestFailed, 'pow(2L,10)'
+if pow(2L,20) != 1024*1024: raise TestFailed, 'pow(2L,20)'
+if pow(2L,30) != 1024*1024*1024: raise TestFailed, 'pow(2L,30)'
+#
+if pow(-2L,0) != 1: raise TestFailed, 'pow(-2L,0)'
+if pow(-2L,1) != -2: raise TestFailed, 'pow(-2L,1)'
+if pow(-2L,2) != 4: raise TestFailed, 'pow(-2L,2)'
+if pow(-2L,3) != -8: raise TestFailed, 'pow(-2L,3)'
+#
+if fcmp(pow(0.,0), 1.): raise TestFailed, 'pow(0.,0)'
+if fcmp(pow(0.,1), 0.): raise TestFailed, 'pow(0.,1)'
+if fcmp(pow(1.,0), 1.): raise TestFailed, 'pow(1.,0)'
+if fcmp(pow(1.,1), 1.): raise TestFailed, 'pow(1.,1)'
+#
+if fcmp(pow(2.,0), 1.): raise TestFailed, 'pow(2.,0)'
+if fcmp(pow(2.,10), 1024.): raise TestFailed, 'pow(2.,10)'
+if fcmp(pow(2.,20), 1024.*1024.): raise TestFailed, 'pow(2.,20)'
+if fcmp(pow(2.,30), 1024.*1024.*1024.): raise TestFailed, 'pow(2.,30)'
+#
+if fcmp(pow(-2.,0), 1.): raise TestFailed, 'pow(-2.,0)'
+if fcmp(pow(-2.,1), -2.): raise TestFailed, 'pow(-2.,1)'
+if fcmp(pow(-2.,2), 4.): raise TestFailed, 'pow(-2.,2)'
+if fcmp(pow(-2.,3), -8.): raise TestFailed, 'pow(-2.,3)'
+
+from types import FloatType
+for x in 2, 2L, 2.0:
+    for y in 10, 10L, 10.0:
+        for z in 1000, 1000L, 1000.0:
+            if isinstance(x, FloatType) or \
+               isinstance(y, FloatType) or \
+               isinstance(z, FloatType):
+                try:
+                    pow(x, y, z)
+                except TypeError:
+                    pass
+                else:
+                    raise TestFailed("3-arg float pow(%s, %s, %s) should "
+                                     "have raised TypeError" % (x, y, z))
+            else:
+                if fcmp(pow(x, y, z), 24.0):
+                    raise TestFailed, 'pow(%s, %s, %s)' % (x, y, z)
+
+print 'range'
+if range(3) != [0, 1, 2]: raise TestFailed, 'range(3)'
+if range(1, 5) != [1, 2, 3, 4]: raise TestFailed, 'range(1, 5)'
+if range(0) != []: raise TestFailed, 'range(0)'
+if range(-3) != []: raise TestFailed, 'range(-3)'
+if range(1, 10, 3) != [1, 4, 7]: raise TestFailed, 'range(1, 10, 3)'
+if range(5, -5, -3) != [5, 2, -1, -4]: raise TestFailed, 'range(5, -5, -3)'
+
+print 'input and raw_input'
+import sys
+fp = open(TESTFN, 'r')
+savestdin = sys.stdin
+try:
+    sys.stdin = fp
+    if input() != 2: raise TestFailed, 'input()'
+    if input('testing\n') != 2: raise TestFailed, 'input()'
+    if raw_input() != 'The quick brown fox jumps over the lazy dog.':
+        raise TestFailed, 'raw_input()'
+    if raw_input('testing\n') != 'Dear John':
+        raise TestFailed, 'raw_input(\'testing\\n\')'
+finally:
+    sys.stdin = savestdin
+    fp.close()
+
+print 'reduce'
+if reduce(lambda x, y: x+y, ['a', 'b', 'c'], '') != 'abc':
+    raise TestFailed, 'reduce(): implode a string'
+if reduce(lambda x, y: x+y,
+          [['a', 'c'], [], ['d', 'w']], []) != ['a','c','d','w']:
+    raise TestFailed, 'reduce(): append'
+if reduce(lambda x, y: x*y, range(2,8), 1) != 5040:
+    raise TestFailed, 'reduce(): compute 7!'
+if reduce(lambda x, y: x*y, range(2,21), 1L) != 2432902008176640000L:
+    raise TestFailed, 'reduce(): compute 20!, use long'
class Squares:
    """Lazily computed, memoized sequence of squares 0**2 .. (max-1)**2.

    Duplicate of the fixture in test_b1; implements the old sequence
    protocol (__len__/__getitem__) for the reduce() tests.  len() reflects
    only the number of squares computed so far.
    """
    def __init__(self, max):
        # `max` shadows the builtin but is kept: it is part of the interface.
        self.max = max
        self.sofar = []          # cache of squares computed so far

    def __len__(self):
        return len(self.sofar)

    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Grow the cache up to index i, then serve from it.
        for k in range(len(self.sofar), i + 1):
            self.sofar.append(k * k)
        return self.sofar[i]
+if reduce(lambda x, y: x+y, Squares(10)) != 285:
+    raise TestFailed, 'reduce(<+>, Squares(10))'
+if reduce(lambda x, y: x+y, Squares(10), 0) != 285:
+    raise TestFailed, 'reduce(<+>, Squares(10), 0)'
+if reduce(lambda x, y: x+y, Squares(0), 0) != 0:
+    raise TestFailed, 'reduce(<+>, Squares(0), 0)'
+
+
+print 'reload'
+import marshal
+reload(marshal)
+import string
+reload(string)
+## import sys
+## try: reload(sys)
+## except ImportError: pass
+## else: raise TestFailed, 'reload(sys) should fail'
+
+print 'repr'
+if repr('') != '\'\'': raise TestFailed, 'repr(\'\')'
+if repr(0) != '0': raise TestFailed, 'repr(0)'
+if repr(0L) != '0L': raise TestFailed, 'repr(0L)'
+if repr(()) != '()': raise TestFailed, 'repr(())'
+if repr([]) != '[]': raise TestFailed, 'repr([])'
+if repr({}) != '{}': raise TestFailed, 'repr({})'
+
+print 'round'
+if round(0.0) != 0.0: raise TestFailed, 'round(0.0)'
+if round(1.0) != 1.0: raise TestFailed, 'round(1.0)'
+if round(10.0) != 10.0: raise TestFailed, 'round(10.0)'
+if round(1000000000.0) != 1000000000.0:
+    raise TestFailed, 'round(1000000000.0)'
+if round(1e20) != 1e20: raise TestFailed, 'round(1e20)'
+
+if round(-1.0) != -1.0: raise TestFailed, 'round(-1.0)'
+if round(-10.0) != -10.0: raise TestFailed, 'round(-10.0)'
+if round(-1000000000.0) != -1000000000.0:
+    raise TestFailed, 'round(-1000000000.0)'
+if round(-1e20) != -1e20: raise TestFailed, 'round(-1e20)'
+
+if round(0.1) != 0.0: raise TestFailed, 'round(0.0)'
+if round(1.1) != 1.0: raise TestFailed, 'round(1.0)'
+if round(10.1) != 10.0: raise TestFailed, 'round(10.0)'
+if round(1000000000.1) != 1000000000.0:
+    raise TestFailed, 'round(1000000000.0)'
+
+if round(-1.1) != -1.0: raise TestFailed, 'round(-1.0)'
+if round(-10.1) != -10.0: raise TestFailed, 'round(-10.0)'
+if round(-1000000000.1) != -1000000000.0:
+    raise TestFailed, 'round(-1000000000.0)'
+
+if round(0.9) != 1.0: raise TestFailed, 'round(0.9)'
+if round(9.9) != 10.0: raise TestFailed, 'round(9.9)'
+if round(999999999.9) != 1000000000.0:
+    raise TestFailed, 'round(999999999.9)'
+
+if round(-0.9) != -1.0: raise TestFailed, 'round(-0.9)'
+if round(-9.9) != -10.0: raise TestFailed, 'round(-9.9)'
+if round(-999999999.9) != -1000000000.0:
+    raise TestFailed, 'round(-999999999.9)'
+
+print 'setattr'
+import sys
+setattr(sys, 'spam', 1)
+if sys.spam != 1: raise TestFailed, 'setattr(sys, \'spam\', 1)'
+try:
+    setattr(sys, 1, 'spam')
+except TypeError:
+    pass
+else:
+    raise TestFailed, "setattr(sys, 1, 'spam') should raise exception"
+
+print 'str'
+if str('') != '': raise TestFailed, 'str(\'\')'
+if str(0) != '0': raise TestFailed, 'str(0)'
+if str(0L) != '0': raise TestFailed, 'str(0L)'
+if str(()) != '()': raise TestFailed, 'str(())'
+if str([]) != '[]': raise TestFailed, 'str([])'
+if str({}) != '{}': raise TestFailed, 'str({})'
+
+print 'tuple'
+if tuple(()) != (): raise TestFailed, 'tuple(())'
+t0_3 = (0, 1, 2, 3)
+t0_3_bis = tuple(t0_3)
+if t0_3 is not t0_3_bis: raise TestFailed, 'tuple((0, 1, 2, 3))'
+if tuple([]) != (): raise TestFailed, 'tuple([])'
+if tuple([0, 1, 2, 3]) != (0, 1, 2, 3): raise TestFailed, 'tuple([0, 1, 2, 3])'
+if tuple('') != (): raise TestFailed, 'tuple('')'
+if tuple('spam') != ('s', 'p', 'a', 'm'): raise TestFailed, "tuple('spam')"
+
+print 'type'
+if type('') != type('123') or type('') == type(()):
+    raise TestFailed, 'type()'
+
+print 'vars'
+a = b = None
+a = vars().keys()
+b = dir()
+a.sort()
+b.sort()
+if a != b: raise TestFailed, 'vars()'
+import sys
+a = vars(sys).keys()
+b = dir(sys)
+a.sort()
+b.sort()
+if a != b: raise TestFailed, 'vars(sys)'
def f0():
    """vars() inside a function with no local bindings must be an empty dict."""
    if vars() != {}: raise TestFailed, 'vars() in f0()'
+f0()
def f2():
    """vars() must contain exactly the locals bound so far in this frame."""
    f0()
    a = 1
    b = 2
    if vars() != {'a': a, 'b': b}: raise TestFailed, 'vars() in f2()'
+f2()
+
+print 'xrange'
+import warnings
+warnings.filterwarnings('ignore', r".*xrange", DeprecationWarning)
+if tuple(xrange(10)) != tuple(range(10)): raise TestFailed, 'xrange(10)'
+if tuple(xrange(5,10)) != tuple(range(5,10)): raise TestFailed, 'xrange(5,10)'
+if tuple(xrange(0,10,2)) != tuple(range(0,10,2)):
+    raise TestFailed, 'xrange(0,10,2)'
+r = xrange(10)
+if r.tolist() != range(10): raise TestFailed, 'xrange(10).tolist()'
+if r.start != 0: raise TestFailed, 'xrange(10).start'
+if r.stop != 10: raise TestFailed, 'xrange(10).stop'
+if r.step != 1: raise TestFailed, 'xrange(10).step'
+r = xrange(3, 10)
+if r.tolist() != range(3, 10): raise TestFailed, 'xrange(3, 10).tolist()'
+if r.start != 3: raise TestFailed, 'xrange(3, 10).start'
+if r.stop != 10: raise TestFailed, 'xrange(3, 10).stop'
+if r.step != 1: raise TestFailed, 'xrange(3, 10).step'
+r = xrange(3, 10, 2)
+if r.tolist() != range(3, 10, 2): raise TestFailed, 'xrange(3, 10, 2).tolist()'
+if r.start != 3: raise TestFailed, 'xrange(3, 10, 2).start'
+if r.stop != 11: raise TestFailed, 'xrange(3, 10, 2).stop'
+if r.step != 2: raise TestFailed, 'xrange(3, 10, 2).step'
+r = xrange(10, 3, -1)
+if r.tolist() != range(10, 3, -1):
+    raise TestFailed, 'xrange(10, 3, -1).tolist()'
+if r.start != 10: raise TestFailed, 'xrange(10, 3, -1).start'
+if r.stop != 3: raise TestFailed, 'xrange(10, 3, -1).stop'
+if r.step != -1: raise TestFailed, 'xrange(10, 3, -1).step'
+# regression tests for SourceForge bug #221965
def _range_test(r):
    """Regression check for SF bug #221965: `in` tests on an xrange object.

    Requires a non-empty range (start != stop).  Endpoint behavior must
    match range(): stop and stop+step are excluded, start and stop-step
    are included.
    """
    verify(r.start != r.stop, 'Test not valid for passed-in xrange object.')
    if r.stop in r:
        raise TestFailed, 'r.stop in ' + `r`
    if r.stop-r.step not in r:
        raise TestFailed, 'r.stop-r.step not in ' + `r`
    if r.start not in r:
        raise TestFailed, 'r.start not in ' + `r`
    if r.stop+r.step in r:
        raise TestFailed, 'r.stop+r.step in ' + `r`
+_range_test(xrange(10))
+_range_test(xrange(9, -1, -1))
+_range_test(xrange(0, 10, 2))
+
+print 'zip'
+a = (1, 2, 3)
+b = (4, 5, 6)
+t = [(1, 4), (2, 5), (3, 6)]
+if zip(a, b) != t: raise TestFailed, 'zip(a, b) - same size, both tuples'
+b = [4, 5, 6]
+if zip(a, b) != t: raise TestFailed, 'zip(a, b) - same size, tuple/list'
+b = (4, 5, 6, 7)
+if zip(a, b) != t: raise TestFailed, 'zip(a, b) - b is longer'
class I:
    """zip() fixture: old-protocol sequence yielding 4, 5, 6.

    Iteration via __getitem__ ends when IndexError is raised, i.e. past
    index 2.
    """
    def __getitem__(self, i):
        # Valid indices are exactly 0, 1, 2.
        if not 0 <= i <= 2:
            raise IndexError
        return 4 + i
+if zip(a, I()) != t: raise TestFailed, 'zip(a, b) - b is instance'
+exc = 0
+try:
+    zip()
+except TypeError:
+    exc = 1
+except:
+    e = sys.exc_info()[0]
+    raise TestFailed, 'zip() - no args, expected TypeError, got %s' % e
+if not exc:
+    raise TestFailed, 'zip() - no args, missing expected TypeError'
+
+exc = 0
+try:
+    zip(None)
+except TypeError:
+    exc = 1
+except:
+    e = sys.exc_info()[0]
+    raise TestFailed, 'zip(None) - expected TypeError, got %s' % e
+if not exc:
+    raise TestFailed, 'zip(None) - missing expected TypeError'
class G:
    """zip() fixture with no __getitem__: zip(a, G()) must raise TypeError."""
    pass
+exc = 0
+try:
+    zip(a, G())
+except TypeError:
+    exc = 1
+except:
+    e = sys.exc_info()[0]
+    raise TestFailed, 'zip(a, b) - b instance w/o __getitem__'
+if not exc:
+    raise TestFailed, 'zip(a, b) - missing expected TypeError'
+
+
+# Epilogue -- unlink the temp file
+
+unlink(TESTFN)
diff --git a/lib-python/2.2/test/test_base64.py b/lib-python/2.2/test/test_base64.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_base64.py
@@ -0,0 +1,53 @@
+import unittest
+import test_support
+import base64
+from binascii import Error as binascii_error
+
+class Base64TestCase(unittest.TestCase):
+    def test_encode_string(self):
+        """Testing encode string"""
+        test_support.verify(base64.encodestring("www.python.org") ==
+            "d3d3LnB5dGhvbi5vcmc=\n",
+            reason="www.python.org encodestring failed")
+        test_support.verify(base64.encodestring("a") ==
+            "YQ==\n",
+            reason="a encodestring failed")
+        test_support.verify(base64.encodestring("ab") ==
+            "YWI=\n",
+            reason="ab encodestring failed")
+        test_support.verify(base64.encodestring("abc") ==
+            "YWJj\n",
+            reason="abc encodestring failed")
+        test_support.verify(base64.encodestring("") ==
+            "",
+            reason="null encodestring failed")
+        test_support.verify(base64.encodestring(
+            "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}") ==
+            "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n",
+            reason = "long encodestring failed")
+
+    def test_decode_string(self):
+        """Testing decode string"""
+        test_support.verify(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n") ==
+            "www.python.org",
+            reason="www.python.org decodestring failed")
+        test_support.verify(base64.decodestring("YQ==\n") ==
+            "a",
+            reason="a decodestring failed")
+        test_support.verify(base64.decodestring("YWI=\n") ==
+            "ab",
+            reason="ab decodestring failed")
+        test_support.verify(base64.decodestring("YWJj\n") ==
+            "abc",
+            reason="abc decodestring failed")
+        test_support.verify(base64.decodestring(
+            "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n") ==
+            "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}",
+            reason = "long decodestring failed")
+        test_support.verify(base64.decodestring('') == '')
+
+def test_main():
+    test_support.run_unittest(Base64TestCase)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_bastion.py b/lib-python/2.2/test/test_bastion.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_bastion.py
@@ -0,0 +1,3 @@
+##import Bastion
+##
+##Bastion._test()
diff --git a/lib-python/2.2/test/test_binascii.py b/lib-python/2.2/test/test_binascii.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_binascii.py
@@ -0,0 +1,119 @@
+"""Test the binascii C module."""
+
+from test_support import verify, verbose
+import binascii
+
+# Show module doc string
+print binascii.__doc__
+
+# Show module exceptions
+print binascii.Error
+print binascii.Incomplete
+
+# Check presence and display doc strings of all functions
+funcs = []
+for suffix in "base64", "hqx", "uu":
+    prefixes = ["a2b_", "b2a_"]
+    if suffix == "hqx":
+        prefixes.extend(["crc_", "rlecode_", "rledecode_"])
+    for prefix in prefixes:
+        name = prefix + suffix
+        funcs.append(getattr(binascii, name))
+for func in funcs:
+    print "%-15s: %s" % (func.__name__, func.__doc__)
+
+# Create binary test data
+testdata = "The quick brown fox jumps over the lazy dog.\r\n"
+for i in range(256):
+    # Be slow so we don't depend on other modules
+    testdata = testdata + chr(i)
+testdata = testdata + "\r\nHello world.\n"
+
+# Test base64 with valid data
+print "base64 test"
+MAX_BASE64 = 57
+lines = []
+for i in range(0, len(testdata), MAX_BASE64):
+    b = testdata[i:i+MAX_BASE64]
+    a = binascii.b2a_base64(b)
+    lines.append(a)
+    print a,
+res = ""
+for line in lines:
+    b = binascii.a2b_base64(line)
+    res = res + b
+verify(res == testdata)
+
+# Test base64 with random invalid characters sprinkled throughout
+# (This requires a new version of binascii.)
+fillers = ""
+valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
+for i in range(256):
+    c = chr(i)
+    if c not in valid:
+        fillers = fillers + c
+def addnoise(line):
+    noise = fillers
+    ratio = len(line) // len(noise)
+    res = ""
+    while line and noise:
+        if len(line) // len(noise) > ratio:
+            c, line = line[0], line[1:]
+        else:
+            c, noise = noise[0], noise[1:]
+        res = res + c
+    return res + noise + line
+res = ""
+for line in map(addnoise, lines):
+    b = binascii.a2b_base64(line)
+    res = res + b
+verify(res == testdata)
+
+# Test base64 with just invalid characters, which should return
+# empty strings.
+verify(binascii.a2b_base64(fillers) == '')
+
+# Test uu
+print "uu test"
+MAX_UU = 45
+lines = []
+for i in range(0, len(testdata), MAX_UU):
+    b = testdata[i:i+MAX_UU]
+    a = binascii.b2a_uu(b)
+    lines.append(a)
+    print a,
+res = ""
+for line in lines:
+    b = binascii.a2b_uu(line)
+    res = res + b
+verify(res == testdata)
+
+# Test crc32()
+crc = binascii.crc32("Test the CRC-32 of")
+crc = binascii.crc32(" this string.", crc)
+if crc != 1571220330:
+    print "binascii.crc32() failed."
+
+# The hqx test is in test_binhex.py
+
+# test hexlification
+s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000'
+t = binascii.b2a_hex(s)
+u = binascii.a2b_hex(t)
+if s != u:
+    print 'binascii hexlification failed'
+try:
+    binascii.a2b_hex(t[:-1])
+except TypeError:
+    pass
+else:
+    print 'expected TypeError not raised'
+try:
+    binascii.a2b_hex(t[:-1] + 'q')
+except TypeError:
+    pass
+else:
+    print 'expected TypeError not raised'
+
+# Verify the treatment of Unicode strings
+verify(binascii.hexlify(u'a') == '61', "hexlify failed for Unicode")
diff --git a/lib-python/2.2/test/test_binhex.py b/lib-python/2.2/test/test_binhex.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_binhex.py
@@ -0,0 +1,50 @@
+#! /usr/bin/env python
+"""Test script for the binhex C module
+
+   Uses the mechanism of the python binhex module
+   Based on an original test by Roger E. Masse.
+"""
+import binhex
+import os
+import tempfile
+import test_support
+import unittest
+
+
+class BinHexTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.fname1 = tempfile.mktemp()
+        self.fname2 = tempfile.mktemp()
+
+    def tearDown(self):
+        try: os.unlink(self.fname1)
+        except OSError: pass
+
+        try: os.unlink(self.fname2)
+        except OSError: pass
+
+    DATA = 'Jack is my hero'
+
+    def test_binhex(self):
+        f = open(self.fname1, 'w')
+        f.write(self.DATA)
+        f.close()
+
+        binhex.binhex(self.fname1, self.fname2)
+
+        binhex.hexbin(self.fname2, self.fname1)
+
+        f = open(self.fname1, 'r')
+        finish = f.readline()
+        f.close()
+
+        self.assertEqual(self.DATA, finish)
+
+
+def test_main():
+    test_support.run_unittest(BinHexTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_binop.py b/lib-python/2.2/test/test_binop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_binop.py
@@ -0,0 +1,328 @@
+"""Tests for binary operators on subtypes of built-in types."""
+
+import test_support
+import unittest
+
+def gcd(a, b):
+    """Greatest common divisor using Euclid's algorithm."""
+    while a:
+        a, b = b%a, a
+    return b
+
+def isint(x):
+    """Test whether an object is an instance of int or long."""
+    return isinstance(x, int) or isinstance(x, long)
+
+def isnum(x):
+    """Test whether an object is an instance of a built-in numeric type."""
+    for T in int, long, float, complex:
+        if isinstance(x, T):
+            return 1
+    return 0
+
+def isRat(x):
+    """Test whether an object is an instance of the Rat class."""
+    return isinstance(x, Rat)
+
+class Rat(object):
+
+    """Rational number implemented as a normalized pair of longs."""
+
+    __slots__ = ['_Rat__num', '_Rat__den']
+
+    def __init__(self, num=0L, den=1L):
+        """Constructor: Rat([num[, den]]).
+
+        The arguments must be ints or longs, and default to (0, 1)."""
+        if not isint(num):
+            raise TypeError, "Rat numerator must be int or long (%r)" % num
+        if not isint(den):
+            raise TypeError, "Rat denominator must be int or long (%r)" % den
+        # But the zero is always on
+        if den == 0:
+            raise ZeroDivisionError, "zero denominator"
+        g = gcd(den, num)
+        self.__num = long(num//g)
+        self.__den = long(den//g)
+
+    def _get_num(self):
+        """Accessor function for read-only 'num' attribute of Rat."""
+        return self.__num
+    num = property(_get_num, None)
+
+    def _get_den(self):
+        """Accessor function for read-only 'den' attribute of Rat."""
+        return self.__den
+    den = property(_get_den, None)
+
+    def __repr__(self):
+        """Convert a Rat to a string resembling a Rat constructor call."""
+        return "Rat(%d, %d)" % (self.__num, self.__den)
+
+    def __str__(self):
+        """Convert a Rat to a string resembling a decimal numeric value."""
+        return str(float(self))
+
+    def __float__(self):
+        """Convert a Rat to a float."""
+        return self.__num*1.0/self.__den
+
+    def __int__(self):
+        """Convert a Rat to an int; self.den must be 1."""
+        if self.__den == 1:
+            try:
+                return int(self.__num)
+            except OverflowError:
+                raise OverflowError, ("%s too large to convert to int" %
+                                      repr(self))
+        raise ValueError, "can't convert %s to int" % repr(self)
+
+    def __long__(self):
+        """Convert a Rat to a long; self.den must be 1."""
+        if self.__den == 1:
+            return long(self.__num)
+        raise ValueError, "can't convert %s to long" % repr(self)
+
+    def __add__(self, other):
+        """Add two Rats, or a Rat and a number."""
+        if isint(other):
+            other = Rat(other)
+        if isRat(other):
+            return Rat(self.__num*other.__den + other.__num*self.__den,
+                       self.__den*other.__den)
+        if isnum(other):
+            return float(self) + other
+        return NotImplemented
+
+    __radd__ = __add__
+
+    def __sub__(self, other):
+        """Subtract two Rats, or a Rat and a number."""
+        if isint(other):
+            other = Rat(other)
+        if isRat(other):
+            return Rat(self.__num*other.__den - other.__num*self.__den,
+                       self.__den*other.__den)
+        if isnum(other):
+            return float(self) - other
+        return NotImplemented
+
+    def __rsub__(self, other):
+        """Subtract two Rats, or a Rat and a number (reversed args)."""
+        if isint(other):
+            other = Rat(other)
+        if isRat(other):
+            return Rat(other.__num*self.__den - self.__num*other.__den,
+                       self.__den*other.__den)
+        if isnum(other):
+            return other - float(self)
+        return NotImplemented
+
+    def __mul__(self, other):
+        """Multiply two Rats, or a Rat and a number."""
+        if isRat(other):
+            return Rat(self.__num*other.__num, self.__den*other.__den)
+        if isint(other):
+            return Rat(self.__num*other, self.__den)
+        if isnum(other):
+            return float(self)*other
+        return NotImplemented
+
+    __rmul__ = __mul__
+
+    def __truediv__(self, other):
+        """Divide two Rats, or a Rat and a number."""
+        if isRat(other):
+            return Rat(self.__num*other.__den, self.__den*other.__num)
+        if isint(other):
+            return Rat(self.__num, self.__den*other)
+        if isnum(other):
+            return float(self) / other
+        return NotImplemented
+
+    __div__ = __truediv__
+
+    def __rtruediv__(self, other):
+        """Divide two Rats, or a Rat and a number (reversed args)."""
+        if isRat(other):
+            return Rat(other.__num*self.__den, other.__den*self.__num)
+        if isint(other):
+            return Rat(other*self.__den, self.__num)
+        if isnum(other):
+            return other / float(self)
+        return NotImplemented
+
+    __rdiv__ = __rtruediv__
+
+    def __floordiv__(self, other):
+        """Divide two Rats, returning the floored result."""
+        if isint(other):
+            other = Rat(other)
+        elif not isRat(other):
+            return NotImplemented
+        x = self/other
+        return x.__num // x.__den
+
+    def __rfloordiv__(self, other):
+        """Divide two Rats, returning the floored result (reversed args)."""
+        x = other/self
+        return x.__num // x.__den
+
+    def __divmod__(self, other):
+        """Divide two Rats, returning quotient and remainder."""
+        if isint(other):
+            other = Rat(other)
+        elif not isRat(other):
+            return NotImplemented
+        x = self//other
+        return (x, self - other * x)
+
+    def __rdivmod__(self, other):
+        """Divide two Rats, returning quotient and remainder (reversed args)."""
+        if isint(other):
+            other = Rat(other)
+        elif not isRat(other):
+            return NotImplemented
+        return divmod(other, self)
+
+    def __mod__(self, other):
+        """Take one Rat modulo another."""
+        return divmod(self, other)[1]
+
+    def __rmod__(self, other):
+        """Take one Rat modulo another (reversed args)."""
+        return divmod(other, self)[1]
+
+    def __eq__(self, other):
+        """Compare two Rats for equality."""
+        if isint(other):
+            return self.__den == 1 and self.__num == other
+        if isRat(other):
+            return self.__num == other.__num and self.__den == other.__den
+        if isnum(other):
+            return float(self) == other
+        return NotImplemented
+
+    def __ne__(self, other):
+        """Compare two Rats for inequality."""
+        return not self == other
+
+class RatTestCase(unittest.TestCase):
+    """Unit tests for Rat class and its support utilities."""
+
+    def test_gcd(self):
+        self.assertEqual(gcd(10, 12), 2)
+        self.assertEqual(gcd(10, 15), 5)
+        self.assertEqual(gcd(10, 11), 1)
+        self.assertEqual(gcd(100, 15), 5)
+        self.assertEqual(gcd(-10, 2), -2)
+        self.assertEqual(gcd(10, -2), 2)
+        self.assertEqual(gcd(-10, -2), -2)
+        for i in range(1, 20):
+            for j in range(1, 20):
+                self.assert_(gcd(i, j) > 0)
+                self.assert_(gcd(-i, j) < 0)
+                self.assert_(gcd(i, -j) > 0)
+                self.assert_(gcd(-i, -j) < 0)
+
+    def test_constructor(self):
+        a = Rat(10, 15)
+        self.assertEqual(a.num, 2)
+        self.assertEqual(a.den, 3)
+        a = Rat(10L, 15L)
+        self.assertEqual(a.num, 2)
+        self.assertEqual(a.den, 3)
+        a = Rat(10, -15)
+        self.assertEqual(a.num, -2)
+        self.assertEqual(a.den, 3)
+        a = Rat(-10, 15)
+        self.assertEqual(a.num, -2)
+        self.assertEqual(a.den, 3)
+        a = Rat(-10, -15)
+        self.assertEqual(a.num, 2)
+        self.assertEqual(a.den, 3)
+        a = Rat(7)
+        self.assertEqual(a.num, 7)
+        self.assertEqual(a.den, 1)
+        try:
+            a = Rat(1, 0)
+        except ZeroDivisionError:
+            pass
+        else:
+            self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
+        for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
+            try:
+                a = Rat(bad)
+            except TypeError:
+                pass
+            else:
+                self.fail("Rat(%r) didn't raise TypeError" % bad)
+            try:
+                a = Rat(1, bad)
+            except TypeError:
+                pass
+            else:
+                self.fail("Rat(1, %r) didn't raise TypeError" % bad)
+
+    def test_add(self):
+        self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
+        self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
+        self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
+        self.assertEqual(1.0 + Rat(1, 2), 1.5)
+        self.assertEqual(Rat(1, 2) + 1.0, 1.5)
+
+    def test_sub(self):
+        self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
+        self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
+        self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
+        self.assertEqual(Rat(3, 2) - 1.0, 0.5)
+        self.assertEqual(1.0 - Rat(1, 2), 0.5)
+
+    def test_mul(self):
+        self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
+        self.assertEqual(Rat(10, 3) * 3, 10)
+        self.assertEqual(3 * Rat(10, 3), 10)
+        self.assertEqual(Rat(10, 5) * 0.5, 1.0)
+        self.assertEqual(0.5 * Rat(10, 5), 1.0)
+
+    def test_div(self):
+        self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
+        self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
+        self.assertEqual(2 / Rat(5), Rat(2, 5))
+        self.assertEqual(3.0 * Rat(1, 2), 1.5)
+        self.assertEqual(Rat(1, 2) * 3.0, 1.5)
+
+    def test_floordiv(self):
+        self.assertEqual(Rat(10) // Rat(4), 2)
+        self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
+        self.assertEqual(Rat(10) // 4, 2)
+        self.assertEqual(10 // Rat(4), 2)
+
+    def test_eq(self):
+        self.assertEqual(Rat(10), Rat(20, 2))
+        self.assertEqual(Rat(10), 10)
+        self.assertEqual(10, Rat(10))
+        self.assertEqual(Rat(10), 10.0)
+        self.assertEqual(10.0, Rat(10))
+
+    def test_future_div(self):
+        exec future_test
+
+    # XXX Ran out of steam; TO DO: divmod, div, future division
+
+future_test = """
+from __future__ import division
+self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
+self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
+self.assertEqual(2 / Rat(5), Rat(2, 5))
+self.assertEqual(3.0 * Rat(1, 2), 1.5)
+self.assertEqual(Rat(1, 2) * 3.0, 1.5)
+self.assertEqual(eval('1/2'), 0.5)
+"""
+
+def test_main():
+    test_support.run_unittest(RatTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_bisect.py b/lib-python/2.2/test/test_bisect.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_bisect.py
@@ -0,0 +1,127 @@
+from test_support import TestFailed
+
+import bisect
+import sys
+
+nerrors = 0
+
+def check_bisect(func, list, elt, expected):
+    global nerrors
+    got = func(list, elt)
+    if got != expected:
+        print >> sys.stderr, \
+            "expected %s(%s, %s) -> %s, but got %s" % (func.__name__,
+                                                       list,
+                                                       elt,
+                                                       expected,
+                                                       got)
+        nerrors += 1
+
+# XXX optional slice arguments need tests.
+
+check_bisect(bisect.bisect_right, [], 1, 0)
+check_bisect(bisect.bisect_right, [1], 0, 0)
+check_bisect(bisect.bisect_right, [1], 1, 1)
+check_bisect(bisect.bisect_right, [1], 2, 1)
+check_bisect(bisect.bisect_right, [1, 1], 0, 0)
+check_bisect(bisect.bisect_right, [1, 1], 1, 2)
+check_bisect(bisect.bisect_right, [1, 1], 2, 2)
+check_bisect(bisect.bisect_right, [1, 1, 1], 0, 0)
+check_bisect(bisect.bisect_right, [1, 1, 1], 1, 3)
+check_bisect(bisect.bisect_right, [1, 1, 1], 2, 3)
+check_bisect(bisect.bisect_right, [1, 1, 1, 1], 0, 0)
+check_bisect(bisect.bisect_right, [1, 1, 1, 1], 1, 4)
+check_bisect(bisect.bisect_right, [1, 1, 1, 1], 2, 4)
+check_bisect(bisect.bisect_right, [1, 2], 0, 0)
+check_bisect(bisect.bisect_right, [1, 2], 1, 1)
+check_bisect(bisect.bisect_right, [1, 2], 1.5, 1)
+check_bisect(bisect.bisect_right, [1, 2], 2, 2)
+check_bisect(bisect.bisect_right, [1, 2], 3, 2)
+check_bisect(bisect.bisect_right, [1, 1, 2, 2], 0, 0)
+check_bisect(bisect.bisect_right, [1, 1, 2, 2], 1, 2)
+check_bisect(bisect.bisect_right, [1, 1, 2, 2], 1.5, 2)
+check_bisect(bisect.bisect_right, [1, 1, 2, 2], 2, 4)
+check_bisect(bisect.bisect_right, [1, 1, 2, 2], 3, 4)
+check_bisect(bisect.bisect_right, [1, 2, 3], 0, 0)
+check_bisect(bisect.bisect_right, [1, 2, 3], 1, 1)
+check_bisect(bisect.bisect_right, [1, 2, 3], 1.5, 1)
+check_bisect(bisect.bisect_right, [1, 2, 3], 2, 2)
+check_bisect(bisect.bisect_right, [1, 2, 3], 2.5, 2)
+check_bisect(bisect.bisect_right, [1, 2, 3], 3, 3)
+check_bisect(bisect.bisect_right, [1, 2, 3], 4, 3)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 1)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 3)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 6)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 10)
+check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
+
+check_bisect(bisect.bisect_left, [], 1, 0)
+check_bisect(bisect.bisect_left, [1], 0, 0)
+check_bisect(bisect.bisect_left, [1], 1, 0)
+check_bisect(bisect.bisect_left, [1], 2, 1)
+check_bisect(bisect.bisect_left, [1, 1], 0, 0)
+check_bisect(bisect.bisect_left, [1, 1], 1, 0)
+check_bisect(bisect.bisect_left, [1, 1], 2, 2)
+check_bisect(bisect.bisect_left, [1, 1, 1], 0, 0)
+check_bisect(bisect.bisect_left, [1, 1, 1], 1, 0)
+check_bisect(bisect.bisect_left, [1, 1, 1], 2, 3)
+check_bisect(bisect.bisect_left, [1, 1, 1, 1], 0, 0)
+check_bisect(bisect.bisect_left, [1, 1, 1, 1], 1, 0)
+check_bisect(bisect.bisect_left, [1, 1, 1, 1], 2, 4)
+check_bisect(bisect.bisect_left, [1, 2], 0, 0)
+check_bisect(bisect.bisect_left, [1, 2], 1, 0)
+check_bisect(bisect.bisect_left, [1, 2], 1.5, 1)
+check_bisect(bisect.bisect_left, [1, 2], 2, 1)
+check_bisect(bisect.bisect_left, [1, 2], 3, 2)
+check_bisect(bisect.bisect_left, [1, 1, 2, 2], 0, 0)
+check_bisect(bisect.bisect_left, [1, 1, 2, 2], 1, 0)
+check_bisect(bisect.bisect_left, [1, 1, 2, 2], 1.5, 2)
+check_bisect(bisect.bisect_left, [1, 1, 2, 2], 2, 2)
+check_bisect(bisect.bisect_left, [1, 1, 2, 2], 3, 4)
+check_bisect(bisect.bisect_left, [1, 2, 3], 0, 0)
+check_bisect(bisect.bisect_left, [1, 2, 3], 1, 0)
+check_bisect(bisect.bisect_left, [1, 2, 3], 1.5, 1)
+check_bisect(bisect.bisect_left, [1, 2, 3], 2, 1)
+check_bisect(bisect.bisect_left, [1, 2, 3], 2.5, 2)
+check_bisect(bisect.bisect_left, [1, 2, 3], 3, 2)
+check_bisect(bisect.bisect_left, [1, 2, 3], 4, 3)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 0)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 1)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 3)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 6)
+check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
+
+def check_insort(n):
+    global nerrors
+    from random import choice
+    import sys
+    digits = "0123456789"
+    raw = []
+    insorted = []
+    for i in range(n):
+        digit = choice(digits)
+        raw.append(digit)
+        if digit in "02468":
+            f = bisect.insort_left
+        else:
+            f = bisect.insort_right
+        f(insorted, digit)
+    sorted = raw[:]
+    sorted.sort()
+    if sorted == insorted:
+        return
+    print >> sys.stderr, "insort test failed: raw %s got %s" % (raw, insorted)
+    nerrors += 1
+
+check_insort(500)
+
+if nerrors:
+    raise TestFailed("%d errors in test_bisect" % nerrors)
diff --git a/lib-python/2.2/test/test_bsddb.py b/lib-python/2.2/test/test_bsddb.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_bsddb.py
@@ -0,0 +1,76 @@
+#! /usr/bin/env python
+"""Test script for the bsddb C module
+   Roger E. Masse
+"""
+
+import os
+import bsddb
+import dbhash # Just so we know it's imported
+import tempfile
+from test_support import verbose, verify
+
+def test(openmethod, what):
+
+    if verbose:
+        print '\nTesting: ', what
+
+    fname = tempfile.mktemp()
+    f = openmethod(fname, 'c')
+    verify(f.keys() == [])
+    if verbose:
+        print 'creation...'
+    f['0'] = ''
+    f['a'] = 'Guido'
+    f['b'] = 'van'
+    f['c'] = 'Rossum'
+    f['d'] = 'invented'
+    f['f'] = 'Python'
+    if verbose:
+        print '%s %s %s' % (f['a'], f['b'], f['c'])
+
+    if what == 'BTree' :
+        if verbose:
+            print 'key ordering...'
+        f.set_location(f.first()[0])
+        while 1:
+            try:
+                rec = f.next()
+            except KeyError:
+                if rec != f.last():
+                    print 'Error, last != last!'
+                f.previous()
+                break
+            if verbose:
+                print rec
+        if not f.has_key('a'):
+            print 'Error, missing key!'
+
+    f.sync()
+    f.close()
+    if verbose:
+        print 'modification...'
+    f = openmethod(fname, 'w')
+    f['d'] = 'discovered'
+
+    if verbose:
+        print 'access...'
+    for key in f.keys():
+        word = f[key]
+        if verbose:
+            print word
+
+    f.close()
+    try:
+        os.remove(fname)
+    except os.error:
+        pass
+
+types = [(bsddb.btopen, 'BTree'),
+         (bsddb.hashopen, 'Hash Table'),
+         # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
+         #                                   appears broken... at least on
+         #                                   Solaris Intel - rmasse 1/97
+         ]
+
+for type in types:
+    test(type[0], type[1])
diff --git a/lib-python/2.2/test/test_bufio.py b/lib-python/2.2/test/test_bufio.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_bufio.py
@@ -0,0 +1,60 @@
+from test_support import verify, TestFailed, TESTFN
+
+# Simple test to ensure that optimizations in fileobject.c deliver
+# the expected results.  For best testing, run this under a debug-build
+# Python too (to exercise asserts in the C code).
+
+# Repeat string 'pattern' as often as needed to reach total length
+# 'length'.  Then call try_one with that string, a string one larger
+# than that, and a string one smaller than that.  The main driver
+# feeds this all small sizes and various powers of 2, so we exercise
+# all likely stdio buffer sizes, and "off by one" errors on both
+# sides.
+def drive_one(pattern, length):
+    q, r = divmod(length, len(pattern))
+    teststring = pattern * q + pattern[:r]
+    verify(len(teststring) == length)
+    try_one(teststring)
+    try_one(teststring + "x")
+    try_one(teststring[:-1])
+
+# Write s + "\n" + s to file, then open it and ensure that successive
+# .readline()s deliver what we wrote.
+def try_one(s):
+    # Since C doesn't guarantee we can write/read arbitrary bytes in text
+    # files, use binary mode.
+    f = open(TESTFN, "wb")
+    # write once with \n and once without
+    f.write(s)
+    f.write("\n")
+    f.write(s)
+    f.close()
+    f = open(TESTFN, "rb")
+    line = f.readline()
+    if line != s + "\n":
+        raise TestFailed("Expected %r got %r" % (s + "\n", line))
+    line = f.readline()
+    if line != s:
+        raise TestFailed("Expected %r got %r" % (s, line))
+    line = f.readline()
+    if line:
+        raise TestFailed("Expected EOF but got %r" % line)
+    f.close()
+
+# A pattern with prime length, to avoid simple relationships with
+# stdio buffer sizes.
+primepat = "1234567890\00\01\02\03\04\05\06"
+
+nullpat = "\0" * 1000
+
+try:
+    for size in range(1, 257) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
+                      16384, 32768, 65536, 1000000]:
+        drive_one(primepat, size)
+        drive_one(nullpat, size)
+finally:
+    try:
+        import os
+        os.unlink(TESTFN)
+    except:
+        pass
diff --git a/lib-python/2.2/test/test_builtin.py b/lib-python/2.2/test/test_builtin.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_builtin.py
@@ -0,0 +1,13 @@
+# Python test set -- part 4, built-in functions
+
+from test_support import *
+
+print '4. Built-in functions'
+
+print 'test_b1'
+unload('test_b1')
+import test_b1
+
+print 'test_b2'
+unload('test_b2')
+import test_b2
diff --git a/lib-python/2.2/test/test_calendar.py b/lib-python/2.2/test/test_calendar.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_calendar.py
@@ -0,0 +1,61 @@
+import calendar
+import unittest
+
+from test_support import run_unittest
+
+
+class CalendarTestCase(unittest.TestCase):
+    def test_isleap(self):
+        # Make sure that the return is right for a few years, and
+        # ensure that the return values are 1 or 0, not just true or
+        # false (see SF bug #485794).  Specific additional tests may
+        # be appropriate; this tests a single "cycle".
+        self.assertEqual(calendar.isleap(2000), 1)
+        self.assertEqual(calendar.isleap(2001), 0)
+        self.assertEqual(calendar.isleap(2002), 0)
+        self.assertEqual(calendar.isleap(2003), 0)
+
+    def test_setfirstweekday(self):
+        self.assertRaises(ValueError, calendar.setfirstweekday, 'flabber')
+        self.assertRaises(ValueError, calendar.setfirstweekday, -1)
+        self.assertRaises(ValueError, calendar.setfirstweekday, 200)
+        orig = calendar.firstweekday()
+        calendar.setfirstweekday(calendar.SUNDAY)
+        self.assertEqual(calendar.firstweekday(), calendar.SUNDAY)
+        calendar.setfirstweekday(calendar.MONDAY)
+        self.assertEqual(calendar.firstweekday(), calendar.MONDAY)
+        calendar.setfirstweekday(orig)
+
+    def test_enumerateweekdays(self):
+        self.assertRaises(IndexError, calendar.day_abbr.__getitem__, -10)
+        self.assertRaises(IndexError, calendar.day_name.__getitem__, 10)
+        self.assertEqual(len([d for d in calendar.day_abbr]), 7)
+
+    def test_days(self):
+        for attr in "day_name", "day_abbr":
+            value = getattr(calendar, attr)
+            self.assertEqual(len(value), 7)
+            self.assertEqual(len(value[:]), 7)
+            # ensure they're all unique
+            d = {}
+            for v in value:
+                d[v] = 1
+            self.assertEqual(len(d), 7)
+
+    def test_months(self):
+        for attr in "month_name", "month_abbr":
+            value = getattr(calendar, attr)
+            self.assertEqual(len(value), 13)
+            self.assertEqual(len(value[:]), 13)
+            self.assertEqual(value[0], "")
+            # ensure they're all unique
+            d = {}
+            for v in value:
+                d[v] = 1
+            self.assertEqual(len(d), 13)
+
+def test_main():
+    run_unittest(CalendarTestCase)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_call.py b/lib-python/2.2/test/test_call.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_call.py
@@ -0,0 +1,131 @@
+import unittest
+from test_support import run_unittest
+
+# The test cases here cover several paths through the function calling
+# code.  They depend on the METH_XXX flag that is used to define a C
+# function, which can't be verified from Python.  If the METH_XXX decl
+# for a C function changes, these tests may not cover the right paths.
+
+class CFunctionCalls(unittest.TestCase):
+
+    def test_varargs0(self):
+        self.assertRaises(TypeError, {}.has_key)
+
+    def test_varargs1(self):
+        {}.has_key(0)
+
+    def test_varargs2(self):
+        self.assertRaises(TypeError, {}.has_key, 0, 1)
+
+    def test_varargs0_ext(self):
+        try:
+            {}.has_key(*())
+        except TypeError:
+            pass
+
+    def test_varargs1_ext(self):
+        {}.has_key(*(0,))
+
+    def test_varargs2_ext(self):
+        try:
+            {}.has_key(*(1, 2))
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_varargs0_kw(self):
+        self.assertRaises(TypeError, {}.has_key, x=2)
+
+    def test_varargs1_kw(self):
+        self.assertRaises(TypeError, {}.has_key, x=2)
+
+    def test_varargs2_kw(self):
+        self.assertRaises(TypeError, {}.has_key, x=2, y=2)
+
+    def test_oldargs0_0(self):
+        {}.keys()
+
+    def test_oldargs0_1(self):
+        self.assertRaises(TypeError, {}.keys, 0)
+
+    def test_oldargs0_2(self):
+        self.assertRaises(TypeError, {}.keys, 0, 1)
+
+    def test_oldargs0_0_ext(self):
+        {}.keys(*())
+
+    def test_oldargs0_1_ext(self):
+        try:
+            {}.keys(*(0,))
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_oldargs0_2_ext(self):
+        try:
+            {}.keys(*(1, 2))
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_oldargs0_0_kw(self):
+        try:
+            {}.keys(x=2)
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_oldargs0_1_kw(self):
+        self.assertRaises(TypeError, {}.keys, x=2)
+
+    def test_oldargs0_2_kw(self):
+        self.assertRaises(TypeError, {}.keys, x=2, y=2)
+
+    def test_oldargs1_0(self):
+        self.assertRaises(TypeError, {}.update)
+
+    def test_oldargs1_1(self):
+        {}.update({})
+
+    def test_oldargs1_2(self):
+        self.assertRaises(TypeError, {}.update, {}, 1)
+
+    def test_oldargs1_0_ext(self):
+        try:
+            {}.update(*())
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_oldargs1_1_ext(self):
+        {}.update(*({},))
+
+    def test_oldargs1_2_ext(self):
+        try:
+            {}.update(*({}, 2))
+        except TypeError:
+            pass
+        else:
+            raise RuntimeError
+
+    def test_oldargs1_0_kw(self):
+        self.assertRaises(TypeError, {}.update, x=2)
+
+    def test_oldargs1_1_kw(self):
+        self.assertRaises(TypeError, {}.update, {}, x=2)
+
+    def test_oldargs1_2_kw(self):
+        self.assertRaises(TypeError, {}.update, x=2, y=2)
+
+
+def test_main():
+    run_unittest(CFunctionCalls)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_capi.py b/lib-python/2.2/test/test_capi.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_capi.py
@@ -0,0 +1,16 @@
+# Run the _testcapi module tests (tests for the Python/C API):  by defn,
+# these are all functions _testcapi exports whose name begins with 'test_'.
+
+import sys
+import test_support
+import _testcapi
+
+for name in dir(_testcapi):
+    if name.startswith('test_'):
+        test = getattr(_testcapi, name)
+        if test_support.verbose:
+            print "internal", name
+        try:
+            test()
+        except _testcapi.error:
+            raise test_support.TestFailed, sys.exc_info()[1]
diff --git a/lib-python/2.2/test/test_cd.py b/lib-python/2.2/test/test_cd.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_cd.py
@@ -0,0 +1,26 @@
+#! /usr/bin/env python
+"""Wimpy test script for the cd module
+   Roger E. Masse
+"""
+import cd
+from test_support import verbose
+
+cdattrs = ['BLOCKSIZE', 'CDROM', 'DATASIZE', 'ERROR', 'NODISC', 'PAUSED', 'PLAYING', 'READY',
+           'STILL', '__doc__', '__name__', 'atime', 'audio', 'catalog', 'control', 'createparser', 'error',
+           'ident', 'index', 'msftoframe', 'open', 'pnum', 'ptime']
+
+
+# This is a very unobtrusive test for the existence of the cd module and all its
+# attributes.  More comprehensive examples can be found in Demo/cd and
+# require that you have a CD and a CD-ROM drive
+
+def main():
+    # touch all the attributes of cd without doing anything
+    if verbose:
+        print 'Touching cd module attributes...'
+    for attr in cdattrs:
+        if verbose:
+            print 'touching: ', attr
+        getattr(cd, attr)
+
+main()
diff --git a/lib-python/2.2/test/test_cfgparser.py b/lib-python/2.2/test/test_cfgparser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_cfgparser.py
@@ -0,0 +1,284 @@
+import ConfigParser
+import StringIO
+
+from test_support import TestFailed, verify
+
+
+def basic(src):
+    print "Testing basic accessors..."
+    cf = ConfigParser.ConfigParser()
+    sio = StringIO.StringIO(src)
+    cf.readfp(sio)
+    L = cf.sections()
+    L.sort()
+    verify(L == [r'Commented Bar',
+                 r'Foo Bar',
+                 r'Internationalized Stuff',
+                 r'Long Line',
+                 r'Section\with$weird%characters[' '\t',
+                 r'Spaces',
+                 r'Spacey Bar',
+                 ],
+           "unexpected list of section names")
+
+    # The use of spaces in the section names serves as a regression test for
+    # SourceForge bug #115357.
+    # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357
+    verify(cf.get('Foo Bar', 'foo', raw=1) == 'bar')
+    verify(cf.get('Spacey Bar', 'foo', raw=1) == 'bar')
+    verify(cf.get('Commented Bar', 'foo', raw=1) == 'bar')
+    verify(cf.get('Spaces', 'key with spaces', raw=1) == 'value')
+    verify(cf.get('Spaces', 'another with spaces', raw=1) == 'splat!')
+
+    verify('__name__' not in cf.options("Foo Bar"),
+           '__name__ "option" should not be exposed by the API!')
+
+    # Make sure the right things happen for remove_option();
+    # added to include check for SourceForge bug #123324:
+    verify(cf.remove_option('Foo Bar', 'foo'),
+           "remove_option() failed to report existance of option")
+    verify(not cf.has_option('Foo Bar', 'foo'),
+           "remove_option() failed to remove option")
+    verify(not cf.remove_option('Foo Bar', 'foo'),
+           "remove_option() failed to report non-existance of option"
+           " that was removed")
+    try:
+        cf.remove_option('No Such Section', 'foo')
+    except ConfigParser.NoSectionError:
+        pass
+    else:
+        raise TestFailed(
+            "remove_option() failed to report non-existance of option"
+            " that never existed")
+
+    verify(cf.get('Long Line', 'foo', raw=1) ==
+           'this line is much, much longer than my editor\nlikes it.')
+
+
+def write(src):
+    print "Testing writing of files..."
+    cf = ConfigParser.ConfigParser()
+    sio = StringIO.StringIO(src)
+    cf.readfp(sio)
+    output = StringIO.StringIO()
+    cf.write(output)
+    verify(output, """[DEFAULT]
+foo = another very
+        long line
+
+[Long Line]
+foo = this line is much, much longer than my editor
+        likes it.
+""")
+
+def case_sensitivity():
+    print "Testing case sensitivity..."
+    cf = ConfigParser.ConfigParser()
+    cf.add_section("A")
+    cf.add_section("a")
+    L = cf.sections()
+    L.sort()
+    verify(L == ["A", "a"])
+    cf.set("a", "B", "value")
+    verify(cf.options("a") == ["b"])
+    verify(cf.get("a", "b", raw=1) == "value",
+           "could not locate option, expecting case-insensitive option names")
+    verify(cf.has_option("a", "b"))
+    cf.set("A", "A-B", "A-B value")
+    for opt in ("a-b", "A-b", "a-B", "A-B"):
+        verify(cf.has_option("A", opt),
+               "has_option() returned false for option which should exist")
+    verify(cf.options("A") == ["a-b"])
+    verify(cf.options("a") == ["b"])
+    cf.remove_option("a", "B")
+    verify(cf.options("a") == [])
+
+    # SF bug #432369:
+    cf = ConfigParser.ConfigParser()
+    sio = StringIO.StringIO("[MySection]\nOption: first line\n\tsecond line\n")
+    cf.readfp(sio)
+    verify(cf.options("MySection") == ["option"])
+    verify(cf.get("MySection", "Option") == "first line\nsecond line")
+
+    # SF bug #561822:
+    cf = ConfigParser.ConfigParser(defaults={"key":"value"})
+    cf.readfp(StringIO.StringIO("[section]\nnekey=nevalue\n"))
+    verify(cf.has_option("section", "Key"))
+
+
+def boolean(src):
+    print "Testing interpretation of boolean Values..."
+    cf = ConfigParser.ConfigParser()
+    sio = StringIO.StringIO(src)
+    cf.readfp(sio)
+    for x in range(1, 5):
+        verify(cf.getboolean('BOOLTEST', 't%d' % (x)) == 1)
+    for x in range(1, 5):
+        verify(cf.getboolean('BOOLTEST', 'f%d' % (x)) == 0)
+    for x in range(1, 5):
+        try:
+            cf.getboolean('BOOLTEST', 'e%d' % (x))
+        except ValueError:
+            pass
+        else:
+            raise TestFailed(
+                "getboolean() failed to report a non boolean value")
+
+
+def interpolation(src):
+    print "Testing value interpolation..."
+    cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"})
+    sio = StringIO.StringIO(src)
+    cf.readfp(sio)
+    verify(cf.get("Foo", "getname") == "Foo")
+    verify(cf.get("Foo", "bar") == "something with interpolation (1 step)")
+    verify(cf.get("Foo", "bar9")
+           == "something with lots of interpolation (9 steps)")
+    verify(cf.get("Foo", "bar10")
+           == "something with lots of interpolation (10 steps)")
+    expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11")
+
+
+def parse_errors():
+    print "Testing parse errors..."
+    expect_parse_error(ConfigParser.ParsingError,
+                       """[Foo]\n  extra-spaces: splat\n""")
+    expect_parse_error(ConfigParser.ParsingError,
+                       """[Foo]\n  extra-spaces= splat\n""")
+    expect_parse_error(ConfigParser.ParsingError,
+                       """[Foo]\noption-without-value\n""")
+    expect_parse_error(ConfigParser.ParsingError,
+                       """[Foo]\n:value-without-option-name\n""")
+    expect_parse_error(ConfigParser.ParsingError,
+                       """[Foo]\n=value-without-option-name\n""")
+    expect_parse_error(ConfigParser.MissingSectionHeaderError,
+                       """No Section!\n""")
+
+
+def query_errors():
+    print "Testing query interface..."
+    cf = ConfigParser.ConfigParser()
+    verify(cf.sections() == [],
+           "new ConfigParser should have no defined sections")
+    verify(not cf.has_section("Foo"),
+           "new ConfigParser should have no acknowledged sections")
+    try:
+        cf.options("Foo")
+    except ConfigParser.NoSectionError, e:
+        pass
+    else:
+        raise TestFailed(
+            "Failed to catch expected NoSectionError from options()")
+    try:
+        cf.set("foo", "bar", "value")
+    except ConfigParser.NoSectionError, e:
+        pass
+    else:
+        raise TestFailed("Failed to catch expected NoSectionError from set()")
+    expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar")
+    cf.add_section("foo")
+    expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar")
+
+
+def weird_errors():
+    print "Testing miscellaneous error conditions..."
+    cf = ConfigParser.ConfigParser()
+    cf.add_section("Foo")
+    try:
+        cf.add_section("Foo")
+    except ConfigParser.DuplicateSectionError, e:
+        pass
+    else:
+        raise TestFailed("Failed to catch expected DuplicateSectionError")
+
+
+def expect_get_error(cf, exctype, section, option, raw=0):
+    try:
+        cf.get(section, option, raw=raw)
+    except exctype, e:
+        pass
+    else:
+        raise TestFailed("Failed to catch expected " + exctype.__name__)
+
+
+def expect_parse_error(exctype, src):
+    cf = ConfigParser.ConfigParser()
+    sio = StringIO.StringIO(src)
+    try:
+        cf.readfp(sio)
+    except exctype, e:
+        pass
+    else:
+        raise TestFailed("Failed to catch expected " + exctype.__name__)
+
+
+basic(r"""
+[Foo Bar]
+foo=bar
+[Spacey Bar]
+foo = bar
+[Commented Bar]
+foo: bar ; comment
+[Long Line]
+foo: this line is much, much longer than my editor
+   likes it.
+[Section\with$weird%characters[""" '\t' r"""]
+[Internationalized Stuff]
+foo[bg]: Bulgarian
+foo=Default
+foo[en]=English
+foo[de]=Deutsch
+[Spaces]
+key with spaces : value
+another with spaces = splat!
+""")
+write("""[Long Line]
+foo: this line is much, much longer than my editor
+   likes it.
+[DEFAULT]
+foo: another very
+ long line""")
+case_sensitivity()
+boolean(r"""
+[BOOLTEST]
+T1=1
+T2=TRUE
+T3=True
+T4=oN
+T5=yes
+F1=0
+F2=FALSE
+F3=False
+F4=oFF
+F5=nO
+E1=2
+E2=foo
+E3=-1
+E4=0.1
+E5=FALSE AND MORE
+""")
+interpolation(r"""
+[Foo]
+bar=something %(with1)s interpolation (1 step)
+bar9=something %(with9)s lots of interpolation (9 steps)
+bar10=something %(with10)s lots of interpolation (10 steps)
+bar11=something %(with11)s lots of interpolation (11 steps)
+with11=%(with10)s
+with10=%(with9)s
+with9=%(with8)s
+with8=%(with7)s
+with7=%(with6)s
+with6=%(with5)s
+with5=%(with4)s
+with4=%(with3)s
+with3=%(with2)s
+with2=%(with1)s
+with1=with
+
+[Mutual Recursion]
+foo=%(bar)s
+bar=%(foo)s
+""")
+parse_errors()
+query_errors()
+weird_errors()
diff --git a/lib-python/2.2/test/test_cgi.py b/lib-python/2.2/test/test_cgi.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_cgi.py
@@ -0,0 +1,188 @@
+from test_support import verify, verbose
+import cgi
+import os
+import sys
+
+class HackedSysModule:
+    # The regression test will have real values in sys.argv, which
+    # will completely confuse the test of the cgi module
+    argv = []
+    stdin = sys.stdin
+
+cgi.sys = HackedSysModule()
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+class ComparableException:
+    def __init__(self, err):
+        self.err = err
+
+    def __str__(self):
+        return str(self.err)
+
+    def __cmp__(self, anExc):
+        if not isinstance(anExc, Exception):
+            return -1
+        x = cmp(self.err.__class__, anExc.__class__)
+        if x != 0:
+            return x
+        return cmp(self.err.args, anExc.args)
+
+    def __getattr__(self, attr):
+        return getattr(self.err, attr)
+
+def do_test(buf, method):
+    env = {}
+    if method == "GET":
+        fp = None
+        env['REQUEST_METHOD'] = 'GET'
+        env['QUERY_STRING'] = buf
+    elif method == "POST":
+        fp = StringIO(buf)
+        env['REQUEST_METHOD'] = 'POST'
+        env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
+        env['CONTENT_LENGTH'] = str(len(buf))
+    else:
+        raise ValueError, "unknown method: %s" % method
+    try:
+        return cgi.parse(fp, env, strict_parsing=1)
+    except StandardError, err:
+        return ComparableException(err)
+
+# A list of test cases.  Each test case is a two-tuple that contains
+# a string with the query and a dictionary with the expected result.
+
+parse_test_cases = [
+    ("", ValueError("bad query field: ''")),
+    ("&", ValueError("bad query field: ''")),
+    ("&&", ValueError("bad query field: ''")),
+    (";", ValueError("bad query field: ''")),
+    (";&;", ValueError("bad query field: ''")),
+    # Should the next few really be valid?
+    ("=", {}),
+    ("=&=", {}),
+    ("=;=", {}),
+    # The rest seem to make sense
+    ("=a", {'': ['a']}),
+    ("&=a", ValueError("bad query field: ''")),
+    ("=a&", ValueError("bad query field: ''")),
+    ("=&a", ValueError("bad query field: 'a'")),
+    ("b=a", {'b': ['a']}),
+    ("b+=a", {'b ': ['a']}),
+    ("a=b=a", {'a': ['b=a']}),
+    ("a=+b=a", {'a': [' b=a']}),
+    ("&b=a", ValueError("bad query field: ''")),
+    ("b&=a", ValueError("bad query field: 'b'")),
+    ("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
+    ("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
+    ("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+    ("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+    ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+    ("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
+     {'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
+      'cuyer': ['r'],
+      'expire': ['964546263'],
+      'kid': ['130003.300038'],
+      'lobale': ['en-US'],
+      'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
+      'ss': ['env'],
+      'view': ['bustomer'],
+      }),
+
+    ("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
+     {'SUBMIT': ['Browse'],
+      '_assigned_to': ['31392'],
+      '_category': ['100'],
+      '_status': ['1'],
+      'group_id': ['5470'],
+      'set': ['custom'],
+      })
+    ]
+
+def norm(list):
+    if type(list) == type([]):
+        list.sort()
+    return list
+
+def first_elts(list):
+    return map(lambda x:x[0], list)
+
+def first_second_elts(list):
+    return map(lambda p:(p[0], p[1][0]), list)
+
+def main():
+    for orig, expect in parse_test_cases:
+        # Test basic parsing
+        print repr(orig)
+        d = do_test(orig, "GET")
+        verify(d == expect, "Error parsing %s" % repr(orig))
+        d = do_test(orig, "POST")
+        verify(d == expect, "Error parsing %s" % repr(orig))
+
+        env = {'QUERY_STRING': orig}
+        fcd = cgi.FormContentDict(env)
+        sd = cgi.SvFormContentDict(env)
+        fs = cgi.FieldStorage(environ=env)
+        if type(expect) == type({}):
+            # test dict interface
+            verify(len(expect) == len(fcd))
+            verify(norm(expect.keys()) == norm(fcd.keys()))
+            verify(norm(expect.values()) == norm(fcd.values()))
+            verify(norm(expect.items()) == norm(fcd.items()))
+            verify(fcd.get("nonexistent field", "default") == "default")
+            verify(len(sd) == len(fs))
+            verify(norm(sd.keys()) == norm(fs.keys()))
+            verify(fs.getvalue("nonexistent field", "default") == "default")
+            # test individual fields
+            for key in expect.keys():
+                expect_val = expect[key]
+                verify(fcd.has_key(key))
+                verify(norm(fcd[key]) == norm(expect[key]))
+                verify(fcd.get(key, "default") == fcd[key])
+                verify(fs.has_key(key))
+                if len(expect_val) > 1:
+                    single_value = 0
+                else:
+                    single_value = 1
+                try:
+                    val = sd[key]
+                except IndexError:
+                    verify(not single_value)
+                    verify(fs.getvalue(key) == expect_val)
+                else:
+                    verify(single_value)
+                    verify(val == expect_val[0])
+                    verify(fs.getvalue(key) == expect_val[0])
+                verify(norm(sd.getlist(key)) == norm(expect_val))
+                if single_value:
+                    verify(norm(sd.values()) == \
+                           first_elts(norm(expect.values())))
+                    verify(norm(sd.items()) == \
+                           first_second_elts(norm(expect.items())))
+
+    # Test the weird FormContentDict classes
+    env = {'QUERY_STRING': "x=1&y=2.0&z=2-3.%2b0&1=1abc"}
+    expect = {'x': 1, 'y': 2.0, 'z': '2-3.+0', '1': '1abc'}
+    d = cgi.InterpFormContentDict(env)
+    for k, v in expect.items():
+        verify(d[k] == v)
+    for k, v in d.items():
+        verify(expect[k] == v)
+    verify(norm(expect.values()) == norm(d.values()))
+
+    print "Testing log"
+    cgi.initlog()
+    cgi.log("Testing")
+    cgi.logfp = sys.stdout
+    cgi.initlog("%s", "Testing initlog 1")
+    cgi.log("%s", "Testing log 2")
+    if os.path.exists("/dev/null"):
+        cgi.logfp = None
+        cgi.logfile = "/dev/null"
+        cgi.initlog("%s", "Testing log 3")
+        cgi.log("Testing log 4")
+
+main()
diff --git a/lib-python/2.2/test/test_charmapcodec.py b/lib-python/2.2/test/test_charmapcodec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_charmapcodec.py
@@ -0,0 +1,43 @@
+""" Python character mapping codec test
+
+This uses the test codec in testcodec.py and thus also tests the
+encodings package lookup scheme.
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+
+def check(a, b):
+    if a != b:
+        print '*** check failed: %s != %s' % (repr(a), repr(b))
+    else:
+        print '%s == %s: OK' % (a, b)
+
+# test codec's full path name (see test/testcodec.py)
+codecname = 'test.testcodec'
+
+check(unicode('abc', codecname), u'abc')
+check(unicode('xdef', codecname), u'abcdef')
+check(unicode('defx', codecname), u'defabc')
+check(unicode('dxf', codecname), u'dabcf')
+check(unicode('dxfx', codecname), u'dabcfabc')
+
+check(u'abc'.encode(codecname), 'abc')
+check(u'xdef'.encode(codecname), 'abcdef')
+check(u'defx'.encode(codecname), 'defabc')
+check(u'dxf'.encode(codecname), 'dabcf')
+check(u'dxfx'.encode(codecname), 'dabcfabc')
+
+check(unicode('ydef', codecname), u'def')
+check(unicode('defy', codecname), u'def')
+check(unicode('dyf', codecname), u'df')
+check(unicode('dyfy', codecname), u'df')
+
+try:
+    unicode('abc\001', codecname)
+except UnicodeError:
+    print '\\001 maps to undefined: OK'
+else:
+    print '*** check failed: \\001 does not map to undefined'
diff --git a/lib-python/2.2/test/test_cl.py b/lib-python/2.2/test/test_cl.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_cl.py
@@ -0,0 +1,78 @@
+#! /usr/bin/env python
+"""Wimpy test script for the cl module
+   Roger E. Masse
+"""
+import cl
+from test_support import verbose
+
+clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID',
+'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO',
+'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE',
+'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS',
+'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO',
+'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName',
+'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE',
+'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG',
+'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE',
+'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE',
+'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE',
+'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE',
+'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE',
+'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE',
+'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE',
+'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS',
+'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER',
+'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER',
+'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT',
+'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET',
+'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP',
+'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel',
+'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC',
+'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO',
+'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK',
+'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage',
+'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE',
+'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE',
+'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE',
+'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW',
+'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC',
+'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE',
+'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT',
+'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG',
+'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR',
+'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS',
+'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD',
+'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS',
+'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO',
+'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF',
+'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD',
+'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE',
+'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS',
+'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor',
+'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL',
+'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR',
+'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize',
+'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332',
+'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1',
+'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY',
+'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED',
+'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD',
+'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO',
+'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName',
+'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422',
+'YUV422DC', 'YUV422HC', '__doc__', '__name__', 'cvt_type', 'error']
+
+
+# This is a very unobtrusive test for the existence of the cl
+# module and all its attributes.
+
+def main():
+    # touch all the attributes of cl without doing anything
+    if verbose:
+        print 'Touching cl module attributes...'
+    for attr in clattrs:
+        if verbose:
+            print 'touching: ', attr
+        getattr(cl, attr)
+
+main()
diff --git a/lib-python/2.2/test/test_class.py b/lib-python/2.2/test/test_class.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_class.py
@@ -0,0 +1,317 @@
+"Test the functionality of Python classes implementing operators."
+
+from test_support import TestFailed
+
+testmeths = [
+
+# Binary operations
+    "add",
+    "radd",
+    "sub",
+    "rsub",
+    "mul",
+    "rmul",
+    "div",
+    "rdiv",
+    "mod",
+    "rmod",
+    "divmod",
+    "rdivmod",
+    "pow",
+    "rpow",
+    "rshift",
+    "rrshift",
+    "lshift",
+    "rlshift",
+    "and",
+    "rand",
+    "or",
+    "ror",
+    "xor",
+    "rxor",
+
+# List/dict operations
+    "contains",
+    "getitem",
+    "getslice",
+    "setitem",
+    "setslice",
+    "delitem",
+    "delslice",
+
+# Unary operations
+    "neg",
+    "pos",
+    "abs",
+    "int",
+    "long",
+    "float",
+    "oct",
+    "hex",
+
+# generic operations
+    "init",
+    ]
+
+# These need to return something other than None
+#    "coerce",
+#    "hash",
+#    "str",
+#    "repr",
+
+# These are separate because they can influence the test of other methods.
+#    "getattr",
+#    "setattr",
+#    "delattr",
+
+class AllTests:
+    def __coerce__(self, *args):
+        print "__coerce__:", args
+        return (self,) + args
+
+    def __hash__(self, *args):
+        print "__hash__:", args
+        return hash(id(self))
+
+    def __str__(self, *args):
+        print "__str__:", args
+        return "AllTests"
+
+    def __repr__(self, *args):
+        print "__repr__:", args
+        return "AllTests"
+
+    def __cmp__(self, *args):
+        print "__cmp__:", args
+        return 0
+
+    def __del__(self, *args):
+        print "__del__:", args
+
+# Synthesize AllTests methods from the names in testmeths.
+
+method_template = """\
+def __%(method)s__(self, *args):
+    print "__%(method)s__:", args
+"""
+
+for method in testmeths:
+    exec method_template % locals() in AllTests.__dict__
+
+del method, method_template
+
+# this also tests __init__ of course.
+testme = AllTests()
+
+# Binary operations
+
+testme + 1
+1 + testme
+
+testme - 1
+1 - testme
+
+testme * 1
+1 * testme
+
+if 1/2 == 0:
+    testme / 1
+    1 / testme
+else:
+    # True division is in effect, so "/" doesn't map to __div__ etc; but
+    # the canned expected-output file requires that __div__ etc get called.
+    testme.__coerce__(1)
+    testme.__div__(1)
+    testme.__coerce__(1)
+    testme.__rdiv__(1)
+
+testme % 1
+1 % testme
+
+divmod(testme,1)
+divmod(1, testme)
+
+testme ** 1
+1 ** testme
+
+testme >> 1
+1 >> testme
+
+testme << 1
+1 << testme
+
+testme & 1
+1 & testme
+
+testme | 1
+1 | testme
+
+testme ^ 1
+1 ^ testme
+
+
+# List/dict operations
+
+1 in testme
+
+testme[1]
+testme[1] = 1
+del testme[1]
+
+testme[:42]
+testme[:42] = "The Answer"
+del testme[:42]
+
+testme[2:1024:10]
+testme[2:1024:10] = "A lot"
+del testme[2:1024:10]
+
+testme[:42, ..., :24:, 24, 100]
+testme[:42, ..., :24:, 24, 100] = "Strange"
+del testme[:42, ..., :24:, 24, 100]
+
+
+# Now remove the slice hooks to see if converting normal slices to slice
+# object works.
+
+del AllTests.__getslice__
+del AllTests.__setslice__
+del AllTests.__delslice__
+
+import sys
+if sys.platform[:4] != 'java':
+    testme[:42]
+    testme[:42] = "The Answer"
+    del testme[:42]
+else:
+    # This works under Jython, but the actual slice values are
+    # different.
+    print "__getitem__: (slice(0, 42, None),)"
+    print "__setitem__: (slice(0, 42, None), 'The Answer')"
+    print "__delitem__: (slice(0, 42, None),)"
+
+# Unary operations
+
+-testme
++testme
+abs(testme)
+if sys.platform[:4] != 'java':
+    int(testme)
+    long(testme)
+    float(testme)
+    oct(testme)
+    hex(testme)
+else:
+    # Jython enforces that these methods return
+    # a value of the expected type.
+    print "__int__: ()"
+    print "__long__: ()"
+    print "__float__: ()"
+    print "__oct__: ()"
+    print "__hex__: ()"
+
+
+# And the rest...
+
+hash(testme)
+repr(testme)
+str(testme)
+
+testme == 1
+testme < 1
+testme > 1
+testme <> 1
+testme != 1
+1 == testme
+1 < testme
+1 > testme
+1 <> testme
+1 != testme
+
+# This test has to be last (duh.)
+
+del testme
+if sys.platform[:4] == 'java':
+    import java
+    java.lang.System.gc()
+
+# Interfering tests
+
+class ExtraTests:
+    def __getattr__(self, *args):
+        print "__getattr__:", args
+        return "SomeVal"
+
+    def __setattr__(self, *args):
+        print "__setattr__:", args
+
+    def __delattr__(self, *args):
+        print "__delattr__:", args
+
+testme = ExtraTests()
+testme.spam
+testme.eggs = "spam, spam, spam and ham"
+del testme.cardinal
+
+
+# Test correct errors from hash() on objects with comparisons but no __hash__
+
+class C0:
+    pass
+
+hash(C0()) # This should work; the next two should raise TypeError
+
+class C1:
+    def __cmp__(self, other): return 0
+
+try: hash(C1())
+except TypeError: pass
+else: raise TestFailed, "hash(C1()) should raise an exception"
+
+class C2:
+    def __eq__(self, other): return 1
+
+try: hash(C2())
+except TypeError: pass
+else: raise TestFailed, "hash(C2()) should raise an exception"
+
+
+# Test for SF bug 532646
+
+class A:
+    pass
+A.__call__ = A()
+a = A()
+try:
+    a() # This should not segfault
+except RuntimeError:
+    pass
+else:
+    raise TestFailed, "how could this not have overflowed the stack?"
+
+
+# Tests for exceptions raised in instance_getattr2().
+
+def booh(self):
+    raise AttributeError, "booh"
+
+class A:
+    a = property(booh)
+try:
+    A().a # Raised AttributeError: A instance has no attribute 'a'
+except AttributeError, x:
+    if str(x) is not "booh":
+        print "attribute error for A().a got masked:", str(x)
+
+class E:
+    __eq__ = property(booh)
+E() == E() # In debug mode, caused a C-level assert() to fail
+
+class I:
+    __init__ = property(booh)
+try:
+    I() # In debug mode, printed XXX undetected error and raises AttributeError
+except AttributeError, x:
+    pass
+else:
+    print "attribute error for I.__init__ got masked"
diff --git a/lib-python/2.2/test/test_cmath.py b/lib-python/2.2/test/test_cmath.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_cmath.py
@@ -0,0 +1,35 @@
+#! /usr/bin/env python
+""" Simple test script for cmathmodule.c
+    Roger E. Masse
+"""
+import cmath
+from test_support import verbose
+
+testdict = {'acos' : 1.0,
+            'acosh' : 1.0,
+            'asin' : 1.0,
+            'asinh' : 1.0,
+            'atan' : 0.2,
+            'atanh' : 0.2,
+            'cos' : 1.0,
+            'cosh' : 1.0,
+            'exp' : 1.0,
+            'log' : 1.0,
+            'log10' : 1.0,
+            'sin' : 1.0,
+            'sinh' : 1.0,
+            'sqrt' : 1.0,
+            'tan' : 1.0,
+            'tanh' : 1.0}
+
+for func in testdict.keys():
+    f = getattr(cmath, func)
+    r = f(testdict[func])
+    if verbose:
+        print 'Calling %s(%f) = %f' % (func, testdict[func], abs(r))
+
+p = cmath.pi
+e = cmath.e
+if verbose:
+    print 'PI = ', abs(p)
+    print 'E = ', abs(e)
diff --git a/lib-python/2.2/test/test_codecs.py b/lib-python/2.2/test/test_codecs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_codecs.py
@@ -0,0 +1,31 @@
+import test_support,unittest
+import codecs
+import StringIO
+
+class UTF16Test(unittest.TestCase):
+
+    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
+    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
+
+    def test_only_one_bom(self):
+        _,_,reader,writer = codecs.lookup("utf-16")
+        # encode some stream
+        s = StringIO.StringIO()
+        f = writer(s)
+        f.write(u"spam")
+        f.write(u"spam")
+        d = s.getvalue()
+        # check whether there is exactly one BOM in it
+        self.assert_(d == self.spamle or d == self.spambe)
+        # try to read it back
+        s = StringIO.StringIO(d)
+        f = reader(s)
+        self.assertEquals(f.read(), u"spamspam")
+
+
+def test_main():
+    test_support.run_unittest(UTF16Test)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_codeop.py b/lib-python/2.2/test/test_codeop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_codeop.py
@@ -0,0 +1,190 @@
+"""
+   Test cases for codeop.py
+   Nick Mathewson
+"""
+import unittest
+from test_support import run_unittest, is_jython
+
+from codeop import compile_command
+
+if is_jython:
+    import sys
+    import cStringIO
+
+    def unify_callables(d):
+        for n,v in d.items():
+            if callable(v):
+                d[n] = callable
+        return d
+
+class CodeopTests(unittest.TestCase):
+
+    def assertValid(self, str, symbol='single'):
+        '''succeed iff str is a valid piece of code'''
+        if is_jython:
+            code = compile_command(str, "<input>", symbol)
+            self.assert_(code)
+            if symbol == "single":
+                d,r = {},{}
+                saved_stdout = sys.stdout
+                sys.stdout = cStringIO.StringIO()
+                try:
+                    exec code in d
+                    exec compile(str,"<input>","single") in r
+                finally:
+                    sys.stdout = saved_stdout
+            elif symbol == 'eval':
+                ctx = {'a': 2}
+                d = { 'value': eval(code,ctx) }
+                r = { 'value': eval(str,ctx) }
+            self.assertEquals(unify_callables(r),unify_callables(d))
+        else:
+            expected = compile(str, "<input>", symbol)
+            self.assertEquals( compile_command(str, "<input>", symbol), expected)
+
+    def assertIncomplete(self, str, symbol='single'):
+        '''succeed iff str is the start of a valid piece of code'''
+        self.assertEquals( compile_command(str, symbol=symbol), None)
+
+    def assertInvalid(self, str, symbol='single', is_syntax=1):
+        '''succeed iff str is the start of an invalid piece of code'''
+        try:
+            compile_command(str,symbol=symbol)
+            self.fail("No exception thrown for invalid code")
+        except SyntaxError:
+            self.assert_(is_syntax)
+        except OverflowError:
+            self.assert_(not is_syntax)
+
+    def test_valid(self):
+        av = self.assertValid
+
+        # special case
+        if not is_jython:
+            self.assertEquals(compile_command(""),
+                            compile("pass", "<input>", 'single'))
+            self.assertEquals(compile_command("\n"),
+                            compile("pass", "<input>", 'single'))          
+        else:
+            av("")
+            av("\n")
+        
+        av("a = 1")
+        av("\na = 1")
+        av("a = 1\n")
+        av("a = 1\n\n")
+        av("\n\na = 1\n\n")
+
+        av("def x():\n  pass\n")
+        av("if 1:\n pass\n")
+
+        av("\n\nif 1: pass\n")
+        av("\n\nif 1: pass\n\n")
+
+        av("def x():\n\n pass\n")
+        av("def x():\n  pass\n  \n")
+        av("def x():\n  pass\n \n")
+
+        av("pass\n")
+        av("3**3\n")
+
+        av("if 9==3:\n   pass\nelse:\n   pass\n")
+        av("if 1:\n pass\n if 1:\n  pass\n else:\n  pass\n")
+
+        av("#a\n#b\na = 3\n")
+        av("#a\n\n   \na=3\n")
+        av("a=3\n\n")
+        av("a = 9+ \\\n3")
+
+        av("3**3","eval")
+        av("(lambda z: \n z**3)","eval")
+
+        av("9+ \\\n3","eval")
+        av("9+ \\\n3\n","eval")
+
+        av("\n\na**3","eval")
+        av("\n \na**3","eval")
+        av("#a\n#b\na**3","eval")
+
+    def test_incomplete(self):
+        ai = self.assertIncomplete
+
+        ai("(a **")
+        ai("(a,b,")
+        ai("(a,b,(")
+        ai("(a,b,(")
+        ai("a = (")
+        ai("a = {")
+        ai("b + {")
+
+        ai("if 9==3:\n   pass\nelse:")
+        ai("if 9==3:\n   pass\nelse:\n")
+        ai("if 9==3:\n   pass\nelse:\n   pass")
+        ai("if 1:")
+        ai("if 1:\n")
+        ai("if 1:\n pass\n if 1:\n  pass\n else:")
+        ai("if 1:\n pass\n if 1:\n  pass\n else:\n")          
+        ai("if 1:\n pass\n if 1:\n  pass\n else:\n  pass") 
+        
+        ai("def x():")
+        ai("def x():\n")
+        ai("def x():\n\n")
+
+        ai("def x():\n  pass")
+        ai("def x():\n  pass\n ")
+        ai("def x():\n  pass\n  ")
+        ai("\n\ndef x():\n  pass")
+
+        ai("a = 9+ \\")
+        ai("a = 'a\\")
+        ai("a = '''xy")
+
+        ai("","eval")
+        ai("\n","eval")
+        ai("(","eval")
+        ai("(\n\n\n","eval")
+        ai("(9+","eval")
+        ai("9+ \\","eval")
+        ai("lambda z: \\","eval")
+
+    def test_invalid(self):
+        ai = self.assertInvalid
+        ai("a b")
+
+        ai("a @")
+        ai("a b @")
+        ai("a ** @")
+        
+        ai("a = ")
+        ai("a = 9 +")
+
+        ai("def x():\n\npass\n")
+
+        ai("\n\n if 1: pass\n\npass")
+
+        ai("a = 9+ \\\n")
+        ai("a = 'a\\ ")
+        ai("a = 'a\\\n")
+
+        ai("a = 1","eval")
+        ai("a = (","eval")
+        ai("]","eval")
+        ai("())","eval")
+        ai("[}","eval")
+        ai("9+","eval")
+        ai("lambda z:","eval")
+        ai("a b","eval")
+
+    def test_filename(self):
+        self.assertEquals(compile_command("a = 1\n", "abc").co_filename,
+                          compile("a = 1\n", "abc", 'single').co_filename)
+        self.assertNotEquals(compile_command("a = 1\n", "abc").co_filename,
+                             compile("a = 1\n", "def", 'single').co_filename)
+
+
+def test_main():
+    run_unittest(CodeopTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_coercion.py b/lib-python/2.2/test/test_coercion.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_coercion.py
@@ -0,0 +1,118 @@
+import copy
+import sys
+import warnings
+
+# Fake a number that implements numeric methods through __coerce__
+class CoerceNumber:
+    def __init__(self, arg):
+        self.arg = arg
+
+    def __repr__(self):
+        return '<CoerceNumber %s>' % repr(self.arg)
+
+    def __coerce__(self, other):
+        if isinstance(other, CoerceNumber):
+            return self.arg, other.arg
+        else:
+            return (self.arg, other)
+
+
+# Fake a number that implements numeric ops through methods.
+class MethodNumber:
+
+    def __init__(self,arg):
+        self.arg = arg
+
+    def __repr__(self):
+        return '<MethodNumber %s>' % repr(self.arg)
+
+    def __add__(self,other):
+        return self.arg + other
+
+    def __radd__(self,other):
+        return other + self.arg
+
+    def __sub__(self,other):
+        return self.arg - other
+
+    def __rsub__(self,other):
+        return other - self.arg
+
+    def __mul__(self,other):
+        return self.arg * other
+
+    def __rmul__(self,other):
+        return other * self.arg
+
+    def __div__(self,other):
+        return self.arg / other
+
+    def __rdiv__(self,other):
+        return other / self.arg
+
+    def __pow__(self,other):
+        return self.arg ** other
+
+    def __rpow__(self,other):
+        return other ** self.arg
+
+    def __mod__(self,other):
+        return self.arg % other
+
+    def __rmod__(self,other):
+        return other % self.arg
+
+    def __cmp__(self, other):
+        return cmp(self.arg, other)
+
+
+candidates = [ 2, 4.0, 2L, 2+0j, [1], (2,), None,
+               MethodNumber(1), CoerceNumber(2)]
+
+infix_binops = [ '+', '-', '*', '/', '**', '%' ]
+prefix_binops = [ 'divmod' ]
+
+def do_infix_binops():
+    for a in candidates:
+        for b in candidates:
+            for op in infix_binops:
+                print '%s %s %s' % (a, op, b),
+                try:
+                    x = eval('a %s b' % op)
+                except:
+                    error = sys.exc_info()[:2]
+                    print '... %s' % error[0]
+                else:
+                    print '=', x
+                try:
+                    z = copy.copy(a)
+                except copy.Error:
+                    z = a # assume it has no inplace ops
+                print '%s %s= %s' % (a, op, b),
+                try:
+                    exec('z %s= b' % op)
+                except:
+                    error = sys.exc_info()[:2]
+                    print '... %s' % error[0]
+                else:
+                    print '=>', z
+
+def do_prefix_binops():
+    for a in candidates:
+        for b in candidates:
+            for op in prefix_binops:
+                print '%s(%s, %s)' % (op, a, b),
+                try:
+                    x = eval('%s(a, b)' % op)
+                except:
+                    error = sys.exc_info()[:2]
+                    print '... %s' % error[0]
+                else:
+                    print '=', x
+
+warnings.filterwarnings("ignore",
+                        r'complex divmod\(\), // and % are deprecated',
+                        DeprecationWarning,
+                        r'test_coercion$')
+do_infix_binops()
+do_prefix_binops()
diff --git a/lib-python/2.2/test/test_commands.py b/lib-python/2.2/test/test_commands.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_commands.py
@@ -0,0 +1,52 @@
+'''
+   Tests for commands module
+   Nick Mathewson
+'''
+import unittest
+import os, tempfile, re
+
+from test_support import TestSkipped, run_unittest
+from commands import *
+
+# The module says:
+#   "NB This only works (and is only relevant) for UNIX."
+#
+# Actually, getoutput should work on any platform with an os.popen, but
+# I'll take the comment as given, and skip this suite.
+
+if os.name != 'posix':
+    raise TestSkipped('Not posix; skipping test_commands')
+
+
+class CommandTests(unittest.TestCase):
+
+    def test_getoutput(self):
+        self.assertEquals(getoutput('echo xyzzy'), 'xyzzy')
+        self.assertEquals(getstatusoutput('echo xyzzy'), (0, 'xyzzy'))
+
+        # we use mktemp in the next line to get a filename which we
+        # _know_ won't exist.  This is guaranteed to fail.
+        status, output = getstatusoutput('cat ' + tempfile.mktemp())
+        self.assertNotEquals(status, 0)
+
+    def test_getstatus(self):
+        # This pattern should match 'ls -ld /.' on any posix
+        # system, however perversely configured.
+        pat = r'''d.........   # It is a directory.
+                  \s+\d+       # It has some number of links.
+                  \s+\w+\s+\w+ # It has a user and group, which may
+                               #     be named anything.
+                  \s+\d+       # It has a size.
+                  [^/]*        # Skip the date.
+                  /.           # and end with the name of the file.
+               '''
+
+        self.assert_(re.match(pat, getstatus("/."), re.VERBOSE))
+
+
+def test_main():
+    run_unittest(CommandTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_compare.py b/lib-python/2.2/test/test_compare.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_compare.py
@@ -0,0 +1,56 @@
+import sys
+
+from test_support import *
+
+class Empty:
+    def __repr__(self):
+        return '<Empty>'
+
+class Coerce:
+    def __init__(self, arg):
+        self.arg = arg
+
+    def __repr__(self):
+        return '<Coerce %s>' % self.arg
+
+    def __coerce__(self, other):
+        if isinstance(other, Coerce):
+            return self.arg, other.arg
+        else:
+            return self.arg, other
+
+class Cmp:
+    def __init__(self,arg):
+        self.arg = arg
+
+    def __repr__(self):
+        return '<Cmp %s>' % self.arg
+
+    def __cmp__(self, other):
+        return cmp(self.arg, other)
+
+
+candidates = [2, 2.0, 2L, 2+0j, [1], (3,), None, Empty(), Coerce(2), Cmp(2.0)]
+
+def test():
+    for a in candidates:
+        for b in candidates:
+            try:
+                x = a == b
+            except:
+                print 'cmp(%s, %s) => %s' % (a, b, sys.exc_info()[0])
+            else:
+                if x:
+                    print "%s == %s" % (a, b)
+                else:
+                    print "%s != %s" % (a, b)
+    # Ensure default comparison compares id() of args
+    L = []
+    for i in range(10):
+        L.insert(len(L)//2, Empty())
+    for a in L:
+        for b in L:
+            if cmp(a, b) != cmp(id(a), id(b)):
+                print "ERROR:", cmp(a, b), cmp(id(a), id(b)), id(a), id(b)
+
+test()
diff --git a/lib-python/2.2/test/test_compile.py b/lib-python/2.2/test/test_compile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_compile.py
@@ -0,0 +1,129 @@
+from test_support import verbose, TestFailed
+
+if verbose:
+    print "Testing whether compiler catches assignment to __debug__"
+
+try:
+    compile('__debug__ = 1', '?', 'single')
+except SyntaxError:
+    pass
+
+import __builtin__
+prev = __builtin__.__debug__
+setattr(__builtin__, '__debug__', 'sure')
+setattr(__builtin__, '__debug__', prev)
+
+if verbose:
+    print 'Running tests on argument handling'
+
+try:
+    exec 'def f(a, a): pass'
+    raise TestFailed, "duplicate arguments"
+except SyntaxError:
+    pass
+
+try:
+    exec 'def f(a = 0, a = 1): pass'
+    raise TestFailed, "duplicate keyword arguments"
+except SyntaxError:
+    pass
+
+try:
+    exec 'def f(a): global a; a = 1'
+    raise TestFailed, "variable is global and local"
+except SyntaxError:
+    pass
+
+if verbose:
+    print "testing complex args"
+
+def comp_args((a, b)):
+    print a,b
+
+comp_args((1, 2))
+
+def comp_args((a, b)=(3, 4)):
+    print a, b
+
+comp_args((1, 2))
+comp_args()
+
+def comp_args(a, (b, c)):
+    print a, b, c
+
+comp_args(1, (2, 3))
+
+def comp_args(a=2, (b, c)=(3, 4)):
+    print a, b, c
+
+comp_args(1, (2, 3))
+comp_args()
+
+try:
+    exec 'def f(a=1, (b, c)): pass'
+    raise TestFailed, "non-default args after default"
+except SyntaxError:
+    pass
+
+if verbose:
+    print "testing bad float literals"
+
+def expect_error(s):
+    try:
+        eval(s)
+        raise TestFailed("%r accepted" % s)
+    except SyntaxError:
+        pass
+
+expect_error("2e")
+expect_error("2.0e+")
+expect_error("1e-")
+expect_error("3-4e/21")
+
+
+if verbose:
+    print "testing literals with leading zeroes"
+
+def expect_same(test_source, expected):
+    got = eval(test_source)
+    if got != expected:
+        raise TestFailed("eval(%r) gave %r, but expected %r" %
+                         (test_source, got, expected))
+
+expect_error("077787")
+expect_error("0xj")
+expect_error("0x.")
+expect_error("0e")
+expect_same("0777", 511)
+expect_same("0777L", 511)
+expect_same("000777", 511)
+expect_same("0xff", 255)
+expect_same("0xffL", 255)
+expect_same("0XfF", 255)
+expect_same("0777.", 777)
+expect_same("0777.0", 777)
+expect_same("000000000000000000000000000000000000000000000000000777e0", 777)
+expect_same("0777e1", 7770)
+expect_same("0e0", 0)
+expect_same("0000E-012", 0)
+expect_same("09.5", 9.5)
+expect_same("0777j", 777j)
+expect_same("00j", 0j)
+expect_same("00.0", 0)
+expect_same("0e3", 0)
+expect_same("090000000000000.", 90000000000000.)
+expect_same("090000000000000.0000000000000000000000", 90000000000000.)
+expect_same("090000000000000e0", 90000000000000.)
+expect_same("090000000000000e-0", 90000000000000.)
+expect_same("090000000000000j", 90000000000000j)
+expect_error("090000000000000")  # plain octal literal w/ decimal digit
+expect_error("080000000000000")  # plain octal literal w/ decimal digit
+expect_error("000000000000009")  # plain octal literal w/ decimal digit
+expect_error("000000000000008")  # plain octal literal w/ decimal digit
+expect_same("000000000000007", 7)
+expect_same("000000000000008.", 8.)
+expect_same("000000000000009.", 9.)
+
+# Verify treatment of unary minus on negative numbers SF bug #660455
+expect_same("0xffffffff", -1)
+expect_same("-0xffffffff", 1)
diff --git a/lib-python/2.2/test/test_complex.py b/lib-python/2.2/test/test_complex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_complex.py
@@ -0,0 +1,68 @@
+from test_support import TestFailed
+from random import random
+
+# These tests ensure that complex math does the right thing; tests of
+# the complex() function/constructor are in test_b1.py.
+
+# XXX need many, many more tests here.
+
+nerrors = 0
+
+def check_close_real(x, y, eps=1e-9):
+    """Return true iff floats x and y "are close\""""
+    # put the one with larger magnitude second
+    if abs(x) > abs(y):
+        x, y = y, x
+    if y == 0:
+        return abs(x) < eps
+    if x == 0:
+        return abs(y) < eps
+    # check that relative difference < eps
+    return abs((x-y)/y) < eps
+
+def check_close(x, y, eps=1e-9):
+    """Return true iff complexes x and y "are close\""""
+    return check_close_real(x.real, y.real, eps) and \
+           check_close_real(x.imag, y.imag, eps)
+
+def test_div(x, y):
+    """Compute complex z=x*y, and check that z/x==y and z/y==x."""
+    global nerrors
+    z = x * y
+    if x != 0:
+        q = z / x
+        if not check_close(q, y):
+            nerrors += 1
+            print "%r / %r == %r but expected %r" % (z, x, q, y)
+    if y != 0:
+        q = z / y
+        if not check_close(q, x):
+            nerrors += 1
+            print "%r / %r == %r but expected %r" % (z, y, q, x)
+
+simple_real = [float(i) for i in range(-5, 6)]
+simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
+for x in simple_complex:
+    for y in simple_complex:
+        test_div(x, y)
+
+# A naive complex division algorithm (such as in 2.0) is very prone to
+# nonsense errors for these (overflows and underflows).
+test_div(complex(1e200, 1e200), 1+0j)
+test_div(complex(1e-200, 1e-200), 1+0j)
+
+# Just for fun.
+for i in range(100):
+    test_div(complex(random(), random()),
+             complex(random(), random()))
+
+try:
+    z = 1.0 / (0+0j)
+except ZeroDivisionError:
+    pass
+else:
+    nerrors += 1
+    raise TestFailed("Division by complex 0 didn't raise ZeroDivisionError")
+
+if nerrors:
+    raise TestFailed("%d tests failed" % nerrors)
diff --git a/lib-python/2.2/test/test_contains.py b/lib-python/2.2/test/test_contains.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_contains.py
@@ -0,0 +1,171 @@
+from test_support import TestFailed, have_unicode
+
+class base_set:
+
+    def __init__(self, el):
+        self.el = el
+
+class set(base_set):
+
+    def __contains__(self, el):
+        return self.el == el
+
+class seq(base_set):
+
+    def __getitem__(self, n):
+        return [self.el][n]
+
+def check(ok, *args):
+    if not ok:
+        raise TestFailed, " ".join(map(str, args))
+
+a = base_set(1)
+b = set(1)
+c = seq(1)
+
+check(1 in b, "1 not in set(1)")
+check(0 not in b, "0 in set(1)")
+check(1 in c, "1 not in seq(1)")
+check(0 not in c, "0 in seq(1)")
+
+try:
+    1 in a
+    check(0, "in base_set did not raise error")
+except TypeError:
+    pass
+
+try:
+    1 not in a
+    check(0, "not in base_set did not raise error")
+except TypeError:
+    pass
+
+# Test char in string
+
+check('c' in 'abc', "'c' not in 'abc'")
+check('d' not in 'abc', "'d' in 'abc'")
+
+try:
+    '' in 'abc'
+    check(0, "'' in 'abc' did not raise error")
+except TypeError:
+    pass
+
+try:
+    'ab' in 'abc'
+    check(0, "'ab' in 'abc' did not raise error")
+except TypeError:
+    pass
+
+try:
+    None in 'abc'
+    check(0, "None in 'abc' did not raise error")
+except TypeError:
+    pass
+
+
+if have_unicode:
+
+    # Test char in Unicode
+
+    check('c' in unicode('abc'), "'c' not in u'abc'")
+    check('d' not in unicode('abc'), "'d' in u'abc'")
+
+    try:
+        '' in unicode('abc')
+        check(0, "'' in u'abc' did not raise error")
+    except TypeError:
+        pass
+
+    try:
+        'ab' in unicode('abc')
+        check(0, "'ab' in u'abc' did not raise error")
+    except TypeError:
+        pass
+
+    try:
+        None in unicode('abc')
+        check(0, "None in u'abc' did not raise error")
+    except TypeError:
+        pass
+
+    # Test Unicode char in Unicode
+
+    check(unicode('c') in unicode('abc'), "u'c' not in u'abc'")
+    check(unicode('d') not in unicode('abc'), "u'd' in u'abc'")
+
+    try:
+        unicode('') in unicode('abc')
+        check(0, "u'' in u'abc' did not raise error")
+    except TypeError:
+        pass
+
+    try:
+        unicode('ab') in unicode('abc')
+        check(0, "u'ab' in u'abc' did not raise error")
+    except TypeError:
+        pass
+
+    # Test Unicode char in string
+
+    check(unicode('c') in 'abc', "u'c' not in 'abc'")
+    check(unicode('d') not in 'abc', "u'd' in 'abc'")
+
+    try:
+        unicode('') in 'abc'
+        check(0, "u'' in 'abc' did not raise error")
+    except TypeError:
+        pass
+
+    try:
+        unicode('ab') in 'abc'
+        check(0, "u'ab' in 'abc' did not raise error")
+    except TypeError:
+        pass
+
+# A collection of tests on builtin sequence types
+a = range(10)
+for i in a:
+    check(i in a, "%s not in %s" % (`i`, `a`))
+check(16 not in a, "16 not in %s" % `a`)
+check(a not in a, "%s not in %s" % (`a`, `a`))
+
+a = tuple(a)
+for i in a:
+    check(i in a, "%s not in %s" % (`i`, `a`))
+check(16 not in a, "16 not in %s" % `a`)
+check(a not in a, "%s not in %s" % (`a`, `a`))
+
+class Deviant1:
+    """Behaves strangely when compared
+
+    This class is designed to make sure that the contains code
+    works when the list is modified during the check.
+    """
+
+    aList = range(15)
+
+    def __cmp__(self, other):
+        if other == 12:
+            self.aList.remove(12)
+            self.aList.remove(13)
+            self.aList.remove(14)
+        return 1
+
+check(Deviant1() not in Deviant1.aList, "Deviant1 failed")
+
+class Deviant2:
+    """Behaves strangely when compared
+
+    This class raises an exception during comparison.  That in
+    turn causes the comparison to fail with a TypeError.
+    """
+
+    def __cmp__(self, other):
+        if other == 4:
+            raise RuntimeError, "gotcha"
+
+try:
+    check(Deviant2() not in a, "oops")
+except TypeError:
+    pass
diff --git a/lib-python/2.2/test/test_cookie.py b/lib-python/2.2/test/test_cookie.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_cookie.py
@@ -0,0 +1,47 @@
+# Simple test suite for Cookie.py
+
+from test_support import verify, verbose, run_doctest
+import Cookie
+
+# Currently this only tests SimpleCookie
+
+cases = [
+    ('chips=ahoy; vienna=finger', {'chips':'ahoy', 'vienna':'finger'}),
+    ('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;";',
+     {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'}),
+
+    # Check illegal cookies that have an '=' char in an unquoted value
+    ('keebler=E=mc2;', {'keebler' : 'E=mc2'})
+    ]
+
+for data, dict in cases:
+    C = Cookie.SimpleCookie() ; C.load(data)
+    print repr(C)
+    print str(C)
+    items = dict.items()
+    items.sort()
+    for k, v in items:
+        print ' ', k, repr( C[k].value ), repr(v)
+        verify(C[k].value == v)
+        print C[k]
+
+C = Cookie.SimpleCookie()
+C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
+
+verify(C['Customer'].value == 'WILE_E_COYOTE')
+verify(C['Customer']['version'] == '1')
+verify(C['Customer']['path'] == '/acme')
+
+print C.output(['path'])
+print C.js_output()
+print C.js_output(['path'])
+
+# Try cookie with quoted meta-data
+C = Cookie.SimpleCookie()
+C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
+verify(C['Customer'].value == 'WILE_E_COYOTE')
+verify(C['Customer']['version'] == '1')
+verify(C['Customer']['path'] == '/acme')
+
+print "If anything blows up after this line, it's from Cookie's doctest."
+run_doctest(Cookie)
diff --git a/lib-python/2.2/test/test_copy_reg.py b/lib-python/2.2/test/test_copy_reg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_copy_reg.py
@@ -0,0 +1,30 @@
+import copy_reg
+import test_support
+import unittest
+
+
+class C:
+    pass
+
+
+class CopyRegTestCase(unittest.TestCase):
+
+    def test_class(self):
+        self.assertRaises(TypeError, copy_reg.pickle,
+                          C, None, None)
+
+    def test_noncallable_reduce(self):
+        self.assertRaises(TypeError, copy_reg.pickle,
+                          type(1), "not a callable")
+
+    def test_noncallable_constructor(self):
+        self.assertRaises(TypeError, copy_reg.pickle,
+                          type(1), int, "not a callable")
+
+
+def test_main():
+    test_support.run_unittest(CopyRegTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_cpickle.py b/lib-python/2.2/test/test_cpickle.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_cpickle.py
@@ -0,0 +1,100 @@
+import cPickle
+import test_support
+import unittest
+from cStringIO import StringIO
+from pickletester import AbstractPickleTests, AbstractPickleModuleTests
+
+class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
+
+    def setUp(self):
+        self.dumps = cPickle.dumps
+        self.loads = cPickle.loads
+
+    error = cPickle.BadPickleGet
+    module = cPickle
+
+class cPicklePicklerTests(AbstractPickleTests):
+
+    def dumps(self, arg, bin=0):
+        f = StringIO()
+        p = cPickle.Pickler(f, bin)
+        p.dump(arg)
+        f.seek(0)
+        return f.read()
+
+    def loads(self, buf):
+        f = StringIO(buf)
+        p = cPickle.Unpickler(f)
+        return p.load()
+
+    error = cPickle.BadPickleGet
+
+class cPickleListPicklerTests(AbstractPickleTests):
+
+    def dumps(self, arg, bin=0):
+        p = cPickle.Pickler(bin)
+        p.dump(arg)
+        return p.getvalue()
+
+    def loads(self, *args):
+        f = StringIO(args[0])
+        p = cPickle.Unpickler(f)
+        return p.load()
+
+    error = cPickle.BadPickleGet
+
+class cPickleFastPicklerTests(AbstractPickleTests):
+
+    def dumps(self, arg, bin=0):
+        f = StringIO()
+        p = cPickle.Pickler(f, bin)
+        p.fast = 1
+        p.dump(arg)
+        f.seek(0)
+        return f.read()
+
+    def loads(self, *args):
+        f = StringIO(args[0])
+        p = cPickle.Unpickler(f)
+        return p.load()
+
+    error = cPickle.BadPickleGet
+
+    def test_recursive_list(self):
+        self.assertRaises(ValueError,
+                          AbstractPickleTests.test_recursive_list,
+                          self)
+
+    def test_recursive_inst(self):
+        self.assertRaises(ValueError,
+                          AbstractPickleTests.test_recursive_inst,
+                          self)
+
+    def test_recursive_dict(self):
+        self.assertRaises(ValueError,
+                          AbstractPickleTests.test_recursive_dict,
+                          self)
+
+    def test_recursive_multi(self):
+        self.assertRaises(ValueError,
+                          AbstractPickleTests.test_recursive_multi,
+                          self)
+
+    def test_nonrecursive_deep(self):
+        a = []
+        for i in range(100):
+            a = [a]
+        b = self.loads(self.dumps(a))
+        self.assertEqual(a, b)
+
+def test_main():
+    loader = unittest.TestLoader()
+    suite = unittest.TestSuite()
+    suite.addTest(loader.loadTestsFromTestCase(cPickleTests))
+    suite.addTest(loader.loadTestsFromTestCase(cPicklePicklerTests))
+    suite.addTest(loader.loadTestsFromTestCase(cPickleListPicklerTests))
+    suite.addTest(loader.loadTestsFromTestCase(cPickleFastPicklerTests))
+    test_support.run_suite(suite)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_crypt.py b/lib-python/2.2/test/test_crypt.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_crypt.py
@@ -0,0 +1,11 @@
+#! /usr/bin/env python
+"""Simple test script for cryptmodule.c
+   Roger E. Masse
+"""
+
+from test_support import verify, verbose
+import crypt
+
+c = crypt.crypt('mypassword', 'ab')
+if verbose:
+    print 'Test encryption: ', c
diff --git a/lib-python/2.2/test/test_curses.py b/lib-python/2.2/test/test_curses.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_curses.py
@@ -0,0 +1,210 @@
+#
+# Test script for the curses module
+#
+# This script doesn't actually display anything very coherent, but it
+# does call every method and function.
+#
+# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
+# getmouse(), ungetmouse(), init_color()
+#
+
+import curses, sys, tempfile
+
+# Optionally test curses module.  This currently requires that the
+# 'curses' resource be given on the regrtest command line using the -u
+# option.  If not available, nothing after this line will be executed.
+
+import test_support
+test_support.requires('curses')
+
+def window_funcs(stdscr):
+    "Test the methods of windows"
+    win = curses.newwin(10,10)
+    win = curses.newwin(5,5, 5,5)
+    win2 = curses.newwin(15,15, 5,5)
+
+    for meth in [stdscr.addch, stdscr.addstr]:
+        for args in [('a'), ('a', curses.A_BOLD),
+                     (4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
+            apply(meth, args)
+
+    for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
+                 stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
+                 stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
+                 stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
+                 stdscr.getparyx, stdscr.getyx, stdscr.inch,
+                 stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
+                 win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
+                 stdscr.standout, stdscr.standend, stdscr.syncdown,
+                 stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
+        meth()
+
+    stdscr.addnstr('1234', 3)
+    stdscr.addnstr('1234', 3, curses.A_BOLD)
+    stdscr.addnstr(4,4, '1234', 3)
+    stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
+
+    stdscr.attron(curses.A_BOLD)
+    stdscr.attroff(curses.A_BOLD)
+    stdscr.attrset(curses.A_BOLD)
+    stdscr.bkgd(' ')
+    stdscr.bkgd(' ', curses.A_REVERSE)
+    stdscr.bkgdset(' ')
+    stdscr.bkgdset(' ', curses.A_REVERSE)
+
+    win.border(65, 66, 67, 68,
+               69, 70, 71, 72)
+    win.border('|', '!', '-', '_',
+               '+', '\\', '#', '/')
+    try:
+        win.border(65, 66, 67, 68,
+                   69, [], 71, 72)
+    except TypeError:
+        pass
+    else:
+        raise RuntimeError, "Expected win.border() to raise TypeError"
+
+    stdscr.clearok(1)
+
+    win4 = stdscr.derwin(2,2)
+    win4 = stdscr.derwin(1,1, 5,5)
+    win4.mvderwin(9,9)
+
+    stdscr.echochar('a')
+    stdscr.echochar('a', curses.A_BOLD)
+    stdscr.hline('-', 5)
+    stdscr.hline('-', 5, curses.A_BOLD)
+    stdscr.hline(1,1,'-', 5)
+    stdscr.hline(1,1,'-', 5, curses.A_BOLD)
+
+    stdscr.idcok(1)
+    stdscr.idlok(1)
+    stdscr.immedok(1)
+    stdscr.insch('c')
+    stdscr.insdelln(1)
+    stdscr.insnstr('abc', 3)
+    stdscr.insnstr('abc', 3, curses.A_BOLD)
+    stdscr.insnstr(5, 5, 'abc', 3)
+    stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
+
+    stdscr.insstr('def')
+    stdscr.insstr('def', curses.A_BOLD)
+    stdscr.insstr(5, 5, 'def')
+    stdscr.insstr(5, 5, 'def', curses.A_BOLD)
+    stdscr.is_linetouched(0)
+    stdscr.keypad(1)
+    stdscr.leaveok(1)
+    stdscr.move(3,3)
+    win.mvwin(2,2)
+    stdscr.nodelay(1)
+    stdscr.notimeout(1)
+    win2.overlay(win)
+    win2.overwrite(win)
+    stdscr.redrawln(1,2)
+
+    stdscr.scrollok(1)
+    stdscr.scroll()
+    stdscr.scroll(2)
+    stdscr.scroll(-3)
+
+    stdscr.setscrreg(10,15)
+    win3 = stdscr.subwin(10,10)
+    win3 = stdscr.subwin(10,10, 5,5)
+    stdscr.syncok(1)
+    stdscr.timeout(5)
+    stdscr.touchline(5,5)
+    stdscr.touchline(5,5,0)
+    stdscr.vline('a', 3)
+    stdscr.vline('a', 3, curses.A_STANDOUT)
+    stdscr.vline(1,1, 'a', 3)
+    stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
+
+    if hasattr(curses, 'resize'):
+        stdscr.resize()
+    if hasattr(curses, 'enclose'):
+        stdscr.enclose()
+
+
+def module_funcs(stdscr):
+    "Test module-level functions"
+
+    for func in [curses.baudrate, curses.beep, curses.can_change_color,
+                 curses.cbreak, curses.def_prog_mode, curses.doupdate,
+                 curses.filter, curses.flash, curses.flushinp,
+                 curses.has_colors, curses.has_ic, curses.has_il,
+                 curses.isendwin, curses.killchar, curses.longname,
+                 curses.nocbreak, curses.noecho, curses.nonl,
+                 curses.noqiflush, curses.noraw,
+                 curses.reset_prog_mode, curses.termattrs,
+                 curses.termname, curses.erasechar, curses.getsyx]:
+        func()
+
+    # Functions that actually need arguments
+    curses.curs_set(1)
+    curses.delay_output(1)
+    curses.echo() ; curses.echo(1)
+
+    f = tempfile.TemporaryFile()
+    stdscr.putwin(f)
+    f.seek(0)
+    curses.getwin(f)
+    f.close()
+
+    curses.halfdelay(1)
+    curses.intrflush(1)
+    curses.meta(1)
+    curses.napms(100)
+    curses.newpad(50,50)
+    win = curses.newwin(5,5)
+    win = curses.newwin(5,5, 1,1)
+    curses.nl() ; curses.nl(1)
+    curses.putp('abc')
+    curses.qiflush()
+    curses.raw() ; curses.raw(1)
+    curses.setsyx(5,5)
+    curses.setupterm(fd=sys.__stdout__.fileno())
+    curses.tigetflag('hc')
+    curses.tigetnum('co')
+    curses.tigetstr('cr')
+    curses.tparm('cr')
+    curses.typeahead(sys.__stdin__.fileno())
+    curses.unctrl('a')
+    curses.ungetch('a')
+    curses.use_env(1)
+
+    # Functions only available on a few platforms
+    if curses.has_colors():
+        curses.start_color()
+        curses.init_pair(2, 1,1)
+        curses.color_content(1)
+        curses.color_pair(2)
+        curses.pair_content(curses.COLOR_PAIRS)
+        curses.pair_number(0)
+
+    if hasattr(curses, 'keyname'):
+        curses.keyname(13)
+
+    if hasattr(curses, 'has_key'):
+        curses.has_key(13)
+
+    if hasattr(curses, 'getmouse'):
+        curses.mousemask(curses.BUTTON1_PRESSED)
+        curses.mouseinterval(10)
+
+
+def main(stdscr):
+    curses.savetty()
+    try:
+        module_funcs(stdscr)
+        window_funcs(stdscr)
+    finally:
+        curses.resetty()
+
+if __name__ == '__main__':
+    curses.wrapper(main)
+else:
+    try:
+        stdscr = curses.initscr()
+        main(stdscr)
+    finally:
+        curses.endwin()
diff --git a/lib-python/2.2/test/test_dbm.py b/lib-python/2.2/test/test_dbm.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_dbm.py
@@ -0,0 +1,43 @@
+#! /usr/bin/env python
+"""Test script for the dbm module
+   Roger E. Masse
+"""
+import dbm
+from dbm import error
+from test_support import verbose, verify
+
+filename = '/tmp/delete_me'
+
+d = dbm.open(filename, 'c')
+verify(d.keys() == [])
+d['a'] = 'b'
+d['12345678910'] = '019237410982340912840198242'
+d.keys()
+if d.has_key('a'):
+    if verbose:
+        print 'Test dbm keys: ', d.keys()
+
+d.close()
+d = dbm.open(filename, 'r')
+d.close()
+d = dbm.open(filename, 'rw')
+d.close()
+d = dbm.open(filename, 'w')
+d.close()
+d = dbm.open(filename, 'n')
+d.close()
+
+try:
+    import os
+    if dbm.library == "ndbm":
+        # classic dbm
+        os.unlink(filename + '.dir')
+        os.unlink(filename + '.pag')
+    elif dbm.library == "BSD db":
+        # BSD DB's compatibility layer
+        os.unlink(filename + '.db')
+    else:
+        # GNU gdbm compatibility layer
+        os.unlink(filename)
+except:
+    pass
diff --git a/lib-python/2.2/test/test_descr.py b/lib-python/2.2/test/test_descr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_descr.py
@@ -0,0 +1,3276 @@
+# Test enhancements related to descriptors and new-style classes
+
+from test_support import verify, vereq, verbose, TestFailed, TESTFN, get_original_stdout
+from copy import deepcopy
+import warnings
+
+warnings.filterwarnings("ignore",
+         r'complex divmod\(\), // and % are deprecated$',
+         DeprecationWarning, r'(<string>|test_descr)$')
+
+def veris(a, b):
+    if a is not b:
+        raise TestFailed, "%r is %r" % (a, b)
+
+def testunop(a, res, expr="len(a)", meth="__len__"):
+    if verbose: print "checking", expr
+    dict = {'a': a}
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    vereq(m(a), res)
+    bm = getattr(a, meth)
+    vereq(bm(), res)
+
+def testbinop(a, b, res, expr="a+b", meth="__add__"):
+    if verbose: print "checking", expr
+    dict = {'a': a, 'b': b}
+
+    # XXX Hack so this passes before 2.3 when -Qnew is specified.
+    if meth == "__div__" and 1/2 == 0.5:
+        meth = "__truediv__"
+
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    vereq(m(a, b), res)
+    bm = getattr(a, meth)
+    vereq(bm(b), res)
+
+def testternop(a, b, c, res, expr="a[b:c]", meth="__getslice__"):
+    if verbose: print "checking", expr
+    dict = {'a': a, 'b': b, 'c': c}
+    vereq(eval(expr, dict), res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    vereq(m(a, b, c), res)
+    bm = getattr(a, meth)
+    vereq(bm(b, c), res)
+
+def testsetop(a, b, res, stmt="a+=b", meth="__iadd__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b)
+    vereq(dict['a'], res)
+
+def testset2op(a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b, 'c': c}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    m = getattr(t, meth)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b, c)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b, c)
+    vereq(dict['a'], res)
+
+def testset3op(a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
+    if verbose: print "checking", stmt
+    dict = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
+    exec stmt in dict
+    vereq(dict['a'], res)
+    t = type(a)
+    while meth not in t.__dict__:
+        t = t.__bases__[0]
+    m = getattr(t, meth)
+    vereq(m, t.__dict__[meth])
+    dict['a'] = deepcopy(a)
+    m(dict['a'], b, c, d)
+    vereq(dict['a'], res)
+    dict['a'] = deepcopy(a)
+    bm = getattr(dict['a'], meth)
+    bm(b, c, d)
+    vereq(dict['a'], res)
+
+def class_docstrings():
+    class Classic:
+        "A classic docstring."
+    vereq(Classic.__doc__, "A classic docstring.")
+    vereq(Classic.__dict__['__doc__'], "A classic docstring.")
+
+    class Classic2:
+        pass
+    verify(Classic2.__doc__ is None)
+
+    class NewStatic(object):
+        "Another docstring."
+    vereq(NewStatic.__doc__, "Another docstring.")
+    vereq(NewStatic.__dict__['__doc__'], "Another docstring.")
+
+    class NewStatic2(object):
+        pass
+    verify(NewStatic2.__doc__ is None)
+
+    class NewDynamic(object):
+        "Another docstring."
+    vereq(NewDynamic.__doc__, "Another docstring.")
+    vereq(NewDynamic.__dict__['__doc__'], "Another docstring.")
+
+    class NewDynamic2(object):
+        pass
+    verify(NewDynamic2.__doc__ is None)
+
+def lists():
+    if verbose: print "Testing list operations..."
+    testbinop([1], [2], [1,2], "a+b", "__add__")
+    testbinop([1,2,3], 2, 1, "b in a", "__contains__")
+    testbinop([1,2,3], 4, 0, "b in a", "__contains__")
+    testbinop([1,2,3], 1, 2, "a[b]", "__getitem__")
+    testternop([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
+    testsetop([1], [2], [1,2], "a+=b", "__iadd__")
+    testsetop([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
+    testunop([1,2,3], 3, "len(a)", "__len__")
+    testbinop([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
+    testbinop([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
+    testset2op([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
+    testset3op([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__")
+
+def dicts():
+    if verbose: print "Testing dict operations..."
+    testbinop({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
+    testbinop({1:2,3:4}, 1, 1, "b in a", "__contains__")
+    testbinop({1:2,3:4}, 2, 0, "b in a", "__contains__")
+    testbinop({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
+    d = {1:2,3:4}
+    l1 = []
+    for i in d.keys(): l1.append(i)
+    l = []
+    for i in iter(d): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in d.__iter__(): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in dict.__iter__(d): l.append(i)
+    vereq(l, l1)
+    d = {1:2, 3:4}
+    testunop(d, 2, "len(a)", "__len__")
+    vereq(eval(repr(d), {}), d)
+    vereq(eval(d.__repr__(), {}), d)
+    testset2op({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__")
+
+def dict_constructor():
+    if verbose:
+        print "Testing dict constructor ..."
+    d = dict()
+    vereq(d, {})
+    d = dict({})
+    vereq(d, {})
+    d = dict(items={})
+    vereq(d, {})
+    d = dict({1: 2, 'a': 'b'})
+    vereq(d, {1: 2, 'a': 'b'})
+    vereq(d, dict(d.items()))
+    vereq(d, dict(items=d.iteritems()))
+    for badarg in 0, 0L, 0j, "0", [0], (0,):
+        try:
+            dict(badarg)
+        except TypeError:
+            pass
+        except ValueError:
+            if badarg == "0":
+                # It's a sequence, and its elements are also sequences (gotta
+                # love strings <wink>), but they aren't of length 2, so this
+                # one seemed better as a ValueError than a TypeError.
+                pass
+            else:
+                raise TestFailed("no TypeError from dict(%r)" % badarg)
+        else:
+            raise TestFailed("no TypeError from dict(%r)" % badarg)
+    try:
+        dict(senseless={})
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from dict(senseless={})")
+
+    try:
+        dict({}, {})
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from dict({}, {})")
+
+    class Mapping:
+        # Lacks a .keys() method; will be added later.
+        dict = {1:2, 3:4, 'a':1j}
+
+    try:
+        dict(Mapping())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed("no TypeError from dict(incomplete mapping)")
+
+    Mapping.keys = lambda self: self.dict.keys()
+    Mapping.__getitem__ = lambda self, i: self.dict[i]
+    d = dict(items=Mapping())
+    vereq(d, Mapping.dict)
+
+    # Init from sequence of iterable objects, each producing a 2-sequence.
+    class AddressBookEntry:
+        def __init__(self, first, last):
+            self.first = first
+            self.last = last
+        def __iter__(self):
+            return iter([self.first, self.last])
+
+    d = dict([AddressBookEntry('Tim', 'Warsaw'),
+              AddressBookEntry('Barry', 'Peters'),
+              AddressBookEntry('Tim', 'Peters'),
+              AddressBookEntry('Barry', 'Warsaw')])
+    vereq(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
+
+    d = dict(zip(range(4), range(1, 5)))
+    vereq(d, dict([(i, i+1) for i in range(4)]))
+
+    # Bad sequence lengths.
+    for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
+        try:
+            dict(bad)
+        except ValueError:
+            pass
+        else:
+            raise TestFailed("no ValueError from dict(%r)" % bad)
+
+def test_dir():
+    if verbose:
+        print "Testing dir() ..."
+    junk = 12
+    vereq(dir(), ['junk'])
+    del junk
+
+    # Just make sure these don't blow up!
+    for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
+        dir(arg)
+
+    # Try classic classes.
+    class C:
+        Cdata = 1
+        def Cmethod(self): pass
+
+    cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__']
+    vereq(dir(C), cstuff)
+    verify('im_self' in dir(C.Cmethod))
+
+    c = C()  # c.__doc__ is an odd thing to see here; ditto c.__module__.
+    vereq(dir(c), cstuff)
+
+    c.cdata = 2
+    c.cmethod = lambda self: 0
+    vereq(dir(c), cstuff + ['cdata', 'cmethod'])
+    verify('im_self' in dir(c.Cmethod))
+
+    class A(C):
+        Adata = 1
+        def Amethod(self): pass
+
+    astuff = ['Adata', 'Amethod'] + cstuff
+    vereq(dir(A), astuff)
+    verify('im_self' in dir(A.Amethod))
+    a = A()
+    vereq(dir(a), astuff)
+    verify('im_self' in dir(a.Amethod))
+    a.adata = 42
+    a.amethod = lambda self: 3
+    vereq(dir(a), astuff + ['adata', 'amethod'])
+
+    # The same, but with new-style classes.  Since these have object as a
+    # base class, a lot more gets sucked in.
+    def interesting(strings):
+        return [s for s in strings if not s.startswith('_')]
+
+    class C(object):
+        Cdata = 1
+        def Cmethod(self): pass
+
+    cstuff = ['Cdata', 'Cmethod']
+    vereq(interesting(dir(C)), cstuff)
+
+    c = C()
+    vereq(interesting(dir(c)), cstuff)
+    verify('im_self' in dir(C.Cmethod))
+
+    c.cdata = 2
+    c.cmethod = lambda self: 0
+    vereq(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
+    verify('im_self' in dir(c.Cmethod))
+
+    class A(C):
+        Adata = 1
+        def Amethod(self): pass
+
+    astuff = ['Adata', 'Amethod'] + cstuff
+    vereq(interesting(dir(A)), astuff)
+    verify('im_self' in dir(A.Amethod))
+    a = A()
+    vereq(interesting(dir(a)), astuff)
+    a.adata = 42
+    a.amethod = lambda self: 3
+    vereq(interesting(dir(a)), astuff + ['adata', 'amethod'])
+    verify('im_self' in dir(a.Amethod))
+
+    # Try a module subclass.
+    import sys
+    class M(type(sys)):
+        pass
+    minstance = M()
+    minstance.b = 2
+    minstance.a = 1
+    vereq(dir(minstance), ['a', 'b'])
+
+    class M2(M):
+        def getdict(self):
+            return "Not a dict!"
+        __dict__ = property(getdict)
+
+    m2instance = M2()
+    m2instance.b = 2
+    m2instance.a = 1
+    vereq(m2instance.__dict__, "Not a dict!")
+    try:
+        dir(m2instance)
+    except TypeError:
+        pass
+
+    # Two essentially featureless objects, just inheriting stuff from
+    # object.
+    vereq(dir(None), dir(Ellipsis))
+
+    # Nasty test case for proxied objects
+    class Wrapper(object):
+        def __init__(self, obj):
+            self.__obj = obj
+        def __repr__(self):
+            return "Wrapper(%s)" % repr(self.__obj)
+        def __getitem__(self, key):
+            return Wrapper(self.__obj[key])
+        def __len__(self):
+            return len(self.__obj)
+        def __getattr__(self, name):
+            return Wrapper(getattr(self.__obj, name))
+
+    class C(object):
+        def __getclass(self):
+            return Wrapper(type(self))
+        __class__ = property(__getclass)
+
+    dir(C()) # This used to segfault
+
+binops = {
+    'add': '+',
+    'sub': '-',
+    'mul': '*',
+    'div': '/',
+    'mod': '%',
+    'divmod': 'divmod',
+    'pow': '**',
+    'lshift': '<<',
+    'rshift': '>>',
+    'and': '&',
+    'xor': '^',
+    'or': '|',
+    'cmp': 'cmp',
+    'lt': '<',
+    'le': '<=',
+    'eq': '==',
+    'ne': '!=',
+    'gt': '>',
+    'ge': '>=',
+    }
+
+for name, expr in binops.items():
+    if expr.islower():
+        expr = expr + "(a, b)"
+    else:
+        expr = 'a %s b' % expr
+    binops[name] = expr
+
+unops = {
+    'pos': '+',
+    'neg': '-',
+    'abs': 'abs',
+    'invert': '~',
+    'int': 'int',
+    'long': 'long',
+    'float': 'float',
+    'oct': 'oct',
+    'hex': 'hex',
+    }
+
+for name, expr in unops.items():
+    if expr.islower():
+        expr = expr + "(a)"
+    else:
+        expr = '%s a' % expr
+    unops[name] = expr
+
+def numops(a, b, skip=[]):
+    dict = {'a': a, 'b': b}
+    for name, expr in binops.items():
+        if name not in skip:
+            name = "__%s__" % name
+            if hasattr(a, name):
+                res = eval(expr, dict)
+                testbinop(a, b, res, expr, name)
+    for name, expr in unops.items():
+        if name not in skip:
+            name = "__%s__" % name
+            if hasattr(a, name):
+                res = eval(expr, dict)
+                testunop(a, res, expr, name)
+
+def ints():
+    if verbose: print "Testing int operations..."
+    numops(100, 3)
+    # The following crashes in Python 2.2
+    vereq((1).__nonzero__(), 1)
+    vereq((0).__nonzero__(), 0)
+    # This returns 'NotImplemented' in Python 2.2
+    class C(int):
+        def __add__(self, other):
+            return NotImplemented
+    try:
+        C() + ""
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "NotImplemented should have caused TypeError"
+
+def longs():
+    if verbose: print "Testing long operations..."
+    numops(100L, 3L)
+
+def floats():
+    if verbose: print "Testing float operations..."
+    numops(100.0, 3.0)
+
+def complexes():
+    if verbose: print "Testing complex operations..."
+    numops(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float'])
+    class Number(complex):
+        __slots__ = ['prec']
+        def __new__(cls, *args, **kwds):
+            result = complex.__new__(cls, *args)
+            result.prec = kwds.get('prec', 12)
+            return result
+        def __repr__(self):
+            prec = self.prec
+            if self.imag == 0.0:
+                return "%.*g" % (prec, self.real)
+            if self.real == 0.0:
+                return "%.*gj" % (prec, self.imag)
+            return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
+        __str__ = __repr__
+
+    a = Number(3.14, prec=6)
+    vereq(`a`, "3.14")
+    vereq(a.prec, 6)
+
+    a = Number(a, prec=2)
+    vereq(`a`, "3.1")
+    vereq(a.prec, 2)
+
+    a = Number(234.5)
+    vereq(`a`, "234.5")
+    vereq(a.prec, 12)
+
+def spamlists():
+    if verbose: print "Testing spamlist operations..."
+    import copy, xxsubtype as spam
+    def spamlist(l, memo=None):
+        import xxsubtype as spam
+        return spam.spamlist(l)
+    # This is an ugly hack:
+    copy._deepcopy_dispatch[spam.spamlist] = spamlist
+
+    testbinop(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b", "__add__")
+    testbinop(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
+    testbinop(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
+    testbinop(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
+    testternop(spamlist([1,2,3]), 0, 2, spamlist([1,2]),
+               "a[b:c]", "__getslice__")
+    testsetop(spamlist([1]), spamlist([2]), spamlist([1,2]),
+              "a+=b", "__iadd__")
+    testsetop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b", "__imul__")
+    testunop(spamlist([1,2,3]), 3, "len(a)", "__len__")
+    testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b", "__mul__")
+    testbinop(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a", "__rmul__")
+    testset2op(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c", "__setitem__")
+    testset3op(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
+               spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__")
+    # Test subclassing
+    class C(spam.spamlist):
+        def foo(self): return 1
+    a = C()
+    vereq(a, [])
+    vereq(a.foo(), 1)
+    a.append(100)
+    vereq(a, [100])
+    vereq(a.getstate(), 0)
+    a.setstate(42)
+    vereq(a.getstate(), 42)
+
+def spamdicts():
+    if verbose: print "Testing spamdict operations..."
+    import copy, xxsubtype as spam
+    def spamdict(d, memo=None):
+        import xxsubtype as spam
+        sd = spam.spamdict()
+        for k, v in d.items(): sd[k] = v
+        return sd
+    # This is an ugly hack:
+    copy._deepcopy_dispatch[spam.spamdict] = spamdict
+
+    testbinop(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)", "__cmp__")
+    testbinop(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
+    testbinop(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
+    testbinop(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
+    d = spamdict({1:2,3:4})
+    l1 = []
+    for i in d.keys(): l1.append(i)
+    l = []
+    for i in iter(d): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in d.__iter__(): l.append(i)
+    vereq(l, l1)
+    l = []
+    for i in type(spamdict({})).__iter__(d): l.append(i)
+    vereq(l, l1)
+    straightd = {1:2, 3:4}
+    spamd = spamdict(straightd)
+    testunop(spamd, 2, "len(a)", "__len__")
+    testunop(spamd, repr(straightd), "repr(a)", "__repr__")
+    testset2op(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
+               "a[b]=c", "__setitem__")
+    # Test subclassing
+    class C(spam.spamdict):
+        def foo(self): return 1
+    a = C()
+    vereq(a.items(), [])
+    vereq(a.foo(), 1)
+    a['foo'] = 'bar'
+    vereq(a.items(), [('foo', 'bar')])
+    vereq(a.getstate(), 0)
+    a.setstate(100)
+    vereq(a.getstate(), 100)
+
+def pydicts():
+    if verbose: print "Testing Python subclass of dict..."
+    verify(issubclass(dict, dict))
+    verify(isinstance({}, dict))
+    d = dict()
+    vereq(d, {})
+    verify(d.__class__ is dict)
+    verify(isinstance(d, dict))
+    class C(dict):
+        state = -1
+        def __init__(self, *a, **kw):
+            if a:
+                vereq(len(a), 1)
+                self.state = a[0]
+            if kw:
+                for k, v in kw.items(): self[v] = k
+        def __getitem__(self, key):
+            return self.get(key, 0)
+        def __setitem__(self, key, value):
+            verify(isinstance(key, type(0)))
+            dict.__setitem__(self, key, value)
+        def setstate(self, state):
+            self.state = state
+        def getstate(self):
+            return self.state
+    verify(issubclass(C, dict))
+    a1 = C(12)
+    vereq(a1.state, 12)
+    a2 = C(foo=1, bar=2)
+    vereq(a2[1] == 'foo' and a2[2], 'bar')
+    a = C()
+    vereq(a.state, -1)
+    vereq(a.getstate(), -1)
+    a.setstate(0)
+    vereq(a.state, 0)
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.state, 10)
+    vereq(a.getstate(), 10)
+    vereq(a[42], 0)
+    a[42] = 24
+    vereq(a[42], 24)
+    if verbose: print "pydict stress test ..."
+    N = 50
+    for i in range(N):
+        a[i] = C()
+        for j in range(N):
+            a[i][j] = i*j
+    for i in range(N):
+        for j in range(N):
+            vereq(a[i][j], i*j)
+
+def pylists():
+    if verbose: print "Testing Python subclass of list..."
+    class C(list):
+        def __getitem__(self, i):
+            return list.__getitem__(self, i) + 100
+        def __getslice__(self, i, j):
+            return (i, j)
+    a = C()
+    a.extend([0,1,2])
+    vereq(a[0], 100)
+    vereq(a[1], 101)
+    vereq(a[2], 102)
+    vereq(a[100:200], (100,200))
+
+def metaclass():
+    if verbose: print "Testing __metaclass__..."
+    class C:
+        __metaclass__ = type
+        def __init__(self):
+            self.__state = 0
+        def getstate(self):
+            return self.__state
+        def setstate(self, state):
+            self.__state = state
+    a = C()
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.getstate(), 10)
+    class D:
+        class __metaclass__(type):
+            def myself(cls): return cls
+    vereq(D.myself(), D)
+    d = D()
+    verify(d.__class__ is D)
+    class M1(type):
+        def __new__(cls, name, bases, dict):
+            dict['__spam__'] = 1
+            return type.__new__(cls, name, bases, dict)
+    class C:
+        __metaclass__ = M1
+    vereq(C.__spam__, 1)
+    c = C()
+    vereq(c.__spam__, 1)
+
+    class _instance(object):
+        pass
+    class M2(object):
+        def __new__(cls, name, bases, dict):
+            self = object.__new__(cls)
+            self.name = name
+            self.bases = bases
+            self.dict = dict
+            return self
+        __new__ = staticmethod(__new__)
+        def __call__(self):
+            it = _instance()
+            # Early binding of methods
+            for key in self.dict:
+                if key.startswith("__"):
+                    continue
+                setattr(it, key, self.dict[key].__get__(it, self))
+            return it
+    class C:
+        __metaclass__ = M2
+        def spam(self):
+            return 42
+    vereq(C.name, 'C')
+    vereq(C.bases, ())
+    verify('spam' in C.dict)
+    c = C()
+    vereq(c.spam(), 42)
+
+    # More metaclass examples
+
+    class autosuper(type):
+        # Automatically add __super to the class
+        # This trick only works for dynamic classes
+        def __new__(metaclass, name, bases, dict):
+            cls = super(autosuper, metaclass).__new__(metaclass,
+                                                      name, bases, dict)
+            # Name mangling for __super removes leading underscores
+            while name[:1] == "_":
+                name = name[1:]
+            if name:
+                name = "_%s__super" % name
+            else:
+                name = "__super"
+            setattr(cls, name, super(cls))
+            return cls
+    class A:
+        __metaclass__ = autosuper
+        def meth(self):
+            return "A"
+    class B(A):
+        def meth(self):
+            return "B" + self.__super.meth()
+    class C(A):
+        def meth(self):
+            return "C" + self.__super.meth()
+    class D(C, B):
+        def meth(self):
+            return "D" + self.__super.meth()
+    vereq(D().meth(), "DCBA")
+    class E(B, C):
+        def meth(self):
+            return "E" + self.__super.meth()
+    vereq(E().meth(), "EBCA")
+
+    class autoproperty(type):
+        # Automatically create property attributes when methods
+        # named _get_x and/or _set_x are found
+        def __new__(metaclass, name, bases, dict):
+            hits = {}
+            for key, val in dict.iteritems():
+                if key.startswith("_get_"):
+                    key = key[5:]
+                    get, set = hits.get(key, (None, None))
+                    get = val
+                    hits[key] = get, set
+                elif key.startswith("_set_"):
+                    key = key[5:]
+                    get, set = hits.get(key, (None, None))
+                    set = val
+                    hits[key] = get, set
+            for key, (get, set) in hits.iteritems():
+                dict[key] = property(get, set)
+            return super(autoproperty, metaclass).__new__(metaclass,
+                                                        name, bases, dict)
+    class A:
+        __metaclass__ = autoproperty
+        def _get_x(self):
+            return -self.__x
+        def _set_x(self, x):
+            self.__x = -x
+    a = A()
+    verify(not hasattr(a, "x"))
+    a.x = 12
+    vereq(a.x, 12)
+    vereq(a._A__x, -12)
+
+    class multimetaclass(autoproperty, autosuper):
+        # Merge of multiple cooperating metaclasses
+        pass
+    class A:
+        __metaclass__ = multimetaclass
+        def _get_x(self):
+            return "A"
+    class B(A):
+        def _get_x(self):
+            return "B" + self.__super._get_x()
+    class C(A):
+        def _get_x(self):
+            return "C" + self.__super._get_x()
+    class D(C, B):
+        def _get_x(self):
+            return "D" + self.__super._get_x()
+    vereq(D().x, "DCBA")
+
+    # Make sure type(x) doesn't call x.__class__.__init__
+    class T(type):
+        counter = 0
+        def __init__(self, *args):
+            T.counter += 1
+    class C:
+        __metaclass__ = T
+    vereq(T.counter, 1)
+    a = C()
+    vereq(type(a), C)
+    vereq(T.counter, 1)
+
+    class C(object): pass
+    c = C()
+    try: c()
+    except TypeError: pass
+    else: raise TestError, "calling object w/o call method should raise TypeError"
+
+def pymods():
+    """Test subclassing the module type: get/set/delete attribute hooks fire."""
+    if verbose: print "Testing Python subclass of module..."
+    log = []
+    import sys
+    MT = type(sys)  # the built-in module type
+    class MM(MT):
+        def __init__(self):
+            MT.__init__(self)
+        def __getattribute__(self, name):
+            log.append(("getattr", name))
+            return MT.__getattribute__(self, name)
+        def __setattr__(self, name, value):
+            log.append(("setattr", name, value))
+            MT.__setattr__(self, name, value)
+        def __delattr__(self, name):
+            log.append(("delattr", name))
+            MT.__delattr__(self, name)
+    a = MM()
+    a.foo = 12
+    x = a.foo
+    del a.foo
+    # Exactly one log entry per operation, in execution order.
+    vereq(log, [("setattr", "foo", 12),
+                ("getattr", "foo"),
+                ("delattr", "foo")])
+
+def multi():
+    """Test multiple inheritance: mixing built-in bases, and the MRO
+    produced when classic and new-style classes are combined."""
+    if verbose: print "Testing multiple inheritance..."
+    class C(object):
+        def __init__(self):
+            self.__state = 0
+        def getstate(self):
+            return self.__state
+        def setstate(self, state):
+            self.__state = state
+    a = C()
+    vereq(a.getstate(), 0)
+    a.setstate(10)
+    vereq(a.getstate(), 10)
+    # D inherits from both dict and C; both sets of behavior must work.
+    class D(dict, C):
+        def __init__(self):
+            type({}).__init__(self)
+            C.__init__(self)
+    d = D()
+    vereq(d.keys(), [])
+    d["hello"] = "world"
+    vereq(d.items(), [("hello", "world")])
+    vereq(d["hello"], "world")
+    vereq(d.getstate(), 0)
+    d.setstate(10)
+    vereq(d.getstate(), 10)
+    vereq(D.__mro__, (D, dict, C, object))
+
+    # SF bug #442833
+    class Node(object):
+        def __int__(self):
+            return int(self.foo())
+        def foo(self):
+            return "23"
+    class Frag(Node, list):
+        def foo(self):
+            return "42"
+    vereq(Node().__int__(), 23)
+    vereq(int(Node()), 23)
+    vereq(Frag().__int__(), 42)
+    vereq(int(Frag()), 42)
+
+    # MI mixing classic and new-style classes.
+
+    class A:
+        x = 1
+
+    class B(A):
+        pass
+
+    class C(A):
+        x = 2
+
+    class D(B, C):
+        pass
+    # Classic (depth-first) lookup finds A.x via B before C.x.
+    vereq(D.x, 1)
+
+    # Classic MRO is preserved for a classic base class.
+    class E(D, object):
+        pass
+    vereq(E.__mro__, (E, D, B, A, C, object))
+    vereq(E.x, 1)
+
+    # But with a mix of classic bases, their MROs are combined using
+    # new-style MRO.
+    class F(B, C, object):
+        pass
+    vereq(F.__mro__, (F, B, C, A, object))
+    vereq(F.x, 2)
+
+    # Try something else.
+    class C:
+        def cmethod(self):
+            return "C a"
+        def all_method(self):
+            return "C b"
+
+    class M1(C, object):
+        def m1method(self):
+            return "M1 a"
+        def all_method(self):
+            return "M1 b"
+
+    vereq(M1.__mro__, (M1, C, object))
+    m = M1()
+    vereq(m.cmethod(), "C a")
+    vereq(m.m1method(), "M1 a")
+    vereq(m.all_method(), "M1 b")
+
+    class D(C):
+        def dmethod(self):
+            return "D a"
+        def all_method(self):
+            return "D b"
+
+    # Note 'object' placed BEFORE a classic base in the bases tuple.
+    class M2(object, D):
+        def m2method(self):
+            return "M2 a"
+        def all_method(self):
+            return "M2 b"
+
+    vereq(M2.__mro__, (M2, object, D, C))
+    m = M2()
+    vereq(m.cmethod(), "C a")
+    vereq(m.dmethod(), "D a")
+    vereq(m.m2method(), "M2 a")
+    vereq(m.all_method(), "M2 b")
+
+    class M3(M1, object, M2):
+        def m3method(self):
+            return "M3 a"
+        def all_method(self):
+            return "M3 b"
+    # XXX Expected this (the commented-out result):
+    # vereq(M3.__mro__, (M3, M1, M2, object, D, C))
+    vereq(M3.__mro__, (M3, M1, M2, D, C, object))  # XXX ?
+    m = M3()
+    vereq(m.cmethod(), "C a")
+    vereq(m.dmethod(), "D a")
+    vereq(m.m1method(), "M1 a")
+    vereq(m.m2method(), "M2 a")
+    vereq(m.m3method(), "M3 a")
+    vereq(m.all_method(), "M3 b")
+
+    # A new-style class must have at least one new-style base.
+    class Classic:
+        pass
+    try:
+        class New(Classic):
+            __metaclass__ = type
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "new class with only classic bases - shouldn't be"
+
+def diamond():
+    """Test diamond-shaped inheritance: method resolution follows the
+    C3-style linearized __mro__, not classic depth-first search."""
+    if verbose: print "Testing multiple inheritance special cases..."
+    class A(object):
+        def spam(self): return "A"
+    vereq(A().spam(), "A")
+    class B(A):
+        def boo(self): return "B"
+        def spam(self): return "B"
+    vereq(B().spam(), "B")
+    vereq(B().boo(), "B")
+    class C(A):
+        def boo(self): return "C"
+    vereq(C().spam(), "A")
+    vereq(C().boo(), "C")
+    class D(B, C): pass
+    vereq(D().spam(), "B")
+    vereq(D().boo(), "B")
+    vereq(D.__mro__, (D, B, C, A, object))
+    class E(C, B): pass
+    vereq(E().spam(), "B")
+    vereq(E().boo(), "C")
+    vereq(E.__mro__, (E, C, B, A, object))
+    # F and G combine the two diamonds; base order decides the winner.
+    class F(D, E): pass
+    vereq(F().spam(), "B")
+    vereq(F().boo(), "B")
+    vereq(F.__mro__, (F, D, E, B, C, A, object))
+    class G(E, D): pass
+    vereq(G().spam(), "B")
+    vereq(G().boo(), "C")
+    vereq(G.__mro__, (G, E, D, C, B, A, object))
+
+def objects():
+    """Test the bare object type: no __dict__, no settable attributes;
+    a trivial subclass gains a working instance __dict__."""
+    if verbose: print "Testing object class..."
+    a = object()
+    vereq(a.__class__, object)
+    vereq(type(a), object)
+    b = object()
+    verify(a is not b)
+    verify(not hasattr(a, "foo"))
+    try:
+        a.foo = 12
+    except (AttributeError, TypeError):
+        pass
+    else:
+        verify(0, "object() should not allow setting a foo attribute")
+    verify(not hasattr(object(), "__dict__"))
+
+    class Cdict(object):
+        pass
+    x = Cdict()
+    vereq(x.__dict__, {})
+    x.foo = 1
+    vereq(x.foo, 1)
+    vereq(x.__dict__, {'foo': 1})
+
+def slots():
+    """Test __slots__: attribute restriction, absence of __dict__,
+    reference-count behavior (via __del__ counters), and GC of cycles."""
+    if verbose: print "Testing __slots__..."
+    class C0(object):
+        __slots__ = []
+    x = C0()
+    verify(not hasattr(x, "__dict__"))
+    verify(not hasattr(x, "foo"))
+
+    class C1(object):
+        __slots__ = ['a']
+    x = C1()
+    verify(not hasattr(x, "__dict__"))
+    verify(not hasattr(x, "a"))
+    x.a = 1
+    vereq(x.a, 1)
+    x.a = None
+    veris(x.a, None)
+    del x.a
+    verify(not hasattr(x, "a"))
+
+    class C3(object):
+        __slots__ = ['a', 'b', 'c']
+    x = C3()
+    verify(not hasattr(x, "__dict__"))
+    verify(not hasattr(x, 'a'))
+    verify(not hasattr(x, 'b'))
+    verify(not hasattr(x, 'c'))
+    x.a = 1
+    x.b = 2
+    x.c = 3
+    vereq(x.a, 1)
+    vereq(x.b, 2)
+    vereq(x.c, 3)
+
+    # Test leaks
+    # NOTE(review): Counted relies on __del__ running as soon as the last
+    # reference goes away -- a refcounting detail; timing may differ on
+    # non-refcounted implementations such as Jython.
+    class Counted(object):
+        counter = 0    # counts the number of instances alive
+        def __init__(self):
+            Counted.counter += 1
+        def __del__(self):
+            Counted.counter -= 1
+    class C(object):
+        __slots__ = ['a', 'b', 'c']
+    x = C()
+    x.a = Counted()
+    x.b = Counted()
+    x.c = Counted()
+    vereq(Counted.counter, 3)
+    del x
+    vereq(Counted.counter, 0)
+    # Subclass without __slots__ regains a __dict__ (x.z works).
+    class D(C):
+        pass
+    x = D()
+    x.a = Counted()
+    x.z = Counted()
+    vereq(Counted.counter, 2)
+    del x
+    vereq(Counted.counter, 0)
+    class E(D):
+        __slots__ = ['e']
+    x = E()
+    x.a = Counted()
+    x.z = Counted()
+    x.e = Counted()
+    vereq(Counted.counter, 3)
+    del x
+    vereq(Counted.counter, 0)
+
+    # Test cyclical leaks [SF bug 519621]
+    class F(object):
+        __slots__ = ['a', 'b']
+    log = []
+    s = F()
+    s.a = [Counted(), s]  # self-referential cycle through a slot
+    vereq(Counted.counter, 1)
+    s = None
+    import gc
+    gc.collect()
+    vereq(Counted.counter, 0)
+
+    # Test lookup leaks [SF bug 572567]
+    import sys,gc
+    class G(object):
+        def __cmp__(self, other):
+            return 0
+    g = G()
+    orig_objects = len(gc.get_objects())
+    for i in xrange(10):
+        g==g
+    # Comparison must not leave new objects behind.
+    new_objects = len(gc.get_objects())
+    vereq(orig_objects, new_objects)
+    class H(object):
+        __slots__ = ['a', 'b']
+        def __init__(self):
+            self.a = 1
+            self.b = 2
+        def __del__(self):
+            assert self.a == 1
+            assert self.b == 2
+
+    # Redirect stderr so any __del__ traceback is visible in stdout.
+    save_stderr = sys.stderr
+    sys.stderr = sys.stdout
+    h = H()
+    try:
+        del h
+    finally:
+        sys.stderr = save_stderr
+
+def dynamics():
+    """Test that attributes and special methods added to a class after
+    creation take effect on existing instances."""
+    if verbose: print "Testing class attribute propagation..."
+    class D(object):
+        pass
+    class E(D):
+        pass
+    class F(D):
+        pass
+    D.foo = 1
+    vereq(D.foo, 1)
+    # Test that dynamic attributes are inherited
+    vereq(E.foo, 1)
+    vereq(F.foo, 1)
+    # Test dynamic instances
+    class C(object):
+        pass
+    a = C()
+    verify(not hasattr(a, "foobar"))
+    C.foobar = 2
+    vereq(a.foobar, 2)
+    C.method = lambda self: 42
+    vereq(a.method(), 42)
+    # Special methods assigned post-hoc are honored for new-style classes.
+    C.__repr__ = lambda self: "C()"
+    vereq(repr(a), "C()")
+    C.__int__ = lambda self: 100
+    vereq(int(a), 100)
+    vereq(a.foobar, 2)
+    verify(not hasattr(a, "spam"))
+    def mygetattr(self, name):
+        if name == "spam":
+            return "spam"
+        raise AttributeError
+    C.__getattr__ = mygetattr
+    vereq(a.spam, "spam")
+    a.new = 12
+    vereq(a.new, 12)
+    def mysetattr(self, name, value):
+        if name == "spam":
+            raise AttributeError
+        return object.__setattr__(self, name, value)
+    C.__setattr__ = mysetattr
+    try:
+        a.spam = "not spam"
+    except AttributeError:
+        pass
+    else:
+        verify(0, "expected AttributeError")
+    vereq(a.spam, "spam")
+    class D(C):
+        pass
+    d = D()
+    d.foo = 1
+    vereq(d.foo, 1)
+
+    # Test handling of int*seq and seq*int
+    class I(int):
+        pass
+    vereq("a"*I(2), "aa")
+    vereq(I(2)*"a", "aa")
+    vereq(2*I(3), 6)
+    vereq(I(3)*2, 6)
+    vereq(I(3)*I(2), 6)
+
+    # Test handling of long*seq and seq*long
+    class L(long):
+        pass
+    vereq("a"*L(2L), "aa")
+    vereq(L(2L)*"a", "aa")
+    vereq(2*L(3), 6)
+    vereq(L(3)*2, 6)
+    vereq(L(3)*L(2), 6)
+
+    # Test comparison of classes with dynamic metaclasses
+    class dynamicmetaclass(type):
+        pass
+    class someclass:
+        __metaclass__ = dynamicmetaclass
+    verify(someclass != object)
+
+def errors():
+    """Test that illegal class definitions raise TypeError: conflicting
+    built-in layouts, non-type bases, and malformed __slots__."""
+    if verbose: print "Testing errors..."
+
+    # list and dict have incompatible instance layouts.
+    try:
+        class C(list, dict):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from both list and dict should be illegal")
+
+    try:
+        class C(object, None):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from non-type should be illegal")
+    class Classic:
+        pass
+
+    # type(len) is the built-in function type, which is not subclassable.
+    try:
+        class C(type(len)):
+            pass
+    except TypeError:
+        pass
+    else:
+        verify(0, "inheritance from CFunction should be illegal")
+
+    # __slots__ entries must be strings (or iterables of strings).
+    try:
+        class C(object):
+            __slots__ = 1
+    except TypeError:
+        pass
+    else:
+        verify(0, "__slots__ = 1 should be illegal")
+
+    try:
+        class C(object):
+            __slots__ = [1]
+    except TypeError:
+        pass
+    else:
+        verify(0, "__slots__ = [1] should be illegal")
+
+def classmethods():
+    """Test classmethod: binding to class vs. instance, inheritance,
+    descriptor __get__, interaction with super, and callability checks."""
+    if verbose: print "Testing class methods..."
+    class C(object):
+        def foo(*a): return a
+        goo = classmethod(foo)
+    c = C()
+    vereq(C.goo(1), (C, 1))
+    vereq(c.goo(1), (C, 1))
+    vereq(c.foo(1), (c, 1))
+    class D(C):
+        pass
+    d = D()
+    # The bound class is the subclass, not the defining class.
+    vereq(D.goo(1), (D, 1))
+    vereq(d.goo(1), (D, 1))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+    # Test for a specific crash (SF bug 528132)
+    def f(cls, arg): return (cls, arg)
+    ff = classmethod(f)
+    vereq(ff.__get__(0, int)(42), (int, 42))
+    vereq(ff.__get__(0)(42), (int, 42))
+
+    # Test super() with classmethods (SF bug 535444)
+    veris(C.goo.im_self, C)
+    veris(D.goo.im_self, D)
+    veris(super(D,D).goo.im_self, D)
+    veris(super(D,d).goo.im_self, D)
+    vereq(super(D,D).goo(), (D,))
+    vereq(super(D,d).goo(), (D,))
+
+    # Verify that argument is checked for callability (SF bug 753451)
+    try:
+        classmethod(1).__get__(1)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "classmethod should check for callability"
+
+def staticmethods():
+    """Test staticmethod: no implicit self/cls is passed, on the class
+    and on instances, including through a subclass."""
+    if verbose: print "Testing static methods..."
+    class C(object):
+        def foo(*a): return a
+        goo = staticmethod(foo)
+    c = C()
+    vereq(C.goo(1), (1,))
+    vereq(c.goo(1), (1,))
+    vereq(c.foo(1), (c, 1,))
+    class D(C):
+        pass
+    d = D()
+    vereq(D.goo(1), (1,))
+    vereq(d.goo(1), (1,))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+
+def classic():
+    """Test that classmethod also works inside classic (old-style) classes,
+    and that an unbound classic method can be shared across classes."""
+    if verbose: print "Testing classic classes..."
+    class C:
+        def foo(*a): return a
+        goo = classmethod(foo)
+    c = C()
+    vereq(C.goo(1), (C, 1))
+    vereq(c.goo(1), (C, 1))
+    vereq(c.foo(1), (c, 1))
+    class D(C):
+        pass
+    d = D()
+    vereq(D.goo(1), (D, 1))
+    vereq(d.goo(1), (D, 1))
+    vereq(d.foo(1), (d, 1))
+    vereq(D.foo(d, 1), (d, 1))
+    class E: # *not* subclassing from C
+        foo = C.foo
+    vereq(E().foo, C.foo) # i.e., unbound
+    verify(repr(C.foo.__get__(C())).startswith("<bound method "))
+
+def compattr():
+    """Test a hand-rolled data descriptor (get/set/delete), i.e. a
+    user-defined equivalent of property."""
+    if verbose: print "Testing computed attributes..."
+    class C(object):
+        class computed_attribute(object):
+            def __init__(self, get, set=None, delete=None):
+                self.__get = get
+                self.__set = set
+                self.__delete = delete
+            def __get__(self, obj, type=None):
+                return self.__get(obj)
+            def __set__(self, obj, value):
+                return self.__set(obj, value)
+            def __delete__(self, obj):
+                return self.__delete(obj)
+        def __init__(self):
+            self.__x = 0
+        def __get_x(self):
+            # Getter has a deliberate side effect: increments on every read.
+            x = self.__x
+            self.__x = x+1
+            return x
+        def __set_x(self, x):
+            self.__x = x
+        def __delete_x(self):
+            del self.__x
+        x = computed_attribute(__get_x, __set_x, __delete_x)
+    a = C()
+    vereq(a.x, 0)
+    vereq(a.x, 1)
+    a.x = 10
+    vereq(a.x, 10)
+    vereq(a.x, 11)
+    del a.x
+    vereq(hasattr(a, 'x'), 0)
+
+def newslot():
+    """Test overriding __new__ on a built-in subclass: __new__ runs first
+    and its instance state is visible to __init__; inherited by subclasses."""
+    if verbose: print "Testing __new__ slot override..."
+    class C(list):
+        def __new__(cls):
+            self = list.__new__(cls)
+            self.foo = 1
+            return self
+        def __init__(self):
+            self.foo = self.foo + 2
+    a = C()
+    vereq(a.foo, 3)
+    verify(a.__class__ is C)
+    class D(C):
+        pass
+    b = D()
+    vereq(b.foo, 3)
+    verify(b.__class__ is D)
+
+def altmro():
+    """Test type.mro()/__mro__ and that a metaclass may override mro()
+    to produce a custom (here, reversed) resolution order."""
+    if verbose: print "Testing mro() and overriding it..."
+    class A(object):
+        def f(self): return "A"
+    class B(A):
+        pass
+    class C(A):
+        def f(self): return "C"
+    class D(B, C):
+        pass
+    vereq(D.mro(), [D, B, C, A, object])
+    vereq(D.__mro__, (D, B, C, A, object))
+    vereq(D().f(), "C")
+    class PerverseMetaType(type):
+        def mro(cls):
+            L = type.mro(cls)
+            L.reverse()
+            return L
+    class X(A,B,C,D):
+        __metaclass__ = PerverseMetaType
+    # The reversed MRO makes lookups find A.f first.
+    vereq(X.__mro__, (object, A, C, B, D, X))
+    vereq(X().f(), "A")
+
+def overloading():
+    """Test operator overloading hooks: attribute access, item access,
+    and the (deprecated) slice protocol on a new-style class."""
+    if verbose: print "Testing operator overloading..."
+
+    class B(object):
+        "Intermediate class because object doesn't have a __setattr__"
+
+    class C(B):
+
+        def __getattr__(self, name):
+            if name == "foo":
+                return ("getattr", name)
+            else:
+                raise AttributeError
+        def __setattr__(self, name, value):
+            if name == "foo":
+                self.setattr = (name, value)
+            else:
+                return B.__setattr__(self, name, value)
+        def __delattr__(self, name):
+            if name == "foo":
+                self.delattr = name
+            else:
+                return B.__delattr__(self, name)
+
+        def __getitem__(self, key):
+            return ("getitem", key)
+        def __setitem__(self, key, value):
+            self.setitem = (key, value)
+        def __delitem__(self, key):
+            self.delitem = key
+
+        # __*slice__ are the Python 2 legacy slice hooks for a[i:j] forms.
+        def __getslice__(self, i, j):
+            return ("getslice", i, j)
+        def __setslice__(self, i, j, value):
+            self.setslice = (i, j, value)
+        def __delslice__(self, i, j):
+            self.delslice = (i, j)
+
+    a = C()
+    vereq(a.foo, ("getattr", "foo"))
+    a.foo = 12
+    vereq(a.setattr, ("foo", 12))
+    del a.foo
+    vereq(a.delattr, "foo")
+
+    vereq(a[12], ("getitem", 12))
+    a[12] = 21
+    vereq(a.setitem, (12, 21))
+    del a[12]
+    vereq(a.delitem, 12)
+
+    vereq(a[0:10], ("getslice", 0, 10))
+    a[0:10] = "foo"
+    vereq(a.setslice, (0, 10, "foo"))
+    del a[0:10]
+    vereq(a.delslice, (0, 10))
+
+def methods():
+    """Test method objects: unbound methods shared between classes, and a
+    bound method stored as a class attribute keeping its original instance."""
+    if verbose: print "Testing methods..."
+    class C(object):
+        def __init__(self, x):
+            self.x = x
+        def foo(self):
+            return self.x
+    c1 = C(1)
+    vereq(c1.foo(), 1)
+    class D(C):
+        boo = C.foo   # unbound: binds to the D instance at call time
+        goo = c1.foo  # already bound to c1; ignores the D instance
+    d2 = D(2)
+    vereq(d2.foo(), 2)
+    vereq(d2.boo(), 2)
+    vereq(d2.goo(), 1)
+    class E(object):
+        foo = C.foo
+    vereq(E().foo, C.foo) # i.e., unbound
+    verify(repr(C.foo.__get__(C(1))).startswith("<bound method "))
+
+def specials():
+    """Test default and overridden special operators (__hash__, __cmp__,
+    __eq__, __nonzero__, __str__/__repr__, __contains__), plus a guard
+    against recursive printing via __str__ (SF-style recursion bug)."""
+    # Test operators like __hash__ for which a built-in default exists
+    if verbose: print "Testing special operators..."
+    # Test the default behavior for static classes
+    class C(object):
+        def __getitem__(self, i):
+            if 0 <= i < 10: return i
+            raise IndexError
+    c1 = C()
+    c2 = C()
+    verify(not not c1)
+    # NOTE(review): default hash == id is a CPython detail; this test is
+    # being vendored for Jython, where the equality may hold differently.
+    vereq(hash(c1), id(c1))
+    vereq(cmp(c1, c2), cmp(id(c1), id(c2)))
+    vereq(c1, c1)
+    verify(c1 != c2)
+    verify(not c1 != c1)
+    verify(not c1 == c2)
+    # Note that the module name appears in str/repr, and that varies
+    # depending on whether this test is run standalone or from a framework.
+    verify(str(c1).find('C object at ') >= 0)
+    vereq(str(c1), repr(c1))
+    # 'in' falls back to __getitem__ iteration when __contains__ is absent.
+    verify(-1 not in c1)
+    for i in range(10):
+        verify(i in c1)
+    verify(10 not in c1)
+    # Test the default behavior for dynamic classes
+    class D(object):
+        def __getitem__(self, i):
+            if 0 <= i < 10: return i
+            raise IndexError
+    d1 = D()
+    d2 = D()
+    verify(not not d1)
+    vereq(hash(d1), id(d1))
+    vereq(cmp(d1, d2), cmp(id(d1), id(d2)))
+    vereq(d1, d1)
+    verify(d1 != d2)
+    verify(not d1 != d1)
+    verify(not d1 == d2)
+    # Note that the module name appears in str/repr, and that varies
+    # depending on whether this test is run standalone or from a framework.
+    verify(str(d1).find('D object at ') >= 0)
+    vereq(str(d1), repr(d1))
+    verify(-1 not in d1)
+    for i in range(10):
+        verify(i in d1)
+    verify(10 not in d1)
+    # Test overridden behavior for static classes
+    class Proxy(object):
+        def __init__(self, x):
+            self.x = x
+        def __nonzero__(self):
+            return not not self.x
+        def __hash__(self):
+            return hash(self.x)
+        def __eq__(self, other):
+            return self.x == other
+        def __ne__(self, other):
+            return self.x != other
+        def __cmp__(self, other):
+            return cmp(self.x, other.x)
+        def __str__(self):
+            return "Proxy:%s" % self.x
+        def __repr__(self):
+            return "Proxy(%r)" % self.x
+        def __contains__(self, value):
+            return value in self.x
+    p0 = Proxy(0)
+    p1 = Proxy(1)
+    p_1 = Proxy(-1)
+    verify(not p0)
+    verify(not not p1)
+    vereq(hash(p0), hash(0))
+    vereq(p0, p0)
+    verify(p0 != p1)
+    verify(not p0 != p0)
+    vereq(not p0, p1)
+    vereq(cmp(p0, p1), -1)
+    vereq(cmp(p0, p0), 0)
+    vereq(cmp(p0, p_1), 1)
+    vereq(str(p0), "Proxy:0")
+    vereq(repr(p0), "Proxy(0)")
+    p10 = Proxy(range(10))
+    verify(-1 not in p10)
+    for i in range(10):
+        verify(i in p10)
+    verify(10 not in p10)
+    # Test overridden behavior for dynamic classes
+    class DProxy(object):
+        def __init__(self, x):
+            self.x = x
+        def __nonzero__(self):
+            return not not self.x
+        def __hash__(self):
+            return hash(self.x)
+        def __eq__(self, other):
+            return self.x == other
+        def __ne__(self, other):
+            return self.x != other
+        def __cmp__(self, other):
+            return cmp(self.x, other.x)
+        def __str__(self):
+            return "DProxy:%s" % self.x
+        def __repr__(self):
+            return "DProxy(%r)" % self.x
+        def __contains__(self, value):
+            return value in self.x
+    p0 = DProxy(0)
+    p1 = DProxy(1)
+    p_1 = DProxy(-1)
+    verify(not p0)
+    verify(not not p1)
+    vereq(hash(p0), hash(0))
+    vereq(p0, p0)
+    verify(p0 != p1)
+    verify(not p0 != p0)
+    vereq(not p0, p1)
+    vereq(cmp(p0, p1), -1)
+    vereq(cmp(p0, p0), 0)
+    vereq(cmp(p0, p_1), 1)
+    vereq(str(p0), "DProxy:0")
+    vereq(repr(p0), "DProxy(0)")
+    p10 = DProxy(range(10))
+    verify(-1 not in p10)
+    for i in range(10):
+        verify(i in p10)
+    verify(10 not in p10)
+    # Safety test for __cmp__
+    # Calling T.__cmp__ with a mismatched other type must raise TypeError.
+    def unsafecmp(a, b):
+        try:
+            a.__class__.__cmp__(a, b)
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %s.__cmp__(%r, %r)" % (
+                a.__class__, a, b)
+    unsafecmp(u"123", "123")
+    unsafecmp("123", u"123")
+    unsafecmp(1, 1.0)
+    unsafecmp(1.0, 1)
+    unsafecmp(1, 1L)
+    unsafecmp(1L, 1)
+
+    # __str__ returning self would recurse forever when printed; the
+    # interpreter is expected to detect this and raise RuntimeError.
+    class Letter(str):
+        def __new__(cls, letter):
+            if letter == 'EPS':
+                return str.__new__(cls)
+            return str.__new__(cls, letter)
+        def __str__(self):
+            if not self:
+                return 'EPS'
+            return self 
+
+    # sys.stdout needs to be the original to trigger the recursion bug
+    import sys
+    test_stdout = sys.stdout
+    sys.stdout = get_original_stdout()
+    try:
+        # nothing should actually be printed, this should raise an exception
+        print Letter('w')
+    except RuntimeError:
+        pass
+    else:
+        raise TestFailed, "expected a RuntimeError for print recursion"
+    sys.stdout = test_stdout
+
+def weakrefs():
+    """Test weak references to new-style instances: plain classes are
+    weak-referenceable, __slots__ classes only if '__weakref__' is listed."""
+    if verbose: print "Testing weak references..."
+    import weakref
+    class C(object):
+        pass
+    c = C()
+    r = weakref.ref(c)
+    verify(r() is c)
+    del c
+    verify(r() is None)
+    del r
+    class NoWeak(object):
+        __slots__ = ['foo']
+    no = NoWeak()
+    try:
+        weakref.ref(no)
+    except TypeError, msg:
+        verify(str(msg).find("weak reference") >= 0)
+    else:
+        verify(0, "weakref.ref(no) should be illegal")
+    # Adding '__weakref__' to __slots__ re-enables weak references.
+    class Weak(object):
+        __slots__ = ['foo', '__weakref__']
+    yes = Weak()
+    r = weakref.ref(yes)
+    verify(r() is yes)
+    del yes
+    verify(r() is None)
+    del r
+
+def properties():
+    """Test property: get/set/delete via attribute and descriptor API,
+    read-only fget/fset/fdel/__doc__ attributes, and error propagation
+    out of a property used as __getitem__."""
+    if verbose: print "Testing property..."
+    class C(object):
+        def getx(self):
+            return self.__x
+        def setx(self, value):
+            self.__x = value
+        def delx(self):
+            del self.__x
+        x = property(getx, setx, delx, doc="I'm the x property.")
+    a = C()
+    verify(not hasattr(a, "x"))
+    a.x = 42
+    vereq(a._C__x, 42)  # stored under the name-mangled attribute
+    vereq(a.x, 42)
+    del a.x
+    verify(not hasattr(a, "x"))
+    verify(not hasattr(a, "_C__x"))
+    # Descriptor protocol invoked directly on the property object.
+    C.x.__set__(a, 100)
+    vereq(C.x.__get__(a), 100)
+    C.x.__delete__(a)
+    verify(not hasattr(a, "x"))
+
+    raw = C.__dict__['x']
+    verify(isinstance(raw, property))
+
+    attrs = dir(raw)
+    verify("__doc__" in attrs)
+    verify("fget" in attrs)
+    verify("fset" in attrs)
+    verify("fdel" in attrs)
+
+    vereq(raw.__doc__, "I'm the x property.")
+    verify(raw.fget is C.__dict__['getx'])
+    verify(raw.fset is C.__dict__['setx'])
+    verify(raw.fdel is C.__dict__['delx'])
+
+    # All four property attributes must be read-only.
+    for attr in "__doc__", "fget", "fset", "fdel":
+        try:
+            setattr(raw, attr, 42)
+        except TypeError, msg:
+            if str(msg).find('readonly') < 0:
+                raise TestFailed("when setting readonly attr %r on a "
+                                 "property, got unexpected TypeError "
+                                 "msg %r" % (attr, str(msg)))
+        else:
+            raise TestFailed("expected TypeError from trying to set "
+                             "readonly %r attr on a property" % attr)
+
+    class D(object):
+        __getitem__ = property(lambda s: 1/0)
+
+    # Iteration triggers __getitem__; the ZeroDivisionError from the
+    # property getter must propagate, not be masked.
+    d = D()
+    try:
+        for i in d:
+            str(i)
+    except ZeroDivisionError:
+        pass
+    else:
+        raise TestFailed, "expected ZeroDivisionError from bad property"
+
+def supers():
+    """Test super(): instance-bound and class-bound (unbound descriptor)
+    forms, cooperative chains across a diamond, subclassing super itself,
+    and the TypeErrors raised for mismatched obj/type arguments."""
+    if verbose: print "Testing super..."
+
+    class A(object):
+        def meth(self, a):
+            return "A(%r)" % a
+
+    vereq(A().meth(1), "A(1)")
+
+    class B(A):
+        def __init__(self):
+            # Stored as self._B__super via name mangling.
+            self.__super = super(B, self)
+        def meth(self, a):
+            return "B(%r)" % a + self.__super.meth(a)
+
+    vereq(B().meth(2), "B(2)A(2)")
+
+    class C(A):
+        def meth(self, a):
+            return "C(%r)" % a + self.__super.meth(a)
+    # Unbound super stored on the class; mangled name assigned explicitly.
+    C._C__super = super(C)
+
+    vereq(C().meth(3), "C(3)A(3)")
+
+    class D(C, B):
+        def meth(self, a):
+            return "D(%r)" % a + super(D, self).meth(a)
+
+    # Cooperative chain follows D's MRO: D -> C -> B -> A.
+    vereq(D().meth(4), "D(4)C(4)B(4)A(4)")
+
+    # Test for subclassing super
+
+    class mysuper(super):
+        def __init__(self, *args):
+            return super(mysuper, self).__init__(*args)
+
+    class E(D):
+        def meth(self, a):
+            return "E(%r)" % a + mysuper(E, self).meth(a)
+
+    vereq(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
+
+    class F(E):
+        def meth(self, a):
+            s = self.__super
+            return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
+    F._F__super = mysuper(F)
+
+    vereq(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
+
+    # Make sure certain errors are raised
+
+    try:
+        super(D, 42)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D, 42)"
+
+    try:
+        super(D, C())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D, C())"
+
+    try:
+        super(D).__get__(12)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D).__get__(12)"
+
+    try:
+        super(D).__get__(C())
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't allow super(D).__get__(C())"
+
+def inherits():
+    if verbose: print "Testing inheritance from basic types..."
+
+    class hexint(int):
+        def __repr__(self):
+            return hex(self)
+        def __add__(self, other):
+            return hexint(int.__add__(self, other))
+        # (Note that overriding __radd__ doesn't work,
+        # because the int type gets first dibs.)
+    vereq(repr(hexint(7) + 9), "0x10")
+    vereq(repr(hexint(1000) + 7), "0x3ef")
+    a = hexint(12345)
+    vereq(a, 12345)
+    vereq(int(a), 12345)
+    verify(int(a).__class__ is int)
+    vereq(hash(a), hash(12345))
+    verify((+a).__class__ is int)
+    verify((a >> 0).__class__ is int)
+    verify((a << 0).__class__ is int)
+    verify((hexint(0) << 12).__class__ is int)
+    verify((hexint(0) >> 12).__class__ is int)
+
+    class octlong(long):
+        __slots__ = []
+        def __str__(self):
+            s = oct(self)
+            if s[-1] == 'L':
+                s = s[:-1]
+            return s
+        def __add__(self, other):
+            return self.__class__(super(octlong, self).__add__(other))
+        __radd__ = __add__
+    vereq(str(octlong(3) + 5), "010")
+    # (Note that overriding __radd__ here only seems to work
+    # because the example uses a short int left argument.)
+    vereq(str(5 + octlong(3000)), "05675")
+    a = octlong(12345)
+    vereq(a, 12345L)
+    vereq(long(a), 12345L)
+    vereq(hash(a), hash(12345L))
+    verify(long(a).__class__ is long)
+    verify((+a).__class__ is long)
+    verify((-a).__class__ is long)
+    verify((-octlong(0)).__class__ is long)
+    verify((a >> 0).__class__ is long)
+    verify((a << 0).__class__ is long)
+    verify((a - 0).__class__ is long)
+    verify((a * 1).__class__ is long)
+    verify((a ** 1).__class__ is long)
+    verify((a // 1).__class__ is long)
+    verify((1 * a).__class__ is long)
+    verify((a | 0).__class__ is long)
+    verify((a ^ 0).__class__ is long)
+    verify((a & -1L).__class__ is long)
+    verify((octlong(0) << 12).__class__ is long)
+    verify((octlong(0) >> 12).__class__ is long)
+    verify(abs(octlong(0)).__class__ is long)
+
+    # Because octlong overrides __add__, we can't check the absence of +0
+    # optimizations using octlong.
+    class longclone(long):
+        pass
+    a = longclone(1)
+    verify((a + 0).__class__ is long)
+    verify((0 + a).__class__ is long)
+
+    # Check that negative clones don't segfault
+    a = longclone(-1)
+    vereq(a.__dict__, {})
+    vereq(long(a), -1)  # verify PyNumber_Long() copies the sign bit
+
+    class precfloat(float):
+        __slots__ = ['prec']
+        def __init__(self, value=0.0, prec=12):
+            self.prec = int(prec)
+            float.__init__(value)
+        def __repr__(self):
+            return "%.*g" % (self.prec, self)
+    vereq(repr(precfloat(1.1)), "1.1")
+    a = precfloat(12345)
+    vereq(a, 12345.0)
+    vereq(float(a), 12345.0)
+    verify(float(a).__class__ is float)
+    vereq(hash(a), hash(12345.0))
+    verify((+a).__class__ is float)
+
+    class madcomplex(complex):
+        def __repr__(self):
+            return "%.17gj%+.17g" % (self.imag, self.real)
+    a = madcomplex(-3, 4)
+    vereq(repr(a), "4j-3")
+    base = complex(-3, 4)
+    veris(base.__class__, complex)
+    vereq(a, base)
+    vereq(complex(a), base)
+    veris(complex(a).__class__, complex)
+    a = madcomplex(a)  # just trying another form of the constructor
+    vereq(repr(a), "4j-3")
+    vereq(a, base)
+    vereq(complex(a), base)
+    veris(complex(a).__class__, complex)
+    vereq(hash(a), hash(base))
+    veris((+a).__class__, complex)
+    veris((a + 0).__class__, complex)
+    vereq(a + 0, base)
+    veris((a - 0).__class__, complex)
+    vereq(a - 0, base)
+    veris((a * 1).__class__, complex)
+    vereq(a * 1, base)
+    veris((a / 1).__class__, complex)
+    vereq(a / 1, base)
+
+    class madtuple(tuple):
+        _rev = None
+        def rev(self):
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__(L)
+            return self._rev
+    a = madtuple((1,2,3,4,5,6,7,8,9,0))
+    vereq(a, (1,2,3,4,5,6,7,8,9,0))
+    vereq(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
+    vereq(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
+    for i in range(512):
+        t = madtuple(range(i))
+        u = t.rev()
+        v = u.rev()
+        vereq(v, t)
+    a = madtuple((1,2,3,4,5))
+    vereq(tuple(a), (1,2,3,4,5))
+    verify(tuple(a).__class__ is tuple)
+    vereq(hash(a), hash((1,2,3,4,5)))
+    verify(a[:].__class__ is tuple)
+    verify((a * 1).__class__ is tuple)
+    verify((a * 0).__class__ is tuple)
+    verify((a + ()).__class__ is tuple)
+    a = madtuple(())
+    vereq(tuple(a), ())
+    verify(tuple(a).__class__ is tuple)
+    verify((a + a).__class__ is tuple)
+    verify((a * 0).__class__ is tuple)
+    verify((a * 1).__class__ is tuple)
+    verify((a * 2).__class__ is tuple)
+    verify(a[:].__class__ is tuple)
+
+    class madstring(str):
+        _rev = None
+        def rev(self):
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__("".join(L))
+            return self._rev
+    s = madstring("abcdefghijklmnopqrstuvwxyz")
+    vereq(s, "abcdefghijklmnopqrstuvwxyz")
+    vereq(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
+    vereq(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
+    for i in range(256):
+        s = madstring("".join(map(chr, range(i))))
+        t = s.rev()
+        u = t.rev()
+        vereq(u, s)
+    s = madstring("12345")
+    vereq(str(s), "12345")
+    verify(str(s).__class__ is str)
+
+    base = "\x00" * 5
+    s = madstring(base)
+    vereq(s, base)
+    vereq(str(s), base)
+    verify(str(s).__class__ is str)
+    vereq(hash(s), hash(base))
+    vereq({s: 1}[base], 1)
+    vereq({base: 1}[s], 1)
+    verify((s + "").__class__ is str)
+    vereq(s + "", base)
+    verify(("" + s).__class__ is str)
+    vereq("" + s, base)
+    verify((s * 0).__class__ is str)
+    vereq(s * 0, "")
+    verify((s * 1).__class__ is str)
+    vereq(s * 1, base)
+    verify((s * 2).__class__ is str)
+    vereq(s * 2, base + base)
+    verify(s[:].__class__ is str)
+    vereq(s[:], base)
+    verify(s[0:0].__class__ is str)
+    vereq(s[0:0], "")
+    verify(s.strip().__class__ is str)
+    vereq(s.strip(), base)
+    verify(s.lstrip().__class__ is str)
+    vereq(s.lstrip(), base)
+    verify(s.rstrip().__class__ is str)
+    vereq(s.rstrip(), base)
+    identitytab = ''.join([chr(i) for i in range(256)])
+    verify(s.translate(identitytab).__class__ is str)
+    vereq(s.translate(identitytab), base)
+    verify(s.translate(identitytab, "x").__class__ is str)
+    vereq(s.translate(identitytab, "x"), base)
+    vereq(s.translate(identitytab, "\x00"), "")
+    verify(s.replace("x", "x").__class__ is str)
+    vereq(s.replace("x", "x"), base)
+    verify(s.ljust(len(s)).__class__ is str)
+    vereq(s.ljust(len(s)), base)
+    verify(s.rjust(len(s)).__class__ is str)
+    vereq(s.rjust(len(s)), base)
+    verify(s.center(len(s)).__class__ is str)
+    vereq(s.center(len(s)), base)
+    verify(s.lower().__class__ is str)
+    vereq(s.lower(), base)
+
+    s = madstring("x y")
+    vereq(s, "x y")
+    verify(intern(s).__class__ is str)
+    verify(intern(s) is intern("x y"))
+    vereq(intern(s), "x y")
+
+    i = intern("y x")
+    s = madstring("y x")
+    vereq(s, i)
+    verify(intern(s).__class__ is str)
+    verify(intern(s) is i)
+
+    s = madstring(i)
+    verify(intern(s).__class__ is str)
+    verify(intern(s) is i)
+
+    class madunicode(unicode):
+        _rev = None
+        def rev(self):
+            if self._rev is not None:
+                return self._rev
+            L = list(self)
+            L.reverse()
+            self._rev = self.__class__(u"".join(L))
+            return self._rev
+    u = madunicode("ABCDEF")
+    vereq(u, u"ABCDEF")
+    vereq(u.rev(), madunicode(u"FEDCBA"))
+    vereq(u.rev().rev(), madunicode(u"ABCDEF"))
+    base = u"12345"
+    u = madunicode(base)
+    vereq(unicode(u), base)
+    verify(unicode(u).__class__ is unicode)
+    vereq(hash(u), hash(base))
+    vereq({u: 1}[base], 1)
+    vereq({base: 1}[u], 1)
+    verify(u.strip().__class__ is unicode)
+    vereq(u.strip(), base)
+    verify(u.lstrip().__class__ is unicode)
+    vereq(u.lstrip(), base)
+    verify(u.rstrip().__class__ is unicode)
+    vereq(u.rstrip(), base)
+    verify(u.replace(u"x", u"x").__class__ is unicode)
+    vereq(u.replace(u"x", u"x"), base)
+    verify(u.replace(u"xy", u"xy").__class__ is unicode)
+    vereq(u.replace(u"xy", u"xy"), base)
+    verify(u.center(len(u)).__class__ is unicode)
+    vereq(u.center(len(u)), base)
+    verify(u.ljust(len(u)).__class__ is unicode)
+    vereq(u.ljust(len(u)), base)
+    verify(u.rjust(len(u)).__class__ is unicode)
+    vereq(u.rjust(len(u)), base)
+    verify(u.lower().__class__ is unicode)
+    vereq(u.lower(), base)
+    verify(u.upper().__class__ is unicode)
+    vereq(u.upper(), base)
+    verify(u.capitalize().__class__ is unicode)
+    vereq(u.capitalize(), base)
+    verify(u.title().__class__ is unicode)
+    vereq(u.title(), base)
+    verify((u + u"").__class__ is unicode)
+    vereq(u + u"", base)
+    verify((u"" + u).__class__ is unicode)
+    vereq(u"" + u, base)
+    verify((u * 0).__class__ is unicode)
+    vereq(u * 0, u"")
+    verify((u * 1).__class__ is unicode)
+    vereq(u * 1, base)
+    verify((u * 2).__class__ is unicode)
+    vereq(u * 2, base + base)
+    verify(u[:].__class__ is unicode)
+    vereq(u[:], base)
+    verify(u[0:0].__class__ is unicode)
+    vereq(u[0:0], u"")
+
+    class sublist(list):
+        pass
+    a = sublist(range(5))
+    vereq(a, range(5))
+    a.append("hello")
+    vereq(a, range(5) + ["hello"])
+    a[5] = 5
+    vereq(a, range(6))
+    a.extend(range(6, 20))
+    vereq(a, range(20))
+    a[-5:] = []
+    vereq(a, range(15))
+    del a[10:15]
+    vereq(len(a), 10)
+    vereq(a, range(10))
+    vereq(list(a), range(10))
+    vereq(a[0], 0)
+    vereq(a[9], 9)
+    vereq(a[-10], 0)
+    vereq(a[-1], 9)
+    vereq(a[:5], range(5))
+
+    class CountedInput(file):
+        """Counts lines read by self.readline().
+
+        self.lineno is the 0-based ordinal of the last line read, up to
+        a maximum of one greater than the number of lines in the file.
+
+        self.ateof is true if and only if the final "" line has been read,
+        at which point self.lineno stops incrementing, and further calls
+        to readline() continue to return "".
+        """
+
+        lineno = 0
+        ateof = 0
+        def readline(self):
+            if self.ateof:
+                return ""
+            s = file.readline(self)
+            # Next line works too.
+            # s = super(CountedInput, self).readline()
+            self.lineno += 1
+            if s == "":
+                self.ateof = 1
+            return s
+
+    f = file(name=TESTFN, mode='w')
+    lines = ['a\n', 'b\n', 'c\n']
+    try:
+        f.writelines(lines)
+        f.close()
+        f = CountedInput(TESTFN)
+        for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
+            got = f.readline()
+            vereq(expected, got)
+            vereq(f.lineno, i)
+            vereq(f.ateof, (i > len(lines)))
+        f.close()
+    finally:
+        try:
+            f.close()
+        except:
+            pass
+        try:
+            import os
+            os.unlink(TESTFN)
+        except:
+            pass
+
+def keywords():
+    """Test that basic type constructors accept their (Python 2.2-era)
+    keyword argument names, and reject unknown keywords with TypeError."""
+    if verbose:
+        print "Testing keyword args to basic type constructors ..."
+    vereq(int(x=1), 1)
+    vereq(float(x=2), 2.0)
+    vereq(long(x=3), 3L)
+    vereq(complex(imag=42, real=666), complex(666, 42))
+    vereq(str(object=500), '500')
+    vereq(unicode(string='abc', errors='strict'), u'abc')
+    vereq(tuple(sequence=range(3)), (0, 1, 2))
+    vereq(list(sequence=(0, 1, 2)), range(3))
+    vereq(dict(items={1: 2}), {1: 2})
+
+    # A bogus keyword name must raise TypeError for every builtin type.
+    for constructor in (int, float, long, complex, str, unicode,
+                        tuple, list, dict, file):
+        try:
+            constructor(bogus_keyword_arg=1)
+        except TypeError:
+            pass
+        else:
+            raise TestFailed("expected TypeError from bogus keyword "
+                             "argument to %r" % constructor)
+
+def restricted():
+    # XXX This test is disabled because rexec is not deemed safe
+    return
+    # NOTE: everything below the early return above is intentionally dead
+    # code, kept for reference.  It checked that restricted-execution mode
+    # blocked open()/file() and the sneaky type(f)(...) constructor route.
+    import rexec
+    if verbose:
+        print "Testing interaction with restricted execution ..."
+
+    sandbox = rexec.RExec()
+
+    code1 = """f = open(%r, 'w')""" % TESTFN
+    code2 = """f = file(%r, 'w')""" % TESTFN
+    code3 = """\
+f = open(%r)
+t = type(f)  # a sneaky way to get the file() constructor
+f.close()
+f = t(%r, 'w')  # rexec can't catch this by itself
+""" % (TESTFN, TESTFN)
+
+    f = open(TESTFN, 'w')  # Create the file so code3 can find it.
+    f.close()
+
+    try:
+        for code in code1, code2, code3:
+            try:
+                sandbox.r_exec(code)
+            except IOError, msg:
+                if str(msg).find("restricted") >= 0:
+                    outcome = "OK"
+                else:
+                    outcome = "got an exception, but not an expected one"
+            else:
+                outcome = "expected a restricted-execution exception"
+
+            if outcome != "OK":
+                raise TestFailed("%s, in %r" % (outcome, code))
+
+    finally:
+        # Best-effort cleanup of the scratch file.
+        try:
+            import os
+            os.unlink(TESTFN)
+        except:
+            pass
+
+def str_subclass_as_dict_key():
+    """Test that a str subclass with custom __eq__/__hash__ works as a
+    dict key (case-insensitive lookups via a canonical lowercase form)."""
+    if verbose:
+        print "Testing a str subclass used as dict key .."
+
+    class cistr(str):
+        """Subclass of str that computes __eq__ case-insensitively.
+
+        Also computes a hash code of the string in canonical form.
+        """
+
+        def __init__(self, value):
+            # Cache the lowercase form and its hash so __eq__/__hash__
+            # don't recompute them on every comparison.
+            self.canonical = value.lower()
+            self.hashcode = hash(self.canonical)
+
+        def __eq__(self, other):
+            if not isinstance(other, cistr):
+                other = cistr(other)
+            return self.canonical == other.canonical
+
+        def __hash__(self):
+            return self.hashcode
+
+    vereq(cistr('ABC'), 'abc')
+    vereq('aBc', cistr('ABC'))
+    vereq(str(cistr('ABC')), 'ABC')
+
+    # Lookups in any case variation must hit the same key.
+    d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
+    vereq(d[cistr('one')], 1)
+    vereq(d[cistr('tWo')], 2)
+    vereq(d[cistr('THrEE')], 3)
+    verify(cistr('ONe') in d)
+    vereq(d.get(cistr('thrEE')), 3)
+
+def classic_comparisons():
+    """Test three-way __cmp__ dispatch for classic and new-style bases."""
+    if verbose: print "Testing classic comparisons..."
+    class classic:
+        pass
+    # Same comparison behavior is expected whether the base is a classic
+    # class, int, or object.
+    for base in (classic, int, object):
+        if verbose: print "        (base = %s)" % base
+        class C(base):
+            def __init__(self, value):
+                self.value = int(value)
+            def __cmp__(self, other):
+                if isinstance(other, C):
+                    return cmp(self.value, other.value)
+                if isinstance(other, int) or isinstance(other, long):
+                    return cmp(self.value, other)
+                return NotImplemented
+        c1 = C(1)
+        c2 = C(2)
+        c3 = C(3)
+        vereq(c1, 1)
+        c = {1: c1, 2: c2, 3: c3}
+        # All rich operators and cmp() must agree with the underlying ints,
+        # in both C-vs-C and C-vs-int directions.
+        for x in 1, 2, 3:
+            for y in 1, 2, 3:
+                verify(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+                for op in "<", "<=", "==", "!=", ">", ">=":
+                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                verify(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
+                verify(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+
+def rich_comparisons():
+    """Test rich comparison methods (__eq__ .. __ge__) on subclasses,
+    including that they take precedence over __cmp__."""
+    if verbose:
+        print "Testing rich comparisons..."
+    class Z(complex):
+        pass
+    z = Z(1)
+    vereq(z, 1+0j)
+    vereq(1+0j, z)
+    class ZZ(complex):
+        def __eq__(self, other):
+            # Approximate equality within 1e-6.
+            try:
+                return abs(self - other) <= 1e-6
+            except:
+                return NotImplemented
+    zz = ZZ(1.0000003)
+    vereq(zz, 1+0j)
+    vereq(1+0j, zz)
+
+    class classic:
+        pass
+    for base in (classic, int, object, list):
+        if verbose: print "        (base = %s)" % base
+        class C(base):
+            def __init__(self, value):
+                self.value = int(value)
+            def __cmp__(self, other):
+                # Rich comparisons are defined below, so __cmp__ must
+                # never be consulted.
+                raise TestFailed, "shouldn't call __cmp__"
+            def __eq__(self, other):
+                if isinstance(other, C):
+                    return self.value == other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value == other
+                return NotImplemented
+            def __ne__(self, other):
+                if isinstance(other, C):
+                    return self.value != other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value != other
+                return NotImplemented
+            def __lt__(self, other):
+                if isinstance(other, C):
+                    return self.value < other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value < other
+                return NotImplemented
+            def __le__(self, other):
+                if isinstance(other, C):
+                    return self.value <= other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value <= other
+                return NotImplemented
+            def __gt__(self, other):
+                if isinstance(other, C):
+                    return self.value > other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value > other
+                return NotImplemented
+            def __ge__(self, other):
+                if isinstance(other, C):
+                    return self.value >= other.value
+                if isinstance(other, int) or isinstance(other, long):
+                    return self.value >= other
+                return NotImplemented
+        c1 = C(1)
+        c2 = C(2)
+        c3 = C(3)
+        vereq(c1, 1)
+        c = {1: c1, 2: c2, 3: c3}
+        # Each operator must agree with the plain-int result, in C-vs-C,
+        # C-vs-int and int-vs-C directions.
+        for x in 1, 2, 3:
+            for y in 1, 2, 3:
+                for op in "<", "<=", "==", "!=", ">", ">=":
+                    verify(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                    verify(eval("c[x] %s y" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+                    verify(eval("x %s c[y]" % op) == eval("x %s y" % op),
+                           "x=%d, y=%d" % (x, y))
+
+def coercions():
+    """Test that coerce() works with numeric-type subclasses in both
+    argument positions (only checks that the calls don't raise)."""
+    if verbose: print "Testing coercions..."
+    class I(int): pass
+    coerce(I(0), 0)
+    coerce(0, I(0))
+    class L(long): pass
+    coerce(L(0), 0)
+    coerce(L(0), 0L)
+    coerce(0, L(0))
+    coerce(0L, L(0))
+    class F(float): pass
+    coerce(F(0), 0)
+    coerce(F(0), 0L)
+    coerce(F(0), 0.)
+    coerce(0, F(0))
+    coerce(0L, F(0))
+    coerce(0., F(0))
+    class C(complex): pass
+    coerce(C(0), 0)
+    coerce(C(0), 0L)
+    coerce(C(0), 0.)
+    coerce(C(0), 0j)
+    coerce(0, C(0))
+    coerce(0L, C(0))
+    coerce(0., C(0))
+    coerce(0j, C(0))
+
+def descrdoc():
+    """Test __doc__ strings on builtin descriptors of the file type."""
+    if verbose: print "Testing descriptor doc strings..."
+    def check(descr, what):
+        vereq(descr.__doc__, what)
+    check(file.closed, "flag set if the file is closed") # getset descriptor
+    check(file.name, "file name") # member descriptor
+
+def setclass():
+    """Test assignment to __class__: allowed between compatible
+    heap-types, forbidden (TypeError) otherwise; deletion always fails."""
+    if verbose: print "Testing __class__ assignment..."
+    class C(object): pass
+    class D(object): pass
+    class E(object): pass
+    class F(D, E): pass
+    # These four classes have compatible layouts, so instances may be
+    # switched between any pair of them and back.
+    for cls in C, D, E, F:
+        for cls2 in C, D, E, F:
+            x = cls()
+            x.__class__ = cls2
+            verify(x.__class__ is cls2)
+            x.__class__ = cls
+            verify(x.__class__ is cls)
+    def cant(x, C):
+        # Assert that both assigning and deleting x.__class__ raise
+        # TypeError.
+        try:
+            x.__class__ = C
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %r.__class__ = %r" % (x, C)
+        try:
+            delattr(x, "__class__")
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow del %r.__class__" % x
+    cant(C(), list)
+    cant(list(), C)
+    cant(C(), 1)
+    cant(C(), object)
+    cant(object(), list)
+    cant(list(), object)
+    o = object()
+    cant(o, type(1))
+    cant(o, type(None))
+    del o
+
+def setdict():
+    """Test assignment to instance __dict__: a real dict is accepted,
+    non-dicts raise TypeError, deletion is allowed, and class __dict__
+    cannot be assigned."""
+    if verbose: print "Testing __dict__ assignment..."
+    class C(object): pass
+    a = C()
+    a.__dict__ = {'b': 1}
+    vereq(a.b, 1)
+    def cant(x, dict):
+        try:
+            x.__dict__ = dict
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "shouldn't allow %r.__dict__ = %r" % (x, dict)
+    cant(a, None)
+    cant(a, [])
+    cant(a, 1)
+    del a.__dict__ # Deleting __dict__ is allowed
+    # Classes don't allow __dict__ assignment
+    cant(C, {})
+
+def pickles():
+    """Test pickling (pickle and cPickle, text and binary) and deep
+    copying of new-style classes and their instances, including
+    __new__/__init__ splits, __getstate__/__setstate__, and hybrid
+    classic/new-style inheritance."""
+    if verbose:
+        print "Testing pickling and copying new-style classes and objects..."
+    import pickle, cPickle
+
+    def sorteditems(d):
+        # Return d.items() in sorted order, for order-independent
+        # __dict__ comparison.
+        L = d.items()
+        L.sort()
+        return L
+
+    # The classes must be module-global so pickle can find them by name.
+    global C
+    class C(object):
+        def __init__(self, a, b):
+            super(C, self).__init__()
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C(%r, %r)" % (self.a, self.b)
+
+    global C1
+    class C1(list):
+        def __new__(cls, a, b):
+            return super(C1, cls).__new__(cls)
+        def __init__(self, a, b):
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
+
+    global C2
+    class C2(int):
+        def __new__(cls, a, b, val=0):
+            return super(C2, cls).__new__(cls, val)
+        def __init__(self, a, b, val=0):
+            self.a = a
+            self.b = b
+        def __repr__(self):
+            return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
+
+    global C3
+    class C3(object):
+        def __init__(self, foo):
+            self.foo = foo
+        def __getstate__(self):
+            return self.foo
+        def __setstate__(self, foo):
+            self.foo = foo
+
+    global C4classic, C4
+    class C4classic: # classic
+        pass
+    class C4(C4classic, object): # mixed inheritance
+        pass
+
+    # Exercise both pickle modules in both text and binary mode.
+    for p in pickle, cPickle:
+        for bin in 0, 1:
+            if verbose:
+                print p.__name__, ["text", "binary"][bin]
+
+            # Pickling a class itself must round-trip to the same object.
+            for cls in C, C1, C2:
+                s = p.dumps(cls, bin)
+                cls2 = p.loads(s)
+                verify(cls2 is cls)
+
+            a = C1(1, 2); a.append(42); a.append(24)
+            b = C2("hello", "world", 42)
+            s = p.dumps((a, b), bin)
+            x, y = p.loads(s)
+            vereq(x.__class__, a.__class__)
+            vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
+            vereq(y.__class__, b.__class__)
+            vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
+            vereq(`x`, `a`)
+            vereq(`y`, `b`)
+            if verbose:
+                print "a = x =", a
+                print "b = y =", b
+            # Test for __getstate__ and __setstate__ on new style class
+            u = C3(42)
+            s = p.dumps(u, bin)
+            v = p.loads(s)
+            veris(u.__class__, v.__class__)
+            vereq(u.foo, v.foo)
+            # Test for picklability of hybrid class
+            u = C4()
+            u.foo = 42
+            s = p.dumps(u, bin)
+            v = p.loads(s)
+            veris(u.__class__, v.__class__)
+            vereq(u.foo, v.foo)
+
+    # Testing copy.deepcopy()
+    if verbose:
+        print "deepcopy"
+    import copy
+    for cls in C, C1, C2:
+        cls2 = copy.deepcopy(cls)
+        verify(cls2 is cls)
+
+    a = C1(1, 2); a.append(42); a.append(24)
+    b = C2("hello", "world", 42)
+    x, y = copy.deepcopy((a, b))
+    vereq(x.__class__, a.__class__)
+    vereq(sorteditems(x.__dict__), sorteditems(a.__dict__))
+    vereq(y.__class__, b.__class__)
+    vereq(sorteditems(y.__dict__), sorteditems(b.__dict__))
+    vereq(`x`, `a`)
+    vereq(`y`, `b`)
+    if verbose:
+        print "a = x =", a
+        print "b = y =", b
+
+def pickleslots():
+    if verbose: print "Testing pickling of classes with __slots__ ..."
+    import pickle, cPickle
+    # Pickling of classes with __slots__ but without __getstate__ should fail
+    global B, C, D, E
+    class B(object):
+        pass
+    for base in [object, B]:
+        class C(base):
+            __slots__ = ['a']
+        class D(C):
+            pass
+        try:
+            pickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: pickle C instance - %s" % base
+        try:
+            cPickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: cPickle C instance - %s" % base
+        try:
+            pickle.dumps(C())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: pickle D instance - %s" % base
+        try:
+            cPickle.dumps(D())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: cPickle D instance - %s" % base
+        # Give C a __getstate__ and __setstate__
+        class C(base):
+            __slots__ = ['a']
+            def __getstate__(self):
+                try:
+                    d = self.__dict__.copy()
+                except AttributeError:
+                    d = {}
+                try:
+                    d['a'] = self.a
+                except AttributeError:
+                    pass
+                return d
+            def __setstate__(self, d):
+                for k, v in d.items():
+                    setattr(self, k, v)
+        class D(C):
+            pass
+        # Now it should work
+        x = C()
+        y = pickle.loads(pickle.dumps(x))
+        vereq(hasattr(y, 'a'), 0)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(hasattr(y, 'a'), 0)
+        x.a = 42
+        y = pickle.loads(pickle.dumps(x))
+        vereq(y.a, 42)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(y.a, 42)
+        x = D()
+        x.a = 42
+        x.b = 100
+        y = pickle.loads(pickle.dumps(x))
+        vereq(y.a + y.b, 142)
+        y = cPickle.loads(cPickle.dumps(x))
+        vereq(y.a + y.b, 142)
+        # But a subclass that adds a slot should not work
+        class E(C):
+            __slots__ = ['b']
+        try:
+            pickle.dumps(E())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: pickle E instance - %s" % base
+        try:
+            cPickle.dumps(E())
+        except TypeError:
+            pass
+        else:
+            raise TestFailed, "should fail: cPickle E instance - %s" % base
+
+def copies():
+    """Test copy.copy() (shallow: shares mutable attributes) and
+    copy.deepcopy() (independent copies) on new-style instances."""
+    if verbose: print "Testing copy.copy() and copy.deepcopy()..."
+    import copy
+    class C(object):
+        pass
+
+    a = C()
+    a.foo = 12
+    b = copy.copy(a)
+    vereq(b.__dict__, a.__dict__)
+
+    a.bar = [1,2,3]
+    c = copy.copy(a)
+    vereq(c.bar, a.bar)
+    # Shallow copy shares the list object itself.
+    verify(c.bar is a.bar)
+
+    d = copy.deepcopy(a)
+    vereq(d.__dict__, a.__dict__)
+    # Deep copy must not be affected by later mutation of the original.
+    a.bar.append(4)
+    vereq(d.bar, [1,2,3])
+
+def binopoverride():
+    """Test overriding binary operators (__add__/__radd__, two- and
+    three-argument __pow__/__rpow__) in an int subclass."""
+    if verbose: print "Testing overrides of binary operations..."
+    class I(int):
+        def __repr__(self):
+            return "I(%r)" % int(self)
+        def __add__(self, other):
+            return I(int(self) + int(other))
+        __radd__ = __add__
+        def __pow__(self, other, mod=None):
+            if mod is None:
+                return I(pow(int(self), int(other)))
+            else:
+                return I(pow(int(self), int(other), int(mod)))
+        def __rpow__(self, other, mod=None):
+            if mod is None:
+                return I(pow(int(other), int(self), mod))
+            else:
+                return I(pow(int(other), int(self), int(mod)))
+
+    # Results must be I instances (checked via repr) whichever operand
+    # is the subclass.
+    vereq(`I(1) + I(2)`, "I(3)")
+    vereq(`I(1) + 2`, "I(3)")
+    vereq(`1 + I(2)`, "I(3)")
+    vereq(`I(2) ** I(3)`, "I(8)")
+    vereq(`2 ** I(3)`, "I(8)")
+    vereq(`I(2) ** 3`, "I(8)")
+    vereq(`pow(I(2), I(3), I(5))`, "I(3)")
+    class S(str):
+        def __eq__(self, other):
+            return self.lower() == other.lower()
+
+def subclasspropagation():
+    """Test that slot functions (__hash__, __getattribute__,
+    __getattr__, ...) assigned to or deleted from base classes propagate
+    to subclasses following the MRO."""
+    if verbose: print "Testing propagation of slot functions to subclasses..."
+    class A(object):
+        pass
+    class B(A):
+        pass
+    class C(A):
+        pass
+    class D(B, C):
+        pass
+    d = D()
+    vereq(hash(d), id(d))
+    # Adding __hash__ higher and higher in the MRO: the most derived
+    # definition wins; deleting reverses the effect step by step.
+    A.__hash__ = lambda self: 42
+    vereq(hash(d), 42)
+    C.__hash__ = lambda self: 314
+    vereq(hash(d), 314)
+    B.__hash__ = lambda self: 144
+    vereq(hash(d), 144)
+    D.__hash__ = lambda self: 100
+    vereq(hash(d), 100)
+    del D.__hash__
+    vereq(hash(d), 144)
+    del B.__hash__
+    vereq(hash(d), 314)
+    del C.__hash__
+    vereq(hash(d), 42)
+    del A.__hash__
+    vereq(hash(d), id(d))
+    d.foo = 42
+    d.bar = 42
+    vereq(d.foo, 42)
+    vereq(d.bar, 42)
+    # __getattribute__ intercepts every lookup, even of real attributes.
+    def __getattribute__(self, name):
+        if name == "foo":
+            return 24
+        return object.__getattribute__(self, name)
+    A.__getattribute__ = __getattribute__
+    vereq(d.foo, 24)
+    vereq(d.bar, 42)
+    # __getattr__ is consulted only when normal lookup fails.
+    def __getattr__(self, name):
+        if name in ("spam", "foo", "bar"):
+            return "hello"
+        raise AttributeError, name
+    B.__getattr__ = __getattr__
+    vereq(d.spam, "hello")
+    vereq(d.foo, 24)
+    vereq(d.bar, 42)
+    del A.__getattribute__
+    vereq(d.foo, 42)
+    del d.foo
+    vereq(d.foo, "hello")
+    vereq(d.bar, 42)
+    del B.__getattr__
+    try:
+        d.foo
+    except AttributeError:
+        pass
+    else:
+        raise TestFailed, "d.foo should be undefined now"
+
+    # Test a nasty bug in recurse_down_subclasses()
+    import gc
+    class A(object):
+        pass
+    class B(A):
+        pass
+    del B
+    gc.collect()
+    A.__setitem__ = lambda *a: None # crash
+
+def buffer_inherit():
+    # Checks that str/unicode subclasses inherit the buffer interface
+    # (exercised through binascii.b2a_hex) while int subclasses do not.
+    import binascii
+    # SF bug [#470040] ParseTuple t# vs subclasses.
+    if verbose:
+        print "Testing that buffer interface is inherited ..."
+
+    class MyStr(str):
+        pass
+    base = 'abc'
+    m = MyStr(base)
+    # b2a_hex uses the buffer interface to get its argument's value, via
+    # PyArg_ParseTuple 't#' code.
+    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
+
+    # It's not clear that unicode will continue to support the character
+    # buffer interface, and this test will fail if that's taken away.
+    class MyUni(unicode):
+        pass
+    base = u'abc'
+    m = MyUni(base)
+    vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
+
+    class MyInt(int):
+        pass
+    m = MyInt(42)
+    try:
+        binascii.b2a_hex(m)
+        raise TestFailed('subclass of int should not have a buffer interface')
+    except TypeError:
+        pass
+
+def str_of_str_subclass():
+    # Checks that __str__/__repr__ overridden in a str subclass are
+    # honored by str(), repr(), and the print statement.
+    import binascii
+    import cStringIO
+
+    if verbose:
+        print "Testing __str__ defined in subclass of str ..."
+
+    class octetstring(str):
+        def __str__(self):
+            # str() of the instance yields its hex encoding.
+            return binascii.b2a_hex(self)
+        def __repr__(self):
+            return self + " repr"
+
+    o = octetstring('A')
+    vereq(type(o), octetstring)
+    vereq(type(str(o)), str)
+    vereq(type(repr(o)), str)
+    vereq(ord(o), 0x41)
+    vereq(str(o), '41')
+    vereq(repr(o), 'A repr')
+    vereq(o.__str__(), '41')
+    vereq(o.__repr__(), 'A repr')
+
+    capture = cStringIO.StringIO()
+    # Calling str() or not exercises different internal paths.
+    print >> capture, o
+    print >> capture, str(o)
+    vereq(capture.getvalue(), '41\n41\n')
+    capture.close()
+
+def kwdargs():
+    """Test keyword arguments passed to __call__ and to an unbound
+    list.__init__."""
+    if verbose: print "Testing keyword arguments to __init__, __call__..."
+    def f(a): return a
+    vereq(f.__call__(a=42), 42)
+    a = []
+    list.__init__(a, sequence=[0, 1, 2])
+    vereq(a, [0, 1, 2])
+
+def delhook():
+    """Test that __del__ on a new-style class runs when the last
+    reference goes away, and that item deletion on a plain object
+    raises TypeError."""
+    if verbose: print "Testing __del__ hook..."
+    log = []
+    class C(object):
+        def __del__(self):
+            log.append(1)
+    c = C()
+    vereq(log, [])
+    del c
+    # NOTE(review): assumes `del c` finalizes immediately, i.e.
+    # refcounting semantics (true on CPython; a GC'd runtime such as
+    # Jython may defer this).
+    vereq(log, [1])
+
+    class D(object): pass
+    d = D()
+    try: del d[0]
+    except TypeError: pass
+    else: raise TestFailed, "invalid del() didn't raise TypeError"
+
+def hashinherit():
+    """Test that subclasses of mutable builtins (dict, list) remain
+    unhashable: hash() must raise TypeError."""
+    if verbose: print "Testing hash of mutable subclasses..."
+
+    class mydict(dict):
+        pass
+    d = mydict()
+    try:
+        hash(d)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "hash() of dict subclass should fail"
+
+    class mylist(list):
+        pass
+    d = mylist()
+    try:
+        hash(d)
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "hash() of list subclass should fail"
+
+def strops():
+    """Test error behavior of assorted str operations and %-formatting
+    (each bad call must raise TypeError or ValueError as noted)."""
+    try: 'a' + 5
+    except TypeError: pass
+    else: raise TestFailed, "'' + 5 doesn't raise TypeError"
+
+    try: ''.split('')
+    except ValueError: pass
+    else: raise TestFailed, "''.split('') doesn't raise ValueError"
+
+    try: ''.join([0])
+    except TypeError: pass
+    else: raise TestFailed, "''.join([0]) doesn't raise TypeError"
+
+    try: ''.rindex('5')
+    except ValueError: pass
+    else: raise TestFailed, "''.rindex('5') doesn't raise ValueError"
+
+    try: ''.replace('', '')
+    except ValueError: pass
+    else: raise TestFailed, "''.replace('', '') doesn't raise ValueError"
+
+    try: '%(n)s' % None
+    except TypeError: pass
+    else: raise TestFailed, "'%(n)s' % None doesn't raise TypeError"
+
+    try: '%(n' % {}
+    except ValueError: pass
+    else: raise TestFailed, "'%(n' % {} '' doesn't raise ValueError"
+
+    try: '%*s' % ('abc')
+    except TypeError: pass
+    else: raise TestFailed, "'%*s' % ('abc') doesn't raise TypeError"
+
+    try: '%*.*s' % ('abc', 5)
+    except TypeError: pass
+    else: raise TestFailed, "'%*.*s' % ('abc', 5) doesn't raise TypeError"
+
+    try: '%s' % (1, 2)
+    except TypeError: pass
+    else: raise TestFailed, "'%s' % (1, 2) doesn't raise TypeError"
+
+    try: '%' % None
+    except ValueError: pass
+    else: raise TestFailed, "'%' % None doesn't raise ValueError"
+
+    # A few positive checks as well.
+    vereq('534253'.isdigit(), 1)
+    vereq('534253x'.isdigit(), 0)
+    vereq('%c' % 5, '\x05')
+    vereq('%c' % '5', '5')
+
+def deepcopyrecursive():
+    """Test deepcopy of mutually-referencing (cyclic) objects."""
+    if verbose: print "Testing deepcopy of recursive objects..."
+    class Node:
+        pass
+    a = Node()
+    b = Node()
+    a.b = b
+    b.a = a
+    z = deepcopy(a) # This blew up before
+
+def modules():
+    """Test module objects created via M.__new__ without __init__:
+    no attributes, __dict__ is None until the first assignment."""
+    if verbose: print "Testing uninitialized module objects..."
+    from types import ModuleType as M
+    m = M.__new__(M)
+    str(m)
+    vereq(hasattr(m, "__name__"), 0)
+    vereq(hasattr(m, "__file__"), 0)
+    vereq(hasattr(m, "foo"), 0)
+    vereq(m.__dict__, None)
+    # Setting an attribute lazily creates the module __dict__.
+    m.foo = 1
+    vereq(m.__dict__, {"foo": 1})
+
+def docdescriptor():
+    # SF bug 542984
+    # Checks that a descriptor installed as __doc__ is invoked for both
+    # class-level and instance-level access, on classic and new-style
+    # classes alike.
+    if verbose: print "Testing __doc__ descriptor..."
+    class DocDescr(object):
+        def __get__(self, object, otype):
+            if object:
+                object = object.__class__.__name__ + ' instance'
+            if otype:
+                otype = otype.__name__
+            return 'object=%s; type=%s' % (object, otype)
+    class OldClass:
+        __doc__ = DocDescr()
+    class NewClass(object):
+        __doc__ = DocDescr()
+    vereq(OldClass.__doc__, 'object=None; type=OldClass')
+    vereq(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
+    vereq(NewClass.__doc__, 'object=None; type=NewClass')
+    vereq(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
+
+def imulbug():
+    # SF bug 544647
+    # Checks that x *= y dispatches to __imul__ for every right-operand
+    # type (float, int, long, huge long, None, str).
+    if verbose: print "Testing for __imul__ problems..."
+    class C(object):
+        def __imul__(self, other):
+            return (self, other)
+    x = C()
+    y = x
+    y *= 1.0
+    vereq(y, (x, 1.0))
+    y = x
+    y *= 2
+    vereq(y, (x, 2))
+    y = x
+    y *= 3L
+    vereq(y, (x, 3L))
+    y = x
+    y *= 1L<<100
+    vereq(y, (x, 1L<<100))
+    y = x
+    y *= None
+    vereq(y, (x, None))
+    y = x
+    y *= "foo"
+    vereq(y, (x, "foo"))
+
+def copy_setstate():
+    # copy.copy()/copy.deepcopy() must restore state through the
+    # __getstate__/__setstate__ pair rather than copying __dict__
+    # directly; the private __foo attribute reveals which path ran.
+    if verbose:
+        print "Testing that copy.*copy() correctly uses __setstate__..."
+    import copy
+    class C(object):
+        def __init__(self, foo=None):
+            self.foo = foo
+            self.__foo = foo
+        def setfoo(self, foo=None):
+            self.foo = foo
+        def getfoo(self):
+            return self.__foo
+        def __getstate__(self):
+            return [self.foo]
+        def __setstate__(self, lst):
+            assert len(lst) == 1
+            self.__foo = self.foo = lst[0]
+    a = C(42)
+    a.setfoo(24)
+    vereq(a.foo, 24)
+    vereq(a.getfoo(), 42)
+    # After a copy both attributes agree, proving __setstate__ ran.
+    b = copy.copy(a)
+    vereq(b.foo, 24)
+    vereq(b.getfoo(), 24)
+    b = copy.deepcopy(a)
+    vereq(b.foo, 24)
+    vereq(b.getfoo(), 24)
+
+def subtype_resurrection():
+    # A __del__ that resurrects the instance (by appending it to a class
+    # attribute) must not crash the interpreter, even when the
+    # resurrected object is deleted and resurrected again.
+    if verbose:
+        print "Testing resurrection of new-style instance..."
+
+    class C(object):
+        container = []
+
+        def __del__(self):
+            # resurrect the instance
+            C.container.append(self)
+
+    c = C()
+    c.attr = 42
+    # The most interesting thing here is whether this blows up, due to flawed
+    #  GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 bug).
+    del c
+
+    # If that didn't blow up, it's also interesting to see whether clearing
+    # the last container slot works:  that will attempt to delete c again,
+    # which will cause c to get appended back to the container again "during"
+    # the del.
+    del C.container[-1]
+    vereq(len(C.container), 1)
+    vereq(C.container[-1].attr, 42)
+
+    # Make c mortal again, so that the test framework with -l doesn't report
+    # it as a leak.
+    del C.__del__
+
+def funnynew():
+    # __new__ may return an object of an unrelated type (a list) or an
+    # instance of a different class (D); __init__ is then only called
+    # when the returned object is an instance of the requested class.
+    if verbose: print "Testing __new__ returning something unexpected..."
+    class C(object):
+        def __new__(cls, arg):
+            if isinstance(arg, str): return [1, 2, 3]
+            elif isinstance(arg, int): return object.__new__(D)
+            else: return object.__new__(cls)
+    class D(C):
+        def __init__(self, arg):
+            self.foo = arg
+    vereq(C("1"), [1, 2, 3])
+    vereq(D("1"), [1, 2, 3])
+    d = D(None)
+    veris(d.foo, None)
+    d = C(1)
+    vereq(isinstance(d, D), True)
+    vereq(d.foo, 1)
+    d = D(1)
+    vereq(isinstance(d, D), True)
+    vereq(d.foo, 1)
+
+def subclass_right_op():
+    # When the right operand's class is a proper subclass of the left
+    # operand's class and overrides the reflected method, the reflected
+    # method must be tried first.
+    if verbose:
+        print "Testing correct dispatch of subclass overloading __r<op>__..."
+
+    # This code tests various cases where right-dispatch of a subclass
+    # should be preferred over left-dispatch of a base class.
+
+    # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
+
+    class B(int):
+        def __div__(self, other):
+            return "B.__div__"
+        def __rdiv__(self, other):
+            return "B.__rdiv__"
+
+    vereq(B(1) / 1, "B.__div__")
+    vereq(1 / B(1), "B.__rdiv__")
+
+    # Case 2: subclass of object; this is just the baseline for case 3
+
+    class C(object):
+        def __div__(self, other):
+            return "C.__div__"
+        def __rdiv__(self, other):
+            return "C.__rdiv__"
+
+    vereq(C(1) / 1, "C.__div__")
+    vereq(1 / C(1), "C.__rdiv__")
+
+    # Case 3: subclass of new-style class; here it gets interesting
+
+    class D(C):
+        def __div__(self, other):
+            return "D.__div__"
+        def __rdiv__(self, other):
+            return "D.__rdiv__"
+
+    vereq(D(1) / C(1), "D.__div__")
+    vereq(C(1) / D(1), "D.__rdiv__")
+
+    # Case 4: this didn't work right in 2.2.2 and 2.3a1
+    # E inherits (not overrides) C.__rdiv__, so it gets no special
+    # right-dispatch priority over C.
+
+    class E(C):
+        pass
+
+    vereq(E.__rdiv__, C.__rdiv__)
+
+    vereq(E(1) / 1, "C.__div__")
+    vereq(1 / E(1), "C.__rdiv__")
+    vereq(E(1) / C(1), "C.__div__")
+    vereq(C(1) / E(1), "C.__div__") # This one would fail
+
+def dict_type_with_metaclass():
+    # A classic class promoted to new-style via __metaclass__ must get
+    # the same kind of __dict__ (a dict proxy) as a plain new-style class.
+    if verbose:
+        print "Testing type of __dict__ when __metaclass__ set..."
+
+    class B(object):
+        pass
+    class M(type):
+        pass
+    class C:
+        # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
+        __metaclass__ = M
+    veris(type(C.__dict__), type(B.__dict__))
+
+def weakref_segfault():
+    # SF 742911
+    # Dereferencing a weakref from inside __del__ of an attribute of the
+    # referent, while the referent is being torn down, must not segfault.
+    if verbose:
+        print "Testing weakref segfault..."
+
+    import weakref
+
+    class Provoker:
+        def __init__(self, referrent):
+            self.ref = weakref.ref(referrent)
+
+        def __del__(self):
+            # Fires while the Oops instance is being destroyed.
+            x = self.ref()
+
+    class Oops(object):
+        pass
+
+    o = Oops()
+    o.whatever = Provoker(o)
+    del o
+
+
+def crash_in_get_sf736892():
+    # SF 736892: function.__get__(None) with no owner class must raise
+    # TypeError rather than produce a bogus bound method that crashes
+    # when called.
+    def func():
+        pass
+
+    try:
+        f = func.__get__(None)
+    except TypeError:
+        pass
+    else:
+        # should not get here
+        f(1) # crash
+
+
+def test_main():
+    # Driver: run every sub-test in sequence.  Each sub-test raises
+    # TestFailed on failure, so reaching the end means the module passed.
+    weakref_segfault() # Must be first, somehow
+    class_docstrings()
+    lists()
+    dicts()
+    dict_constructor()
+    test_dir()
+    ints()
+    longs()
+    floats()
+    complexes()
+    spamlists()
+    spamdicts()
+    pydicts()
+    pylists()
+    metaclass()
+    pymods()
+    multi()
+    diamond()
+    objects()
+    slots()
+    dynamics()
+    errors()
+    classmethods()
+    staticmethods()
+    classic()
+    compattr()
+    newslot()
+    altmro()
+    overloading()
+    methods()
+    specials()
+    weakrefs()
+    properties()
+    supers()
+    inherits()
+    keywords()
+    restricted()
+    str_subclass_as_dict_key()
+    classic_comparisons()
+    rich_comparisons()
+    coercions()
+    descrdoc()
+    setclass()
+    setdict()
+    pickles()
+    copies()
+    binopoverride()
+    subclasspropagation()
+    buffer_inherit()
+    str_of_str_subclass()
+    kwdargs()
+    delhook()
+    hashinherit()
+    strops()
+    deepcopyrecursive()
+    modules()
+    pickleslots()
+    docdescriptor()
+    imulbug()
+    copy_setstate()
+    subtype_resurrection()
+    funnynew()
+    subclass_right_op()
+    dict_type_with_metaclass()
+    crash_in_get_sf736892()
+
+    if verbose: print "All OK"
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_descrtut.py b/lib-python/2.2/test/test_descrtut.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_descrtut.py
@@ -0,0 +1,501 @@
+# This contains most of the executable examples from Guido's descr
+# tutorial, once at
+#
+#     http://www.python.org/2.2/descrintro.html
+#
+# A few examples left implicit in the writeup were fleshed out, a few were
+# skipped due to lack of interest (e.g., faking super() by hand isn't
+# of much interest anymore), and a few were fiddled to make the output
+# deterministic.
+
+from test_support import sortdict
+import pprint
+
+class defaultdict(dict):
+    # dict subclass used by the doctests below: missing keys return
+    # self.default instead of raising KeyError.  Instances have a normal
+    # __dict__, so arbitrary attributes can be set (contrast defaultdict2).
+    def __init__(self, default=None):
+        dict.__init__(self)
+        self.default = default
+
+    def __getitem__(self, key):
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return self.default
+
+    def get(self, key, *args):
+        # Fall back to self.default when no explicit default is passed.
+        if not args:
+            args = (self.default,)
+        return dict.get(self, key, *args)
+
+    def merge(self, other):
+        # Copy entries from other, without overwriting existing keys.
+        for key in other:
+            if key not in self:
+                self[key] = other[key]
+
+# Doctest: basic behavior of the defaultdict subclass -- construction,
+# item access, use as an exec namespace, and dynamic instance attributes.
+# The string content is executed verbatim by doctest; do not edit casually.
+test_1 = """
+
+Here's the new type at work:
+
+    >>> print defaultdict               # show our type
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print type(defaultdict)         # its metatype
+    <type 'type'>
+    >>> a = defaultdict(default=0.0)    # create an instance
+    >>> print a                         # show the instance
+    {}
+    >>> print type(a)                   # show its type
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print a.__class__               # show its class
+    <class 'test.test_descrtut.defaultdict'>
+    >>> print type(a) is a.__class__    # its type is its class
+    1
+    >>> a[1] = 3.25                     # modify the instance
+    >>> print a                         # show the new value
+    {1: 3.25}
+    >>> print a[1]                      # show the new item
+    3.25
+    >>> print a[0]                      # a non-existant item
+    0.0
+    >>> a.merge({1:100, 2:200})         # use a dict method
+    >>> print sortdict(a)               # show the result
+    {1: 3.25, 2: 200}
+    >>>
+
+We can also use the new type in contexts where classic only allows "real"
+dictionaries, such as the locals/globals dictionaries for the exec
+statement or the built-in function eval():
+
+    >>> def sorted(seq):
+    ...     seq.sort()
+    ...     return seq
+    >>> print sorted(a.keys())
+    [1, 2]
+    >>> exec "x = 3; print x" in a
+    3
+    >>> print sorted(a.keys())
+    [1, 2, '__builtins__', 'x']
+    >>> print a['x']
+    3
+    >>>
+
+However, our __getitem__() method is not used for variable access by the
+interpreter:
+
+    >>> exec "print foo" in a
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<string>", line 1, in ?
+    NameError: name 'foo' is not defined
+    >>>
+
+Now I'll show that defaultdict instances have dynamic instance variables,
+just like classic classes:
+
+    >>> a.default = -1
+    >>> print a["noway"]
+    -1
+    >>> a.default = -1000
+    >>> print a["noway"]
+    -1000
+    >>> 'default' in dir(a)
+    1
+    >>> a.x1 = 100
+    >>> a.x2 = 200
+    >>> print a.x1
+    100
+    >>> d = dir(a)
+    >>> 'default' in d and 'x1' in d and 'x2' in d
+    1
+    >>> print a.__dict__
+    {'default': -1000, 'x2': 200, 'x1': 100}
+    >>>
+"""
+
+class defaultdict2(dict):
+    # Same as defaultdict, but with __slots__: only 'default' can be
+    # stored per instance, so arbitrary attribute assignment fails
+    # (demonstrated by test_2).
+    __slots__ = ['default']
+
+    def __init__(self, default=None):
+        dict.__init__(self)
+        self.default = default
+
+    def __getitem__(self, key):
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return self.default
+
+    def get(self, key, *args):
+        if not args:
+            args = (self.default,)
+        return dict.get(self, key, *args)
+
+    def merge(self, other):
+        for key in other:
+            if key not in self:
+                self[key] = other[key]
+
+# Doctest: __slots__ reserves exactly the listed instance variables and
+# rejects assignment to any other attribute.
+test_2 = """
+
+The __slots__ declaration takes a list of instance variables, and reserves
+space for exactly these in the instance. When __slots__ is used, other
+instance variables cannot be assigned to:
+
+    >>> a = defaultdict2(default=0.0)
+    >>> a[1]
+    0.0
+    >>> a.default = -1
+    >>> a[1]
+    -1
+    >>> a.x1 = 1
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    AttributeError: 'defaultdict2' object has no attribute 'x1'
+    >>>
+
+"""
+
+# Doctest: introspection of built-in types -- type(x) is x.__class__,
+# __methods__ is gone, and dir(list) enumerates special methods too.
+test_3 = """
+
+Introspecting instances of built-in types
+
+For instance of built-in types, x.__class__ is now the same as type(x):
+
+    >>> type([])
+    <type 'list'>
+    >>> [].__class__
+    <type 'list'>
+    >>> list
+    <type 'list'>
+    >>> isinstance([], list)
+    1
+    >>> isinstance([], dict)
+    0
+    >>> isinstance([], object)
+    1
+    >>>
+
+Under the new proposal, the __methods__ attribute no longer exists:
+
+    >>> [].__methods__
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    AttributeError: 'list' object has no attribute '__methods__'
+    >>>
+
+Instead, you can get the same information from the list type:
+
+    >>> pprint.pprint(dir(list))    # like list.__dict__.keys(), but sorted
+    ['__add__',
+     '__class__',
+     '__contains__',
+     '__delattr__',
+     '__delitem__',
+     '__delslice__',
+     '__doc__',
+     '__eq__',
+     '__ge__',
+     '__getattribute__',
+     '__getitem__',
+     '__getslice__',
+     '__gt__',
+     '__hash__',
+     '__iadd__',
+     '__imul__',
+     '__init__',
+     '__le__',
+     '__len__',
+     '__lt__',
+     '__mul__',
+     '__ne__',
+     '__new__',
+     '__reduce__',
+     '__repr__',
+     '__rmul__',
+     '__setattr__',
+     '__setitem__',
+     '__setslice__',
+     '__str__',
+     'append',
+     'count',
+     'extend',
+     'index',
+     'insert',
+     'pop',
+     'remove',
+     'reverse',
+     'sort']
+
+The new introspection API gives more information than the old one:  in
+addition to the regular methods, it also shows the methods that are
+normally invoked through special notations, e.g. __iadd__ (+=), __len__
+(len), __ne__ (!=). You can invoke any method from this list directly:
+
+    >>> a = ['tic', 'tac']
+    >>> list.__len__(a)          # same as len(a)
+    2
+    >>> a.__len__()              # ditto
+    2
+    >>> list.append(a, 'toe')    # same as a.append('toe')
+    >>> a
+    ['tic', 'tac', 'toe']
+    >>>
+
+This is just like it is for user-defined classes.
+"""
+
+# Doctest: staticmethod and classmethod, including how the implicit
+# class argument tracks the invoking class through inheritance.
+test_4 = """
+
+Static methods and class methods
+
+The new introspection API makes it possible to add static methods and class
+methods. Static methods are easy to describe: they behave pretty much like
+static methods in C++ or Java. Here's an example:
+
+    >>> class C:
+    ...
+    ...     def foo(x, y):
+    ...         print "staticmethod", x, y
+    ...     foo = staticmethod(foo)
+
+    >>> C.foo(1, 2)
+    staticmethod 1 2
+    >>> c = C()
+    >>> c.foo(1, 2)
+    staticmethod 1 2
+
+Class methods use a similar pattern to declare methods that receive an
+implicit first argument that is the *class* for which they are invoked.
+
+    >>> class C:
+    ...     def foo(cls, y):
+    ...         print "classmethod", cls, y
+    ...     foo = classmethod(foo)
+
+    >>> C.foo(1)
+    classmethod test.test_descrtut.C 1
+    >>> c = C()
+    >>> c.foo(1)
+    classmethod test.test_descrtut.C 1
+
+    >>> class D(C):
+    ...     pass
+
+    >>> D.foo(1)
+    classmethod test.test_descrtut.D 1
+    >>> d = D()
+    >>> d.foo(1)
+    classmethod test.test_descrtut.D 1
+
+This prints "classmethod __main__.D 1" both times; in other words, the
+class passed as the first argument of foo() is the class involved in the
+call, not the class involved in the definition of foo().
+
+But notice this:
+
+    >>> class E(C):
+    ...     def foo(cls, y): # override C.foo
+    ...         print "E.foo() called"
+    ...         C.foo(y)
+    ...     foo = classmethod(foo)
+
+    >>> E.foo(1)
+    E.foo() called
+    classmethod test.test_descrtut.C 1
+    >>> e = E()
+    >>> e.foo(1)
+    E.foo() called
+    classmethod test.test_descrtut.C 1
+
+In this example, the call to C.foo() from E.foo() will see class C as its
+first argument, not class E. This is to be expected, since the call
+specifies the class C. But it stresses the difference between these class
+methods and methods defined in metaclasses (where an upcall to a metamethod
+would pass the target class as an explicit first argument).
+"""
+
+# Doctest: properties -- first a hand-rolled descriptor that shadows the
+# builtin 'property', then the builtin itself after 'del property'.
+test_5 = """
+
+Attributes defined by get/set methods
+
+
+    >>> class property(object):
+    ...
+    ...     def __init__(self, get, set=None):
+    ...         self.__get = get
+    ...         self.__set = set
+    ...
+    ...     def __get__(self, inst, type=None):
+    ...         return self.__get(inst)
+    ...
+    ...     def __set__(self, inst, value):
+    ...         if self.__set is None:
+    ...             raise AttributeError, "this attribute is read-only"
+    ...         return self.__set(inst, value)
+
+Now let's define a class with an attribute x defined by a pair of methods,
+getx() and and setx():
+
+    >>> class C(object):
+    ...
+    ...     def __init__(self):
+    ...         self.__x = 0
+    ...
+    ...     def getx(self):
+    ...         return self.__x
+    ...
+    ...     def setx(self, x):
+    ...         if x < 0: x = 0
+    ...         self.__x = x
+    ...
+    ...     x = property(getx, setx)
+
+Here's a small demonstration:
+
+    >>> a = C()
+    >>> a.x = 10
+    >>> print a.x
+    10
+    >>> a.x = -10
+    >>> print a.x
+    0
+    >>>
+
+Hmm -- property is builtin now, so let's try it that way too.
+
+    >>> del property  # unmask the builtin
+    >>> property
+    <type 'property'>
+
+    >>> class C(object):
+    ...     def __init__(self):
+    ...         self.__x = 0
+    ...     def getx(self):
+    ...         return self.__x
+    ...     def setx(self, x):
+    ...         if x < 0: x = 0
+    ...         self.__x = x
+    ...     x = property(getx, setx)
+
+
+    >>> a = C()
+    >>> a.x = 10
+    >>> print a.x
+    10
+    >>> a.x = -10
+    >>> print a.x
+    0
+    >>>
+"""
+
+# Doctest: MRO difference -- classic classes use depth-first lookup
+# (finds A.save), new-style classes use the 2.2 MRO (finds C.save).
+test_6 = """
+
+Method resolution order
+
+This example is implicit in the writeup.
+
+>>> class A:    # classic class
+...     def save(self):
+...         print "called A.save()"
+>>> class B(A):
+...     pass
+>>> class C(A):
+...     def save(self):
+...         print "called C.save()"
+>>> class D(B, C):
+...     pass
+
+>>> D().save()
+called A.save()
+
+>>> class A(object):  # new class
+...     def save(self):
+...         print "called A.save()"
+>>> class B(A):
+...     pass
+>>> class C(A):
+...     def save(self):
+...         print "called C.save()"
+>>> class D(B, C):
+...     pass
+
+>>> D().save()
+called C.save()
+"""
+
+# Diamond hierarchy (D -> C -> B -> A) used by test_7 to demonstrate
+# cooperative super() calls: D().m() visits every class once, in MRO
+# order, yielding "DCBA".
+class A(object):
+    def m(self):
+        return "A"
+
+class B(A):
+    def m(self):
+        return "B" + super(B, self).m()
+
+class C(A):
+    def m(self):
+        return "C" + super(C, self).m()
+
+class D(C, B):
+    def m(self):
+        return "D" + super(D, self).m()
+
+
+# Doctest: exercises the A/B/C/D super() chain defined above.
+test_7 = """
+
+Cooperative methods and "super"
+
+>>> print D().m() # "DCBA"
+DCBA
+"""
+
+# Doctest: backwards incompatibility -- calling an unbound method of a
+# sibling class with the wrong instance type now raises TypeError.
+test_8 = """
+
+Backwards incompatibilities
+
+>>> class A:
+...     def foo(self):
+...         print "called A.foo()"
+
+>>> class B(A):
+...     pass
+
+>>> class C(A):
+...     def foo(self):
+...         B.foo(self)
+
+>>> C().foo()
+Traceback (most recent call last):
+ ...
+TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
+
+>>> class C(A):
+...     def foo(self):
+...         A.foo(self)
+>>> C().foo()
+called A.foo()
+"""
+
+# Registry consumed by doctest: maps a display name to each doctest string.
+__test__ = {"tut1": test_1,
+            "tut2": test_2,
+            "tut3": test_3,
+            "tut4": test_4,
+            "tut5": test_5,
+            "tut6": test_6,
+            "tut7": test_7,
+            "tut8": test_8}
+
+# Magic test name that regrtest.py invokes *after* importing this module.
+# This worms around a bootstrap problem.
+# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
+# so this works as expected in both ways of running regrtest.
+def test_main(verbose=None):
+    # Obscure:  import this module as test.test_descrtut instead of as
+    # plain test_descrtut because the name of this module works its way
+    # into the doctest examples, and unless the full test.test_descrtut
+    # business is used the name can change depending on how the test is
+    # invoked.
+    import test_support, test.test_descrtut
+    test_support.run_doctest(test.test_descrtut, verbose)
+
+# This part isn't needed for regrtest, but for running the test directly.
+if __name__ == "__main__":
+    test_main(1)
diff --git a/lib-python/2.2/test/test_difflib.py b/lib-python/2.2/test/test_difflib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_difflib.py
@@ -0,0 +1,2 @@
+# Entire test: run difflib's embedded doctests via the shared helper.
+import difflib, test_support
+test_support.run_doctest(difflib)
diff --git a/lib-python/2.2/test/test_dircache.py b/lib-python/2.2/test/test_dircache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_dircache.py
@@ -0,0 +1,74 @@
+"""
+  Test cases for the dircache module
+  Nick Mathewson
+"""
+
+import unittest
+from test_support import run_unittest, TESTFN
+import dircache, os, time, sys
+
+
+class DircacheTests(unittest.TestCase):
+    # Each test gets a fresh temporary directory, emptied and removed in
+    # tearDown so runs don't interfere with each other.
+    def setUp(self):
+        self.tempdir = TESTFN+"_dir"
+        os.mkdir(self.tempdir)
+
+    def tearDown(self):
+        for fname in os.listdir(self.tempdir):
+            self.delTemp(fname)
+        os.rmdir(self.tempdir)
+
+    def writeTemp(self, fname):
+        # Create an empty file inside the temp directory.
+        f = open(os.path.join(self.tempdir, fname), 'w')
+        f.close()
+
+    def mkdirTemp(self, fname):
+        os.mkdir(os.path.join(self.tempdir, fname))
+
+    def delTemp(self, fname):
+        # Remove a file or (empty) subdirectory of the temp directory.
+        fname = os.path.join(self.tempdir, fname)
+        if os.path.isdir(fname):
+            os.rmdir(fname)
+        else:
+            os.unlink(fname)
+
+    def test_listdir(self):
+        ## SUCCESSFUL CASES
+        entries = dircache.listdir(self.tempdir)
+        self.assertEquals(entries, [])
+
+        # Check that cache is actually caching, not just passing through.
+        self.assert_(dircache.listdir(self.tempdir) is entries)
+
+        # Directories aren't "files" on Windows, and directory mtime has
+        # nothing to do with when files under a directory get created.
+        # That is, this test can't possibly work under Windows -- dircache
+        # is only good for capturing a one-shot snapshot there.
+
+        if sys.platform[:3] not in ('win', 'os2'):
+            # Sadly, dircache has the same granularity as stat.mtime, and so
+            # can't notice any changes that occurred within 1 sec of the last
+            # time it examined a directory.
+            time.sleep(1)
+            self.writeTemp("test1")
+            entries = dircache.listdir(self.tempdir)
+            self.assertEquals(entries, ['test1'])
+            self.assert_(dircache.listdir(self.tempdir) is entries)
+
+        ## UNSUCCESSFUL CASES
+        # A nonexistent directory yields an empty (cached) listing.
+        self.assertEquals(dircache.listdir(self.tempdir+"_nonexistent"), [])
+
+    def test_annotate(self):
+        # annotate() appends '/' to directory entries, leaves files and
+        # nonexistent names untouched.
+        self.writeTemp("test2")
+        self.mkdirTemp("A")
+        lst = ['A', 'test2', 'test_nonexistent']
+        dircache.annotate(self.tempdir, lst)
+        self.assertEquals(lst, ['A/', 'test2', 'test_nonexistent'])
+
+
+def test_main():
+    # regrtest entry point.
+    run_unittest(DircacheTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_dl.py b/lib-python/2.2/test/test_dl.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_dl.py
@@ -0,0 +1,33 @@
+#! /usr/bin/env python
+"""Test dlmodule.c
+   Roger E. Masse  revised strategy by Barry Warsaw
+"""
+
+import dl
+from test_support import verbose,TestSkipped
+
+# Candidate (library path, exported symbol) pairs; the first library that
+# dl.open() accepts is used for the test.
+sharedlibs = [
+    ('/usr/lib/libc.so', 'getpid'),
+    ('/lib/libc.so.6', 'getpid'),
+    ('/usr/bin/cygwin1.dll', 'getpid'),
+    ]
+
+for s, func in sharedlibs:
+    try:
+        if verbose:
+            print 'trying to open:', s,
+        l = dl.open(s)
+    except dl.error, err:
+        # This path is expected on platforms lacking that library;
+        # just try the next candidate.
+        if verbose:
+            print 'failed', repr(str(err))
+        pass
+    else:
+        if verbose:
+            print 'succeeded...',
+        l.call(func)
+        l.close()
+        if verbose:
+            print 'worked!'
+        break
+else:
+    # No candidate opened: skip rather than fail.
+    raise TestSkipped, 'Could not open any shared libraries'
diff --git a/lib-python/2.2/test/test_doctest.py b/lib-python/2.2/test/test_doctest.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_doctest.py
@@ -0,0 +1,2 @@
+# Entire test: run doctest's own embedded doctests.
+import doctest, test_support
+test_support.run_doctest(doctest)
diff --git a/lib-python/2.2/test/test_doctest2.py b/lib-python/2.2/test/test_doctest2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_doctest2.py
@@ -0,0 +1,108 @@
+"""A module to test whether doctest recognizes some 2.2 features,
+like static and class methods.
+
+>>> print 'yup'  # 1
+yup
+"""
+
+import test_support
+
+# NOTE: every docstring in this class IS a doctest counted by test_main
+# (19 examples total) -- do not edit docstring text without updating
+# EXPECTED below.
+class C(object):
+    """Class C.
+
+    >>> print C()  # 2
+    42
+    """
+
+    def __init__(self):
+        """C.__init__.
+
+        >>> print C() # 3
+        42
+        """
+
+    def __str__(self):
+        """
+        >>> print C() # 4
+        42
+        """
+        return "42"
+
+    class D(object):
+        """A nested D class.
+
+        >>> print "In D!"   # 5
+        In D!
+        """
+
+        def nested(self):
+            """
+            >>> print 3 # 6
+            3
+            """
+
+    def getx(self):
+        """
+        >>> c = C()    # 7
+        >>> c.x = 12   # 8
+        >>> print c.x  # 9
+        -12
+        """
+        # Property getter: negates the stored value.
+        return -self._x
+
+    def setx(self, value):
+        """
+        >>> c = C()     # 10
+        >>> c.x = 12    # 11
+        >>> print c.x   # 12
+        -12
+        """
+        self._x = value
+
+    # The property's doc= string is itself a doctest (examples 13-15).
+    x = property(getx, setx, doc="""\
+        >>> c = C()    # 13
+        >>> c.x = 12   # 14
+        >>> print c.x  # 15
+        -12
+        """)
+
+    def statm():
+        """
+        A static method.
+
+        >>> print C.statm()    # 16
+        666
+        >>> print C().statm()  # 17
+        666
+        """
+        return 666
+
+    statm = staticmethod(statm)
+
+    def clsm(cls, val):
+        """
+        A class method.
+
+        >>> print C.clsm(22)    # 18
+        22
+        >>> print C().clsm(23)  # 19
+        23
+        """
+        return val
+
+    clsm = classmethod(clsm)
+
+def test_main():
+    # Verify doctest finds exactly the 19 examples embedded in this
+    # module (static/class methods, nested classes, property doc=).
+    import test_doctest2
+    EXPECTED = 19
+    f, t = test_support.run_doctest(test_doctest2)
+    if t != EXPECTED:
+        raise test_support.TestFailed("expected %d tests to run, not %d" %
+                                      (EXPECTED, t))
+
+# Pollute the namespace with a bunch of imported functions and classes,
+# to make sure they don't get tested.
+from doctest import *
+
+if __name__ == '__main__':
+    test_main()
diff --git a/lib-python/2.2/test/test_dospath.py b/lib-python/2.2/test/test_dospath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_dospath.py
@@ -0,0 +1,61 @@
+import dospath
+import test_support
+import unittest
+
+
+class DOSPathTestCase(unittest.TestCase):
+    # Exercises dospath's pure string-manipulation functions; no
+    # filesystem access is involved.
+
+    def test_abspath(self):
+        self.assert_(dospath.abspath("C:\\") == "C:\\")
+
+    def test_isabs(self):
+        # Absolute: drive roots, UNC paths, and leading-backslash paths.
+        # Not absolute: relative paths, including drive-relative "c:foo".
+        isabs = dospath.isabs
+        self.assert_(isabs("c:\\"))
+        self.assert_(isabs("\\\\conky\\mountpoint\\"))
+        self.assert_(isabs("\\foo"))
+        self.assert_(isabs("\\foo\\bar"))
+        self.failIf(isabs("foo"))
+        self.failIf(isabs("foo\\"))
+        self.failIf(isabs("foo\\bar"))
+        self.failIf(isabs("c:foo"))
+        self.failIf(isabs("c:foo\\"))
+        self.failIf(isabs("c:foo\\bar"))
+
+    def test_commonprefix(self):
+        # Note: commonprefix is character-wise, not component-wise
+        # (hence "/home/swen" from swenson/swen).
+        commonprefix = dospath.commonprefix
+        self.assert_(commonprefix(["/home/swenson/spam", "/home/swen/spam"])
+                     == "/home/swen")
+        self.assert_(commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])
+                     == "\\home\\swen\\")
+        self.assert_(commonprefix(["/home/swen/spam", "/home/swen/spam"])
+                     == "/home/swen/spam")
+
+    def test_split(self):
+        # split() handles drive roots and UNC mount points specially.
+        split = dospath.split
+        self.assertEquals(split("c:\\foo\\bar"),
+                          ('c:\\foo', 'bar'))
+        self.assertEquals(split("\\\\conky\\mountpoint\\foo\\bar"),
+                          ('\\\\conky\\mountpoint\\foo', 'bar'))
+
+        self.assertEquals(split("c:\\"), ('c:\\', ''))
+        self.assertEquals(split("\\\\conky\\mountpoint\\"),
+                          ('\\\\conky\\mountpoint', ''))
+
+        self.assertEquals(split("c:/"), ('c:/', ''))
+        self.assertEquals(split("//conky/mountpoint/"),
+                          ('//conky/mountpoint', ''))
+
+    def test_splitdrive(self):
+        splitdrive = dospath.splitdrive
+        self.assertEquals(splitdrive("c:\\foo\\bar"), ('c:', '\\foo\\bar'))
+        self.assertEquals(splitdrive("c:/foo/bar"), ('c:', '/foo/bar'))
+        self.assertEquals(splitdrive("foo\\bar"), ('', 'foo\\bar'))
+        self.assertEquals(splitdrive("c:"), ('c:', ''))
+
+
+def test_main():
+    # regrtest entry point.
+    test_support.run_unittest(DOSPathTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_dumbdbm.py b/lib-python/2.2/test/test_dumbdbm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_dumbdbm.py
@@ -0,0 +1,79 @@
+#! /usr/bin/env python
+"""Test script for the dumbdbm module
+   Original by Roger E. Masse
+"""
+
+import os
+import test_support
+import unittest
+import dumbdbm
+import tempfile
+
+# Base name for the dumbdbm files; dumbdbm adds the extensions itself.
+# NOTE(review): tempfile.mktemp() is race-prone by design; acceptable in
+# this single-process test, but do not copy the pattern elsewhere.
+_fname = tempfile.mktemp()
+
+def _delete_files():
+    # Remove all three files dumbdbm may create for _fname; missing
+    # files are fine (OSError ignored).
+    for ext in [".dir", ".dat", ".bak"]:
+        try:
+            os.unlink(_fname + ext)
+        except OSError:
+            pass
+
+class DumbDBMTestCase(unittest.TestCase):
+    # Reference data written into the database; later tests reopen the
+    # same files, so the test methods depend on alphabetical run order
+    # (creation -> keys -> modification -> read).
+    _dict = {'0': '',
+             'a': 'Python:',
+             'b': 'Programming',
+             'c': 'the',
+             'd': 'way',
+             'f': 'Guido',
+             'g': 'intended'
+             }
+
+    def __init__(self, *args):
+        unittest.TestCase.__init__(self, *args)
+        # Sorted key list, precomputed for keys_helper comparisons.
+        self._dkeys = self._dict.keys()
+        self._dkeys.sort()
+
+    def test_dumbdbm_creation(self):
+        _delete_files()
+        f = dumbdbm.open(_fname, 'c')
+        self.assertEqual(f.keys(), [])
+        for key in self._dict:
+            f[key] = self._dict[key]
+        self.read_helper(f)
+        f.close()
+
+    def test_dumbdbm_modification(self):
+        f = dumbdbm.open(_fname, 'w')
+        # Update both the db and the reference dict so read_helper agrees.
+        self._dict['g'] = f['g'] = "indented"
+        self.read_helper(f)
+        f.close()
+
+    def test_dumbdbm_read(self):
+        f = dumbdbm.open(_fname, 'r')
+        self.read_helper(f)
+        f.close()
+
+    def test_dumbdbm_keys(self):
+        f = dumbdbm.open(_fname)
+        keys = self.keys_helper(f)
+        f.close()
+
+    def read_helper(self, f):
+        # Assert every reference entry is present with the right value.
+        keys = self.keys_helper(f)
+        for key in self._dict:
+            self.assertEqual(self._dict[key], f[key])
+
+    def keys_helper(self, f):
+        # Assert the db's key set matches the reference keys exactly.
+        keys = f.keys()
+        keys.sort()
+        self.assertEqual(keys, self._dkeys)
+        return keys
+
+def test_main():
+    # Always clean up the database files, even when a test fails.
+    try:
+        test_support.run_unittest(DumbDBMTestCase)
+    finally:
+        _delete_files()
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_email.py b/lib-python/2.2/test/test_email.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_email.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2001,2002 Python Software Foundation
+# email package unit tests
+
+import unittest
+# The specific tests now live in Lib/email/test
+from email.test.test_email import suite
+from test_support import run_suite
+
+def test_main():
+    # Delegate to the suite assembled in email.test.test_email.
+    run_suite(suite())
+
+if __name__ == '__main__':
+    test_main()
diff --git a/lib-python/2.2/test/test_email_codecs.py b/lib-python/2.2/test/test_email_codecs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_email_codecs.py
@@ -0,0 +1,11 @@
+# Copyright (C) 2002 Python Software Foundation
+# email package unit tests for (optional) Asian codecs
+
+import unittest
+# The specific tests now live in Lib/email/test
+from email.test.test_email_codecs import suite
+
+
+
+if __name__ == '__main__':
+    # No test_main() wrapper here: the Asian-codec tests are optional and
+    # are only run when this module is invoked directly.
+    unittest.main(defaultTest='suite')
diff --git a/lib-python/2.2/test/test_errno.py b/lib-python/2.2/test/test_errno.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_errno.py
@@ -0,0 +1,49 @@
+#! /usr/bin/env python
+"""Test the errno module
+   Roger E. Masse
+"""
+
+import errno
+from test_support import verbose
+
+# Candidate errno symbol names.  Not every platform defines all of these,
+# so the loop at the bottom tolerates AttributeError for missing names.
+errors = ['E2BIG', 'EACCES', 'EADDRINUSE', 'EADDRNOTAVAIL', 'EADV',
+          'EAFNOSUPPORT', 'EAGAIN', 'EALREADY', 'EBADE', 'EBADF',
+          'EBADFD', 'EBADMSG', 'EBADR', 'EBADRQC', 'EBADSLT',
+          'EBFONT', 'EBUSY', 'ECHILD', 'ECHRNG', 'ECOMM',
+          'ECONNABORTED', 'ECONNREFUSED', 'ECONNRESET',
+          'EDEADLK', 'EDEADLOCK', 'EDESTADDRREQ', 'EDOM',
+          'EDQUOT', 'EEXIST', 'EFAULT', 'EFBIG', 'EHOSTDOWN',
+          'EHOSTUNREACH', 'EIDRM', 'EILSEQ', 'EINPROGRESS',
+          'EINTR', 'EINVAL', 'EIO', 'EISCONN', 'EISDIR',
+          'EL2HLT', 'EL2NSYNC', 'EL3HLT', 'EL3RST', 'ELIBACC',
+          'ELIBBAD', 'ELIBEXEC', 'ELIBMAX', 'ELIBSCN', 'ELNRNG',
+          'ELOOP', 'EMFILE', 'EMLINK', 'EMSGSIZE', 'EMULTIHOP',
+          'ENAMETOOLONG', 'ENETDOWN', 'ENETRESET', 'ENETUNREACH',
+          'ENFILE', 'ENOANO', 'ENOBUFS', 'ENOCSI', 'ENODATA',
+          'ENODEV', 'ENOENT', 'ENOEXEC', 'ENOLCK', 'ENOLINK',
+          'ENOMEM', 'ENOMSG', 'ENONET', 'ENOPKG', 'ENOPROTOOPT',
+          'ENOSPC', 'ENOSR', 'ENOSTR', 'ENOSYS', 'ENOTBLK',
+          # NOTE(review): 'ENOTOBACCO' looks like a deliberate nonsense
+          # entry; its lookup fails harmlessly via the except clause below.
+          'ENOTCONN', 'ENOTDIR', 'ENOTEMPTY', 'ENOTOBACCO', 'ENOTSOCK',
+          'ENOTTY', 'ENOTUNIQ', 'ENXIO', 'EOPNOTSUPP',
+          'EOVERFLOW', 'EPERM', 'EPFNOSUPPORT', 'EPIPE',
+          'EPROTO', 'EPROTONOSUPPORT', 'EPROTOTYPE',
+          'ERANGE', 'EREMCHG', 'EREMOTE', 'ERESTART',
+          'EROFS', 'ESHUTDOWN', 'ESOCKTNOSUPPORT', 'ESPIPE',
+          'ESRCH', 'ESRMNT', 'ESTALE', 'ESTRPIPE', 'ETIME',
+          'ETIMEDOUT', 'ETOOMANYREFS', 'ETXTBSY', 'EUNATCH',
+          'EUSERS', 'EWOULDBLOCK', 'EXDEV', 'EXFULL']
+
+#
+# This is a wee bit bogus since the module only conditionally adds
+# errno constants if they have been defined by errno.h  However, this
+# test seems to work on SGI, Sparc & intel Solaris, and linux.
+#
+for error in errors:
+    try:
+        a = getattr(errno, error)
+    except AttributeError:
+        # Missing constants are merely reported in verbose mode, never
+        # treated as failures.
+        if verbose:
+            print '%s: not found' % error
+    else:
+        if verbose:
+            print '%s: %d' % (error, a)
diff --git a/lib-python/2.2/test/test_exceptions.py b/lib-python/2.2/test/test_exceptions.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_exceptions.py
@@ -0,0 +1,206 @@
+# Python test set -- part 5, built-in exceptions
+
+from test_support import *
+from types import ClassType
+import warnings
+import sys, traceback
+
+warnings.filterwarnings("error", "", OverflowWarning, __name__)
+
+print '5. Built-in exceptions'
+# XXX This is not really enough, each *operation* should be tested!
+
+# Reloading the built-in exceptions module failed prior to Py2.2, while it
+# should act the same as reloading built-in sys.
+try:
+    import exceptions
+    reload(exceptions)
+except ImportError, e:
+    raise TestFailed, e
+
+def test_raise_catch(exc):
+    # Raise the exception both ways -- "raise cls, arg" and
+    # "raise cls(arg)" -- and print the str() of the caught instance.
+    try:
+        raise exc, "spam"
+    except exc, err:
+        buf = str(err)
+    try:
+        raise exc("spam")
+    except exc, err:
+        buf = str(err)
+    print buf
+
+def r(thing):
+    # Exercise raise/catch for `thing` and echo its name so the expected
+    # output can track which exception is under test.
+    test_raise_catch(thing)
+    if isinstance(thing, ClassType):
+        print thing.__name__
+    else:
+        print thing
+
+r(AttributeError)
+import sys
+try: x = sys.undefined_attribute
+except AttributeError: pass
+
+r(EOFError)
+import sys
+fp = open(TESTFN, 'w')
+fp.close()
+fp = open(TESTFN, 'r')
+savestdin = sys.stdin
+try:
+    try:
+        # raw_input() on an empty file raises EOFError.
+        sys.stdin = fp
+        x = raw_input()
+    except EOFError:
+        pass
+finally:
+    sys.stdin = savestdin
+    fp.close()
+
+r(IOError)
+try: open('this file does not exist', 'r')
+except IOError: pass
+
+r(ImportError)
+try: import undefined_module
+except ImportError: pass
+
+r(IndexError)
+x = []
+try: a = x[10]
+except IndexError: pass
+
+r(KeyError)
+x = {}
+try: a = x['key']
+except KeyError: pass
+
+r(KeyboardInterrupt)
+print '(not testable in a script)'
+
+r(MemoryError)
+print '(not safe to test)'
+
+r(NameError)
+try: x = undefined_variable
+except NameError: pass
+
+r(OverflowError)
+x = 1
+try:
+    while 1: x = x+x
+except OverflowError: pass
+
+r(RuntimeError)
+print '(not used any more?)'
+
+r(SyntaxError)
+try: exec '/\n'
+except SyntaxError: pass
+
+# make sure the right exception message is raised for each of these
+# code fragments:
+
+def ckmsg(src, msg):
+    # Compile `src` and check the SyntaxError message matches `msg`.
+    try:
+        compile(src, '<fragment>', 'exec')
+    except SyntaxError, e:
+        print e.msg
+        if e.msg == msg:
+            print "ok"
+        else:
+            print "expected:", msg
+    else:
+        print "failed to get expected SyntaxError"
+
+s = '''\
+while 1:
+    try:
+        pass
+    finally:
+        continue
+'''
+if sys.platform.startswith('java'):
+    print "'continue' not supported inside 'finally' clause"
+    print "ok"
+else:
+    ckmsg(s, "'continue' not supported inside 'finally' clause")
+s = '''\
+try:
+    continue
+except:
+    pass
+'''
+ckmsg(s, "'continue' not properly in loop")
+ckmsg("continue\n", "'continue' not properly in loop")
+
+r(IndentationError)
+
+r(TabError)
+# can only be tested under -tt, and is the only test for -tt
+#try: compile("try:\n\t1/0\n    \t1/0\nfinally:\n pass\n", '<string>', 'exec')
+#except TabError: pass
+#else: raise TestFailed
+
+r(SystemError)
+print '(hard to reproduce)'
+
+r(SystemExit)
+import sys
+try: sys.exit(0)
+except SystemExit: pass
+
+r(TypeError)
+try: [] + ()
+except TypeError: pass
+
+r(ValueError)
+try: x = chr(10000)
+except ValueError: pass
+
+r(ZeroDivisionError)
+try: x = 1/0
+except ZeroDivisionError: pass
+
+r(Exception)
+try: x = 1/0
+except Exception, e: pass
+
+# test that setting an exception at the C level works even if the
+# exception object can't be constructed.
+
+class BadException:
+    def __init__(self):
+        raise RuntimeError, "can't instantiate BadException"
+
+def test_capi1():
+    # Raising an unconstructable class should surface as a TypeError whose
+    # traceback points at this frame.
+    import _testcapi
+    try:
+        _testcapi.raise_exception(BadException, 1)
+    except TypeError, err:
+        exc, err, tb = sys.exc_info()
+        co = tb.tb_frame.f_code
+        assert co.co_name == "test_capi1"
+        assert co.co_filename.endswith('test_exceptions.py')
+    else:
+        print "Expected exception"
+
+def test_capi2():
+    # Same as above, but letting the constructor run: the RuntimeError from
+    # BadException.__init__ must carry __init__'s frame on top.
+    import _testcapi
+    try:
+        _testcapi.raise_exception(BadException, 0)
+    except RuntimeError, err:
+        exc, err, tb = sys.exc_info()
+        co = tb.tb_frame.f_code
+        assert co.co_name == "__init__"
+        assert co.co_filename.endswith('test_exceptions.py')
+        co2 = tb.tb_frame.f_back.f_code
+        assert co2.co_name == "test_capi2"
+    else:
+        print "Expected exception"
+
+# _testcapi is presumably CPython-only, so the C API checks are skipped on
+# Java platforms (Jython).
+if not sys.platform.startswith('java'):
+    test_capi1()
+    test_capi2()
+
+unlink(TESTFN)
diff --git a/lib-python/2.2/test/test_extcall.py b/lib-python/2.2/test/test_extcall.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_extcall.py
@@ -0,0 +1,268 @@
+from test_support import verify, verbose, TestFailed, sortdict
+from UserList import UserList
+
+# Tests for extended call syntax: f(*args), f(**kwargs) and combinations,
+# including error cases (bad * / ** operands, duplicate keywords).
+
+def f(*a, **k):
+    print a, sortdict(k)
+
+def g(x, *y, **z):
+    print x, y, sortdict(z)
+
+def h(j=1, a=2, h=3):
+    print j, a, h
+
+f()
+f(1)
+f(1, 2)
+f(1, 2, 3)
+
+f(1, 2, 3, *(4, 5))
+f(1, 2, 3, *[4, 5])
+f(1, 2, 3, *UserList([4, 5]))
+f(1, 2, 3, **{'a':4, 'b':5})
+f(1, 2, 3, *(4, 5), **{'a':6, 'b':7})
+f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b':9})
+
+try:
+    g()
+except TypeError, err:
+    print "TypeError:", err
+else:
+    print "should raise TypeError: not enough arguments; expected 1, got 0"
+
+try:
+    g(*())
+except TypeError, err:
+    print "TypeError:", err
+else:
+    print "should raise TypeError: not enough arguments; expected 1, got 0"
+
+try:
+    g(*(), **{})
+except TypeError, err:
+    print "TypeError:", err
+else:
+    print "should raise TypeError: not enough arguments; expected 1, got 0"
+
+g(1)
+g(1, 2)
+g(1, 2, 3)
+g(1, 2, 3, *(4, 5))
+
+# The successive Nothing classes probe what the * operand must support:
+# nothing at all, __len__ only, the sequence protocol, and the iterator
+# protocol (with and without a working next()).
+class Nothing: pass
+try:
+    g(*Nothing())
+except TypeError, attr:
+    pass
+else:
+    print "should raise TypeError"
+
+class Nothing:
+    def __len__(self):
+        return 5
+try:
+    g(*Nothing())
+except TypeError, attr:
+    pass
+else:
+    print "should raise TypeError"
+
+class Nothing:
+    def __len__(self):
+        return 5
+    def __getitem__(self, i):
+        if i < 3:
+            return i
+        else:
+            raise IndexError, i
+g(*Nothing())
+
+class Nothing:
+    def __init__(self):
+        self.c = 0
+    def __iter__(self):
+        return self
+try:
+    g(*Nothing())
+except TypeError, attr:
+    pass
+else:
+    print "should raise TypeError"
+
+class Nothing:
+    def __init__(self):
+        self.c = 0
+    def __iter__(self):
+        return self
+    def next(self):
+        if self.c == 4:
+            raise StopIteration
+        c = self.c
+        self.c += 1
+        return c
+g(*Nothing())
+
+# make sure the function call doesn't stomp on the dictionary?
+d = {'a': 1, 'b': 2, 'c': 3}
+d2 = d.copy()
+verify(d == d2)
+g(1, d=4, **d)
+print sortdict(d)
+print sortdict(d2)
+verify(d == d2, "function call modified dictionary")
+
+# what about willful misconduct?
+def saboteur(**kw):
+    kw['x'] = locals() # yields a cyclic kw
+    return kw
+d = {}
+kw = saboteur(a=1, **d)
+verify(d == {})
+# break the cycle
+del kw['x']
+
+try:
+    g(1, 2, 3, **{'x':4, 'y':5})
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: keyword parameter redefined"
+
+try:
+    g(1, 2, 3, a=4, b=5, *(6, 7), **{'a':8, 'b':9})
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: keyword parameter redefined"
+
+try:
+    f(**{1:2})
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: keywords must be strings"
+
+try:
+    h(**{'e': 2})
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: unexpected keyword argument: e"
+
+try:
+    h(*h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: * argument must be a tuple"
+
+try:
+    dir(*h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: * argument must be a tuple"
+
+try:
+    None(*h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: * argument must be a tuple"
+
+try:
+    h(**h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: ** argument must be a dictionary"
+
+try:
+    dir(**h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: ** argument must be a dictionary"
+
+try:
+    None(**h)
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: ** argument must be a dictionary"
+
+try:
+    dir(b=1,**{'b':1})
+except TypeError, err:
+    print err
+else:
+    print "should raise TypeError: dir() got multiple values for keyword argument 'b'"
+
+def f2(*a, **b):
+    return a, b
+
+# A large ** dict must pass through intact.
+d = {}
+for i in range(512):
+    key = 'k%d' % i
+    d[key] = i
+a, b = f2(1, *(2, 3), **d)
+print len(a), len(b), b == d
+
+class Foo:
+    def method(self, arg1, arg2):
+        return arg1 + arg2
+
+x = Foo()
+print Foo.method(*(x, 1, 2))
+print Foo.method(x, *(1, 2))
+try:
+    print Foo.method(*(1, 2, 3))
+except TypeError, err:
+    pass
+else:
+    print 'expected a TypeError for unbound method call'
+try:
+    print Foo.method(1, *(2, 3))
+except TypeError, err:
+    pass
+else:
+    print 'expected a TypeError for unbound method call'
+
+# A PyCFunction that takes only positional parameters should allow an
+# empty keyword dictionary to pass without a complaint, but raise a
+# TypeError if the dictionary is non-empty.
+id(1, **{})
+try:
+    id(1, **{"foo": 1})
+except TypeError:
+    pass
+else:
+    raise TestFailed, 'expected TypeError; no exception raised'
+
+# Generate functions named z<args><defargs><vararg><kwarg> covering every
+# combination of positional args, defaults, *vararg and **kwarg, then call
+# selected ones with assorted positional/keyword argument mixes.
+a, b, d, e, v, k = 'A', 'B', 'D', 'E', 'V', 'K'
+funcs = []
+maxargs = {}
+for args in ['', 'a', 'ab']:
+    for defargs in ['', 'd', 'de']:
+        for vararg in ['', 'v']:
+            for kwarg in ['', 'k']:
+                name = 'z' + args + defargs + vararg + kwarg
+                arglist = list(args) + map(
+                    lambda x: '%s="%s"' % (x, x), defargs)
+                if vararg: arglist.append('*' + vararg)
+                if kwarg: arglist.append('**' + kwarg)
+                decl = (('def %s(%s): print "ok %s", a, b, d, e, v, ' +
+                         'type(k) is type ("") and k or sortdict(k)')
+                         % (name, ', '.join(arglist), name))
+                exec(decl)
+                func = eval(name)
+                funcs.append(func)
+                maxargs[func] = len(args + defargs)
+
+for name in ['za', 'zade', 'zabk', 'zabdv', 'zabdevk']:
+    func = eval(name)
+    for args in [(), (1, 2), (1, 2, 3, 4, 5)]:
+        for kwargs in ['', 'a', 'd', 'ad', 'abde']:
+            kwdict = {}
+            for k in kwargs: kwdict[k] = k + k
+            print func.func_name, args, sortdict(kwdict), '->',
+            try: apply(func, args, kwdict)
+            except TypeError, err: print err
diff --git a/lib-python/2.2/test/test_fcntl.py b/lib-python/2.2/test/test_fcntl.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_fcntl.py
@@ -0,0 +1,53 @@
+#! /usr/bin/env python
+"""Test program for the fcntl C module.
+   Roger E. Masse
+"""
+import struct
+import fcntl
+import os, sys
+from test_support import verbose, TESTFN
+
+filename = TESTFN
+
+# Pick the field width for l_start/l_len: 64-bit ("qq") on platforms with
+# large-file support, otherwise long ("ll").
+try:
+    os.O_LARGEFILE
+except AttributeError:
+    start_len = "ll"
+else:
+    start_len = "qq"
+
+# The struct flock layout is platform-specific; build a matching F_WRLCK
+# lock record for the current platform.
+if sys.platform in ('netbsd1', 'Darwin1.2', 'darwin',
+                    'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
+                    'bsdos2', 'bsdos3', 'bsdos4',
+                    'openbsd', 'openbsd2', 'openbsd3'):
+    lockdata = struct.pack('lxxxxlxxxxlhh', 0, 0, 0, fcntl.F_WRLCK, 0)
+elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
+    lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
+else:
+    lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
+if verbose:
+    print 'struct.pack: ', `lockdata`
+
+
+# the example from the library docs
+f = open(filename, 'w')
+rv = fcntl.fcntl(f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
+if verbose:
+    # (sic) the "fnctl" typo is in the original output string; kept verbatim.
+    print 'Status from fnctl with O_NONBLOCK: ', rv
+
+rv = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW, lockdata)
+if verbose:
+    print 'String from fcntl with F_SETLKW: ', `rv`
+
+f.close()
+os.unlink(filename)
+
+
+# Again, but pass the file rather than numeric descriptor:
+f = open(filename, 'w')
+rv = fcntl.fcntl(f, fcntl.F_SETFL, os.O_NONBLOCK)
+
+rv = fcntl.fcntl(f, fcntl.F_SETLKW, lockdata)
+
+f.close()
+os.unlink(filename)
diff --git a/lib-python/2.2/test/test_file.py b/lib-python/2.2/test/test_file.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_file.py
@@ -0,0 +1,63 @@
+import os
+
+from test_support import verify, TESTFN
+from UserList import UserList
+
+# verify writelines with instance sequence
+l = UserList(['1', '2'])
+f = open(TESTFN, 'wb')
+f.writelines(l)
+f.close()
+f = open(TESTFN, 'rb')
+buf = f.read()
+f.close()
+verify(buf == '12')
+
+# verify writelines with integers
+f = open(TESTFN, 'wb')
+try:
+    f.writelines([1, 2, 3])
+except TypeError:
+    pass
+else:
+    print "writelines accepted sequence of integers"
+f.close()
+
+# verify writelines with integers in UserList
+f = open(TESTFN, 'wb')
+l = UserList([1,2,3])
+try:
+    f.writelines(l)
+except TypeError:
+    pass
+else:
+    print "writelines accepted sequence of integers"
+f.close()
+
+# verify writelines with non-string object
+class NonString: pass
+
+f = open(TESTFN, 'wb')
+try:
+    f.writelines([NonString(), NonString()])
+except TypeError:
+    pass
+else:
+    print "writelines accepted sequence of non-string objects"
+f.close()
+
+# verify that we get a sensible error message for bad mode argument
+bad_mode = "qwerty"
+try:
+    open(TESTFN, bad_mode)
+except IOError, msg:
+    # The message should mention the bad mode, not the file name.
+    if msg[0] != 0:
+        s = str(msg)
+        if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
+            print "bad error message for invalid mode: %s" % s
+    # if msg[0] == 0, we're probably on Windows where there may be
+    # no obvious way to discover why open() failed.
+else:
+    print "no error for invalid mode: %s" % bad_mode
+
+os.unlink(TESTFN)
diff --git a/lib-python/2.2/test/test_fileinput.py b/lib-python/2.2/test/test_fileinput.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_fileinput.py
@@ -0,0 +1,159 @@
+'''
+Tests for fileinput module.
+Nick Mathewson
+'''
+
+from test_support import verify, verbose, TESTFN
+import sys, os, re
+from StringIO import StringIO
+from fileinput import FileInput
+
+# The fileinput module has 2 interfaces: the FileInput class which does
+# all the work, and a few functions (input, etc.) that use a global _state
+# variable.  We only test the FileInput class, since the other functions
+# only provide a thin facade over FileInput.
+
+# Write lines (a list of lines) to temp file number i, and return the
+# temp file's name.
+def writeTmp(i, lines):
+    name = TESTFN + str(i)
+    f = open(name, 'w')
+    f.writelines(lines)
+    f.close()
+    return name
+
+pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
+
+def remove_tempfiles(*names):
+    # Best-effort cleanup; missing files are ignored.
+    for name in names:
+        try:
+            os.unlink(name)
+        except:
+            pass
+
+def runTests(t1, t2, t3, t4, bs=0, round=0):
+    # Exercise FileInput over the four temp files with buffer size `bs`;
+    # `round` only offsets the printed sub-test numbers.
+    start = 1 + round*6
+    if verbose:
+        print '%s. Simple iteration (bs=%s)' % (start+0, bs)
+    fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
+    lines = list(fi)
+    fi.close()
+    verify(len(lines) == 31)
+    verify(lines[4] == 'Line 5 of file 1\n')
+    verify(lines[30] == 'Line 1 of file 4\n')
+    verify(fi.lineno() == 31)
+    verify(fi.filename() == t4)
+
+    if verbose:
+        print '%s. Status variables (bs=%s)' % (start+1, bs)
+    fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
+    s = "x"
+    while s and s != 'Line 6 of file 2\n':
+        s = fi.readline()
+    verify(fi.filename() == t2)
+    verify(fi.lineno() == 21)
+    verify(fi.filelineno() == 6)
+    verify(not fi.isfirstline())
+    verify(not fi.isstdin())
+
+    if verbose:
+        print '%s. Nextfile (bs=%s)' % (start+2, bs)
+    fi.nextfile()
+    verify(fi.readline() == 'Line 1 of file 3\n')
+    verify(fi.lineno() == 22)
+    fi.close()
+
+    if verbose:
+        print '%s. Stdin (bs=%s)' % (start+3, bs)
+    # '-' in the file list means read from sys.stdin, faked here with a
+    # StringIO; restore the real stdin no matter what.
+    fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
+    savestdin = sys.stdin
+    try:
+        sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
+        lines = list(fi)
+        verify(len(lines) == 33)
+        verify(lines[32] == 'Line 2 of stdin\n')
+        verify(fi.filename() == '<stdin>')
+        fi.nextfile()
+    finally:
+        sys.stdin = savestdin
+
+    if verbose:
+        print '%s. Boundary conditions (bs=%s)' % (start+4, bs)
+    fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
+    verify(fi.lineno() == 0)
+    verify(fi.filename() == None)
+    fi.nextfile()
+    verify(fi.lineno() == 0)
+    verify(fi.filename() == None)
+
+    if verbose:
+        print '%s. Inplace (bs=%s)' % (start+5, bs)
+    # inplace=1 redirects stdout into the files being rewritten.
+    savestdout = sys.stdout
+    try:
+        fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
+        for line in fi:
+            line = line[:-1].upper()
+            print line
+        fi.close()
+    finally:
+        sys.stdout = savestdout
+
+    fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
+    for line in fi:
+        verify(line[-1] == '\n')
+        m = pat.match(line[:-1])
+        verify(m != None)
+        verify(int(m.group(1)) == fi.filelineno())
+    fi.close()
+
+
+def writeFiles():
+    global t1, t2, t3, t4
+    t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
+    t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
+    t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
+    t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
+
+# First, run the tests with default and teeny buffer size.
+for round, bs in (0, 0), (1, 30):
+    try:
+        writeFiles()
+        runTests(t1, t2, t3, t4, bs, round)
+    finally:
+        remove_tempfiles(t1, t2, t3, t4)
+
+# Next, check for proper behavior with 0-byte files.
+if verbose:
+    print "13. 0-byte files"
+try:
+    t1 = writeTmp(1, [""])
+    t2 = writeTmp(2, [""])
+    t3 = writeTmp(3, ["The only line there is.\n"])
+    t4 = writeTmp(4, [""])
+    fi = FileInput(files=(t1, t2, t3, t4))
+    line = fi.readline()
+    verify(line == 'The only line there is.\n')
+    verify(fi.lineno() == 1)
+    verify(fi.filelineno() == 1)
+    verify(fi.filename() == t3)
+    line = fi.readline()
+    verify(not line)
+    verify(fi.lineno() == 1)
+    verify(fi.filelineno() == 0)
+    verify(fi.filename() == t4)
+    fi.close()
+finally:
+    remove_tempfiles(t1, t2, t3, t4)
+
+if verbose:
+    print "14. Files that don't end with newline"
+try:
+    t1 = writeTmp(1, ["A\nB\nC"])
+    t2 = writeTmp(2, ["D\nE\nF"])
+    fi = FileInput(files=(t1, t2))
+    lines = list(fi)
+    verify(lines == ["A\n", "B\n", "C", "D\n", "E\n", "F"])
+    verify(fi.filelineno() == 3)
+    verify(fi.lineno() == 6)
+finally:
+    remove_tempfiles(t1, t2)
diff --git a/lib-python/2.2/test/test_fnmatch.py b/lib-python/2.2/test/test_fnmatch.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_fnmatch.py
@@ -0,0 +1,46 @@
+"""Test cases for the fnmatch module."""
+
+import test_support
+import unittest
+
+from fnmatch import fnmatch, fnmatchcase
+
+
+class FnmatchTestCase(unittest.TestCase):
+    def check_match(self, filename, pattern, should_match=1):
+        # Assert fnmatch(filename, pattern) matches (or, when
+        # should_match is false, does not match).
+        if should_match:
+            self.assert_(fnmatch(filename, pattern),
+                         "expected %r to match pattern %r"
+                         % (filename, pattern))
+        else:
+            self.assert_(not fnmatch(filename, pattern),
+                         "expected %r not to match pattern %r"
+                         % (filename, pattern))
+
+    def test_fnmatch(self):
+        check = self.check_match
+        check('abc', 'abc')
+        check('abc', '?*?')
+        check('abc', '???*')
+        check('abc', '*???')
+        check('abc', '???')
+        check('abc', '*')
+        check('abc', 'ab[cd]')
+        check('abc', 'ab[!de]')
+        check('abc', 'ab[de]', 0)
+        check('a', '??', 0)
+        check('a', 'b', 0)
+
+        # these test that '\' is handled correctly in character sets;
+        # see SF bug #???
+        check('\\', r'[\]')
+        check('a', r'[!\]')
+        check('\\', r'[!\]', 0)
+
+
+def test_main():
+    test_support.run_unittest(FnmatchTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_fork1.py b/lib-python/2.2/test/test_fork1.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_fork1.py
@@ -0,0 +1,75 @@
+"""This test checks for correct fork() behavior.
+
+We want fork1() semantics -- only the forking thread survives in the
+child after a fork().
+
+On some systems (e.g. Solaris without posix threads) we find that all
+active threads survive in the child after a fork(); this is an error.
+
+While BeOS doesn't officially support fork and native threading in
+the same application, the present example should work just fine.  DC
+"""
+
+import os, sys, time, thread
+from test_support import verify, verbose, TestSkipped
+
+try:
+    os.fork
+except AttributeError:
+    raise TestSkipped, "os.fork not defined -- skipping test_fork1"
+
+LONGSLEEP = 2
+
+SHORTSLEEP = 0.5
+
+NUM_THREADS = 4
+
+# id -> pid of the process that most recently ran that worker thread.
+alive = {}
+
+# Flag checked by workers; set to 1 by the parent to shut them down.
+stop = 0
+
+def f(id):
+    # Worker: keep recording our current pid under `id` until told to stop.
+    while not stop:
+        alive[id] = os.getpid()
+        try:
+            time.sleep(SHORTSLEEP)
+        except IOError:
+            pass
+
+def main():
+    for i in range(NUM_THREADS):
+        thread.start_new(f, (i,))
+
+    time.sleep(LONGSLEEP)
+
+    # All workers must have checked in before we fork.
+    a = alive.keys()
+    a.sort()
+    verify(a == range(NUM_THREADS))
+
+    prefork_lives = alive.copy()
+
+    if sys.platform in ['unixware7']:
+        cpid = os.fork1()
+    else:
+        cpid = os.fork()
+
+    if cpid == 0:
+        # Child: if any thread other than the forking one survived, it will
+        # overwrite its alive[] entry with the child's pid; count those and
+        # report the count as the exit status (0 == correct fork1 semantics).
+        time.sleep(LONGSLEEP)
+        n = 0
+        for key in alive.keys():
+            if alive[key] != prefork_lives[key]:
+                n = n+1
+        os._exit(n)
+    else:
+        # Parent
+        spid, status = os.waitpid(cpid, 0)
+        verify(spid == cpid)
+        verify(status == 0,
+                "cause = %d, exit = %d" % (status&0xff, status>>8) )
+        global stop
+        # Tell threads to die
+        stop = 1
+        time.sleep(2*SHORTSLEEP) # Wait for threads to die
+
+main()
diff --git a/lib-python/2.2/test/test_format.py b/lib-python/2.2/test/test_format.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_format.py
@@ -0,0 +1,218 @@
+from test_support import verbose, have_unicode
+import sys
+
+# test string formatting operator (I am not sure if this is being tested
+# elsewhere but, surely, some of the given cases are *not* tested because
+# they crash python)
+# test on unicode strings as well
+
+# While true, an OverflowError from the % operator is tolerated (some of
+# the early cases intentionally overflow the format buffer).
+overflowok = 1
+
+def testformat(formatstr, args, output=None):
+    # Apply formatstr % args; when `output` is given, report a mismatch,
+    # otherwise just check the operation doesn't crash.
+    if verbose:
+        if output:
+            print "%s %% %s =? %s ..." %\
+                (repr(formatstr), repr(args), repr(output)),
+        else:
+            print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
+    try:
+        result = formatstr % args
+    except OverflowError:
+        if not overflowok:
+            raise
+        if verbose:
+            print 'overflow (this is fine)'
+    else:
+        if output and result != output:
+            if verbose:
+                print 'no'
+            print "%s %% %s == %s != %s" %\
+                (repr(formatstr), repr(args), repr(result), repr(output))
+        else:
+            if verbose:
+                print 'yes'
+
+def testboth(formatstr, *args):
+    # Run the same case on the str format string and (when unicode is
+    # available) on its unicode counterpart.
+    testformat(formatstr, *args)
+    if have_unicode:
+        testformat(unicode(formatstr), *args)
+
+
+testboth("%.1d", (1,), "1")
+testboth("%.*d", (sys.maxint,1))  # expect overflow
+testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
+
+testboth("%f", (1.0,), "1.000000")
+# these are trying to test the limits of the internal magic-number-length
+# formatting buffer, if that number changes then these tests are less
+# effective
+testboth("%#.*g", (109, -1.e+49/3.))
+testboth("%#.*g", (110, -1.e+49/3.))
+testboth("%#.*g", (110, -1.e+100/3.))
+
+# test some ridiculously large precision, expect overflow
+testboth('%12.*f', (123456, 1.0))
+
+# Formatting of long integers. Overflow is not ok
+overflowok = 0
+testboth("%x", 10L, "a")
+testboth("%x", 100000000000L, "174876e800")
+testboth("%o", 10L, "12")
+testboth("%o", 100000000000L, "1351035564000")
+testboth("%d", 10L, "10")
+testboth("%d", 100000000000L, "100000000000")
+
+big = 123456789012345678901234567890L
+testboth("%d", big, "123456789012345678901234567890")
+testboth("%d", -big, "-123456789012345678901234567890")
+testboth("%5d", -big, "-123456789012345678901234567890")
+testboth("%31d", -big, "-123456789012345678901234567890")
+testboth("%32d", -big, " -123456789012345678901234567890")
+testboth("%-32d", -big, "-123456789012345678901234567890 ")
+testboth("%032d", -big, "-0123456789012345678901234567890")
+testboth("%-032d", -big, "-123456789012345678901234567890 ")
+testboth("%034d", -big, "-000123456789012345678901234567890")
+testboth("%034d", big, "0000123456789012345678901234567890")
+testboth("%0+34d", big, "+000123456789012345678901234567890")
+testboth("%+34d", big, "   +123456789012345678901234567890")
+testboth("%34d", big, "    123456789012345678901234567890")
+testboth("%.2d", big, "123456789012345678901234567890")
+testboth("%.30d", big, "123456789012345678901234567890")
+testboth("%.31d", big, "0123456789012345678901234567890")
+testboth("%32.31d", big, " 0123456789012345678901234567890")
+
+big = 0x1234567890abcdef12345L  # 21 hex digits
+testboth("%x", big, "1234567890abcdef12345")
+testboth("%x", -big, "-1234567890abcdef12345")
+testboth("%5x", -big, "-1234567890abcdef12345")
+testboth("%22x", -big, "-1234567890abcdef12345")
+testboth("%23x", -big, " -1234567890abcdef12345")
+testboth("%-23x", -big, "-1234567890abcdef12345 ")
+testboth("%023x", -big, "-01234567890abcdef12345")
+testboth("%-023x", -big, "-1234567890abcdef12345 ")
+testboth("%025x", -big, "-0001234567890abcdef12345")
+testboth("%025x", big, "00001234567890abcdef12345")
+testboth("%0+25x", big, "+0001234567890abcdef12345")
+testboth("%+25x", big, "   +1234567890abcdef12345")
+testboth("%25x", big, "    1234567890abcdef12345")
+testboth("%.2x", big, "1234567890abcdef12345")
+testboth("%.21x", big, "1234567890abcdef12345")
+testboth("%.22x", big, "01234567890abcdef12345")
+testboth("%23.22x", big, " 01234567890abcdef12345")
+testboth("%-23.22x", big, "01234567890abcdef12345 ")
+testboth("%X", big, "1234567890ABCDEF12345")
+testboth("%#X", big, "0X1234567890ABCDEF12345")
+testboth("%#x", big, "0x1234567890abcdef12345")
+testboth("%#x", -big, "-0x1234567890abcdef12345")
+testboth("%#.23x", -big, "-0x001234567890abcdef12345")
+testboth("%#+.23x", big, "+0x001234567890abcdef12345")
+testboth("%# .23x", big, " 0x001234567890abcdef12345")
+testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
+testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
+testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
+# next one gets two leading zeroes from precision, and another from the
+# 0 flag and the width
+testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
+# same, except no 0 flag
+testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
+
+big = 012345670123456701234567012345670L  # 32 octal digits
+testboth("%o", big, "12345670123456701234567012345670")
+testboth("%o", -big, "-12345670123456701234567012345670")
+testboth("%5o", -big, "-12345670123456701234567012345670")
+testboth("%33o", -big, "-12345670123456701234567012345670")
+testboth("%34o", -big, " -12345670123456701234567012345670")
+testboth("%-34o", -big, "-12345670123456701234567012345670 ")
+testboth("%034o", -big, "-012345670123456701234567012345670")
+testboth("%-034o", -big, "-12345670123456701234567012345670 ")
+testboth("%036o", -big, "-00012345670123456701234567012345670")
+testboth("%036o", big, "000012345670123456701234567012345670")
+testboth("%0+36o", big, "+00012345670123456701234567012345670")
+testboth("%+36o", big, "   +12345670123456701234567012345670")
+testboth("%36o", big, "    12345670123456701234567012345670")
+testboth("%.2o", big, "12345670123456701234567012345670")
+testboth("%.32o", big, "12345670123456701234567012345670")
+testboth("%.33o", big, "012345670123456701234567012345670")
+testboth("%34.33o", big, " 012345670123456701234567012345670")
+testboth("%-34.33o", big, "012345670123456701234567012345670 ")
+testboth("%o", big, "12345670123456701234567012345670")
+testboth("%#o", big, "012345670123456701234567012345670")
+testboth("%#o", -big, "-012345670123456701234567012345670")
+testboth("%#.34o", -big, "-0012345670123456701234567012345670")
+testboth("%#+.34o", big, "+0012345670123456701234567012345670")
+testboth("%# .34o", big, " 0012345670123456701234567012345670")
+testboth("%#+.34o", big, "+0012345670123456701234567012345670")
+testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
+testboth("%#-+37.34o", big, "+0012345670123456701234567012345670  ")
+testboth("%#+37.34o", big, "  +0012345670123456701234567012345670")
+# next one gets one leading zero from precision
+testboth("%.33o", big, "012345670123456701234567012345670")
+# base marker shouldn't change that, since "0" is redundant
+testboth("%#.33o", big, "012345670123456701234567012345670")
+# but reduce precision, and base marker should add a zero
+testboth("%#.32o", big, "012345670123456701234567012345670")
+# one leading zero from precision, and another from "0" flag & width
+testboth("%034.33o", big, "0012345670123456701234567012345670")
+# base marker shouldn't change that
+testboth("%0#34.33o", big, "0012345670123456701234567012345670")
+
+# Some small ints, in both Python int and long flavors).
+testboth("%d", 42, "42")
+testboth("%d", -42, "-42")
+testboth("%d", 42L, "42")
+testboth("%d", -42L, "-42")
+testboth("%#x", 1, "0x1")
+testboth("%#x", 1L, "0x1")
+testboth("%#X", 1, "0X1")
+testboth("%#X", 1L, "0X1")
+testboth("%#o", 1, "01")
+testboth("%#o", 1L, "01")
+testboth("%#o", 0, "0")
+testboth("%#o", 0L, "0")
+testboth("%o", 0, "0")
+testboth("%o", 0L, "0")
+testboth("%d", 0, "0")
+testboth("%d", 0L, "0")
+testboth("%#x", 0, "0x0")
+testboth("%#x", 0L, "0x0")
+testboth("%#X", 0, "0X0")
+testboth("%#X", 0L, "0X0")
+
+testboth("%x", 0x42, "42")
+# testboth("%x", -0x42, "ffffffbe") # Alas, that's specific to 32-bit machines
+testboth("%x", 0x42L, "42")
+testboth("%x", -0x42L, "-42")
+
+testboth("%o", 042, "42")
+# testboth("%o", -042, "37777777736") # Alas, that's specific to 32-bit machines
+testboth("%o", 042L, "42")
+testboth("%o", -042L, "-42")
+
+# Test exception for unknown format characters
+if verbose:
+    print 'Testing exceptions'
+
+def test_exc(formatstr, args, exception, excmsg):
+    # Check that formatstr % args raises `exception` with message `excmsg`.
+    try:
+        testformat(formatstr, args)
+    except exception, exc:
+        if str(exc) == excmsg:
+            if verbose:
+                print "yes"
+        else:
+            if verbose: print 'no'
+            print 'Unexpected ', exception, ':', repr(str(exc))
+    except:
+        if verbose: print 'no'
+        print 'Unexpected exception'
+        raise
+
+test_exc('abc %a', 1, ValueError,
+         "unsupported format character 'a' (0x61) at index 5")
+if have_unicode:
+    test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
+             "unsupported format character '?' (0x3000) at index 5")
diff --git a/lib-python/2.2/test/test_fpformat.py b/lib-python/2.2/test/test_fpformat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_fpformat.py
@@ -0,0 +1,75 @@
+'''
+   Tests for fpformat module
+   Nick Mathewson
+'''
+from test_support import run_unittest
+import unittest
+from fpformat import fix, sci, NotANumber
+
+StringType = type('')
+
+# Test the old and obsolescent fpformat module.
+#
+# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
+#                           sci(n,d) == "%.*e"%(d,n)
+#  for all reasonable numeric n and d, except that sci gives 3 exponent
+#  digits instead of 2.
+#
+# Differences only occur for unreasonable n and d.    <.2 wink>)
+
+class FpformatTest(unittest.TestCase):
+    """Check fpformat.fix() and fpformat.sci() against the equivalent
+    %-formatting ("%.*f" / "%.*e"), which they are expected to match
+    except that sci() emits three exponent digits instead of two."""
+
+    def checkFix(self, n, digits):
+        # fix(n, digits) must equal "%.*f" % (digits, float(n)).
+        result = fix(n, digits)
+        if isinstance(n, StringType):
+            n = repr(n)
+        expected = "%.*f" % (digits, float(n))
+
+        self.assertEquals(result, expected)
+
+    def checkSci(self, n, digits):
+        # sci(n, digits) must equal "%.*e" % (digits, float(n)), after
+        # widening the %-formatted exponent from two digits to three.
+        result = sci(n, digits)
+        if isinstance(n, StringType):
+            n = repr(n)
+        expected = "%.*e" % (digits, float(n))
+        # add the extra 0 if needed
+        num, exp = expected.split("e")
+        if len(exp) < 4:
+            exp = exp[0] + "0" + exp[1:]
+        expected = "%se%s" % (num, exp)
+
+        self.assertEquals(result, expected)
+
+    def test_basic_cases(self):
+        self.assertEquals(fix(100.0/3, 3), '33.333')
+        self.assertEquals(sci(100.0/3, 3), '3.333e+001')
+
+    def test_reasonable_values(self):
+        # Sweep 0..6 digits over a spread of magnitudes and signs.
+        for d in range(7):
+            for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
+                for realVal in (val, 1.0/val, -val, -1.0/val):
+                    self.checkFix(realVal, d)
+                    self.checkSci(realVal, d)
+
+    def test_failing_values(self):
+        # Now for 'unreasonable n and d'
+        self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000))
+        self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000')
+
+        # This behavior is inconsistent.  sci raises an exception; fix doesn't.
+        yacht = "Throatwobbler Mangrove"
+        self.assertEquals(fix(yacht, 10), yacht)
+        try:
+            sci(yacht, 10)
+        except NotANumber:
+            pass
+        else:
+            self.fail("No exception on non-numeric sci")
+
+
+def test_main():
+    # Standard entry point used by regrtest to run this module's suite.
+    run_unittest(FpformatTest)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_frozen.py b/lib-python/2.2/test/test_frozen.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_frozen.py
@@ -0,0 +1,26 @@
+# Test the frozen module defined in frozen.c.
+# Imports the frozen modules __hello__, __phello__ and __phello__.spam
+# (compiled into the interpreter) and verifies that the nonexistent
+# frozen submodule __phello__.foo does NOT import.
+#
+# NOTE(review): the three-expression raises below pass the caught
+# ImportError instance `x` as the third expression, but in Python 2 the
+# third expression of `raise` must be a traceback object (or None) — if
+# one of these imports actually failed, the raise itself would die with
+# a TypeError instead of reporting TestFailed.  Left as-is to stay
+# faithful to the upstream CPython 2.2 source being vendored.
+
+from test_support import TestFailed
+import sys, os
+
+try:
+    import __hello__
+except ImportError, x:
+    raise TestFailed, "import __hello__ failed:", x
+
+try:
+    import __phello__
+except ImportError, x:
+    raise TestFailed, "import __phello__ failed:", x
+
+try:
+    import __phello__.spam
+except ImportError, x:
+    raise TestFailed, "import __phello__.spam failed:", x
+
+# __phello__.foo is deliberately absent from the frozen table.
+try:
+    import __phello__.foo
+except ImportError:
+    pass
+else:
+    raise TestFailed, "import __phello__.foo should have failed"
diff --git a/lib-python/2.2/test/test_funcattrs.py b/lib-python/2.2/test/test_funcattrs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_funcattrs.py
@@ -0,0 +1,379 @@
+from test_support import verbose, TestFailed, verify
+import types
+
+class F:
+    def a(self):
+        pass
+
+def b():
+    'my docstring'
+    pass
+
+# setting attributes on functions
+try:
+    b.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+if b.__dict__ <> {}:
+    raise TestFailed, 'expected unassigned func.__dict__ to be {}'
+
+b.publish = 1
+if b.publish <> 1:
+    raise TestFailed, 'function attribute not set to expected value'
+
+docstring = 'its docstring'
+b.__doc__ = docstring
+if b.__doc__ <> docstring:
+    raise TestFailed, 'problem with setting __doc__ attribute'
+
+if 'publish' not in dir(b):
+    raise TestFailed, 'attribute not in dir()'
+
+try:
+    del b.__dict__
+except TypeError: pass
+else: raise TestFailed, 'del func.__dict__ expected TypeError'
+
+b.publish = 1
+try:
+    b.__dict__ = None
+except TypeError: pass
+else: raise TestFailed, 'func.__dict__ = None expected TypeError'
+
+d = {'hello': 'world'}
+b.__dict__ = d
+if b.func_dict is not d:
+    raise TestFailed, 'func.__dict__ assignment to dictionary failed'
+if b.hello <> 'world':
+    raise TestFailed, 'attribute after func.__dict__ assignment failed'
+
+f1 = F()
+f2 = F()
+
+try:
+    F.a.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+try:
+    f1.a.publish
+except AttributeError: pass
+else: raise TestFailed, 'expected AttributeError'
+
+# In Python 2.1 beta 1, we disallowed setting attributes on unbound methods
+# (it was already disallowed on bound methods).  See the PEP for details.
+try:
+    F.a.publish = 1
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+# But setting it explicitly on the underlying function object is okay.
+F.a.im_func.publish = 1
+
+if F.a.publish <> 1:
+    raise TestFailed, 'unbound method attribute not set to expected value'
+
+if f1.a.publish <> 1:
+    raise TestFailed, 'bound method attribute access did not work'
+
+if f2.a.publish <> 1:
+    raise TestFailed, 'bound method attribute access did not work'
+
+if 'publish' not in dir(F.a):
+    raise TestFailed, 'attribute not in dir()'
+
+try:
+    f1.a.publish = 0
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+# See the comment above about the change in semantics for Python 2.1b1
+try:
+    F.a.myclass = F
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected AttributeError or TypeError'
+
+F.a.im_func.myclass = F
+
+f1.a.myclass
+f2.a.myclass
+f1.a.myclass
+F.a.myclass
+
+if f1.a.myclass is not f2.a.myclass or \
+       f1.a.myclass is not F.a.myclass:
+    raise TestFailed, 'attributes were not the same'
+
+# try setting __dict__
+try:
+    F.a.__dict__ = (1, 2, 3)
+except (AttributeError, TypeError): pass
+else: raise TestFailed, 'expected TypeError or AttributeError'
+
+F.a.im_func.__dict__ = {'one': 11, 'two': 22, 'three': 33}
+
+if f1.a.two <> 22:
+    raise TestFailed, 'setting __dict__'
+
+from UserDict import UserDict
+d = UserDict({'four': 44, 'five': 55})
+
+try:
+    F.a.__dict__ = d
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+if f2.a.one <> f1.a.one <> F.a.one <> 11:
+    raise TestFailed
+
+# im_func may not be a Python method!
+import new
+F.id = new.instancemethod(id, None, F)
+
+eff = F()
+if eff.id() <> id(eff):
+    raise TestFailed
+
+try:
+    F.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    F.id.foo = 12
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+try:
+    F.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    eff.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+try:
+    eff.id.foo = 12
+except (AttributeError, TypeError): pass
+else: raise TestFailed
+
+try:
+    eff.id.foo
+except AttributeError: pass
+else: raise TestFailed
+
+# Regression test for a crash in pre-2.1a1
+def another():
+    pass
+
+try:
+    del another.__dict__
+except TypeError: pass
+else: raise TestFailed
+
+try:
+    del another.func_dict
+except TypeError: pass
+else: raise TestFailed
+
+try:
+    another.func_dict = None
+except TypeError: pass
+else: raise TestFailed
+
+try:
+    del another.bar
+except AttributeError: pass
+else: raise TestFailed
+
+# This isn't specifically related to function attributes, but it does test a
+# core dump regression in funcobject.c
+del another.func_defaults
+
+def foo():
+    pass
+
+def bar():
+    pass
+
+def temp():
+    print 1
+
+if foo==bar:
+    raise TestFailed
+
+d={}
+d[foo] = 1
+
+foo.func_code = temp.func_code
+
+d[foo]
+
+# Test all predefined function attributes systematically
+
+def cantset(obj, name, value):
+    # Assert that attribute `name` on `obj` exists but can be neither
+    # assigned nor deleted: both setattr and delattr must raise
+    # AttributeError or TypeError.
+    verify(hasattr(obj, name)) # Otherwise it's probably a typo
+    try:
+        setattr(obj, name, value)
+    except (AttributeError, TypeError):
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to set %s to %r" % (name, value)
+    try:
+        delattr(obj, name)
+    except (AttributeError, TypeError):
+        pass
+    else:
+        raise TestFailed, "shouldn't be able to del %s" % name
+
+def test_func_closure():
+    a = 12
+    def f(): print a
+    c = f.func_closure
+    verify(isinstance(c, tuple))
+    verify(len(c) == 1)
+    verify(c[0].__class__.__name__ == "cell") # don't have a type object handy
+    cantset(f, "func_closure", c)
+
+def test_func_doc():
+    def f(): pass
+    verify(f.__doc__ is None)
+    verify(f.func_doc is None)
+    f.__doc__ = "hello"
+    verify(f.__doc__ == "hello")
+    verify(f.func_doc == "hello")
+    del f.__doc__
+    verify(f.__doc__ is None)
+    verify(f.func_doc is None)
+    f.func_doc = "world"
+    verify(f.__doc__ == "world")
+    verify(f.func_doc == "world")
+    del f.func_doc
+    verify(f.func_doc is None)
+    verify(f.__doc__ is None)
+
+def test_func_globals():
+    def f(): pass
+    verify(f.func_globals is globals())
+    cantset(f, "func_globals", globals())
+
+def test_func_name():
+    def f(): pass
+    verify(f.__name__ == "f")
+    verify(f.func_name == "f")
+    cantset(f, "func_name", "f")
+    cantset(f, "__name__", "f")
+
+def test_func_code():
+    def f(): pass
+    def g(): print 12
+    verify(type(f.func_code) is types.CodeType)
+    f.func_code = g.func_code
+    cantset(f, "func_code", None)
+
+def test_func_defaults():
+    # func_defaults starts out None, is assignable, affects calls with
+    # missing arguments, and is deletable (after which calls without
+    # arguments must fail with TypeError again).
+    def f(a, b): return (a, b)
+    verify(f.func_defaults is None)
+    f.func_defaults = (1, 2)
+    verify(f.func_defaults == (1, 2))
+    # The assigned defaults fill in the omitted second argument.
+    verify(f(10) == (10, 2))
+    def g(a=1, b=2): return (a, b)
+    verify(g.func_defaults == (1, 2))
+    del g.func_defaults
+    verify(g.func_defaults is None)
+    try:
+        g()
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "shouldn't be allowed to call g() w/o defaults"
+
+def test_func_dict():
+    def f(): pass
+    a = f.__dict__
+    b = f.func_dict
+    verify(a == {})
+    verify(a is b)
+    f.hello = 'world'
+    verify(a == {'hello': 'world'})
+    verify(f.func_dict is a is f.__dict__)
+    f.func_dict = {}
+    verify(not hasattr(f, "hello"))
+    f.__dict__ = {'world': 'hello'}
+    verify(f.world == "hello")
+    verify(f.__dict__ is f.func_dict == {'world': 'hello'})
+    cantset(f, "func_dict", None)
+    cantset(f, "__dict__", None)
+
+def test_im_class():
+    class C:
+        def foo(self): pass
+    verify(C.foo.im_class is C)
+    verify(C().foo.im_class is C)
+    cantset(C.foo, "im_class", C)
+    cantset(C().foo, "im_class", C)
+
+def test_im_func():
+    def foo(self): pass
+    class C:
+        pass
+    C.foo = foo
+    verify(C.foo.im_func is foo)
+    verify(C().foo.im_func is foo)
+    cantset(C.foo, "im_func", foo)
+    cantset(C().foo, "im_func", foo)
+
+def test_im_self():
+    class C:
+        def foo(self): pass
+    verify(C.foo.im_self is None)
+    c = C()
+    verify(c.foo.im_self is c)
+    cantset(C.foo, "im_self", None)
+    cantset(c.foo, "im_self", c)
+
+def test_im_dict():
+    class C:
+        def foo(self): pass
+        foo.bar = 42
+    verify(C.foo.__dict__ == {'bar': 42})
+    verify(C().foo.__dict__ == {'bar': 42})
+    cantset(C.foo, "__dict__", C.foo.__dict__)
+    cantset(C().foo, "__dict__", C.foo.__dict__)
+
+def test_im_doc():
+    class C:
+        def foo(self): "hello"
+    verify(C.foo.__doc__ == "hello")
+    verify(C().foo.__doc__ == "hello")
+    cantset(C.foo, "__doc__", "hello")
+    cantset(C().foo, "__doc__", "hello")
+
+def test_im_name():
+    class C:
+        def foo(self): pass
+    verify(C.foo.__name__ == "foo")
+    verify(C().foo.__name__ == "foo")
+    cantset(C.foo, "__name__", "foo")
+    cantset(C().foo, "__name__", "foo")
+
+def testmore():
+    test_func_closure()
+    test_func_doc()
+    test_func_globals()
+    test_func_name()
+    test_func_code()
+    test_func_defaults()
+    test_func_dict()
+    # Tests for instance method attributes
+    test_im_class()
+    test_im_func()
+    test_im_self()
+    test_im_dict()
+    test_im_doc()
+    test_im_name()
+
+testmore()
diff --git a/lib-python/2.2/test/test_future.py b/lib-python/2.2/test/test_future.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_future.py
@@ -0,0 +1,47 @@
+# Test various flavors of legal and illegal future statements
+
+from test_support import unload
+import re
+
+# Matches "(<module>.py, line <num>" inside a SyntaxError message.
+# NOTE(review): the '.' before "py" is an unescaped any-character regex
+# atom, and the pattern is not a raw string (it relies on '\(', '\S' and
+# '\d' passing through string-literal escaping unchanged).  Harmless for
+# the messages matched here; kept verbatim from upstream.
+rx = re.compile('\((\S+).py, line (\d+)')
+
+def check_error_location(msg):
+    # Print the module name and line number extracted from a
+    # SyntaxError message via the groups of `rx`.
+    mo = rx.search(msg)
+    print "SyntaxError %s %s" % mo.group(1, 2)
+
+# The first two tests should work
+
+unload('test_future1')
+import test_future1
+
+unload('test_future2')
+import test_future2
+
+unload('test_future3')
+import test_future3
+
+# The remaining tests should fail
+try:
+    import badsyntax_future3
+except SyntaxError, msg:
+    check_error_location(str(msg))
+
+try:
+    import badsyntax_future4
+except SyntaxError, msg:
+    check_error_location(str(msg))
+
+try:
+    import badsyntax_future5
+except SyntaxError, msg:
+    check_error_location(str(msg))
+
+try:
+    import badsyntax_future6
+except SyntaxError, msg:
+    check_error_location(str(msg))
+
+try:
+    import badsyntax_future7
+except SyntaxError, msg:
+    check_error_location(str(msg))
diff --git a/lib-python/2.2/test/test_future1.py b/lib-python/2.2/test/test_future1.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_future1.py
@@ -0,0 +1,11 @@
+"""This is a test"""
+
+# Import the name nested_scopes twice to trigger SF bug #407394 (regression).
+from __future__ import nested_scopes, nested_scopes
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/test_future2.py b/lib-python/2.2/test/test_future2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_future2.py
@@ -0,0 +1,10 @@
+"""This is a test"""
+
+from __future__ import nested_scopes; import string
+
+def f(x):
+    def g(y):
+        return x + y
+    return g
+
+print f(2)(4)
diff --git a/lib-python/2.2/test/test_future3.py b/lib-python/2.2/test/test_future3.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_future3.py
@@ -0,0 +1,11 @@
+from __future__ import nested_scopes
+from __future__ import division
+from __future__ import nested_scopes
+
+def f(x):
+    def g(y):
+        return y // x
+    return g
+
+
+print f(2)(5)
diff --git a/lib-python/2.2/test/test_gc.py b/lib-python/2.2/test/test_gc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_gc.py
@@ -0,0 +1,346 @@
+from test_support import verify, verbose, TestFailed
+import sys
+import gc
+
+def expect(actual, expected, name):
+    # Fail the named sub-test unless `actual` equals `expected`
+    # (both are collection counts from gc.collect()).
+    if actual != expected:
+        raise TestFailed, "test_%s: actual %d, expected %d" % (
+            name, actual, expected)
+
+def expect_nonzero(actual, name):
+    if actual == 0:
+        raise TestFailed, "test_%s: unexpected zero" % name
+
+def run_test(name, thunk):
+    # Run one gc sub-test, narrating start/finish when verbose.
+    if verbose:
+        print "testing %s..." % name,
+    thunk()
+    if verbose:
+        print "ok"
+
+def test_list():
+    l = []
+    l.append(l)
+    gc.collect()
+    del l
+    expect(gc.collect(), 1, "list")
+
+def test_dict():
+    d = {}
+    d[1] = d
+    gc.collect()
+    del d
+    expect(gc.collect(), 1, "dict")
+
+def test_tuple():
+    # since tuples are immutable we close the loop with a list
+    l = []
+    t = (l,)
+    l.append(t)
+    gc.collect()
+    del t
+    del l
+    expect(gc.collect(), 2, "tuple")
+
+def test_class():
+    class A:
+        pass
+    A.a = A
+    gc.collect()
+    del A
+    expect_nonzero(gc.collect(), "class")
+
+def test_newstyleclass():
+    class A(object):
+        pass
+    gc.collect()
+    del A
+    expect_nonzero(gc.collect(), "staticclass")
+
+def test_instance():
+    class A:
+        pass
+    a = A()
+    a.a = a
+    gc.collect()
+    del a
+    expect_nonzero(gc.collect(), "instance")
+
+def test_newinstance():
+    class A(object):
+        pass
+    a = A()
+    a.a = a
+    gc.collect()
+    del a
+    expect_nonzero(gc.collect(), "newinstance")
+    class B(list):
+        pass
+    class C(B, A):
+        pass
+    a = C()
+    a.a = a
+    gc.collect()
+    del a
+    expect_nonzero(gc.collect(), "newinstance(2)")
+    del B, C
+    expect_nonzero(gc.collect(), "newinstance(3)")
+    A.a = A()
+    del A
+    expect_nonzero(gc.collect(), "newinstance(4)")
+    expect(gc.collect(), 0, "newinstance(5)")
+
+def test_method():
+    # Tricky: self.__init__ is a bound method, it references the instance.
+    class A:
+        def __init__(self):
+            self.init = self.__init__
+    a = A()
+    gc.collect()
+    del a
+    expect_nonzero(gc.collect(), "method")
+
+def test_finalizer():
+    # A() is uncollectable if it is part of a cycle, make sure it shows up
+    # in gc.garbage.
+    class A:
+        def __del__(self): pass
+    class B:
+        pass
+    a = A()
+    a.a = a
+    id_a = id(a)
+    b = B()
+    b.b = b
+    gc.collect()
+    del a
+    del b
+    expect_nonzero(gc.collect(), "finalizer")
+    for obj in gc.garbage:
+        if id(obj) == id_a:
+            del obj.a
+            break
+    else:
+        raise TestFailed, "didn't find obj in garbage (finalizer)"
+    gc.garbage.remove(obj)
+
+def test_function():
+    # Tricky: f -> d -> f, code should call d.clear() after the exec to
+    # break the cycle.
+    d = {}
+    exec("def f(): pass\n") in d
+    gc.collect()
+    del d
+    expect(gc.collect(), 2, "function")
+
+def test_frame():
+    def f():
+        frame = sys._getframe()
+    gc.collect()
+    f()
+    expect(gc.collect(), 1, "frame")
+
+
+def test_saveall():
+    # Verify that cyclic garbage like lists show up in gc.garbage if the
+    # SAVEALL option is enabled.
+    debug = gc.get_debug()
+    gc.set_debug(debug | gc.DEBUG_SAVEALL)
+    l = []
+    l.append(l)
+    id_l = id(l)
+    del l
+    gc.collect()
+    try:
+        for obj in gc.garbage:
+            if id(obj) == id_l:
+                del obj[:]
+                break
+        else:
+            raise TestFailed, "didn't find obj in garbage (saveall)"
+        gc.garbage.remove(obj)
+    finally:
+        gc.set_debug(debug)
+
+def test_del():
+    # __del__ methods can trigger collection; make that happen by
+    # forcing the collection threshold down to 1 allocation.
+    thresholds = gc.get_threshold()
+    gc.enable()
+    gc.set_threshold(1)
+
+    class A:
+        def __del__(self):
+            dir(self)
+    a = A()
+    del a
+
+    gc.disable()
+    # Restore the saved thresholds; apply() is the 2.2-era spelling of
+    # gc.set_threshold(*thresholds).
+    apply(gc.set_threshold, thresholds)
+
+class Ouch:
+    n = 0
+    def __del__(self):
+        Ouch.n = Ouch.n + 1
+        if Ouch.n % 7 == 0:
+            gc.collect()
+
+def test_trashcan():
+    # "trashcan" is a hack to prevent stack overflow when deallocating
+    # very deeply nested tuples etc.  It works in part by abusing the
+    # type pointer and refcount fields, and that can yield horrible
+    # problems when gc tries to traverse the structures.
+    # If this test fails (as it does in 2.0, 2.1 and 2.2), it will
+    # most likely die via segfault.
+
+    gc.enable()
+    N = 200
+    for count in range(3):
+        t = []
+        for i in range(N):
+            t = [t, Ouch()]
+        u = []
+        for i in range(N):
+            u = [u, Ouch()]
+        v = {}
+        for i in range(N):
+            v = {1: v, 2: Ouch()}
+    gc.disable()
+
+class Boom:
+    def __getattr__(self, someattribute):
+        del self.attr
+        raise AttributeError
+
+def test_boom():
+    a = Boom()
+    b = Boom()
+    a.attr = b
+    b.attr = a
+
+    gc.collect()
+    garbagelen = len(gc.garbage)
+    del a, b
+    # a<->b are in a trash cycle now.  Collection will invoke Boom.__getattr__
+    # (to see whether a and b have __del__ methods), and __getattr__ deletes
+    # the internal "attr" attributes as a side effect.  That causes the
+    # trash cycle to get reclaimed via refcounts falling to 0, thus mutating
+    # the trash graph as a side effect of merely asking whether __del__
+    # exists.  This used to (before 2.3b1) crash Python.  Now __getattr__
+    # isn't called.
+    expect(gc.collect(), 4, "boom")
+    expect(len(gc.garbage), garbagelen, "boom")
+
+class Boom2:
+    def __init__(self):
+        self.x = 0
+
+    def __getattr__(self, someattribute):
+        self.x += 1
+        if self.x > 1:
+            del self.attr
+        raise AttributeError
+
+def test_boom2():
+    a = Boom2()
+    b = Boom2()
+    a.attr = b
+    b.attr = a
+
+    gc.collect()
+    garbagelen = len(gc.garbage)
+    del a, b
+    # Much like test_boom(), except that __getattr__ doesn't break the
+    # cycle until the second time gc checks for __del__.  As of 2.3b1,
+    # there isn't a second time, so this simply cleans up the trash cycle.
+    # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get reclaimed
+    # this way.
+    expect(gc.collect(), 4, "boom2")
+    expect(len(gc.garbage), garbagelen, "boom2")
+
+# boom__new and boom2_new are exactly like boom and boom2, except use
+# new-style classes.
+
+class Boom_New(object):
+    def __getattr__(self, someattribute):
+        del self.attr
+        raise AttributeError
+
+def test_boom_new():
+    a = Boom_New()
+    b = Boom_New()
+    a.attr = b
+    b.attr = a
+
+    gc.collect()
+    garbagelen = len(gc.garbage)
+    del a, b
+    expect(gc.collect(), 4, "boom_new")
+    expect(len(gc.garbage), garbagelen, "boom_new")
+
+class Boom2_New(object):
+    def __init__(self):
+        self.x = 0
+
+    def __getattr__(self, someattribute):
+        self.x += 1
+        if self.x > 1:
+            del self.attr
+        raise AttributeError
+
+def test_boom2_new():
+    a = Boom2_New()
+    b = Boom2_New()
+    a.attr = b
+    b.attr = a
+
+    gc.collect()
+    garbagelen = len(gc.garbage)
+    del a, b
+    expect(gc.collect(), 4, "boom2_new")
+    expect(len(gc.garbage), garbagelen, "boom2_new")
+
+def test_all():
+    gc.collect() # Delete 2nd generation garbage
+    run_test("lists", test_list)
+    run_test("dicts", test_dict)
+    run_test("tuples", test_tuple)
+    run_test("classes", test_class)
+    run_test("new style classes", test_newstyleclass)
+    run_test("instances", test_instance)
+    run_test("new instances", test_newinstance)
+    run_test("methods", test_method)
+    run_test("functions", test_function)
+    run_test("frames", test_frame)
+    run_test("finalizers", test_finalizer)
+    run_test("__del__", test_del)
+    run_test("saveall", test_saveall)
+    run_test("trashcan", test_trashcan)
+    run_test("boom", test_boom)
+    run_test("boom2", test_boom2)
+    run_test("boom_new", test_boom_new)
+    run_test("boom2_new", test_boom2_new)
+
+def test():
+    if verbose:
+        print "disabling automatic collection"
+    enabled = gc.isenabled()
+    gc.disable()
+    verify(not gc.isenabled() )
+    debug = gc.get_debug()
+    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
+
+    try:
+        test_all()
+    finally:
+        gc.set_debug(debug)
+        # test gc.enable() even if GC is disabled by default
+        if verbose:
+            print "restoring automatic collection"
+        # make sure to always test gc.enable()
+        gc.enable()
+        verify(gc.isenabled())
+        if not enabled:
+            gc.disable()
+
+
+test()
diff --git a/lib-python/2.2/test/test_gdbm.py b/lib-python/2.2/test/test_gdbm.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_gdbm.py
@@ -0,0 +1,46 @@
+#! /usr/bin/env python
+"""Test script for the gdbm module
+   Roger E. Masse
+"""
+
+import gdbm
+from gdbm import error
+from test_support import verbose, verify, TestFailed
+
+# NOTE(review): fixed, predictable path in /tmp — racy and spoofable on a
+# shared machine; kept verbatim from upstream CPython 2.2.
+filename= '/tmp/delete_me'
+
+# 'c' creates the database if it does not exist.
+g = gdbm.open(filename, 'c')
+verify(g.keys() == [])
+g['a'] = 'b'
+g['12345678910'] = '019237410982340912840198242'
+a = g.keys()
+if verbose:
+    print 'Test gdbm file keys: ', a
+
+g.has_key('a')
+g.close()
+# Access after close must raise gdbm.error.
+try:
+    g['a']
+except error:
+    pass
+else:
+    raise TestFailed, "expected gdbm.error accessing closed database"
+# Reopen in each supported mode: read, write, new (truncate).
+g = gdbm.open(filename, 'r')
+g.close()
+g = gdbm.open(filename, 'w')
+g.close()
+g = gdbm.open(filename, 'n')
+g.close()
+# An invalid flag string must be rejected with gdbm.error.
+try:
+    g = gdbm.open(filename, 'rx')
+    g.close()
+except error:
+    pass
+else:
+    raise TestFailed, "expected gdbm.error when passing invalid open flags"
+
+# Best-effort cleanup; the bare except deliberately swallows any failure.
+try:
+    import os
+    os.unlink(filename)
+except:
+    pass
diff --git a/lib-python/2.2/test/test_generators.py b/lib-python/2.2/test/test_generators.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_generators.py
@@ -0,0 +1,1386 @@
+from __future__ import generators
+
+tutorial_tests = """
+Let's try a simple generator:
+
+    >>> def f():
+    ...    yield 1
+    ...    yield 2
+
+    >>> for i in f():
+    ...     print i
+    1
+    2
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    2
+
+"Falling off the end" stops the generator:
+
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 2, in g
+    StopIteration
+
+"return" also stops the generator:
+
+    >>> def f():
+    ...     yield 1
+    ...     return
+    ...     yield 2 # never reached
+    ...
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 3, in f
+    StopIteration
+    >>> g.next() # once stopped, can't be resumed
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+
+"raise StopIteration" stops the generator too:
+
+    >>> def f():
+    ...     yield 1
+    ...     raise StopIteration
+    ...     yield 2 # never reached
+    ...
+    >>> g = f()
+    >>> g.next()
+    1
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+    >>> g.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+
+However, they are not exactly equivalent:
+
+    >>> def g1():
+    ...     try:
+    ...         return
+    ...     except:
+    ...         yield 1
+    ...
+    >>> list(g1())
+    []
+
+    >>> def g2():
+    ...     try:
+    ...         raise StopIteration
+    ...     except:
+    ...         yield 42
+    >>> print list(g2())
+    [42]
+
+This may be surprising at first:
+
+    >>> def g3():
+    ...     try:
+    ...         return
+    ...     finally:
+    ...         yield 1
+    ...
+    >>> list(g3())
+    [1]
+
+Let's create an alternate range() function implemented as a generator:
+
+    >>> def yrange(n):
+    ...     for i in range(n):
+    ...         yield i
+    ...
+    >>> list(yrange(5))
+    [0, 1, 2, 3, 4]
+
+Generators always return to the most recent caller:
+
+    >>> def creator():
+    ...     r = yrange(5)
+    ...     print "creator", r.next()
+    ...     return r
+    ...
+    >>> def caller():
+    ...     r = creator()
+    ...     for i in r:
+    ...             print "caller", i
+    ...
+    >>> caller()
+    creator 0
+    caller 1
+    caller 2
+    caller 3
+    caller 4
+
+Generators can call other generators:
+
+    >>> def zrange(n):
+    ...     for i in yrange(n):
+    ...         yield i
+    ...
+    >>> list(zrange(5))
+    [0, 1, 2, 3, 4]
+
+"""
+
+# The examples from PEP 255.
+
+pep_tests = """
+
+Specification:  Yield
+
+    Restriction:  A generator cannot be resumed while it is actively
+    running:
+
+    >>> def g():
+    ...     i = me.next()
+    ...     yield i
+    >>> me = g()
+    >>> me.next()
+    Traceback (most recent call last):
+     ...
+      File "<string>", line 2, in g
+    ValueError: generator already executing
+
+Specification: Return
+
+    Note that return isn't always equivalent to raising StopIteration:  the
+    difference lies in how enclosing try/except constructs are treated.
+    For example,
+
+        >>> def f1():
+        ...     try:
+        ...         return
+        ...     except:
+        ...        yield 1
+        >>> print list(f1())
+        []
+
+    because, as in any function, return simply exits, but
+
+        >>> def f2():
+        ...     try:
+        ...         raise StopIteration
+        ...     except:
+        ...         yield 42
+        >>> print list(f2())
+        [42]
+
+    because StopIteration is captured by a bare "except", as is any
+    exception.
+
+Specification: Generators and Exception Propagation
+
+    >>> def f():
+    ...     return 1//0
+    >>> def g():
+    ...     yield f()  # the zero division exception propagates
+    ...     yield 42   # and we'll never get here
+    >>> k = g()
+    >>> k.next()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "<stdin>", line 2, in g
+      File "<stdin>", line 2, in f
+    ZeroDivisionError: integer division or modulo by zero
+    >>> k.next()  # and the generator cannot be resumed
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+    StopIteration
+    >>>
+
+Specification: Try/Except/Finally
+
+    >>> def f():
+    ...     try:
+    ...         yield 1
+    ...         try:
+    ...             yield 2
+    ...             1//0
+    ...             yield 3  # never get here
+    ...         except ZeroDivisionError:
+    ...             yield 4
+    ...             yield 5
+    ...             raise
+    ...         except:
+    ...             yield 6
+    ...         yield 7     # the "raise" above stops this
+    ...     except:
+    ...         yield 8
+    ...     yield 9
+    ...     try:
+    ...         x = 12
+    ...     finally:
+    ...         yield 10
+    ...     yield 11
+    >>> print list(f())
+    [1, 2, 4, 5, 8, 9, 10, 11]
+    >>>
+
+Guido's binary tree example.
+
+    >>> # A binary tree class.
+    >>> class Tree:
+    ...
+    ...     def __init__(self, label, left=None, right=None):
+    ...         self.label = label
+    ...         self.left = left
+    ...         self.right = right
+    ...
+    ...     def __repr__(self, level=0, indent="    "):
+    ...         s = level*indent + `self.label`
+    ...         if self.left:
+    ...             s = s + "\\n" + self.left.__repr__(level+1, indent)
+    ...         if self.right:
+    ...             s = s + "\\n" + self.right.__repr__(level+1, indent)
+    ...         return s
+    ...
+    ...     def __iter__(self):
+    ...         return inorder(self)
+
+    >>> # Create a Tree from a list.
+    >>> def tree(list):
+    ...     n = len(list)
+    ...     if n == 0:
+    ...         return []
+    ...     i = n // 2
+    ...     return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
+
+    >>> # Show it off: create a tree.
+    >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+    >>> # A recursive generator that generates Tree leaves in in-order.
+    >>> def inorder(t):
+    ...     if t:
+    ...         for x in inorder(t.left):
+    ...             yield x
+    ...         yield t.label
+    ...         for x in inorder(t.right):
+    ...             yield x
+
+    >>> # Show it off: create a tree.
+    ... t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+    ... # Print the nodes of the tree in in-order.
+    ... for x in t:
+    ...     print x,
+    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+
+    >>> # A non-recursive generator.
+    >>> def inorder(node):
+    ...     stack = []
+    ...     while node:
+    ...         while node.left:
+    ...             stack.append(node)
+    ...             node = node.left
+    ...         yield node.label
+    ...         while not node.right:
+    ...             try:
+    ...                 node = stack.pop()
+    ...             except IndexError:
+    ...                 return
+    ...             yield node.label
+    ...         node = node.right
+
+    >>> # Exercise the non-recursive generator.
+    >>> for x in t:
+    ...     print x,
+    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+
+"""
+
+# Examples from Iterator-List and Python-Dev and c.l.py.
+
+email_tests = """
+
+The difference between yielding None and returning it.
+
+>>> def g():
+...     for i in range(3):
+...         yield None
+...     yield None
+...     return
+>>> list(g())
+[None, None, None, None]
+
+Ensure that explicitly raising StopIteration acts like any other exception
+in try/except, not like a return.
+
+>>> def g():
+...     yield 1
+...     try:
+...         raise StopIteration
+...     except:
+...         yield 2
+...     yield 3
+>>> list(g())
+[1, 2, 3]
+
+Next one was posted to c.l.py.
+
+>>> def gcomb(x, k):
+...     "Generate all combinations of k elements from list x."
+...
+...     if k > len(x):
+...         return
+...     if k == 0:
+...         yield []
+...     else:
+...         first, rest = x[0], x[1:]
+...         # A combination does or doesn't contain first.
+...         # If it does, the remainder is a k-1 comb of rest.
+...         for c in gcomb(rest, k-1):
+...             c.insert(0, first)
+...             yield c
+...         # If it doesn't contain first, it's a k comb of rest.
+...         for c in gcomb(rest, k):
+...             yield c
+
+>>> seq = range(1, 5)
+>>> for k in range(len(seq) + 2):
+...     print "%d-combs of %s:" % (k, seq)
+...     for c in gcomb(seq, k):
+...         print "   ", c
+0-combs of [1, 2, 3, 4]:
+    []
+1-combs of [1, 2, 3, 4]:
+    [1]
+    [2]
+    [3]
+    [4]
+2-combs of [1, 2, 3, 4]:
+    [1, 2]
+    [1, 3]
+    [1, 4]
+    [2, 3]
+    [2, 4]
+    [3, 4]
+3-combs of [1, 2, 3, 4]:
+    [1, 2, 3]
+    [1, 2, 4]
+    [1, 3, 4]
+    [2, 3, 4]
+4-combs of [1, 2, 3, 4]:
+    [1, 2, 3, 4]
+5-combs of [1, 2, 3, 4]:
+
+From the Iterators list, about the types of these things.
+
+>>> def g():
+...     yield 1
+...
+>>> type(g)
+<type 'function'>
+>>> i = g()
+>>> type(i)
+<type 'generator'>
+>>> [s for s in dir(i) if not s.startswith('_')]
+['gi_frame', 'gi_running', 'next']
+>>> print i.next.__doc__
+x.next() -> the next value, or raise StopIteration
+>>> iter(i) is i
+1
+>>> import types
+>>> isinstance(i, types.GeneratorType)
+1
+
+And more, added later.
+
+>>> i.gi_running
+0
+>>> type(i.gi_frame)
+<type 'frame'>
+>>> i.gi_running = 42
+Traceback (most recent call last):
+  ...
+TypeError: readonly attribute
+>>> def g():
+...     yield me.gi_running
+>>> me = g()
+>>> me.gi_running
+0
+>>> me.next()
+1
+>>> me.gi_running
+0
+
+A clever union-find implementation from c.l.py, due to David Eppstein.
+Sent: Friday, June 29, 2001 12:16 PM
+To: python-list at python.org
+Subject: Re: PEP 255: Simple Generators
+
+>>> class disjointSet:
+...     def __init__(self, name):
+...         self.name = name
+...         self.parent = None
+...         self.generator = self.generate()
+...
+...     def generate(self):
+...         while not self.parent:
+...             yield self
+...         for x in self.parent.generator:
+...             yield x
+...
+...     def find(self):
+...         return self.generator.next()
+...
+...     def union(self, parent):
+...         if self.parent:
+...             raise ValueError("Sorry, I'm not a root!")
+...         self.parent = parent
+...
+...     def __str__(self):
+...         return self.name
+
+>>> names = "ABCDEFGHIJKLM"
+>>> sets = [disjointSet(name) for name in names]
+>>> roots = sets[:]
+
+>>> import random
+>>> random.seed(42)
+>>> while 1:
+...     for s in sets:
+...         print "%s->%s" % (s, s.find()),
+...     print
+...     if len(roots) > 1:
+...         s1 = random.choice(roots)
+...         roots.remove(s1)
+...         s2 = random.choice(roots)
+...         s1.union(s2)
+...         print "merged", s1, "into", s2
+...     else:
+...         break
+A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged D into G
+A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged C into F
+A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
+merged L into A
+A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
+merged H into E
+A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
+merged B into E
+A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
+merged J into G
+A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
+merged E into G
+A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
+merged M into G
+A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
+merged I into K
+A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
+merged K into A
+A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
+merged F into A
+A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
+merged A into G
+A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
+"""
+
+# Fun tests (for sufficiently warped notions of "fun").
+
+fun_tests = """
+
+Build up to a recursive Sieve of Eratosthenes generator.
+
+>>> def firstn(g, n):
+...     return [g.next() for i in range(n)]
+
+>>> def intsfrom(i):
+...     while 1:
+...         yield i
+...         i += 1
+
+>>> firstn(intsfrom(5), 7)
+[5, 6, 7, 8, 9, 10, 11]
+
+>>> def exclude_multiples(n, ints):
+...     for i in ints:
+...         if i % n:
+...             yield i
+
+>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
+[1, 2, 4, 5, 7, 8]
+
+>>> def sieve(ints):
+...     prime = ints.next()
+...     yield prime
+...     not_divisible_by_prime = exclude_multiples(prime, ints)
+...     for p in sieve(not_divisible_by_prime):
+...         yield p
+
+>>> primes = sieve(intsfrom(2))
+>>> firstn(primes, 20)
+[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
+
+
+Another famous problem:  generate all integers of the form
+    2**i * 3**j  * 5**k
+in increasing order, where i,j,k >= 0.  Trickier than it may look at first!
+Try writing it without generators, and correctly, and without generating
+3 internal results for each result output.
+
+>>> def times(n, g):
+...     for i in g:
+...         yield n * i
+>>> firstn(times(10, intsfrom(1)), 10)
+[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+
+>>> def merge(g, h):
+...     ng = g.next()
+...     nh = h.next()
+...     while 1:
+...         if ng < nh:
+...             yield ng
+...             ng = g.next()
+...         elif ng > nh:
+...             yield nh
+...             nh = h.next()
+...         else:
+...             yield ng
+...             ng = g.next()
+...             nh = h.next()
+
+The following works, but is doing a whale of a lot of redundant work --
+it's not clear how to get the internal uses of m235 to share a single
+generator.  Note that me_times2 (etc) each need to see every element in the
+result sequence.  So this is an example where lazy lists are more natural
+(you can look at the head of a lazy list any number of times).
+
+>>> def m235():
+...     yield 1
+...     me_times2 = times(2, m235())
+...     me_times3 = times(3, m235())
+...     me_times5 = times(5, m235())
+...     for i in merge(merge(me_times2,
+...                          me_times3),
+...                    me_times5):
+...         yield i
+
+Don't print "too many" of these -- the implementation above is extremely
+inefficient:  each call of m235() leads to 3 recursive calls, and in
+turn each of those 3 more, and so on, and so on, until we've descended
+enough levels to satisfy the print stmts.  Very odd:  when I printed 5
+lines of results below, this managed to screw up Win98's malloc in "the
+usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
+address space, and it *looked* like a very slow leak.
+
+>>> result = m235()
+>>> for i in range(3):
+...     print firstn(result, 15)
+[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
+[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
+
+Heh.  Here's one way to get a shared list, complete with an excruciating
+namespace renaming trick.  The *pretty* part is that the times() and merge()
+functions can be reused as-is, because they only assume their stream
+arguments are iterable -- a LazyList is the same as a generator to times().
+
+>>> class LazyList:
+...     def __init__(self, g):
+...         self.sofar = []
+...         self.fetch = g.next
+...
+...     def __getitem__(self, i):
+...         sofar, fetch = self.sofar, self.fetch
+...         while i >= len(sofar):
+...             sofar.append(fetch())
+...         return sofar[i]
+
+>>> def m235():
+...     yield 1
+...     # Gack:  m235 below actually refers to a LazyList.
+...     me_times2 = times(2, m235)
+...     me_times3 = times(3, m235)
+...     me_times5 = times(5, m235)
+...     for i in merge(merge(me_times2,
+...                          me_times3),
+...                    me_times5):
+...         yield i
+
+Print as many of these as you like -- *this* implementation is memory-
+efficient.
+
+>>> m235 = LazyList(m235())
+>>> for i in range(5):
+...     print [m235[j] for j in range(15*i, 15*(i+1))]
+[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
+[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
+[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
+[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
+
+
+Ye olde Fibonacci generator, LazyList style.
+
+>>> def fibgen(a, b):
+...
+...     def sum(g, h):
+...         while 1:
+...             yield g.next() + h.next()
+...
+...     def tail(g):
+...         g.next()    # throw first away
+...         for x in g:
+...             yield x
+...
+...     yield a
+...     yield b
+...     for s in sum(iter(fib),
+...                  tail(iter(fib))):
+...         yield s
+
+>>> fib = LazyList(fibgen(1, 2))
+>>> firstn(iter(fib), 17)
+[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
+"""
+
+# syntax_tests mostly provokes SyntaxErrors.  Also fiddling with #if 0
+# hackery.
+
+syntax_tests = """
+
+>>> def f():
+...     return 22
+...     yield 1
+Traceback (most recent call last):
+  ...
+SyntaxError: 'return' with argument inside generator (<string>, line 2)
+
+>>> def f():
+...     yield 1
+...     return 22
+Traceback (most recent call last):
+  ...
+SyntaxError: 'return' with argument inside generator (<string>, line 3)
+
+"return None" is not the same as "return" in a generator:
+
+>>> def f():
+...     yield 1
+...     return None
+Traceback (most recent call last):
+  ...
+SyntaxError: 'return' with argument inside generator (<string>, line 3)
+
+This one is fine:
+
+>>> def f():
+...     yield 1
+...     return
+
+>>> def f():
+...     try:
+...         yield 1
+...     finally:
+...         pass
+Traceback (most recent call last):
+  ...
+SyntaxError: 'yield' not allowed in a 'try' block with a 'finally' clause (<string>, line 3)
+
+>>> def f():
+...     try:
+...         try:
+...             1//0
+...         except ZeroDivisionError:
+...             yield 666  # bad because *outer* try has finally
+...         except:
+...             pass
+...     finally:
+...         pass
+Traceback (most recent call last):
+  ...
+SyntaxError: 'yield' not allowed in a 'try' block with a 'finally' clause (<string>, line 6)
+
+But this is fine:
+
+>>> def f():
+...     try:
+...         try:
+...             yield 12
+...             1//0
+...         except ZeroDivisionError:
+...             yield 666
+...         except:
+...             try:
+...                 x = 12
+...             finally:
+...                 yield 12
+...     except:
+...         return
+>>> list(f())
+[12, 666]
+
+>>> def f():
+...    yield
+Traceback (most recent call last):
+SyntaxError: invalid syntax
+
+>>> def f():
+...    if 0:
+...        yield
+Traceback (most recent call last):
+SyntaxError: invalid syntax
+
+>>> def f():
+...     if 0:
+...         yield 1
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...    if "":
+...        yield None
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...     return
+...     try:
+...         if x==4:
+...             pass
+...         elif 0:
+...             try:
+...                 1//0
+...             except SyntaxError:
+...                 pass
+...             else:
+...                 if 0:
+...                     while 12:
+...                         x += 1
+...                         yield 2 # don't blink
+...                         f(a, b, c, d, e)
+...         else:
+...             pass
+...     except:
+...         x = 1
+...     return
+>>> type(f())
+<type 'generator'>
+
+>>> def f():
+...     if 0:
+...         def g():
+...             yield 1
+...
+>>> type(f())
+<type 'NoneType'>
+
+>>> def f():
+...     if 0:
+...         class C:
+...             def __init__(self):
+...                 yield 1
+...             def f(self):
+...                 yield 2
+>>> type(f())
+<type 'NoneType'>
+
+>>> def f():
+...     if 0:
+...         return
+...     if 0:
+...         yield 2
+>>> type(f())
+<type 'generator'>
+
+
+>>> def f():
+...     if 0:
+...         lambda x:  x        # shouldn't trigger here
+...         return              # or here
+...         def f(i):
+...             return 2*i      # or here
+...         if 0:
+...             return 3        # but *this* sucks (line 8)
+...     if 0:
+...         yield 2             # because it's a generator
+Traceback (most recent call last):
+SyntaxError: 'return' with argument inside generator (<string>, line 8)
+
+This one caused a crash (see SF bug 567538):
+
+>>> def f():
+...     for i in range(3):
+...         try:
+...             continue
+...         finally:
+...             yield i
+...
+>>> g = f()
+>>> print g.next()
+0
+>>> print g.next()
+1
+>>> print g.next()
+2
+>>> print g.next()
+Traceback (most recent call last):
+StopIteration
+"""
+
+# conjoin is a simple backtracking generator, named in honor of Icon's
+# "conjunction" control structure.  Pass a list of no-argument functions
+# that return iterable objects.  Easiest to explain by example:  assume the
+# function list [x, y, z] is passed.  Then conjoin acts like:
+#
+# def g():
+#     values = [None] * 3
+#     for values[0] in x():
+#         for values[1] in y():
+#             for values[2] in z():
+#                 yield values
+#
+# So some 3-lists of values *may* be generated, each time we successfully
+# get into the innermost loop.  If an iterator fails (is exhausted) before
+# then, it "backtracks" to get the next value from the nearest enclosing
+# iterator (the one "to the left"), and starts all over again at the next
+# slot (pumps a fresh iterator).  Of course this is most useful when the
+# iterators have side-effects, so that which values *can* be generated at
+# each slot depend on the values iterated at previous slots.
+
+def conjoin(gs):
+
+    values = [None] * len(gs)
+
+    def gen(i, values=values):
+        if i >= len(gs):
+            yield values
+        else:
+            for values[i] in gs[i]():
+                for x in gen(i+1):
+                    yield x
+
+    for x in gen(0):
+        yield x
+
+# That works fine, but recursing a level and checking i against len(gs) for
+# each item produced is inefficient.  By doing manual loop unrolling across
+# generator boundaries, it's possible to eliminate most of that overhead.
+# This isn't worth the bother *in general* for generators, but conjoin() is
+# a core building block for some CPU-intensive generator applications.
+
+def conjoin(gs):
+
+    n = len(gs)
+    values = [None] * n
+
+    # Do one loop nest at a time recursively, until the number of loop nests
+    # remaining is divisible by 3.
+
+    def gen(i, values=values):
+        if i >= n:
+            yield values
+
+        elif (n-i) % 3:
+            ip1 = i+1
+            for values[i] in gs[i]():
+                for x in gen(ip1):
+                    yield x
+
+        else:
+            for x in _gen3(i):
+                yield x
+
+    # Do three loop nests at a time, recursing only if at least three more
+    # remain.  Don't call directly:  this is an internal optimization for
+    # gen's use.
+
+    def _gen3(i, values=values):
+        assert i < n and (n-i) % 3 == 0
+        ip1, ip2, ip3 = i+1, i+2, i+3
+        g, g1, g2 = gs[i : ip3]
+
+        if ip3 >= n:
+            # These are the last three, so we can yield values directly.
+            for values[i] in g():
+                for values[ip1] in g1():
+                    for values[ip2] in g2():
+                        yield values
+
+        else:
+            # At least 6 loop nests remain; peel off 3 and recurse for the
+            # rest.
+            for values[i] in g():
+                for values[ip1] in g1():
+                    for values[ip2] in g2():
+                        for x in _gen3(ip3):
+                            yield x
+
+    for x in gen(0):
+        yield x
+
+# And one more approach:  For backtracking apps like the Knight's Tour
+# solver below, the number of backtracking levels can be enormous (one
+# level per square, for the Knight's Tour, so that e.g. a 100x100 board
+# needs 10,000 levels).  In such cases Python is likely to run out of
+# stack space due to recursion.  So here's a recursion-free version of
+# conjoin too.
+# NOTE WELL:  This allows large problems to be solved with only trivial
+# demands on stack space.  Without explicitly resumable generators, this is
+# much harder to achieve.  OTOH, this is much slower (up to a factor of 2)
+# than the fancy unrolled recursive conjoin.
+
+def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
+    n = len(gs)
+    values = [None] * n
+    iters  = [None] * n
+    _StopIteration = StopIteration  # make local because caught a *lot*
+    i = 0
+    while 1:
+        # Descend.
+        try:
+            while i < n:
+                it = iters[i] = gs[i]().next
+                values[i] = it()
+                i += 1
+        except _StopIteration:
+            pass
+        else:
+            assert i == n
+            yield values
+
+        # Backtrack until an older iterator can be resumed.
+        i -= 1
+        while i >= 0:
+            try:
+                values[i] = iters[i]()
+                # Success!  Start fresh at next level.
+                i += 1
+                break
+            except _StopIteration:
+                # Continue backtracking.
+                i -= 1
+        else:
+            assert i < 0
+            break
+
+# A conjoin-based N-Queens solver.
+
+class Queens:
+    def __init__(self, n):
+        self.n = n
+        rangen = range(n)
+
+        # Assign a unique int to each column and diagonal.
+        # columns:  n of those, range(n).
+        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
+        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
+        # based.
+        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
+        # each, smallest i+j is 0, largest is 2n-2.
+
+        # For each square, compute a bit vector of the columns and
+        # diagonals it covers, and for each row compute a function that
+        # generates the possibilities for the columns in that row.
+        self.rowgenerators = []
+        for i in rangen:
+            rowuses = [(1L << j) |                  # column ordinal
+                       (1L << (n + i-j + n-1)) |    # NW-SE ordinal
+                       (1L << (n + 2*n-1 + i+j))    # NE-SW ordinal
+                            for j in rangen]
+
+            def rowgen(rowuses=rowuses):
+                for j in rangen:
+                    uses = rowuses[j]
+                    if uses & self.used == 0:
+                        self.used |= uses
+                        yield j
+                        self.used &= ~uses
+
+            self.rowgenerators.append(rowgen)
+
+    # Generate solutions.
+    def solve(self):
+        self.used = 0
+        for row2col in conjoin(self.rowgenerators):
+            yield row2col
+
+    def printsolution(self, row2col):
+        n = self.n
+        assert n == len(row2col)
+        sep = "+" + "-+" * n
+        print sep
+        for i in range(n):
+            squares = [" " for j in range(n)]
+            squares[row2col[i]] = "Q"
+            print "|" + "|".join(squares) + "|"
+            print sep
+
+# A conjoin-based Knight's Tour solver.  This is pretty sophisticated
+# (e.g., when used with flat_conjoin above, and passing hard=1 to the
+# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
+# creating 10s of thousands of generators then!), and is lengthy.
+
+class Knights:
+    def __init__(self, m, n, hard=0):
+        self.m, self.n = m, n
+
+        # solve() will set up succs[i] to be a list of square #i's
+        # successors.
+        succs = self.succs = []
+
+        # Remove i0 from each of its successor's successor lists, i.e.
+        # successors can't go back to i0 again.  Return 0 if we can
+        # detect this makes a solution impossible, else return 1.
+
+        def remove_from_successors(i0, len=len):
+            # If we remove all exits from a free square, we're dead:
+            # even if we move to it next, we can't leave it again.
+            # If we create a square with one exit, we must visit it next;
+            # else somebody else will have to visit it, and since there's
+            # only one adjacent, there won't be a way to leave it again.
+            # Finally, if we create more than one free square with a
+            # single exit, we can only move to one of them next, leaving
+            # the other one a dead end.
+            ne0 = ne1 = 0
+            for i in succs[i0]:
+                s = succs[i]
+                s.remove(i0)
+                e = len(s)
+                if e == 0:
+                    ne0 += 1
+                elif e == 1:
+                    ne1 += 1
+            return ne0 == 0 and ne1 < 2
+
+        # Put i0 back in each of its successor's successor lists.
+
+        def add_to_successors(i0):
+            for i in succs[i0]:
+                succs[i].append(i0)
+
+        # Generate the first move.
+        def first():
+            if m < 1 or n < 1:
+                return
+
+            # Since we're looking for a cycle, it doesn't matter where we
+            # start.  Starting in a corner makes the 2nd move easy.
+            corner = self.coords2index(0, 0)
+            remove_from_successors(corner)
+            self.lastij = corner
+            yield corner
+            add_to_successors(corner)
+
+        # Generate the second moves.
+        def second():
+            corner = self.coords2index(0, 0)
+            assert self.lastij == corner  # i.e., we started in the corner
+            if m < 3 or n < 3:
+                return
+            assert len(succs[corner]) == 2
+            assert self.coords2index(1, 2) in succs[corner]
+            assert self.coords2index(2, 1) in succs[corner]
+            # Only two choices.  Whichever we pick, the other must be the
+            # square picked on move m*n, as it's the only way to get back
+            # to (0, 0).  Save its index in self.final so that moves before
+            # the last know it must be kept free.
+            for i, j in (1, 2), (2, 1):
+                this  = self.coords2index(i, j)
+                final = self.coords2index(3-i, 3-j)
+                self.final = final
+
+                remove_from_successors(this)
+                succs[final].append(corner)
+                self.lastij = this
+                yield this
+                succs[final].remove(corner)
+                add_to_successors(this)
+
+        # Generate moves 3 thru m*n-1.
+        def advance(len=len):
+            # If some successor has only one exit, must take it.
+            # Else favor successors with fewer exits.
+            candidates = []
+            for i in succs[self.lastij]:
+                e = len(succs[i])
+                assert e > 0, "else remove_from_successors() pruning flawed"
+                if e == 1:
+                    candidates = [(e, i)]
+                    break
+                candidates.append((e, i))
+            else:
+                candidates.sort()
+
+            for e, i in candidates:
+                if i != self.final:
+                    if remove_from_successors(i):
+                        self.lastij = i
+                        yield i
+                    add_to_successors(i)
+
+        # Generate moves 3 thru m*n-1.  Alternative version using a
+        # stronger (but more expensive) heuristic to order successors.
+        # Since the # of backtracking levels is m*n, a poor move early on
+        # can take eons to undo.  Smallest square board for which this
+        # matters a lot is 52x52.
+        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
+            # If some successor has only one exit, must take it.
+            # Else favor successors with fewer exits.
+            # Break ties via max distance from board centerpoint (favor
+            # corners and edges whenever possible).
+            candidates = []
+            for i in succs[self.lastij]:
+                e = len(succs[i])
+                assert e > 0, "else remove_from_successors() pruning flawed"
+                if e == 1:
+                    candidates = [(e, 0, i)]
+                    break
+                i1, j1 = self.index2coords(i)
+                d = (i1 - vmid)**2 + (j1 - hmid)**2
+                candidates.append((e, -d, i))
+            else:
+                candidates.sort()
+
+            for e, d, i in candidates:
+                if i != self.final:
+                    if remove_from_successors(i):
+                        self.lastij = i
+                        yield i
+                    add_to_successors(i)
+
+        # Generate the last move.
+        def last():
+            assert self.final in succs[self.lastij]
+            yield self.final
+
+        if m*n < 4:
+            self.squaregenerators = [first]
+        else:
+            self.squaregenerators = [first, second] + \
+                [hard and advance_hard or advance] * (m*n - 3) + \
+                [last]
+
+    def coords2index(self, i, j):
+        assert 0 <= i < self.m
+        assert 0 <= j < self.n
+        return i * self.n + j
+
+    def index2coords(self, index):
+        assert 0 <= index < self.m * self.n
+        return divmod(index, self.n)
+
+    def _init_board(self):
+        succs = self.succs
+        del succs[:]
+        m, n = self.m, self.n
+        c2i = self.coords2index
+
+        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
+                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
+        rangen = range(n)
+        for i in range(m):
+            for j in rangen:
+                s = [c2i(i+io, j+jo) for io, jo in offsets
+                                     if 0 <= i+io < m and
+                                        0 <= j+jo < n]
+                succs.append(s)
+
+    # Generate solutions.
+    def solve(self):
+        self._init_board()
+        for x in conjoin(self.squaregenerators):
+            yield x
+
+    def printsolution(self, x):
+        m, n = self.m, self.n
+        assert len(x) == m*n
+        w = len(str(m*n))
+        format = "%" + str(w) + "d"
+
+        squares = [[None] * n for i in range(m)]
+        k = 1
+        for i in x:
+            i1, j1 = self.index2coords(i)
+            squares[i1][j1] = format % k
+            k += 1
+
+        sep = "+" + ("-" * w + "+") * n
+        print sep
+        for i in range(m):
+            row = squares[i]
+            print "|" + "|".join(row) + "|"
+            print sep
+
+conjoin_tests = """
+
+Generate the 3-bit binary numbers in order.  This illustrates dumbest-
+possible use of conjoin, just to generate the full cross-product.
+
+>>> for c in conjoin([lambda: iter((0, 1))] * 3):
+...     print c
+[0, 0, 0]
+[0, 0, 1]
+[0, 1, 0]
+[0, 1, 1]
+[1, 0, 0]
+[1, 0, 1]
+[1, 1, 0]
+[1, 1, 1]
+
+For efficiency in typical backtracking apps, conjoin() yields the same list
+object each time.  So if you want to save away a full account of its
+generated sequence, you need to copy its results.
+
+>>> def gencopy(iterator):
+...     for x in iterator:
+...         yield x[:]
+
+>>> for n in range(10):
+...     all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
+...     print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
+0 1 1 1
+1 2 1 1
+2 4 1 1
+3 8 1 1
+4 16 1 1
+5 32 1 1
+6 64 1 1
+7 128 1 1
+8 256 1 1
+9 512 1 1
+
+And run an 8-queens solver.
+
+>>> q = Queens(8)
+>>> LIMIT = 2
+>>> count = 0
+>>> for row2col in q.solve():
+...     count += 1
+...     if count <= LIMIT:
+...         print "Solution", count
+...         q.printsolution(row2col)
+Solution 1
++-+-+-+-+-+-+-+-+
+|Q| | | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | |Q| | | |
++-+-+-+-+-+-+-+-+
+| | | | | | | |Q|
++-+-+-+-+-+-+-+-+
+| | | | | |Q| | |
++-+-+-+-+-+-+-+-+
+| | |Q| | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | | |Q| |
++-+-+-+-+-+-+-+-+
+| |Q| | | | | | |
++-+-+-+-+-+-+-+-+
+| | | |Q| | | | |
++-+-+-+-+-+-+-+-+
+Solution 2
++-+-+-+-+-+-+-+-+
+|Q| | | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | |Q| | |
++-+-+-+-+-+-+-+-+
+| | | | | | | |Q|
++-+-+-+-+-+-+-+-+
+| | |Q| | | | | |
++-+-+-+-+-+-+-+-+
+| | | | | | |Q| |
++-+-+-+-+-+-+-+-+
+| | | |Q| | | | |
++-+-+-+-+-+-+-+-+
+| |Q| | | | | | |
++-+-+-+-+-+-+-+-+
+| | | | |Q| | | |
++-+-+-+-+-+-+-+-+
+
+>>> print count, "solutions in all."
+92 solutions in all.
+
+And run a Knight's Tour on a 10x10 board.  Note that there are about
+20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
+
+>>> k = Knights(10, 10)
+>>> LIMIT = 2
+>>> count = 0
+>>> for x in k.solve():
+...     count += 1
+...     if count <= LIMIT:
+...         print "Solution", count
+...         k.printsolution(x)
+...     else:
+...         break
+Solution 1
++---+---+---+---+---+---+---+---+---+---+
+|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
++---+---+---+---+---+---+---+---+---+---+
+| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
++---+---+---+---+---+---+---+---+---+---+
+| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
++---+---+---+---+---+---+---+---+---+---+
+| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
++---+---+---+---+---+---+---+---+---+---+
+| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
++---+---+---+---+---+---+---+---+---+---+
+| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
++---+---+---+---+---+---+---+---+---+---+
+| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
++---+---+---+---+---+---+---+---+---+---+
+| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
++---+---+---+---+---+---+---+---+---+---+
+| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
++---+---+---+---+---+---+---+---+---+---+
+| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
++---+---+---+---+---+---+---+---+---+---+
+Solution 2
++---+---+---+---+---+---+---+---+---+---+
+|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
++---+---+---+---+---+---+---+---+---+---+
+| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
++---+---+---+---+---+---+---+---+---+---+
+| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
++---+---+---+---+---+---+---+---+---+---+
+| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
++---+---+---+---+---+---+---+---+---+---+
+| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
++---+---+---+---+---+---+---+---+---+---+
+| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
++---+---+---+---+---+---+---+---+---+---+
+| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
++---+---+---+---+---+---+---+---+---+---+
+| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
++---+---+---+---+---+---+---+---+---+---+
+| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
++---+---+---+---+---+---+---+---+---+---+
+| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
++---+---+---+---+---+---+---+---+---+---+
+"""
+
+__test__ = {"tut":      tutorial_tests,
+            "pep":      pep_tests,
+            "email":    email_tests,
+            "fun":      fun_tests,
+            "syntax":   syntax_tests,
+            "conjoin":  conjoin_tests}
+
+# Magic test name that regrtest.py invokes *after* importing this module.
+# This worms around a bootstrap problem.
+# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
+# so this works as expected in both ways of running regrtest.
+def test_main(verbose=None):
+    import doctest, test_support, test_generators
+    if 0:   # change to 1 to run forever (to check for leaks)
+        while 1:
+            doctest.master = None
+            test_support.run_doctest(test_generators, verbose)
+            print ".",
+    else:
+        test_support.run_doctest(test_generators, verbose)
+
+# This part isn't needed for regrtest, but for running the test directly.
+if __name__ == "__main__":
+    test_main(1)
diff --git a/lib-python/2.2/test/test_getargs.py b/lib-python/2.2/test/test_getargs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_getargs.py
@@ -0,0 +1,21 @@
+"""Test the internal getargs.c implementation
+
+ PyArg_ParseTuple() is defined here.
+
+The test here is not intended to test all of the module, just the
+single case that failed between 2.1 and 2.2a2.
+"""
+
+# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
+# The s code will cause a Unicode conversion to occur.  This test
+# verify that the error is propagated properly from the C code back to
+# Python.
+
+# XXX If the encoding succeeds using the current default encoding,
+# this test will fail because it does not test the right part of the
+# PyArg_ParseTuple() implementation.
+import marshal
+try:
+    marshal.loads(u"\222")
+except UnicodeError:
+    pass
diff --git a/lib-python/2.2/test/test_getopt.py b/lib-python/2.2/test/test_getopt.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_getopt.py
@@ -0,0 +1,110 @@
+# test_getopt.py
+# David Goodger <dgoodger at bigfoot.com> 2000-08-19
+
+import getopt
+from getopt import GetoptError
+from test_support import verify, verbose
+
+def expectException(teststr, expected, failure=AssertionError):
+    """Executes a statement passed in teststr, and raises an exception
+       (failure) if the expected exception is *not* raised."""
+    try:
+        exec teststr
+    except expected:
+        pass
+    else:
+        raise failure
+
+if verbose:
+    print 'Running tests on getopt.short_has_arg'
+verify(getopt.short_has_arg('a', 'a:'))
+verify(not getopt.short_has_arg('a', 'a'))
+expectException("tmp = getopt.short_has_arg('a', 'b')", GetoptError)
+expectException("tmp = getopt.short_has_arg('a', '')", GetoptError)
+
+if verbose:
+    print 'Running tests on getopt.long_has_args'
+has_arg, option = getopt.long_has_args('abc', ['abc='])
+verify(has_arg)
+verify(option == 'abc')
+has_arg, option = getopt.long_has_args('abc', ['abc'])
+verify(not has_arg)
+verify(option == 'abc')
+has_arg, option = getopt.long_has_args('abc', ['abcd'])
+verify(not has_arg)
+verify(option == 'abcd')
+expectException("has_arg, option = getopt.long_has_args('abc', ['def'])",
+                GetoptError)
+expectException("has_arg, option = getopt.long_has_args('abc', [])",
+                GetoptError)
+expectException("has_arg, option = " + \
+                     "getopt.long_has_args('abc', ['abcd','abcde'])",
+                GetoptError)
+
+if verbose:
+    print 'Running tests on getopt.do_shorts'
+opts, args = getopt.do_shorts([], 'a', 'a', [])
+verify(opts == [('-a', '')])
+verify(args == [])
+opts, args = getopt.do_shorts([], 'a1', 'a:', [])
+verify(opts == [('-a', '1')])
+verify(args == [])
+#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
+#verify(opts == [('-a', '1')])
+#verify(args == [])
+opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
+verify(opts == [('-a', '1')])
+verify(args == [])
+opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
+verify(opts == [('-a', '1')])
+verify(args == ['2'])
+expectException("opts, args = getopt.do_shorts([], 'a1', 'a', [])",
+                GetoptError)
+expectException("opts, args = getopt.do_shorts([], 'a', 'a:', [])",
+                GetoptError)
+
+if verbose:
+    print 'Running tests on getopt.do_longs'
+opts, args = getopt.do_longs([], 'abc', ['abc'], [])
+verify(opts == [('--abc', '')])
+verify(args == [])
+opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
+verify(opts == [('--abc', '1')])
+verify(args == [])
+opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
+verify(opts == [('--abcd', '1')])
+verify(args == [])
+opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
+verify(opts == [('--abc', '')])
+verify(args == [])
+# Much like the preceding, except with a non-alpha character ("-") in
+# option name that precedes "="; failed in
+# http://sourceforge.net/bugs/?func=detailbug&bug_id=126863&group_id=5470
+opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
+verify(opts == [('--foo', '42')])
+verify(args == [])
+expectException("opts, args = getopt.do_longs([], 'abc=1', ['abc'], [])",
+                GetoptError)
+expectException("opts, args = getopt.do_longs([], 'abc', ['abc='], [])",
+                GetoptError)
+
+# note: the empty string between '-a' and '--beta' is significant:
+# it simulates an empty string option argument ('-a ""') on the command line.
+cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a', '',
+           '--beta', 'arg1', 'arg2']
+
+if verbose:
+    print 'Running tests on getopt.getopt'
+opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
+verify(opts == [('-a', '1'), ('-b', ''), ('--alpha', '2'), ('--beta', ''),
+                ('-a', '3'), ('-a', ''), ('--beta', '')] )
+# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
+# accounted for in the code that calls getopt().
+verify(args == ['arg1', 'arg2'])
+
+expectException(
+    "opts, args = getopt.getopt(cmdline, 'a:b', ['alpha', 'beta'])",
+    GetoptError)
+
+if verbose:
+    print "Module getopt: tests completed successfully."
diff --git a/lib-python/2.2/test/test_gettext.py b/lib-python/2.2/test/test_gettext.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_gettext.py
@@ -0,0 +1,200 @@
+import os
+import base64
+import gettext
+
+
+def test_api_1(localedir, mofile):
+    print 'test api 1'
+
+    # Test basic interface
+    os.environ['LANGUAGE'] = 'xx'
+
+    print 'installing gettext'
+    gettext.install('gettext', localedir)
+
+    # test some translations
+    print _('albatross')
+    print _(u'mullusk')
+    print _(r'Raymond Luxury Yach-t')
+    print _(ur'nudge nudge')
+
+    # double quotes
+    print _("albatross")
+    print _(u"mullusk")
+    print _(r"Raymond Luxury Yach-t")
+    print _(ur"nudge nudge")
+
+    # triple single quotes
+    print _('''albatross''')
+    print _(u'''mullusk''')
+    print _(r'''Raymond Luxury Yach-t''')
+    print _(ur'''nudge nudge''')
+
+    # triple double quotes
+    print _("""albatross""")
+    print _(u"""mullusk""")
+    print _(r"""Raymond Luxury Yach-t""")
+    print _(ur"""nudge nudge""")
+
+    # multiline strings
+    print _('''This module provides internationalization and localization
+support for your Python programs by providing an interface to the GNU
+gettext message catalog library.''')
+
+    # test the alternative interface
+    fp = open(os.path.join(mofile), 'rb')
+    t = gettext.GNUTranslations(fp)
+    fp.close()
+
+    t.install()
+
+    print _('nudge nudge')
+
+    # try unicode return type
+    t.install(unicode=1)
+
+    print _('mullusk')
+
+
+
+def test_api_2(localedir, mofile):
+    print 'test api 2'
+
+    gettext.bindtextdomain('gettext', localedir)
+    print gettext.bindtextdomain('gettext') == localedir
+
+    gettext.textdomain('gettext')
+    # should return 'gettext'
+    print gettext.textdomain()
+
+    # local function override builtin
+    _ = gettext.gettext
+
+    # test some translations
+    print _('albatross')
+    print _(u'mullusk')
+    print _(r'Raymond Luxury Yach-t')
+    print _(ur'nudge nudge')
+
+    # double quotes
+    print _("albatross")
+    print _(u"mullusk")
+    print _(r"Raymond Luxury Yach-t")
+    print _(ur"nudge nudge")
+
+    # triple single quotes
+    print _('''albatross''')
+    print _(u'''mullusk''')
+    print _(r'''Raymond Luxury Yach-t''')
+    print _(ur'''nudge nudge''')
+
+    # triple double quotes
+    print _("""albatross""")
+    print _(u"""mullusk""")
+    print _(r"""Raymond Luxury Yach-t""")
+    print _(ur"""nudge nudge""")
+
+    # multiline strings
+    print _('''This module provides internationalization and localization
+support for your Python programs by providing an interface to the GNU
+gettext message catalog library.''')
+
+    # Now test dgettext()
+    def _(message):
+        return gettext.dgettext('gettext')
+
+
+
+GNU_MO_DATA = '''\
+3hIElQAAAAAFAAAAHAAAAEQAAAAHAAAAbAAAAAAAAACIAAAAFQAAAIkAAAChAAAAnwAAAAcAAABB
+AQAACwAAAEkBAAAZAQAAVQEAABYAAABvAgAAoQAAAIYCAAAFAAAAKAMAAAkAAAAuAwAAAQAAAAQA
+AAACAAAAAAAAAAUAAAAAAAAAAwAAAABSYXltb25kIEx1eHVyeSBZYWNoLXQAVGhpcyBtb2R1bGUg
+cHJvdmlkZXMgaW50ZXJuYXRpb25hbGl6YXRpb24gYW5kIGxvY2FsaXphdGlvbgpzdXBwb3J0IGZv
+ciB5b3VyIFB5dGhvbiBwcm9ncmFtcyBieSBwcm92aWRpbmcgYW4gaW50ZXJmYWNlIHRvIHRoZSBH
+TlUKZ2V0dGV4dCBtZXNzYWdlIGNhdGFsb2cgbGlicmFyeS4AbXVsbHVzawBudWRnZSBudWRnZQBQ
+cm9qZWN0LUlkLVZlcnNpb246IDIuMApQTy1SZXZpc2lvbi1EYXRlOiAyMDAwLTA4LTI5IDEyOjE5
+LTA0OjAwCkxhc3QtVHJhbnNsYXRvcjogQmFycnkgQS4gV2Fyc2F3IDxiYXJyeUBweXRob24ub3Jn
+PgpMYW5ndWFnZS1UZWFtOiBYWCA8cHl0aG9uLWRldkBweXRob24ub3JnPgpNSU1FLVZlcnNpb246
+IDEuMApDb250ZW50LVR5cGU6IHRleHQvcGxhaW47IGNoYXJzZXQ9a29pOF9yCkNvbnRlbnQtVHJh
+bnNmZXItRW5jb2Rpbmc6IG5vbmUKR2VuZXJhdGVkLUJ5OiBweWdldHRleHQucHkgMS4xCgBUaHJv
+YXR3b2JibGVyIE1hbmdyb3ZlAEd1dmYgemJxaHlyIGNlYml2cXJmIHZhZ3JlYW5ndmJhbnl2bW5n
+dmJhIG5hcSB5YnBueXZtbmd2YmEKZmhjY2JlZyBzYmUgbGJoZSBDbGd1YmEgY2VidGVuemYgb2wg
+Y2ViaXZxdmF0IG5hIHZhZ3Jlc25wciBnYiBndXIgVEFICnRyZ2dya2cgenJmZm50ciBwbmdueWJ0
+IHl2b2VuZWwuAGJhY29uAHdpbmsgd2luawA=
+'''
+
+
+LOCALEDIR = os.path.join('xx', 'LC_MESSAGES')
+MOFILE = os.path.join(LOCALEDIR, 'gettext.mo')
+
+def setup():
+    os.makedirs(LOCALEDIR)
+    fp = open(MOFILE, 'wb')
+    fp.write(base64.decodestring(GNU_MO_DATA))
+    fp.close()
+
+def teardown():
+    os.unlink(MOFILE)
+    os.removedirs(LOCALEDIR)
+
+
+try:
+    setup()
+    test_api_1(os.curdir, MOFILE)
+    test_api_2(os.curdir, MOFILE)
+finally:
+    teardown()
+    pass
+
+
+
+# For reference, here's the .po file used to created the .mo data above.
+
+'''
+# Dummy translation for Python's test_gettext.py module.
+# Copyright (C) 2001 Python Software Foundation
+# Barry Warsaw <barry at python.org>, 2000.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: 2.0\n"
+"PO-Revision-Date: 2000-08-29 12:19-04:00\n"
+"Last-Translator: Barry A. Warsaw <barry at python.org>\n"
+"Language-Team: XX <python-dev at python.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=koi8_r\n"
+"Content-Transfer-Encoding: none\n"
+"Generated-By: pygettext.py 1.1\n"
+
+#: test_gettext.py:19 test_gettext.py:25 test_gettext.py:31 test_gettext.py:37
+#: test_gettext.py:51 test_gettext.py:80 test_gettext.py:86 test_gettext.py:92
+#: test_gettext.py:98
+msgid "nudge nudge"
+msgstr "wink wink"
+
+#: test_gettext.py:16 test_gettext.py:22 test_gettext.py:28 test_gettext.py:34
+#: test_gettext.py:77 test_gettext.py:83 test_gettext.py:89 test_gettext.py:95
+msgid "albatross"
+msgstr ""
+
+#: test_gettext.py:18 test_gettext.py:24 test_gettext.py:30 test_gettext.py:36
+#: test_gettext.py:79 test_gettext.py:85 test_gettext.py:91 test_gettext.py:97
+msgid "Raymond Luxury Yach-t"
+msgstr "Throatwobbler Mangrove"
+
+#: test_gettext.py:17 test_gettext.py:23 test_gettext.py:29 test_gettext.py:35
+#: test_gettext.py:56 test_gettext.py:78 test_gettext.py:84 test_gettext.py:90
+#: test_gettext.py:96
+msgid "mullusk"
+msgstr "bacon"
+
+#: test_gettext.py:40 test_gettext.py:101
+msgid ""
+"This module provides internationalization and localization\n"
+"support for your Python programs by providing an interface to the GNU\n"
+"gettext message catalog library."
+msgstr ""
+"Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba\n"
+"fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH\n"
+"trggrkg zrffntr pngnybt yvoenel."
+'''
diff --git a/lib-python/2.2/test/test_gl.py b/lib-python/2.2/test/test_gl.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_gl.py
@@ -0,0 +1,150 @@
+#! /usr/bin/env python
+"""Very simple test script for the SGI gl library extension module
+    taken mostly from the documentation.
+    Roger E. Masse
+"""
+from test_support import verbose, TestSkipped
+import gl, GL, time
+
+glattrs = ['RGBcolor', 'RGBcursor', 'RGBmode', 'RGBrange', 'RGBwritemask',
+'__doc__', '__name__', 'addtopup', 'altgetmatrix', 'arc', 'arcf',
+'arcfi', 'arcfs', 'arci', 'arcs', 'attachcursor', 'backbuffer',
+'backface', 'bbox2', 'bbox2i', 'bbox2s', 'bgnclosedline', 'bgnline',
+'bgnpoint', 'bgnpolygon', 'bgnsurface', 'bgntmesh', 'bgntrim',
+'blankscreen', 'blanktime', 'blendfunction', 'blink', 'c3f', 'c3i',
+'c3s', 'c4f', 'c4i', 'c4s', 'callobj', 'charstr', 'chunksize', 'circ',
+'circf', 'circfi', 'circfs', 'circi', 'circs', 'clear',
+'clearhitcode', 'clkoff', 'clkon', 'closeobj', 'cmode', 'cmov',
+'cmov2', 'cmov2i', 'cmov2s', 'cmovi', 'cmovs', 'color', 'colorf',
+'compactify', 'concave', 'cpack', 'crv', 'crvn', 'curorigin',
+'cursoff', 'curson', 'curstype', 'curvebasis', 'curveit',
+'curveprecision', 'cyclemap', 'czclear', 'defbasis', 'defcursor',
+'deflinestyle', 'delobj', 'deltag', 'depthcue', 'devport', 'dglclose',
+'dglopen', 'dither', 'dopup', 'doublebuffer', 'draw', 'draw2',
+'draw2i', 'draw2s', 'drawi', 'drawmode', 'draws', 'editobj',
+'endclosedline', 'endfullscrn', 'endline', 'endpick', 'endpoint',
+'endpolygon', 'endpupmode', 'endselect', 'endsurface', 'endtmesh',
+'endtrim', 'finish', 'font', 'foreground', 'freepup', 'frontbuffer',
+'fudge', 'fullscrn', 'gRGBcolor', 'gRGBmask', 'gammaramp', 'gbegin',
+'gconfig', 'genobj', 'gentag', 'getbackface', 'getbuffer',
+'getbutton', 'getcmmode', 'getcolor', 'getcpos', 'getcursor',
+'getdcm', 'getdepth', 'getdescender', 'getdisplaymode', 'getdrawmode',
+'getfont', 'getgdesc', 'getgpos', 'getheight', 'gethitcode',
+'getlsbackup', 'getlsrepeat', 'getlstyle', 'getlwidth', 'getmap',
+'getmatrix', 'getmcolor', 'getmmode', 'getmonitor',
+'getnurbsproperty', 'getopenobj', 'getorigin', 'getothermonitor',
+'getpattern', 'getplanes', 'getport', 'getresetls', 'getscrmask',
+'getshade', 'getsize', 'getsm', 'gettp', 'getvaluator', 'getvideo',
+'getviewport', 'getwritemask', 'getzbuffer', 'gewrite', 'gflush',
+'ginit', 'glcompat', 'greset', 'gselect', 'gsync', 'gversion',
+'iconsize', 'icontitle', 'imakebackground', 'initnames', 'ismex',
+'isobj', 'isqueued', 'istag', 'keepaspect', 'lRGBrange', 'lampoff',
+'lampon', 'linesmooth', 'linewidth', 'lmbind', 'lmcolor', 'lmdef',
+'loadmatrix', 'loadname', 'logicop', 'lookat', 'lrectread',
+'lrectwrite', 'lsbackup', 'lsetdepth', 'lshaderange', 'lsrepeat',
+'makeobj', 'maketag', 'mapcolor', 'mapw', 'mapw2', 'maxsize',
+'minsize', 'mmode', 'move', 'move2', 'move2i', 'move2s', 'movei',
+'moves', 'multimap', 'multmatrix', 'n3f', 'newpup', 'newtag',
+'noborder', 'noise', 'noport', 'normal', 'nurbscurve', 'nurbssurface',
+'nvarray', 'objdelete', 'objinsert', 'objreplace', 'onemap', 'ortho',
+'ortho2', 'overlay', 'packrect', 'pagecolor', 'pagewritemask',
+'passthrough', 'patch', 'patchbasis', 'patchcurves', 'patchprecision',
+'pclos', 'pdr', 'pdr2', 'pdr2i', 'pdr2s', 'pdri', 'pdrs',
+'perspective', 'pick', 'picksize', 'pixmode', 'pmv', 'pmv2', 'pmv2i',
+'pmv2s', 'pmvi', 'pmvs', 'pnt', 'pnt2', 'pnt2i', 'pnt2s', 'pnti',
+'pnts', 'pntsmooth', 'polarview', 'polf', 'polf2', 'polf2i', 'polf2s',
+'polfi', 'polfs', 'poly', 'poly2', 'poly2i', 'poly2s', 'polyi',
+'polys', 'popattributes', 'popmatrix', 'popname', 'popviewport',
+'prefposition', 'prefsize', 'pupmode', 'pushattributes', 'pushmatrix',
+'pushname', 'pushviewport', 'pwlcurve', 'qdevice', 'qenter', 'qgetfd',
+'qread', 'qreset', 'qtest', 'rcrv', 'rcrvn', 'rdr', 'rdr2', 'rdr2i',
+'rdr2s', 'rdri', 'rdrs', 'readdisplay', 'readsource', 'rect',
+'rectcopy', 'rectf', 'rectfi', 'rectfs', 'recti', 'rects', 'rectzoom',
+'resetls', 'reshapeviewport', 'ringbell', 'rmv', 'rmv2', 'rmv2i',
+'rmv2s', 'rmvi', 'rmvs', 'rot', 'rotate', 'rpatch', 'rpdr', 'rpdr2',
+'rpdr2i', 'rpdr2s', 'rpdri', 'rpdrs', 'rpmv', 'rpmv2', 'rpmv2i',
+'rpmv2s', 'rpmvi', 'rpmvs', 'sbox', 'sboxf', 'sboxfi', 'sboxfs',
+'sboxi', 'sboxs', 'scale', 'screenspace', 'scrmask', 'setbell',
+'setcursor', 'setdepth', 'setlinestyle', 'setmap', 'setmonitor',
+'setnurbsproperty', 'setpattern', 'setpup', 'setshade', 'setvaluator',
+'setvideo', 'shademodel', 'shaderange', 'singlebuffer', 'smoothline',
+'spclos', 'splf', 'splf2', 'splf2i', 'splf2s', 'splfi', 'splfs',
+'stepunit', 'strwidth', 'subpixel', 'swapbuffers', 'swapinterval',
+'swaptmesh', 'swinopen', 'textcolor', 'textinit', 'textport',
+'textwritemask', 'tie', 'tpoff', 'tpon', 'translate', 'underlay',
+'unpackrect', 'unqdevice', 'v2d', 'v2f', 'v2i', 'v2s', 'v3d', 'v3f',
+'v3i', 'v3s', 'v4d', 'v4f', 'v4i', 'v4s', 'varray', 'videocmd',
+'viewport', 'vnarray', 'winattach', 'winclose', 'winconstraints',
+'windepth', 'window', 'winget', 'winmove', 'winopen', 'winpop',
+'winposition', 'winpush', 'winset', 'wintitle', 'wmpack', 'writemask',
+'writepixels', 'xfpt', 'xfpt2', 'xfpt2i', 'xfpt2s', 'xfpt4', 'xfpt4i',
+'xfpt4s', 'xfpti', 'xfpts', 'zbuffer', 'zclear', 'zdraw', 'zfunction',
+'zsource', 'zwritemask']
+
+def main():
+    # insure that we at least have an X display before continuing.
+    import os
+    try:
+        display = os.environ['DISPLAY']
+    except:
+        raise TestSkipped, "No $DISPLAY -- skipping gl test"
+
+    # touch all the attributes of gl without doing anything
+    if verbose:
+        print 'Touching gl module attributes...'
+    for attr in glattrs:
+        if verbose:
+            print 'touching: ', attr
+        getattr(gl, attr)
+
+    # create a small 'Crisscross' window
+    if verbose:
+        print 'Creating a small "CrissCross" window...'
+        print 'foreground'
+    gl.foreground()
+    if verbose:
+        print 'prefposition'
+    gl.prefposition(500, 900, 500, 900)
+    if verbose:
+        print 'winopen "CrissCross"'
+    w = gl.winopen('CrissCross')
+    if verbose:
+        print 'clear'
+    gl.clear()
+    if verbose:
+        print 'ortho2'
+    gl.ortho2(0.0, 400.0, 0.0, 400.0)
+    if verbose:
+        print 'color WHITE'
+    gl.color(GL.WHITE)
+    if verbose:
+        print 'color RED'
+    gl.color(GL.RED)
+    if verbose:
+        print 'bgnline'
+    gl.bgnline()
+    if verbose:
+        print 'v2f'
+    gl.v2f(0.0, 0.0)
+    gl.v2f(400.0, 400.0)
+    if verbose:
+        print 'endline'
+    gl.endline()
+    if verbose:
+        print 'bgnline'
+    gl.bgnline()
+    if verbose:
+        print 'v2i'
+    gl.v2i(400, 0)
+    gl.v2i(0, 400)
+    if verbose:
+        print 'endline'
+    gl.endline()
+    if verbose:
+        print 'Displaying window for 2 seconds...'
+    time.sleep(2)
+    if verbose:
+        print 'winclose'
+    gl.winclose(w)
+
+main()
diff --git a/lib-python/2.2/test/test_glob.py b/lib-python/2.2/test/test_glob.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_glob.py
@@ -0,0 +1,115 @@
+import unittest
+from test_support import run_unittest, TESTFN
+import glob
+import os
+
+def mkdirs(fname):
+    if os.path.exists(fname) or fname == '':
+        return
+    base, file = os.path.split(fname)
+    mkdirs(base)
+    os.mkdir(fname)
+
+def touchfile(fname):
+    base, file = os.path.split(fname)
+    mkdirs(base)
+    f = open(fname, 'w')
+    f.close()
+
+def deltree(fname):
+    for f in os.listdir(fname):
+        fullname = os.path.join(fname, f)
+        if os.path.isdir(fullname):
+            deltree(fullname)
+        else:
+            try:
+                os.unlink(fullname)
+            except:
+                pass
+    try:
+        os.rmdir(fname)
+    except:
+        pass
+
+
+class GlobTests(unittest.TestCase):
+
+    def norm(self, *parts):
+        return os.path.normpath(os.path.join(self.tempdir, *parts))
+
+    def mktemp(self, *parts):
+        touchfile(self.norm(*parts))
+
+    def setUp(self):
+        self.tempdir = TESTFN+"_dir"
+        self.mktemp('a', 'D')
+        self.mktemp('aab', 'F')
+        self.mktemp('aaa', 'zzzF')
+        self.mktemp('ZZZ')
+        self.mktemp('a', 'bcd', 'EF')
+        self.mktemp('a', 'bcd', 'efg', 'ha')
+
+    def tearDown(self):
+        deltree(self.tempdir)
+
+    def glob(self, *parts):
+        if len(parts) == 1:
+            pattern = parts[0]
+        else:
+            pattern = os.path.join(*parts)
+        p = os.path.join(self.tempdir, pattern)
+        return glob.glob(p)
+
+    def assertSequencesEqual_noorder(self, l1, l2):
+        l1 = list(l1)
+        l2 = list(l2)
+        l1.sort()
+        l2.sort()
+        self.assertEqual(l1, l2)
+
+    def test_glob_literal(self):
+        eq = self.assertSequencesEqual_noorder
+        np = lambda *f: norm(self.tempdir, *f)
+        eq(self.glob('a'), [self.norm('a')])
+        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
+        eq(self.glob('aab'), [self.norm('aab')])
+        eq(self.glob('zymurgy'), [])
+
+    def test_glob_one_directory(self):
+        eq = self.assertSequencesEqual_noorder
+        np = lambda *f: norm(self.tempdir, *f)
+        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
+        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
+        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
+        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
+        eq(self.glob('*q'), [])
+
+    def test_glob_nested_directory(self):
+        eq = self.assertSequencesEqual_noorder
+        np = lambda *f: norm(self.tempdir, *f)
+        if os.path.normcase("abCD") == "abCD":
+            # case-sensitive filesystem
+            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
+        else:
+            # case insensitive filesystem
+            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
+                                             self.norm('a', 'bcd', 'efg')])
+        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
+
+    def test_glob_directory_names(self):
+        eq = self.assertSequencesEqual_noorder
+        np = lambda *f: norm(self.tempdir, *f)
+        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
+        eq(self.glob('*', '*a'), [])
+        eq(self.glob('a', '*', '*', '*a'),
+           [self.norm('a', 'bcd', 'efg', 'ha')])
+        eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'),
+                                                   os.path.join('aab', 'F')]))
+
+
+def test_main():
+    run_unittest(GlobTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_global.py b/lib-python/2.2/test/test_global.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_global.py
@@ -0,0 +1,51 @@
+"""Verify that warnings are issued for global statements following use."""
+
+from test_support import check_syntax
+
+import warnings
+
+warnings.filterwarnings("error", module="<test code>")
+
+def compile_and_check(text, should_fail=1):
+    try:
+        compile(text, "<test code>", "exec")
+    except SyntaxError, msg:
+        if should_fail:
+            print "got SyntaxError as expected"
+        else:
+            print "raised unexpected SyntaxError:", text
+    else:
+        if should_fail:
+            print "should have raised SyntaxError:", text
+        else:
+            print "as expected, no SyntaxError"
+
+prog_text_1 = """
+def wrong1():
+    a = 1
+    b = 2
+    global a
+    global b
+"""
+compile_and_check(prog_text_1)
+
+prog_text_2 = """
+def wrong2():
+    print x
+    global x
+"""
+compile_and_check(prog_text_2)
+
+prog_text_3 = """
+def wrong3():
+    print x
+    x = 2
+    global x
+"""
+compile_and_check(prog_text_3)
+
+prog_text_4 = """
+global x
+x = 2
+"""
+compile_and_check(prog_text_4, 0)
diff --git a/lib-python/2.2/test/test_grammar.py b/lib-python/2.2/test/test_grammar.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_grammar.py
@@ -0,0 +1,732 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+from test_support import *
+import sys
+
+print '1. Parser'
+
+print '1.1 Tokens'
+
+print '1.1.1 Backslashes'
+
+# Backslash means line continuation:
+x = 1 \
++ 1
+if x != 2: raise TestFailed, 'backslash for line continuation'
+
+# Backslash does not means continuation in comments :\
+x = 0
+if x != 0: raise TestFailed, 'backslash ending comment'
+
+print '1.1.2 Numeric literals'
+
+print '1.1.2.1 Plain integers'
+if 0xff != 255: raise TestFailed, 'hex int'
+if 0377 != 255: raise TestFailed, 'octal int'
+if  2147483647   != 017777777777: raise TestFailed, 'large positive int'
+try:
+    from sys import maxint
+except ImportError:
+    maxint = 2147483647
+if maxint == 2147483647:
+    if -2147483647-1 != 020000000000: raise TestFailed, 'max negative int'
+    # XXX -2147483648
+    if 037777777777 != -1: raise TestFailed, 'oct -1'
+    if 0xffffffff != -1: raise TestFailed, 'hex -1'
+    for s in '2147483648', '040000000000', '0x100000000':
+        try:
+            x = eval(s)
+        except OverflowError:
+            print "OverflowError on huge integer literal " + `s`
+elif eval('maxint == 9223372036854775807'):
+    if eval('-9223372036854775807-1 != 01000000000000000000000'):
+        raise TestFailed, 'max negative int'
+    if eval('01777777777777777777777') != -1: raise TestFailed, 'oct -1'
+    if eval('0xffffffffffffffff') != -1: raise TestFailed, 'hex -1'
+    for s in '9223372036854775808', '02000000000000000000000', \
+             '0x10000000000000000':
+        try:
+            x = eval(s)
+        except OverflowError:
+            print "OverflowError on huge integer literal " + `s`
+else:
+    print 'Weird maxint value', maxint
+
+print '1.1.2.2 Long integers'
+x = 0L
+x = 0l
+x = 0xffffffffffffffffL
+x = 0xffffffffffffffffl
+x = 077777777777777777L
+x = 077777777777777777l
+x = 123456789012345678901234567890L
+x = 123456789012345678901234567890l
+
+print '1.1.2.3 Floating point'
+x = 3.14
+x = 314.
+x = 0.314
+# XXX x = 000.314
+x = .314
+x = 3e14
+x = 3E14
+x = 3e-14
+x = 3e+14
+x = 3.e14
+x = .3e14
+x = 3.1e4
+
+print '1.1.3 String literals'
+
+x = ''; y = ""; verify(len(x) == 0 and x == y)
+x = '\''; y = "'"; verify(len(x) == 1 and x == y and ord(x) == 39)
+x = '"'; y = "\""; verify(len(x) == 1 and x == y and ord(x) == 34)
+x = "doesn't \"shrink\" does it"
+y = 'doesn\'t "shrink" does it'
+verify(len(x) == 24 and x == y)
+x = "does \"shrink\" doesn't it"
+y = 'does "shrink" doesn\'t it'
+verify(len(x) == 24 and x == y)
+x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+verify(x == y)
+y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''; verify(x == y)
+y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"; verify(x == y)
+y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'; verify(x == y)
+
+
+print '1.2 Grammar'
+
+print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
+# XXX can't test in a script -- this rule is only used when interactive
+
+print 'file_input' # (NEWLINE | stmt)* ENDMARKER
+# Being tested as this very moment this very module
+
+print 'expr_input' # testlist NEWLINE
+# XXX Hard to test -- used only in calls to input()
+
+print 'eval_input' # testlist ENDMARKER
+x = eval('1, 0 or 1')
+
+print 'funcdef'
+### 'def' NAME parameters ':' suite
+### parameters: '(' [varargslist] ')'
+### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
+###            | ('**'|'*' '*') NAME)
+###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+### fpdef: NAME | '(' fplist ')'
+### fplist: fpdef (',' fpdef)* [',']
+### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
+### argument: [test '='] test   # Really [keyword '='] test
+def f1(): pass
+f1()
+f1(*())
+f1(*(), **{})
+def f2(one_argument): pass
+def f3(two, arguments): pass
+def f4(two, (compound, (argument, list))): pass
+def f5((compound, first), two): pass
+verify(f2.func_code.co_varnames == ('one_argument',))
+verify(f3.func_code.co_varnames == ('two', 'arguments'))
+if sys.platform.startswith('java'):
+    verify(f4.func_code.co_varnames ==
+           ('two', '(compound, (argument, list))', 'compound', 'argument',
+                        'list',))
+    verify(f5.func_code.co_varnames ==
+           ('(compound, first)', 'two', 'compound', 'first'))
+else:
+    verify(f4.func_code.co_varnames == ('two', '.2', 'compound',
+                                        'argument',  'list'))
+    verify(f5.func_code.co_varnames == ('.0', 'two', 'compound', 'first'))
+def a1(one_arg,): pass
+def a2(two, args,): pass
+def v0(*rest): pass
+def v1(a, *rest): pass
+def v2(a, b, *rest): pass
+def v3(a, (b, c), *rest): return a, b, c, rest
+if sys.platform.startswith('java'):
+    verify(v3.func_code.co_varnames == ('a', '(b, c)', 'rest', 'b', 'c'))
+else:
+    verify(v3.func_code.co_varnames == ('a', '.2', 'rest', 'b', 'c'))
+verify(v3(1, (2, 3), 4) == (1, 2, 3, (4,)))
+def d01(a=1): pass
+d01()
+d01(1)
+d01(*(1,))
+d01(**{'a':2})
+def d11(a, b=1): pass
+d11(1)
+d11(1, 2)
+d11(1, **{'b':2})
+def d21(a, b, c=1): pass
+d21(1, 2)
+d21(1, 2, 3)
+d21(*(1, 2, 3))
+d21(1, *(2, 3))
+d21(1, 2, *(3,))
+d21(1, 2, **{'c':3})
+def d02(a=1, b=2): pass
+d02()
+d02(1)
+d02(1, 2)
+d02(*(1, 2))
+d02(1, *(2,))
+d02(1, **{'b':2})
+d02(**{'a': 1, 'b': 2})
+def d12(a, b=1, c=2): pass
+d12(1)
+d12(1, 2)
+d12(1, 2, 3)
+def d22(a, b, c=1, d=2): pass
+d22(1, 2)
+d22(1, 2, 3)
+d22(1, 2, 3, 4)
+def d01v(a=1, *rest): pass
+d01v()
+d01v(1)
+d01v(1, 2)
+d01v(*(1, 2, 3, 4))
+d01v(*(1,))
+d01v(**{'a':2})
+def d11v(a, b=1, *rest): pass
+d11v(1)
+d11v(1, 2)
+d11v(1, 2, 3)
+def d21v(a, b, c=1, *rest): pass
+d21v(1, 2)
+d21v(1, 2, 3)
+d21v(1, 2, 3, 4)
+d21v(*(1, 2, 3, 4))
+d21v(1, 2, **{'c': 3})
+def d02v(a=1, b=2, *rest): pass
+d02v()
+d02v(1)
+d02v(1, 2)
+d02v(1, 2, 3)
+d02v(1, *(2, 3, 4))
+d02v(**{'a': 1, 'b': 2})
+def d12v(a, b=1, c=2, *rest): pass
+d12v(1)
+d12v(1, 2)
+d12v(1, 2, 3)
+d12v(1, 2, 3, 4)
+d12v(*(1, 2, 3, 4))
+d12v(1, 2, *(3, 4, 5))
+d12v(1, *(2,), **{'c': 3})
+def d22v(a, b, c=1, d=2, *rest): pass
+d22v(1, 2)
+d22v(1, 2, 3)
+d22v(1, 2, 3, 4)
+d22v(1, 2, 3, 4, 5)
+d22v(*(1, 2, 3, 4))
+d22v(1, 2, *(3, 4, 5))
+d22v(1, *(2, 3), **{'d': 4})
+
+### lambdef: 'lambda' [varargslist] ':' test
+print 'lambdef'
+l1 = lambda : 0
+verify(l1() == 0)
+l2 = lambda : a[d] # XXX just testing the expression
+l3 = lambda : [2 < x for x in [-1, 3, 0L]]
+verify(l3() == [0, 1, 0])
+l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+verify(l4() == 1)
+l5 = lambda x, y, z=2: x + y + z
+verify(l5(1, 2) == 5)
+verify(l5(1, 2, 3) == 6)
+check_syntax("lambda x: x = 2")
+
+### stmt: simple_stmt | compound_stmt
+# Tested below
+
+### simple_stmt: small_stmt (';' small_stmt)* [';']
+print 'simple_stmt'
+x = 1; pass; del x
+
+### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
+# Tested below
+
+print 'expr_stmt' # (exprlist '=')* exprlist
+1
+1, 2, 3
+x = 1
+x = 1, 2, 3
+x = y = z = 1, 2, 3
+x, y, z = 1, 2, 3
+abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+# NB these variables are deleted below
+
+check_syntax("x + 1 = 1")
+check_syntax("a + 1 = b + 2")
+
+print 'print_stmt' # 'print' (test ',')* [test]
+print 1, 2, 3
+print 1, 2, 3,
+print
+print 0 or 1, 0 or 1,
+print 0 or 1
+
+print 'extended print_stmt' # 'print' '>>' test ','
+import sys
+print >> sys.stdout, 1, 2, 3
+print >> sys.stdout, 1, 2, 3,
+print >> sys.stdout
+print >> sys.stdout, 0 or 1, 0 or 1,
+print >> sys.stdout, 0 or 1
+
+# test printing to an instance
+class Gulp:
+    def write(self, msg): pass
+
+gulp = Gulp()
+print >> gulp, 1, 2, 3
+print >> gulp, 1, 2, 3,
+print >> gulp
+print >> gulp, 0 or 1, 0 or 1,
+print >> gulp, 0 or 1
+
+# test print >> None
+def driver():
+    oldstdout = sys.stdout
+    sys.stdout = Gulp()
+    try:
+        tellme(Gulp())
+        tellme()
+    finally:
+        sys.stdout = oldstdout
+
+# we should see this once
+def tellme(file=sys.stdout):
+    print >> file, 'hello world'
+
+driver()
+
+# we should not see this at all
+def tellme(file=None):
+    print >> file, 'goodbye universe'
+
+driver()
+
+# syntax errors
+check_syntax('print ,')
+check_syntax('print >> x,')
+
+print 'del_stmt' # 'del' exprlist
+del abc
+del x, y, (z, xyz)
+
+print 'pass_stmt' # 'pass'
+pass
+
+print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
+# Tested below
+
+print 'break_stmt' # 'break'
+while 1: break
+
+print 'continue_stmt' # 'continue'
+i = 1
+while i: i = 0; continue
+
+msg = ""
+while not msg:
+    msg = "continue + try/except ok"
+    try:
+        continue
+        msg = "continue failed to continue inside try"
+    except:
+        msg = "continue inside try called except block"
+print msg
+
+msg = ""
+while not msg:
+    msg = "finally block not called"
+    try:
+        continue
+    finally:
+        msg = "continue + try/finally ok"
+print msg
+
+
+# This test warrants an explanation. It is a test specifically for SF bugs
+# #463359 and #462937. The bug is that a 'break' statement executed or
+# exception raised inside a try/except inside a loop, *after* a continue
+# statement has been executed in that loop, will cause the wrong number of
+# arguments to be popped off the stack and the instruction pointer reset to
+# a very small number (usually 0.) Because of this, the following test
+# *must* be written as a function, and the tracking vars *must* be function
+# arguments with default values. Otherwise, the test will loop and loop.
+
+print "testing continue and break in try/except in loop"
+def test_break_continue_loop(extra_burning_oil = 1, count=0):
+    big_hippo = 2
+    while big_hippo:
+        count += 1
+        try:
+            if extra_burning_oil and big_hippo == 1:
+                extra_burning_oil -= 1
+                break
+            big_hippo -= 1
+            continue
+        except:
+            raise
+    if count > 2 or big_hippo <> 1:
+        print "continue then break in try/except in loop broken!"
+test_break_continue_loop()
+
+print 'return_stmt' # 'return' [testlist]
+def g1(): return
+def g2(): return 1
+g1()
+x = g2()
+
+print 'raise_stmt' # 'raise' test [',' test]
+try: raise RuntimeError, 'just testing'
+except RuntimeError: pass
+try: raise KeyboardInterrupt
+except KeyboardInterrupt: pass
+
+print 'import_stmt' # 'import' NAME (',' NAME)* | 'from' NAME 'import' ('*' | NAME (',' NAME)*)
+import sys
+import time, sys
+from time import time
+from sys import *
+from sys import path, argv
+
+print 'global_stmt' # 'global' NAME (',' NAME)*
+def f():
+    global a
+    global a, b
+    global one, two, three, four, five, six, seven, eight, nine, ten
+
+print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
+def f():
+    z = None
+    del z
+    exec 'z=1+1\n'
+    if z != 2: raise TestFailed, 'exec \'z=1+1\'\\n'
+    del z
+    exec 'z=1+1'
+    if z != 2: raise TestFailed, 'exec \'z=1+1\''
+    z = None
+    del z
+    import types
+    if hasattr(types, "UnicodeType"):
+        exec r"""if 1:
+    exec u'z=1+1\n'
+    if z != 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
+    del z
+    exec u'z=1+1'
+    if z != 2: raise TestFailed, 'exec u\'z=1+1\''
+"""
+f()
+g = {}
+exec 'z = 1' in g
+if g.has_key('__builtins__'): del g['__builtins__']
+if g != {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
+g = {}
+l = {}
+
+import warnings
+warnings.filterwarnings("ignore", "global statement", module="<string>")
+exec 'global a; a = 1; b = 2' in g, l
+if g.has_key('__builtins__'): del g['__builtins__']
+if l.has_key('__builtins__'): del l['__builtins__']
+if (g, l) != ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g (%s), l (%s)' %(g,l)
+
+
+print "assert_stmt" # assert_stmt: 'assert' test [',' test]
+assert 1
+assert 1, 1
+assert lambda x:x
+assert 1, lambda x:x+1
+
+### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+# Tested below
+
+print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+if 1: pass
+if 1: pass
+else: pass
+if 0: pass
+elif 0: pass
+if 0: pass
+elif 0: pass
+elif 0: pass
+elif 0: pass
+else: pass
+
+print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
+while 0: pass
+while 0: pass
+else: pass
+
+print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+for i in 1, 2, 3: pass
+for i, j, k in (): pass
+else: pass
+class Squares:
+    def __init__(self, max):
+        self.max = max
+        self.sofar = []
+    def __len__(self): return len(self.sofar)
+    def __getitem__(self, i):
+        if not 0 <= i < self.max: raise IndexError
+        n = len(self.sofar)
+        while n <= i:
+            self.sofar.append(n*n)
+            n = n+1
+        return self.sofar[i]
+n = 0
+for x in Squares(10): n = n+x
+if n != 285: raise TestFailed, 'for over growing sequence'
+
+print 'try_stmt'
+### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+###         | 'try' ':' suite 'finally' ':' suite
+### except_clause: 'except' [expr [',' expr]]
+try:
+    1/0
+except ZeroDivisionError:
+    pass
+else:
+    pass
+try: 1/0
+except EOFError: pass
+except TypeError, msg: pass
+except RuntimeError, msg: pass
+except: pass
+else: pass
+try: 1/0
+except (EOFError, TypeError, ZeroDivisionError): pass
+try: 1/0
+except (EOFError, TypeError, ZeroDivisionError), msg: pass
+try: pass
+finally: pass
+
+print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+if 1: pass
+if 1:
+    pass
+if 1:
+    #
+    #
+    #
+    pass
+    pass
+    #
+    pass
+    #
+
+print 'test'
+### and_test ('or' and_test)*
+### and_test: not_test ('and' not_test)*
+### not_test: 'not' not_test | comparison
+if not 1: pass
+if 1 and 1: pass
+if 1 or 1: pass
+if not not not 1: pass
+if not 1 and 1 and 1: pass
+if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+print 'comparison'
+### comparison: expr (comp_op expr)*
+### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+if 1: pass
+x = (1 == 1)
+if 1 == 1: pass
+if 1 != 1: pass
+if 1 <> 1: pass
+if 1 < 1: pass
+if 1 > 1: pass
+if 1 <= 1: pass
+if 1 >= 1: pass
+if 1 is 1: pass
+if 1 is not 1: pass
+if 1 in (): pass
+if 1 not in (): pass
+if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+print 'binary mask ops'
+x = 1 & 1
+x = 1 ^ 1
+x = 1 | 1
+
+print 'shift ops'
+x = 1 << 1
+x = 1 >> 1
+x = 1 << 1 >> 1
+
+print 'additive ops'
+x = 1
+x = 1 + 1
+x = 1 - 1 - 1
+x = 1 - 1 + 1 - 1 + 1
+
+print 'multiplicative ops'
+x = 1 * 1
+x = 1 / 1
+x = 1 % 1
+x = 1 / 1 * 1 % 1
+
+print 'unary ops'
+x = +1
+x = -1
+x = ~1
+x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+x = -1*1/1 + 1*1 - ---1*1
+
+print 'selectors'
+### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+### subscript: expr | [expr] ':' [expr]
+f1()
+f2(1)
+f2(1,)
+f3(1, 2)
+f3(1, 2,)
+f4(1, (2, (3, 4)))
+v0()
+v0(1)
+v0(1,)
+v0(1,2)
+v0(1,2,3,4,5,6,7,8,9,0)
+v1(1)
+v1(1,)
+v1(1,2)
+v1(1,2,3)
+v1(1,2,3,4,5,6,7,8,9,0)
+v2(1,2)
+v2(1,2,3)
+v2(1,2,3,4)
+v2(1,2,3,4,5,6,7,8,9,0)
+v3(1,(2,3))
+v3(1,(2,3),4)
+v3(1,(2,3),4,5,6,7,8,9,0)
+print
+import sys, time
+c = sys.path[0]
+x = time.time()
+x = sys.modules['time'].time()
+a = '01234'
+c = a[0]
+c = a[-1]
+s = a[0:5]
+s = a[:5]
+s = a[0:]
+s = a[:]
+s = a[-5:]
+s = a[:-1]
+s = a[-4:-3]
+
+print 'atoms'
+### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
+### dictmaker: test ':' test (',' test ':' test)* [',']
+
+x = (1)
+x = (1 or 2 or 3)
+x = (1 or 2 or 3, 2, 3)
+
+x = []
+x = [1]
+x = [1 or 2 or 3]
+x = [1 or 2 or 3, 2, 3]
+x = []
+
+x = {}
+x = {'one': 1}
+x = {'one': 1,}
+x = {'one' or 'two': 1 or 2}
+x = {'one': 1, 'two': 2}
+x = {'one': 1, 'two': 2,}
+x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+x = `x`
+x = `1 or 2 or 3`
+x = x
+x = 'x'
+x = 123
+
+### exprlist: expr (',' expr)* [',']
+### testlist: test (',' test)* [',']
+# These have been exercised enough above
+
+print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
+class B: pass
+class C1(B): pass
+class C2(B): pass
+class D(C1, C2, B): pass
+class C:
+    def meth1(self): pass
+    def meth2(self, arg): pass
+    def meth3(self, a1, a2): pass
+
+# list comprehension tests
+nums = [1, 2, 3, 4, 5]
+strs = ["Apple", "Banana", "Coconut"]
+spcs = ["  Apple", " Banana ", "Coco  nut  "]
+
+print [s.strip() for s in spcs]
+print [3 * x for x in nums]
+print [x for x in nums if x > 2]
+print [(i, s) for i in nums for s in strs]
+print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
+print [(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)] 
+
+def test_in_func(l):
+    return [None < x < 3 for x in l if x > 2]
+
+print test_in_func(nums)
+
+def test_nested_front():
+    print [[y for y in [x, x + 1]] for x in [1,3,5]]
+
+test_nested_front()
+
+check_syntax("[i, s for i in nums for s in strs]")
+check_syntax("[x if y]")
+
+suppliers = [
+  (1, "Boeing"),
+  (2, "Ford"),
+  (3, "Macdonalds")
+]
+
+parts = [
+  (10, "Airliner"),
+  (20, "Engine"),
+  (30, "Cheeseburger")
+]
+
+suppart = [
+  (1, 10), (1, 20), (2, 20), (3, 30)
+]
+
+print [
+  (sname, pname)
+    for (sno, sname) in suppliers
+      for (pno, pname) in parts
+        for (sp_sno, sp_pno) in suppart
+          if sno == sp_sno and pno == sp_pno
+]
diff --git a/lib-python/2.2/test/test_grp.py b/lib-python/2.2/test/test_grp.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_grp.py
@@ -0,0 +1,27 @@
+"""Test script for the grp module."""
+
+# XXX This really needs some work, but what are the expected invariants?
+
+import grp
+import test_support
+import unittest
+
+
+class GroupDatabaseTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.groups = grp.getgrall()
+
+    def test_getgrgid(self):
+        entry = grp.getgrgid(self.groups[0][2])
+
+    def test_getgrnam(self):
+        entry = grp.getgrnam(self.groups[0][0])
+
+
+def test_main():
+    test_support.run_unittest(GroupDatabaseTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_gzip.py b/lib-python/2.2/test/test_gzip.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_gzip.py
@@ -0,0 +1,82 @@
+from test_support import verify
+import sys, os
+import gzip, tempfile
+
+filename = tempfile.mktemp()
+
+data1 = """  int length=DEFAULTALLOC, err = Z_OK;
+  PyObject *RetVal;
+  int flushmode = Z_FINISH;
+  unsigned long start_total_out;
+
+"""
+
+data2 = """/* zlibmodule.c -- gzip-compatible data compression */
+/* See http://www.cdrom.com/pub/infozip/zlib/ */
+/* See http://www.winimage.com/zLibDll for Windows */
+"""
+
+f = gzip.GzipFile(filename, 'wb') ; f.write(data1 * 50) ; f.close()
+
+f = gzip.GzipFile(filename, 'r') ; d = f.read() ; f.close()
+verify(d == data1*50)
+
+# Append to the previous file
+f = gzip.GzipFile(filename, 'ab') ; f.write(data2 * 15) ; f.close()
+
+f = gzip.GzipFile(filename, 'rb') ; d = f.read() ; f.close()
+verify(d == (data1*50) + (data2*15))
+
+# Try .readline() with varying line lengths
+
+f = gzip.GzipFile(filename, 'rb')
+line_length = 0
+while 1:
+    L = f.readline(line_length)
+    if L == "" and line_length != 0: break
+    verify(len(L) <= line_length)
+    line_length = (line_length + 1) % 50
+f.close()
+
+# Try .readlines()
+
+f = gzip.GzipFile(filename, 'rb')
+L = f.readlines()
+f.close()
+
+f = gzip.GzipFile(filename, 'rb')
+while 1:
+    L = f.readlines(150)
+    if L == []: break
+f.close()
+
+# Try seek, read test
+
+f = gzip.GzipFile(filename)
+while 1:
+    oldpos = f.tell()
+    line1 = f.readline()
+    if not line1: break
+    newpos = f.tell()
+    f.seek(oldpos)  # negative seek
+    if len(line1)>10:
+        amount = 10
+    else:
+        amount = len(line1)
+    line2 = f.read(amount)
+    verify(line1[:amount] == line2)
+    f.seek(newpos)  # positive seek
+f.close()
+
+# Try seek, write test
+f = gzip.GzipFile(filename, 'w')
+for pos in range(0, 256, 16):
+    f.seek(pos)
+    f.write('GZ\n')
+f.close()
+
+f = gzip.GzipFile(filename, 'r')
+verify(f.myfileobj.mode == 'rb')
+f.close()
+
+os.unlink(filename)
diff --git a/lib-python/2.2/test/test_hash.py b/lib-python/2.2/test/test_hash.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_hash.py
@@ -0,0 +1,36 @@
+# test the invariant that
+#   if a==b then hash(a)==hash(b)  (the converse need not hold)
+#
+
+import test_support
+import unittest
+
+
+class HashEqualityTestCase(unittest.TestCase):
+
+    def same_hash(self, *objlist):
+        # Hash each object given and fail if
+        # the hash values are not all the same.
+        hashed = map(hash, objlist)
+        for h in hashed[1:]:
+            if h != hashed[0]:
+                self.fail("hashed values differ: %s" % `objlist`)
+
+    def test_numeric_literals(self):
+        self.same_hash(1, 1L, 1.0, 1.0+0.0j)
+
+    def test_coerced_integers(self):
+        self.same_hash(int(1), long(1), float(1), complex(1),
+                       int('1'), float('1.0'))
+
+    def test_coerced_floats(self):
+        self.same_hash(long(1.23e300), float(1.23e300))
+        self.same_hash(float(0.5), complex(0.5, 0.0))
+
+
+def test_main():
+    test_support.run_unittest(HashEqualityTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_hmac.py b/lib-python/2.2/test/test_hmac.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_hmac.py
@@ -0,0 +1,108 @@
+import hmac
+import unittest
+import test_support
+
+class TestVectorsTestCase(unittest.TestCase):
+    def test_vectors(self):
+        """Test the HMAC module against test vectors from the RFC."""
+
+        def md5test(key, data, digest):
+            h = hmac.HMAC(key, data)
+            self.failUnless(h.hexdigest().upper() == digest.upper())
+
+        md5test(chr(0x0b) * 16,
+                "Hi There",
+                "9294727A3638BB1C13F48EF8158BFC9D")
+
+        md5test("Jefe",
+                "what do ya want for nothing?",
+                "750c783e6ab0b503eaa86e310a5db738")
+
+        md5test(chr(0xAA)*16,
+                chr(0xDD)*50,
+                "56be34521d144c88dbb8c733f0e8b3f6")
+
+class ConstructorTestCase(unittest.TestCase):
+    def test_normal(self):
+        """Standard constructor call."""
+        failed = 0
+        try:
+            h = hmac.HMAC("key")
+        except:
+            self.fail("Standard constructor call raised exception.")
+
+    def test_withtext(self):
+        """Constructor call with text."""
+        try:
+            h = hmac.HMAC("key", "hash this!")
+        except:
+            self.fail("Constructor call with text argument raised exception.")
+
+    def test_withmodule(self):
+        """Constructor call with text and digest module."""
+        import sha
+        try:
+            h = hmac.HMAC("key", "", sha)
+        except:
+            self.fail("Constructor call with sha module raised exception.")
+
+class SanityTestCase(unittest.TestCase):
+    def test_default_is_md5(self):
+        """Testing if HMAC defaults to MD5 algorithm."""
+        import md5
+        h = hmac.HMAC("key")
+        self.failUnless(h.digestmod == md5)
+
+    def test_exercise_all_methods(self):
+        """Exercising all methods once."""
+        # This must not raise any exceptions
+        try:
+            h = hmac.HMAC("my secret key")
+            h.update("compute the hash of this text!")
+            dig = h.digest()
+            dig = h.hexdigest()
+            h2 = h.copy()
+        except:
+            fail("Exception raised during normal usage of HMAC class.")
+
+class CopyTestCase(unittest.TestCase):
+    def test_attributes(self):
+        """Testing if attributes are of same type."""
+        h1 = hmac.HMAC("key")
+        h2 = h1.copy()
+        self.failUnless(h1.digestmod == h2.digestmod,
+            "Modules don't match.")
+        self.failUnless(type(h1.inner) == type(h2.inner),
+            "Types of inner don't match.")
+        self.failUnless(type(h1.outer) == type(h2.outer),
+            "Types of outer don't match.")
+
+    def test_realcopy(self):
+        """Testing if the copy method created a real copy."""
+        h1 = hmac.HMAC("key")
+        h2 = h1.copy()
+        # Using id() in case somebody has overridden __cmp__.
+        self.failUnless(id(h1) != id(h2), "No real copy of the HMAC instance.")
+        self.failUnless(id(h1.inner) != id(h2.inner),
+            "No real copy of the attribute 'inner'.")
+        self.failUnless(id(h1.outer) != id(h2.outer),
+            "No real copy of the attribute 'outer'.")
+
+    def test_equality(self):
+        """Testing if the copy has the same digests."""
+        h1 = hmac.HMAC("key")
+        h1.update("some random text")
+        h2 = h1.copy()
+        self.failUnless(h1.digest() == h2.digest(),
+            "Digest of copy doesn't match original digest.")
+        self.failUnless(h1.hexdigest() == h2.hexdigest(),
+            "Hexdigest of copy doesn't match original hexdigest.")
+
+def test_main():
+    test_support.run_unittest(TestVectorsTestCase)
+    test_support.run_unittest(ConstructorTestCase)
+    test_support.run_unittest(SanityTestCase)
+    test_support.run_unittest(CopyTestCase)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_hotshot.py b/lib-python/2.2/test/test_hotshot.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_hotshot.py
@@ -0,0 +1,117 @@
+import hotshot
+import hotshot.log
+import os
+import pprint
+import unittest
+
+import test_support
+
+from hotshot.log import ENTER, EXIT, LINE
+
+
+def shortfilename(fn):
+    # We use a really shortened filename since an exact match is made,
+    # and the source may be either a Python source file or a
+    # pre-compiled bytecode file.
+    if fn:
+        return os.path.splitext(os.path.basename(fn))[0]
+    else:
+        return fn
+
+
+class UnlinkingLogReader(hotshot.log.LogReader):
+    """Extend the LogReader so the log file is unlinked when we're
+    done with it."""
+
+    def __init__(self, logfn):
+        self.__logfn = logfn
+        hotshot.log.LogReader.__init__(self, logfn)
+
+    def next(self, index=None):
+        try:
+            return hotshot.log.LogReader.next(self)
+        except (IndexError, StopIteration):
+            os.unlink(self.__logfn)
+            raise
+
+
+class HotShotTestCase(unittest.TestCase):
+    def new_profiler(self, lineevents=0, linetimings=1):
+        self.logfn = test_support.TESTFN
+        return hotshot.Profile(self.logfn, lineevents, linetimings)
+
+    def get_logreader(self):
+        return UnlinkingLogReader(self.logfn)
+
+    def get_events_wotime(self):
+        L = []
+        for event in self.get_logreader():
+            what, (filename, lineno, funcname), tdelta = event
+            L.append((what, (shortfilename(filename), lineno, funcname)))
+        return L
+
+    def check_events(self, expected):
+        events = self.get_events_wotime()
+        if not __debug__:
+            # Running under -O, so we don't get LINE events
+            expected = [ev for ev in expected if ev[0] != LINE]
+        if events != expected:
+            self.fail(
+                "events did not match expectation; got:\n%s\nexpected:\n%s"
+                % (pprint.pformat(events), pprint.pformat(expected)))
+
+    def run_test(self, callable, events, profiler=None):
+        if profiler is None:
+            profiler = self.new_profiler()
+        profiler.runcall(callable)
+        profiler.close()
+        self.check_events(events)
+
+    def test_addinfo(self):
+        def f(p):
+            p.addinfo("test-key", "test-value")
+        profiler = self.new_profiler()
+        profiler.runcall(f, profiler)
+        profiler.close()
+        log = self.get_logreader()
+        info = log._info
+        list(log)
+        self.failUnless(info["test-key"] == ["test-value"])
+
+    def test_line_numbers(self):
+        def f():
+            y = 2
+            x = 1
+        def g():
+            f()
+        f_lineno = f.func_code.co_firstlineno
+        g_lineno = g.func_code.co_firstlineno
+        events = [(ENTER, ("test_hotshot", g_lineno, "g")),
+                  (LINE,  ("test_hotshot", g_lineno, "g")),
+                  (LINE,  ("test_hotshot", g_lineno+1, "g")),
+                  (ENTER, ("test_hotshot", f_lineno, "f")),
+                  (LINE,  ("test_hotshot", f_lineno, "f")),
+                  (LINE,  ("test_hotshot", f_lineno+1, "f")),
+                  (LINE,  ("test_hotshot", f_lineno+2, "f")),
+                  (EXIT,  ("test_hotshot", f_lineno, "f")),
+                  (EXIT,  ("test_hotshot", g_lineno, "g")),
+                  ]
+        self.run_test(g, events, self.new_profiler(lineevents=1))
+
+    def test_start_stop(self):
+        # Make sure we don't return NULL in the start() and stop()
+        # methods when there isn't an error.  Bug in 2.2 noted by
+        # Anthony Baxter.
+        profiler = self.new_profiler()
+        profiler.start()
+        profiler.stop()
+        profiler.close()
+        os.unlink(self.logfn)
+
+
+def test_main():
+    test_support.run_unittest(HotShotTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_htmllib.py b/lib-python/2.2/test/test_htmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_htmllib.py
@@ -0,0 +1,42 @@
+import formatter
+import htmllib
+import unittest
+
+import test_support
+
+
+class AnchorCollector(htmllib.HTMLParser):
+    def __init__(self, *args, **kw):
+        self.__anchors = []
+        htmllib.HTMLParser.__init__(self, *args, **kw)
+
+    def get_anchor_info(self):
+        return self.__anchors
+
+    def anchor_bgn(self, *args):
+        self.__anchors.append(args)
+
+
+class HTMLParserTestCase(unittest.TestCase):
+    def test_anchor_collection(self):
+        # See SF bug #467059.
+        parser = AnchorCollector(formatter.NullFormatter(), verbose=1)
+        parser.feed(
+            """<a href='http://foo.org/' name='splat'> </a>
+            <a href='http://www.python.org/'> </a>
+            <a name='frob'> </a>
+            """)
+        parser.close()
+        self.assertEquals(parser.get_anchor_info(),
+                          [('http://foo.org/', 'splat', ''),
+                           ('http://www.python.org/', '', ''),
+                           ('', 'frob', ''),
+                           ])
+
+
+def test_main():
+    test_support.run_unittest(HTMLParserTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_htmlparser.py b/lib-python/2.2/test/test_htmlparser.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_htmlparser.py
@@ -0,0 +1,294 @@
+"""Tests for HTMLParser.py."""
+
+import HTMLParser
+import pprint
+import sys
+import test_support
+import unittest
+
+
+class EventCollector(HTMLParser.HTMLParser):
+
+    def __init__(self):
+        self.events = []
+        self.append = self.events.append
+        HTMLParser.HTMLParser.__init__(self)
+
+    def get_events(self):
+        # Normalize the list of events so that buffer artefacts don't
+        # separate runs of contiguous characters.
+        L = []
+        prevtype = None
+        for event in self.events:
+            type = event[0]
+            if type == prevtype == "data":
+                L[-1] = ("data", L[-1][1] + event[1])
+            else:
+                L.append(event)
+            prevtype = type
+        self.events = L
+        return L
+
+    # structure markup
+
+    def handle_starttag(self, tag, attrs):
+        self.append(("starttag", tag, attrs))
+
+    def handle_startendtag(self, tag, attrs):
+        self.append(("startendtag", tag, attrs))
+
+    def handle_endtag(self, tag):
+        self.append(("endtag", tag))
+
+    # all other markup
+
+    def handle_comment(self, data):
+        self.append(("comment", data))
+
+    def handle_charref(self, data):
+        self.append(("charref", data))
+
+    def handle_data(self, data):
+        self.append(("data", data))
+
+    def handle_decl(self, data):
+        self.append(("decl", data))
+
+    def handle_entityref(self, data):
+        self.append(("entityref", data))
+
+    def handle_pi(self, data):
+        self.append(("pi", data))
+
+    def unknown_decl(self, decl):
+        self.append(("unknown decl", decl))
+
+
+class EventCollectorExtra(EventCollector):
+
+    def handle_starttag(self, tag, attrs):
+        EventCollector.handle_starttag(self, tag, attrs)
+        self.append(("starttag_text", self.get_starttag_text()))
+
+
+class TestCaseBase(unittest.TestCase):
+
+    def _run_check(self, source, expected_events, collector=EventCollector):
+        parser = collector()
+        for s in source:
+            parser.feed(s)
+        parser.close()
+        events = parser.get_events()
+        if events != expected_events:
+            self.fail("received events did not match expected events\n"
+                      "Expected:\n" + pprint.pformat(expected_events) +
+                      "\nReceived:\n" + pprint.pformat(events))
+
+    def _run_check_extra(self, source, events):
+        self._run_check(source, events, EventCollectorExtra)
+
+    def _parse_error(self, source):
+        def parse(source=source):
+            parser = HTMLParser.HTMLParser()
+            parser.feed(source)
+            parser.close()
+        self.assertRaises(HTMLParser.HTMLParseError, parse)
+
+
+class HTMLParserTestCase(TestCaseBase):
+
+    def test_processing_instruction_only(self):
+        self._run_check("<?processing instruction>", [
+            ("pi", "processing instruction"),
+            ])
+
+    def test_simple_html(self):
+        self._run_check("""
+<!DOCTYPE html PUBLIC 'foo'>
+<HTML>&entity;&#32;
+<!--comment1a
+-></foo><bar>&lt;<?pi?></foo<bar
+comment1b-->
+<Img sRc='Bar' isMAP>sample
+text
+&#x201C;
+<!--comment2a-- --comment2b-->
+</Html>
+""", [
+    ("data", "\n"),
+    ("decl", "DOCTYPE html PUBLIC 'foo'"),
+    ("data", "\n"),
+    ("starttag", "html", []),
+    ("entityref", "entity"),
+    ("charref", "32"),
+    ("data", "\n"),
+    ("comment", "comment1a\n-></foo><bar>&lt;<?pi?></foo<bar\ncomment1b"),
+    ("data", "\n"),
+    ("starttag", "img", [("src", "Bar"), ("ismap", None)]),
+    ("data", "sample\ntext\n"),
+    ("charref", "x201C"),
+    ("data", "\n"),
+    ("comment", "comment2a-- --comment2b"),
+    ("data", "\n"),
+    ("endtag", "html"),
+    ("data", "\n"),
+    ])
+
+    def test_unclosed_entityref(self):
+        self._run_check("&entityref foo", [
+            ("entityref", "entityref"),
+            ("data", " foo"),
+            ])
+
+    def test_doctype_decl(self):
+        inside = """\
+DOCTYPE html [
+  <!ELEMENT html - O EMPTY>
+  <!ATTLIST html
+      version CDATA #IMPLIED
+      profile CDATA 'DublinCore'>
+  <!NOTATION datatype SYSTEM 'http://xml.python.org/notations/python-module'>
+  <!ENTITY myEntity 'internal parsed entity'>
+  <!ENTITY anEntity SYSTEM 'http://xml.python.org/entities/something.xml'>
+  <!ENTITY % paramEntity 'name|name|name'>
+  %paramEntity;
+  <!-- comment -->
+]"""
+        self._run_check("<!%s>" % inside, [
+            ("decl", inside),
+            ])
+
+    def test_bad_nesting(self):
+        # Strangely, this *is* supposed to test that overlapping
+        # elements are allowed.  HTMLParser is more geared toward
+        # lexing the input than parsing the structure.
+        self._run_check("<a><b></a></b>", [
+            ("starttag", "a", []),
+            ("starttag", "b", []),
+            ("endtag", "a"),
+            ("endtag", "b"),
+            ])
+
+    def test_bare_ampersands(self):
+        self._run_check("this text & contains & ampersands &", [
+            ("data", "this text & contains & ampersands &"),
+            ])
+
+    def test_bare_pointy_brackets(self):
+        self._run_check("this < text > contains < bare>pointy< brackets", [
+            ("data", "this < text > contains < bare>pointy< brackets"),
+            ])
+
+    def test_attr_syntax(self):
+        output = [
+          ("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
+          ]
+        self._run_check("""<a b='v' c="v" d=v e>""", output)
+        self._run_check("""<a  b = 'v' c = "v" d = v e>""", output)
+        self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
+        self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
+
+    def test_attr_values(self):
+        self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
+                        [("starttag", "a", [("b", "xxx\n\txxx"),
+                                            ("c", "yyy\t\nyyy"),
+                                            ("d", "\txyz\n")])
+                         ])
+        self._run_check("""<a b='' c="">""", [
+            ("starttag", "a", [("b", ""), ("c", "")]),
+            ])
+
+    def test_attr_entity_replacement(self):
+        self._run_check("""<a b='&amp;&gt;&lt;&quot;&apos;'>""", [
+            ("starttag", "a", [("b", "&><\"'")]),
+            ])
+
+    def test_attr_funky_names(self):
+        self._run_check("""<a a.b='v' c:d=v e-f=v>""", [
+            ("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
+            ])
+
+    def test_illegal_declarations(self):
+        self._parse_error('<!spacer type="block" height="25">')
+
+    def test_starttag_end_boundary(self):
+        self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
+        self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
+
+    def test_buffer_artefacts(self):
+        output = [("starttag", "a", [("b", "<")])]
+        self._run_check(["<a b='<'>"], output)
+        self._run_check(["<a ", "b='<'>"], output)
+        self._run_check(["<a b", "='<'>"], output)
+        self._run_check(["<a b=", "'<'>"], output)
+        self._run_check(["<a b='<", "'>"], output)
+        self._run_check(["<a b='<'", ">"], output)
+
+        output = [("starttag", "a", [("b", ">")])]
+        self._run_check(["<a b='>'>"], output)
+        self._run_check(["<a ", "b='>'>"], output)
+        self._run_check(["<a b", "='>'>"], output)
+        self._run_check(["<a b=", "'>'>"], output)
+        self._run_check(["<a b='>", "'>"], output)
+        self._run_check(["<a b='>'", ">"], output)
+
+    def test_starttag_junk_chars(self):
+        self._parse_error("</>")
+        self._parse_error("</$>")
+        self._parse_error("</")
+        self._parse_error("</a")
+        self._parse_error("<a<a>")
+        self._parse_error("</a<a>")
+        self._parse_error("<!")
+        self._parse_error("<a $>")
+        self._parse_error("<a")
+        self._parse_error("<a foo='bar'")
+        self._parse_error("<a foo='bar")
+        self._parse_error("<a foo='>'")
+        self._parse_error("<a foo='>")
+        self._parse_error("<a foo=>")
+
+    def test_declaration_junk_chars(self):
+        self._parse_error("<!DOCTYPE foo $ >")
+
+    def test_startendtag(self):
+        self._run_check("<p/>", [
+            ("startendtag", "p", []),
+            ])
+        self._run_check("<p></p>", [
+            ("starttag", "p", []),
+            ("endtag", "p"),
+            ])
+        self._run_check("<p><img src='foo' /></p>", [
+            ("starttag", "p", []),
+            ("startendtag", "img", [("src", "foo")]),
+            ("endtag", "p"),
+            ])
+
+    def test_get_starttag_text(self):
+        s = """<foo:bar   \n   one="1"\ttwo=2   >"""
+        self._run_check_extra(s, [
+            ("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
+            ("starttag_text", s)])
+
+    def test_cdata_content(self):
+        s = """<script> <!-- not a comment --> &not-an-entity-ref; </script>"""
+        self._run_check(s, [
+            ("starttag", "script", []),
+            ("data", " <!-- not a comment --> &not-an-entity-ref; "),
+            ("endtag", "script"),
+            ])
+        s = """<script> <not a='start tag'> </script>"""
+        self._run_check(s, [
+            ("starttag", "script", []),
+            ("data", " <not a='start tag'> "),
+            ("endtag", "script"),
+            ])
+
+
+def test_main():
+    test_support.run_unittest(HTMLParserTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_httplib.py b/lib-python/2.2/test/test_httplib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_httplib.py
@@ -0,0 +1,58 @@
+from test_support import verify,verbose
+import httplib
+import StringIO
+
+class FakeSocket:
+    def __init__(self, text):
+        self.text = text
+
+    def makefile(self, mode, bufsize=None):
+        if mode != 'r' and mode != 'rb':
+            raise httplib.UnimplementedFileMode()
+        return StringIO.StringIO(self.text)
+
+# Test HTTP status lines
+
+body = "HTTP/1.1 200 Ok\n\nText"
+sock = FakeSocket(body)
+resp = httplib.HTTPResponse(sock, 1)
+resp.begin()
+print resp.read()
+resp.close()
+
+body = "HTTP/1.1 400.100 Not Ok\n\nText"
+sock = FakeSocket(body)
+resp = httplib.HTTPResponse(sock, 1)
+try:
+    resp.begin()
+except httplib.BadStatusLine:
+    print "BadStatusLine raised as expected"
+else:
+    print "Expect BadStatusLine"
+
+# Check invalid host_port
+
+for hp in ("www.python.org:abc", "www.python.org:"):
+    try:
+        h = httplib.HTTP(hp)
+    except httplib.InvalidURL:
+        print "InvalidURL raised as expected"
+    else:
+        print "Expect InvalidURL"
+
+# test response with multiple message headers with the same field name.
+text = ('HTTP/1.1 200 OK\n'
+        'Set-Cookie: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"\n'
+        'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
+        ' Path="/acme"\n'
+        '\n'
+        'No body\n')
+hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
+       ', '
+       'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
+s = FakeSocket(text)
+r = httplib.HTTPResponse(s, 1)
+r.begin()
+cookies = r.getheader("Set-Cookie")
+if cookies != hdr:
+    raise AssertionError, "multiple headers not combined properly"
diff --git a/lib-python/2.2/test/test_imageop.py b/lib-python/2.2/test/test_imageop.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_imageop.py
@@ -0,0 +1,171 @@
+#! /usr/bin/env python
+
+"""Test script for the imageop module.  This has the side
+   effect of partially testing the imgfile module as well.
+   Roger E. Masse
+"""
+
+from test_support import verbose, unlink
+
+import imageop, uu, os
+
+def main(use_rgbimg=1):
+
+    # Create binary test files
+    uu.decode(get_qualified_path('testrgb'+os.extsep+'uue'), 'test'+os.extsep+'rgb')
+
+    if use_rgbimg:
+        image, width, height = getrgbimage('test'+os.extsep+'rgb')
+    else:
+        image, width, height = getimage('test'+os.extsep+'rgb')
+
+    # Return the selected part of image, which should be width by height
+    # in size and consist of pixels of psize bytes.
+    if verbose:
+        print 'crop'
+    newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)
+
+    # Return image scaled to size newwidth by newheight. No interpolation
+    # is done, scaling is done by simple-minded pixel duplication or removal.
+    # Therefore, computer-generated images or dithered images will
+    # not look nice after scaling.
+    if verbose:
+        print 'scale'
+    scaleimage = imageop.scale(image, 4, width, height, 1, 1)
+
+    # Run a vertical low-pass filter over an image. It does so by computing
+    # each destination pixel as the average of two vertically-aligned source
+    # pixels. The main use of this routine is to forestall excessive flicker
+    # if the image is displayed on a video device that uses interlacing, hence the name.
+    if verbose:
+        print 'tovideo'
+    videoimage = imageop.tovideo (image, 4, width, height)
+
+    # Convert an rgb image to an 8 bit rgb
+    if verbose:
+        print 'rgb2rgb8'
+    greyimage = imageop.rgb2rgb8(image, width, height)
+
+    # Convert an 8 bit rgb image to a 24 bit rgb image
+    if verbose:
+        print 'rgb82rgb'
+    image = imageop.rgb82rgb(greyimage, width, height)
+
+    # Convert an rgb image to an 8 bit greyscale image
+    if verbose:
+        print 'rgb2grey'
+    greyimage = imageop.rgb2grey(image, width, height)
+
+    # Convert an 8 bit greyscale image to a 24 bit rgb image
+    if verbose:
+        print 'grey2rgb'
+    image = imageop.grey2rgb(greyimage, width, height)
+
+    # Convert a 8-bit deep greyscale image to a 1-bit deep image by
+    # thresholding all the pixels. The resulting image is tightly packed
+    # and is probably only useful as an argument to mono2grey.
+    if verbose:
+        print 'grey2mono'
+    monoimage = imageop.grey2mono (greyimage, width, height, 0)
+
+    # monoimage, width, height = getimage('monotest.rgb')
+    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
+    # All pixels that are zero-valued on input get value p0 on output and
+    # all one-value input pixels get value p1 on output. To convert a
+    # monochrome  black-and-white image to greyscale pass the values 0 and
+    # 255 respectively.
+    if verbose:
+        print 'mono2grey'
+    greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)
+
+    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
+    # (simple-minded) dithering algorithm.
+    if verbose:
+        print 'dither2mono'
+    monoimage = imageop.dither2mono (greyimage, width, height)
+
+    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
+    # dithering.
+    if verbose:
+        print 'grey2grey4'
+    grey4image = imageop.grey2grey4 (greyimage, width, height)
+
+    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
+    # dithering.
+    if verbose:
+        print 'grey2grey2'
+    grey2image = imageop.grey2grey2 (greyimage, width, height)
+
+    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
+    # dithering. As for dither2mono, the dithering algorithm is currently
+    # very simple.
+    if verbose:
+        print 'dither2grey2'
+    grey2image = imageop.dither2grey2 (greyimage, width, height)
+
+    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
+    if verbose:
+        print 'grey42grey'
+    greyimage = imageop.grey42grey (grey4image, width, height)
+
+    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
+    if verbose:
+        print 'grey22grey'
+    image = imageop.grey22grey (grey2image, width, height)
+
+    # Cleanup
+    unlink('test'+os.extsep+'rgb')
+
+def getrgbimage(name):
+    """return a tuple consisting of image (in 'imgfile' format but
+    using rgbimg instead) width and height"""
+
+    import rgbimg
+
+    try:
+        sizes = rgbimg.sizeofimage(name)
+    except rgbimg.error:
+        name = get_qualified_path(name)
+        sizes = rgbimg.sizeofimage(name)
+    if verbose:
+        print 'rgbimg opening test image: %s, sizes: %s' % (name, str(sizes))
+
+    image = rgbimg.longimagedata(name)
+    return (image, sizes[0], sizes[1])
+
+def getimage(name):
+    """return a tuple consisting of
+       image (in 'imgfile' format) width and height
+    """
+
+    import imgfile
+
+    try:
+        sizes = imgfile.getsizes(name)
+    except imgfile.error:
+        name = get_qualified_path(name)
+        sizes = imgfile.getsizes(name)
+    if verbose:
+        print 'imgfile opening test image: %s, sizes: %s' % (name, str(sizes))
+
+    image = imgfile.read(name)
+    return (image, sizes[0], sizes[1])
+
+def get_qualified_path(name):
+    """ return a more qualified path to name"""
+    import sys
+    import os
+    path = sys.path
+    try:
+        path = [os.path.dirname(__file__)] + path
+    except NameError:
+        pass
+    for dir in path:
+        fullname = os.path.join(dir, name)
+        if os.path.exists(fullname):
+            return fullname
+    return name
+
+# rgbimg (unlike imgfile) is portable to platforms other than SGI.
+# So we prefer to use it.
+main(use_rgbimg=1)
diff --git a/lib-python/2.2/test/test_imgfile.py b/lib-python/2.2/test/test_imgfile.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_imgfile.py
@@ -0,0 +1,116 @@
+#! /usr/bin/env python
+
+"""Simple test script for imgfile.c
+   Roger E. Masse
+"""
+
+from test_support import verbose, unlink, findfile
+
+import imgfile, uu, os
+
+
+def main():
+
+    uu.decode(findfile('testrgb.uue'), 'test.rgb')
+    uu.decode(findfile('greyrgb.uue'), 'greytest.rgb')
+
+    # Test a 3 byte color image
+    testimage('test.rgb')
+
+    # Test a 1 byte greyscale image
+    testimage('greytest.rgb')
+
+    unlink('test.rgb')
+    unlink('greytest.rgb')
+
+def testimage(name):
+    """Run through the imgfile's battery of possible methods
+       on the image passed in name.
+    """
+
+    import sys
+    import os
+
+    outputfile = '/tmp/deleteme'
+
+    # try opening the name directly
+    try:
+        # This function returns a tuple (x, y, z) where x and y are the size
+        # of the image in pixels and z is the number of bytes per pixel. Only
+        # 3 byte RGB pixels and 1 byte greyscale pixels are supported.
+        sizes = imgfile.getsizes(name)
+    except imgfile.error:
+        # get a more qualified path component of the script...
+        if __name__ == '__main__':
+            ourname = sys.argv[0]
+        else: # ...or the full path of the module
+            ourname = sys.modules[__name__].__file__
+
+        parts = ourname.split(os.sep)
+        parts[-1] = name
+        name = os.sep.join(parts)
+        sizes = imgfile.getsizes(name)
+    if verbose:
+        print 'Opening test image: %s, sizes: %s' % (name, str(sizes))
+    # This function reads and decodes the image on the specified file,
+    # and returns it as a python string. The string has either 1 byte
+    # greyscale pixels or 4 byte RGBA pixels. The bottom left pixel
+    # is the first in the string. This format is suitable to pass
+    # to gl.lrectwrite, for instance.
+    image = imgfile.read(name)
+
+    # This function writes the RGB or greyscale data in data to
+    # image file file. x and y give the size of the image, z is
+    # 1 for 1 byte greyscale images or 3 for RGB images (which
+    # are stored as 4 byte values of which only the lower three
+    # bytes are used). These are the formats returned by gl.lrectread.
+    if verbose:
+        print 'Writing output file'
+    imgfile.write (outputfile, image, sizes[0], sizes[1], sizes[2])
+
+
+    if verbose:
+        print 'Opening scaled test image: %s, sizes: %s' % (name, str(sizes))
+    # This function is identical to read but it returns an image that
+    # is scaled to the given x and y sizes. If the filter and blur
+    # parameters are omitted scaling is done by simply dropping
+    # or duplicating pixels, so the result will be less than perfect,
+    # especially for computer-generated images.  Alternatively,
+    # you can specify a filter to use to smoothen the image after
+    # scaling. The filter forms supported are 'impulse', 'box',
+    # 'triangle', 'quadratic' and 'gaussian'. If a filter is
+    # specified blur is an optional parameter specifying the
+    # blurriness of the filter. It defaults to 1.0.  readscaled
+    # makes no attempt to keep the aspect ratio correct, so that
+    # is the users' responsibility.
+    if verbose:
+        print 'Filtering with "impulse"'
+    simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'impulse', 2.0)
+
+    # This function sets a global flag which defines whether the
+    # scan lines of the image are read or written from bottom to
+    # top (flag is zero, compatible with SGI GL) or from top to
+    # bottom(flag is one, compatible with X). The default is zero.
+    if verbose:
+        print 'Switching to X compatibility'
+    imgfile.ttob (1)
+
+    if verbose:
+        print 'Filtering with "triangle"'
+    simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'triangle', 3.0)
+    if verbose:
+        print 'Switching back to SGI compatibility'
+    imgfile.ttob (0)
+
+    if verbose: print 'Filtering with "quadratic"'
+    simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'quadratic')
+    if verbose: print 'Filtering with "gaussian"'
+    simage = imgfile.readscaled (name, sizes[0]/2, sizes[1]/2, 'gaussian', 1.0)
+
+    if verbose:
+        print 'Writing output file'
+    imgfile.write (outputfile, simage, sizes[0]/2, sizes[1]/2, sizes[2])
+
+    os.unlink(outputfile)
+
+main()
diff --git a/lib-python/2.2/test/test_import.py b/lib-python/2.2/test/test_import.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_import.py
@@ -0,0 +1,71 @@
+from test_support import TESTFN, TestFailed
+
+import os
+import random
+import sys
+
+# Brief digression to test that import is case-sensitive:  if we got this
+# far, we know for sure that "random" exists.
+try:
+    import RAnDoM
+except ImportError:
+    pass
+else:
+    raise TestFailed("import of RAnDoM should have failed (case mismatch)")
+
+# Another brief digression to test the accuracy of manifest float constants.
+import double_const  # don't blink -- that *was* the test
+
+def test_with_extension(ext): # ext normally ".py"; perhaps ".pyw"
+    source = TESTFN + ext
+    pyo = TESTFN + os.extsep + "pyo"
+    if sys.platform.startswith('java'):
+        pyc = TESTFN + "$py.class"
+    else:
+        pyc = TESTFN + os.extsep + "pyc"
+
+    f = open(source, "w")
+    print >> f, "# This tests Python's ability to import a", ext, "file."
+    a = random.randrange(1000)
+    b = random.randrange(1000)
+    print >> f, "a =", a
+    print >> f, "b =", b
+    f.close()
+
+    try:
+        try:
+            mod = __import__(TESTFN)
+        except ImportError, err:
+            raise ValueError("import from %s failed: %s" % (ext, err))
+
+        if mod.a != a or mod.b != b:
+            print a, "!=", mod.a
+            print b, "!=", mod.b
+            raise ValueError("module loaded (%s) but contents invalid" % mod)
+    finally:
+        os.unlink(source)
+
+    try:
+        try:
+            reload(mod)
+        except ImportError, err:
+            raise ValueError("import from .pyc/.pyo failed: %s" % err)
+    finally:
+        try:
+            os.unlink(pyc)
+        except os.error:
+            pass
+        try:
+            os.unlink(pyo)
+        except os.error:
+            pass
+        del sys.modules[TESTFN]
+
+sys.path.insert(0, os.curdir)
+try:
+    test_with_extension(os.extsep + "py")
+    if sys.platform.startswith("win"):
+        for ext in ".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw":
+            test_with_extension(ext)
+finally:
+    del sys.path[0]
diff --git a/lib-python/2.2/test/test_inspect.py b/lib-python/2.2/test/test_inspect.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_inspect.py
@@ -0,0 +1,363 @@
+source = '''# line 1
+'A module docstring.'
+
+import sys, inspect
+# line 5
+
+# line 7
+def spam(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
+    eggs(b + d, c + f)
+
+# line 11
+def eggs(x, y):
+    "A docstring."
+    global fr, st
+    fr = inspect.currentframe()
+    st = inspect.stack()
+    p = x
+    q = y / 0
+
+# line 20
+class StupidGit:
+    """A longer,
+
+    indented
+
+    docstring."""
+# line 27
+
+    def abuse(self, a, b, c):
+        """Another
+
+\tdocstring
+
+        containing
+
+\ttabs
+\t
+        """
+        self.argue(a, b, c)
+# line 40
+    def argue(self, a, b, c):
+        try:
+            spam(a, b, c)
+        except:
+            self.ex = sys.exc_info()
+            self.tr = inspect.trace()
+
+# line 48
+class MalodorousPervert(StupidGit):
+    pass
+
+class ParrotDroppings:
+    pass
+
+class FesteringGob(MalodorousPervert, ParrotDroppings):
+    pass
+'''
+
+# Functions tested in this suite:
+# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
+# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
+# getsourcefile, getcomments, getsource, getclasstree, getargspec,
+# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
+
+from test_support import TestFailed, TESTFN
+import sys, imp, os, string
+
+def test(assertion, message, *args):
+    if not assertion:
+        raise TestFailed, message % args
+
+import inspect
+
+file = open(TESTFN, 'w')
+file.write(source)
+file.close()
+
+# Note that load_source creates file TESTFN+'c' or TESTFN+'o'.
+mod = imp.load_source('testmod', TESTFN)
+files_to_clean_up = [TESTFN, TESTFN + 'c', TESTFN + 'o']
+
+def istest(func, exp):
+    obj = eval(exp)
+    test(func(obj), '%s(%s)' % (func.__name__, exp))
+    for other in [inspect.isbuiltin, inspect.isclass, inspect.iscode,
+                  inspect.isframe, inspect.isfunction, inspect.ismethod,
+                  inspect.ismodule, inspect.istraceback]:
+        if other is not func:
+            test(not other(obj), 'not %s(%s)' % (other.__name__, exp))
+
+git = mod.StupidGit()
+try:
+    1/0
+except:
+    tb = sys.exc_traceback
+
+istest(inspect.isbuiltin, 'sys.exit')
+istest(inspect.isbuiltin, '[].append')
+istest(inspect.isclass, 'mod.StupidGit')
+istest(inspect.iscode, 'mod.spam.func_code')
+istest(inspect.isframe, 'tb.tb_frame')
+istest(inspect.isfunction, 'mod.spam')
+istest(inspect.ismethod, 'mod.StupidGit.abuse')
+istest(inspect.ismethod, 'git.argue')
+istest(inspect.ismodule, 'mod')
+istest(inspect.istraceback, 'tb')
+test(inspect.isroutine(mod.spam), 'isroutine(mod.spam)')
+test(inspect.isroutine([].count), 'isroutine([].count)')
+
+classes = inspect.getmembers(mod, inspect.isclass)
+test(classes ==
+     [('FesteringGob', mod.FesteringGob),
+      ('MalodorousPervert', mod.MalodorousPervert),
+      ('ParrotDroppings', mod.ParrotDroppings),
+      ('StupidGit', mod.StupidGit)], 'class list')
+tree = inspect.getclasstree(map(lambda x: x[1], classes), 1)
+test(tree ==
+     [(mod.ParrotDroppings, ()),
+      (mod.StupidGit, ()),
+      [(mod.MalodorousPervert, (mod.StupidGit,)),
+       [(mod.FesteringGob, (mod.MalodorousPervert, mod.ParrotDroppings))
+       ]
+      ]
+     ], 'class tree')
+
+functions = inspect.getmembers(mod, inspect.isfunction)
+test(functions == [('eggs', mod.eggs), ('spam', mod.spam)], 'function list')
+
+test(inspect.getdoc(mod) == 'A module docstring.', 'getdoc(mod)')
+test(inspect.getcomments(mod) == '# line 1\n', 'getcomments(mod)')
+test(inspect.getmodule(mod.StupidGit) == mod, 'getmodule(mod.StupidGit)')
+test(inspect.getfile(mod.StupidGit) == TESTFN, 'getfile(mod.StupidGit)')
+test(inspect.getsourcefile(mod.spam) == TESTFN, 'getsourcefile(mod.spam)')
+test(inspect.getsourcefile(git.abuse) == TESTFN, 'getsourcefile(git.abuse)')
+
+def sourcerange(top, bottom):
+    lines = string.split(source, '\n')
+    return string.join(lines[top-1:bottom], '\n') + '\n'
+
+test(inspect.getsource(git.abuse) == sourcerange(29, 39),
+     'getsource(git.abuse)')
+test(inspect.getsource(mod.StupidGit) == sourcerange(21, 46),
+     'getsource(mod.StupidGit)')
+test(inspect.getdoc(mod.StupidGit) ==
+     'A longer,\n\nindented\n\ndocstring.', 'getdoc(mod.StupidGit)')
+test(inspect.getdoc(git.abuse) ==
+     'Another\n\ndocstring\n\ncontaining\n\ntabs\n\n', 'getdoc(git.abuse)')
+test(inspect.getcomments(mod.StupidGit) == '# line 20\n',
+     'getcomments(mod.StupidGit)')
+
+args, varargs, varkw, defaults = inspect.getargspec(mod.eggs)
+test(args == ['x', 'y'], 'mod.eggs args')
+test(varargs == None, 'mod.eggs varargs')
+test(varkw == None, 'mod.eggs varkw')
+test(defaults == None, 'mod.eggs defaults')
+test(inspect.formatargspec(args, varargs, varkw, defaults) ==
+     '(x, y)', 'mod.eggs formatted argspec')
+args, varargs, varkw, defaults = inspect.getargspec(mod.spam)
+test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.spam args')
+test(varargs == 'g', 'mod.spam varargs')
+test(varkw == 'h', 'mod.spam varkw')
+test(defaults == (3, (4, (5,))), 'mod.spam defaults')
+test(inspect.formatargspec(args, varargs, varkw, defaults) ==
+     '(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)',
+     'mod.spam formatted argspec')
+
+git.abuse(7, 8, 9)
+
+istest(inspect.istraceback, 'git.ex[2]')
+istest(inspect.isframe, 'mod.fr')
+
+test(len(git.tr) == 3, 'trace() length')
+test(git.tr[0][1:] == (TESTFN, 46, 'argue',
+                       ['            self.tr = inspect.trace()\n'], 0),
+     'trace() row 2')
+test(git.tr[1][1:] == (TESTFN, 9, 'spam', ['    eggs(b + d, c + f)\n'], 0),
+     'trace() row 2')
+test(git.tr[2][1:] == (TESTFN, 18, 'eggs', ['    q = y / 0\n'], 0),
+     'trace() row 3')
+
+test(len(mod.st) >= 5, 'stack() length')
+test(mod.st[0][1:] ==
+     (TESTFN, 16, 'eggs', ['    st = inspect.stack()\n'], 0),
+     'stack() row 1')
+test(mod.st[1][1:] ==
+     (TESTFN, 9, 'spam', ['    eggs(b + d, c + f)\n'], 0),
+     'stack() row 2')
+test(mod.st[2][1:] ==
+     (TESTFN, 43, 'argue', ['            spam(a, b, c)\n'], 0),
+     'stack() row 3')
+test(mod.st[3][1:] ==
+     (TESTFN, 39, 'abuse', ['        self.argue(a, b, c)\n'], 0),
+     'stack() row 4')
+
+args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
+test(args == ['x', 'y'], 'mod.fr args')
+test(varargs == None, 'mod.fr varargs')
+test(varkw == None, 'mod.fr varkw')
+test(locals == {'x': 11, 'p': 11, 'y': 14}, 'mod.fr locals')
+test(inspect.formatargvalues(args, varargs, varkw, locals) ==
+     '(x=11, y=14)', 'mod.fr formatted argvalues')
+
+args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
+test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.fr.f_back args')
+test(varargs == 'g', 'mod.fr.f_back varargs')
+test(varkw == 'h', 'mod.fr.f_back varkw')
+test(inspect.formatargvalues(args, varargs, varkw, locals) ==
+     '(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})',
+     'mod.fr.f_back formatted argvalues')
+
+for fname in files_to_clean_up:
+    try:
+        os.unlink(fname)
+    except:
+        pass
+
+# Test classic-class method resolution order.
+class A:    pass
+class B(A): pass
+class C(A): pass
+class D(B, C): pass
+
+expected = (D, B, A, C)
+got = inspect.getmro(D)
+test(expected == got, "expected %r mro, got %r", expected, got)
+
+# The same w/ new-class MRO.
+class A(object):    pass
+class B(A): pass
+class C(A): pass
+class D(B, C): pass
+
+expected = (D, B, C, A, object)
+got = inspect.getmro(D)
+test(expected == got, "expected %r mro, got %r", expected, got)
+
+# Test classify_class_attrs.
+def attrs_wo_objs(cls):
+    return [t[:3] for t in inspect.classify_class_attrs(cls)]
+
+class A:
+    def s(): pass
+    s = staticmethod(s)
+
+    def c(cls): pass
+    c = classmethod(c)
+
+    def getp(self): pass
+    p = property(getp)
+
+    def m(self): pass
+
+    def m1(self): pass
+
+    datablob = '1'
+
+attrs = attrs_wo_objs(A)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'class method', A) in attrs, 'missing class method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', A) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+class B(A):
+    def m(self): pass
+
+attrs = attrs_wo_objs(B)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'class method', A) in attrs, 'missing class method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', B) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+
+class C(A):
+    def m(self): pass
+    def c(self): pass
+
+attrs = attrs_wo_objs(C)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'method', C) in attrs, 'missing plain method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', C) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+class D(B, C):
+    def m1(self): pass
+
+attrs = attrs_wo_objs(D)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'class method', A) in attrs, 'missing class method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', B) in attrs, 'missing plain method')
+test(('m1', 'method', D) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+# Repeat all that, but w/ new-style classes.
+
+class A(object):
+
+    def s(): pass
+    s = staticmethod(s)
+
+    def c(cls): pass
+    c = classmethod(c)
+
+    def getp(self): pass
+    p = property(getp)
+
+    def m(self): pass
+
+    def m1(self): pass
+
+    datablob = '1'
+
+attrs = attrs_wo_objs(A)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'class method', A) in attrs, 'missing class method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', A) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+class B(A):
+
+    def m(self): pass
+
+attrs = attrs_wo_objs(B)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'class method', A) in attrs, 'missing class method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', B) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+
+class C(A):
+
+    def m(self): pass
+    def c(self): pass
+
+attrs = attrs_wo_objs(C)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'method', C) in attrs, 'missing plain method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', C) in attrs, 'missing plain method')
+test(('m1', 'method', A) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
+
+class D(B, C):
+
+    def m1(self): pass
+
+attrs = attrs_wo_objs(D)
+test(('s', 'static method', A) in attrs, 'missing static method')
+test(('c', 'method', C) in attrs, 'missing plain method')
+test(('p', 'property', A) in attrs, 'missing property')
+test(('m', 'method', B) in attrs, 'missing plain method')
+test(('m1', 'method', D) in attrs, 'missing plain method')
+test(('datablob', 'data', A) in attrs, 'missing data')
diff --git a/lib-python/2.2/test/test_iter.py b/lib-python/2.2/test/test_iter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_iter.py
@@ -0,0 +1,779 @@
+# Test iterators.
+
+import unittest
+from test_support import run_unittest, TESTFN, unlink, have_unicode
+
+# Test result of triple loop (too big to inline)
+TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
+            (0, 1, 0), (0, 1, 1), (0, 1, 2),
+            (0, 2, 0), (0, 2, 1), (0, 2, 2),
+
+            (1, 0, 0), (1, 0, 1), (1, 0, 2),
+            (1, 1, 0), (1, 1, 1), (1, 1, 2),
+            (1, 2, 0), (1, 2, 1), (1, 2, 2),
+
+            (2, 0, 0), (2, 0, 1), (2, 0, 2),
+            (2, 1, 0), (2, 1, 1), (2, 1, 2),
+            (2, 2, 0), (2, 2, 1), (2, 2, 2)]
+
+# Helper classes
+
+class BasicIterClass:
+    def __init__(self, n):
+        self.n = n
+        self.i = 0
+    def next(self):
+        res = self.i
+        if res >= self.n:
+            raise StopIteration
+        self.i = res + 1
+        return res
+
+class IteratingSequenceClass:
+    def __init__(self, n):
+        self.n = n
+    def __iter__(self):
+        return BasicIterClass(self.n)
+
+class SequenceClass:
+    def __init__(self, n):
+        self.n = n
+    def __getitem__(self, i):
+        if 0 <= i < self.n:
+            return i
+        else:
+            raise IndexError
+
+# Main test suite
+
+class TestCase(unittest.TestCase):
+
+    # Helper to check that an iterator returns a given sequence
+    def check_iterator(self, it, seq):
+        res = []
+        while 1:
+            try:
+                val = it.next()
+            except StopIteration:
+                break
+            res.append(val)
+        self.assertEqual(res, seq)
+
+    # Helper to check that a for loop generates a given sequence
+    def check_for_loop(self, expr, seq):
+        res = []
+        for val in expr:
+            res.append(val)
+        self.assertEqual(res, seq)
+
+    # Test basic use of iter() function
+    def test_iter_basic(self):
+        self.check_iterator(iter(range(10)), range(10))
+
+    # Test that iter(iter(x)) is the same as iter(x)
+    def test_iter_idempotency(self):
+        seq = range(10)
+        it = iter(seq)
+        it2 = iter(it)
+        self.assert_(it is it2)
+
+    # Test that for loops over iterators work
+    def test_iter_for_loop(self):
+        self.check_for_loop(iter(range(10)), range(10))
+
+    # Test several independent iterators over the same list
+    def test_iter_independence(self):
+        seq = range(3)
+        res = []
+        for i in iter(seq):
+            for j in iter(seq):
+                for k in iter(seq):
+                    res.append((i, j, k))
+        self.assertEqual(res, TRIPLETS)
+
+    # Test triple list comprehension using iterators
+    def test_nested_comprehensions_iter(self):
+        seq = range(3)
+        res = [(i, j, k)
+               for i in iter(seq) for j in iter(seq) for k in iter(seq)]
+        self.assertEqual(res, TRIPLETS)
+
+    # Test triple list comprehension without iterators
+    def test_nested_comprehensions_for(self):
+        seq = range(3)
+        res = [(i, j, k) for i in seq for j in seq for k in seq]
+        self.assertEqual(res, TRIPLETS)
+
+    # Test a class with __iter__ in a for loop
+    def test_iter_class_for(self):
+        self.check_for_loop(IteratingSequenceClass(10), range(10))
+
+    # Test a class with __iter__ with explicit iter()
+    def test_iter_class_iter(self):
+        self.check_iterator(iter(IteratingSequenceClass(10)), range(10))
+
+    # Test for loop on a sequence class without __iter__
+    def test_seq_class_for(self):
+        self.check_for_loop(SequenceClass(10), range(10))
+
+    # Test iter() on a sequence class without __iter__
+    def test_seq_class_iter(self):
+        self.check_iterator(iter(SequenceClass(10)), range(10))
+
+    # Test two-argument iter() with callable instance
+    def test_iter_callable(self):
+        class C:
+            def __init__(self):
+                self.i = 0
+            def __call__(self):
+                i = self.i
+                self.i = i + 1
+                if i > 100:
+                    raise IndexError # Emergency stop
+                return i
+        self.check_iterator(iter(C(), 10), range(10))
+
+    # Test two-argument iter() with function
+    def test_iter_function(self):
+        def spam(state=[0]):
+            i = state[0]
+            state[0] = i+1
+            return i
+        self.check_iterator(iter(spam, 10), range(10))
+
+    # Test two-argument iter() with function that raises StopIteration
+    def test_iter_function_stop(self):
+        def spam(state=[0]):
+            i = state[0]
+            if i == 10:
+                raise StopIteration
+            state[0] = i+1
+            return i
+        self.check_iterator(iter(spam, 20), range(10))
+
+    # Test exception propagation through function iterator
+    def test_exception_function(self):
+        def spam(state=[0]):
+            i = state[0]
+            state[0] = i+1
+            if i == 10:
+                raise RuntimeError
+            return i
+        res = []
+        try:
+            for x in iter(spam, 20):
+                res.append(x)
+        except RuntimeError:
+            self.assertEqual(res, range(10))
+        else:
+            self.fail("should have raised RuntimeError")
+
+    # Test exception propagation through sequence iterator
+    def test_exception_sequence(self):
+        class MySequenceClass(SequenceClass):
+            def __getitem__(self, i):
+                if i == 10:
+                    raise RuntimeError
+                return SequenceClass.__getitem__(self, i)
+        res = []
+        try:
+            for x in MySequenceClass(20):
+                res.append(x)
+        except RuntimeError:
+            self.assertEqual(res, range(10))
+        else:
+            self.fail("should have raised RuntimeError")
+
+    # Test for StopIteration from __getitem__
+    def test_stop_sequence(self):
+        class MySequenceClass(SequenceClass):
+            def __getitem__(self, i):
+                if i == 10:
+                    raise StopIteration
+                return SequenceClass.__getitem__(self, i)
+        self.check_for_loop(MySequenceClass(20), range(10))
+
+    # Test a big range
+    def test_iter_big_range(self):
+        self.check_for_loop(iter(range(10000)), range(10000))
+
+    # Test an empty list
+    def test_iter_empty(self):
+        self.check_for_loop(iter([]), [])
+
+    # Test a tuple
+    def test_iter_tuple(self):
+        self.check_for_loop(iter((0,1,2,3,4,5,6,7,8,9)), range(10))
+
+    # Test an xrange
+    def test_iter_xrange(self):
+        self.check_for_loop(iter(xrange(10)), range(10))
+
+    # Test a string
+    def test_iter_string(self):
+        self.check_for_loop(iter("abcde"), ["a", "b", "c", "d", "e"])
+
+    # Test a Unicode string
+    if have_unicode:
+        def test_iter_unicode(self):
+            self.check_for_loop(iter(unicode("abcde")),
+                                [unicode("a"), unicode("b"), unicode("c"),
+                                 unicode("d"), unicode("e")])
+
+    # Test a dictionary (iterating a dict yields its keys)
+    def test_iter_dict(self):
+        dict = {}
+        for i in range(10):
+            dict[i] = None
+        self.check_for_loop(dict, dict.keys())
+
+    # Test a file
+    def test_iter_file(self):
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"])
+            self.check_for_loop(f, [])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test list()'s use of iterators.
+    def test_builtin_list(self):
+        self.assertEqual(list(SequenceClass(5)), range(5))
+        self.assertEqual(list(SequenceClass(0)), [])
+        self.assertEqual(list(()), [])
+        self.assertEqual(list(range(10, -1, -1)), range(10, -1, -1))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(list(d), d.keys())
+
+        self.assertRaises(TypeError, list, list)
+        self.assertRaises(TypeError, list, 42)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
+            f.seek(0, 0)
+            self.assertEqual(list(f.xreadlines()),
+                             ["0\n", "1\n", "2\n", "3\n", "4\n"])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test tuple()'s use of iterators.
+    def test_builtin_tuple(self):
+        self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
+        self.assertEqual(tuple(SequenceClass(0)), ())
+        self.assertEqual(tuple([]), ())
+        self.assertEqual(tuple(()), ())
+        self.assertEqual(tuple("abc"), ("a", "b", "c"))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(tuple(d), tuple(d.keys()))
+
+        self.assertRaises(TypeError, tuple, list)
+        self.assertRaises(TypeError, tuple, 42)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(5):
+                f.write("%d\n" % i)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
+            f.seek(0, 0)
+            self.assertEqual(tuple(f.xreadlines()),
+                             ("0\n", "1\n", "2\n", "3\n", "4\n"))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test filter()'s use of iterators.
+    def test_builtin_filter(self):
+        self.assertEqual(filter(None, SequenceClass(5)), range(1, 5))
+        self.assertEqual(filter(None, SequenceClass(0)), [])
+        self.assertEqual(filter(None, ()), ())
+        self.assertEqual(filter(None, "abc"), "abc")
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(filter(None, d), d.keys())
+
+        self.assertRaises(TypeError, filter, None, list)
+        self.assertRaises(TypeError, filter, None, 42)
+
+        class Boolean:
+            def __init__(self, truth):
+                self.truth = truth
+            def __nonzero__(self):
+                return self.truth
+        True = Boolean(1)
+        False = Boolean(0)
+
+        class Seq:
+            def __init__(self, *args):
+                self.vals = args
+            def __iter__(self):
+                class SeqIter:
+                    def __init__(self, vals):
+                        self.vals = vals
+                        self.i = 0
+                    def __iter__(self):
+                        return self
+                    def next(self):
+                        i = self.i
+                        self.i = i + 1
+                        if i < len(self.vals):
+                            return self.vals[i]
+                        else:
+                            raise StopIteration
+                return SeqIter(self.vals)
+
+        seq = Seq(*([True, False] * 25))
+        self.assertEqual(filter(lambda x: not x, seq), [False]*25)
+        self.assertEqual(filter(lambda x: not x, iter(seq)), [False]*25)
+
+    # Test max() and min()'s use of iterators.
+    def test_builtin_max_min(self):
+        self.assertEqual(max(SequenceClass(5)), 4)
+        self.assertEqual(min(SequenceClass(5)), 0)
+        self.assertEqual(max(8, -1), 8)
+        self.assertEqual(min(8, -1), -1)
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(max(d), "two")
+        self.assertEqual(min(d), "one")
+        self.assertEqual(max(d.itervalues()), 3)
+        self.assertEqual(min(iter(d.itervalues())), 1)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("medium line\n")
+            f.write("xtra large line\n")
+            f.write("itty-bitty line\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(min(f), "itty-bitty line\n")
+            f.seek(0, 0)
+            self.assertEqual(max(f), "xtra large line\n")
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test map()'s use of iterators.
+    def test_builtin_map(self):
+        self.assertEqual(map(None, SequenceClass(5)), range(5))
+        self.assertEqual(map(lambda x: x+1, SequenceClass(5)), range(1, 6))
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(map(None, d), d.keys())
+        self.assertEqual(map(lambda k, d=d: (k, d[k]), d), d.items())
+        dkeys = d.keys()
+        expected = [(i < len(d) and dkeys[i] or None,
+                     i,
+                     i < len(d) and dkeys[i] or None)
+                    for i in range(5)]
+        self.assertEqual(map(None, d,
+                                   SequenceClass(5),
+                                   iter(d.iterkeys())),
+                         expected)
+
+        f = open(TESTFN, "w")
+        try:
+            for i in range(10):
+                f.write("xy" * i + "\n") # line i has len 2*i+1
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(map(len, f), range(1, 21, 2))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test zip()'s use of iterators.
+    def test_builtin_zip(self):
+        self.assertRaises(TypeError, zip)
+        self.assertRaises(TypeError, zip, None)
+        self.assertRaises(TypeError, zip, range(10), 42)
+        self.assertRaises(TypeError, zip, range(10), zip)
+
+        self.assertEqual(zip(IteratingSequenceClass(3)),
+                         [(0,), (1,), (2,)])
+        self.assertEqual(zip(SequenceClass(3)),
+                         [(0,), (1,), (2,)])
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(d.items(), zip(d, d.itervalues()))
+
+        # Generate all ints starting at constructor arg.
+        class IntsFrom:
+            def __init__(self, start):
+                self.i = start
+
+            def __iter__(self):
+                return self
+
+            def next(self):
+                i = self.i
+                self.i = i+1
+                return i
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "bbb\n" "cc\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            self.assertEqual(zip(IntsFrom(0), f, IntsFrom(-100)),
+                             [(0, "a\n", -100),
+                              (1, "bbb\n", -99),
+                              (2, "cc\n", -98)])
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test reduce()'s use of iterators.
+    def test_builtin_reduce(self):
+        from operator import add
+        self.assertEqual(reduce(add, SequenceClass(5)), 10)
+        self.assertEqual(reduce(add, SequenceClass(5), 42), 52)
+        self.assertRaises(TypeError, reduce, add, SequenceClass(0))
+        self.assertEqual(reduce(add, SequenceClass(0), 42), 42)
+        self.assertEqual(reduce(add, SequenceClass(1)), 0)
+        self.assertEqual(reduce(add, SequenceClass(1), 42), 42)
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(reduce(add, d), "".join(d.keys()))
+
+    # This test case will be removed if we don't have Unicode
+    def test_unicode_join_endcase(self):
+
+        # This class inserts a Unicode object into its argument's natural
+        # iteration, in the 3rd position.
+        class OhPhooey:
+            def __init__(self, seq):
+                self.it = iter(seq)
+                self.i = 0
+
+            def __iter__(self):
+                return self
+
+            def next(self):
+                i = self.i
+                self.i = i+1
+                if i == 2:
+                    return unicode("fooled you!")
+                return self.it.next()
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" + "b\n" + "c\n")
+        finally:
+            f.close()
+
+        f = open(TESTFN, "r")
+        # Nasty:  string.join(s) can't know whether unicode.join() is needed
+        # until it's seen all of s's elements.  But in this case, f's
+        # iterator cannot be restarted.  So what we're testing here is
+        # whether string.join() can manage to remember everything it's seen
+        # and pass that on to unicode.join().
+        try:
+            got = " - ".join(OhPhooey(f))
+            self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n"))
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+    if not have_unicode:
+        def test_unicode_join_endcase(self): pass
+
+    # Test iterators with 'x in y' and 'x not in y'.
+    def test_in_and_not_in(self):
+        for sc5 in IteratingSequenceClass(5), SequenceClass(5):
+            for i in range(5):
+                self.assert_(i in sc5)
+            for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5:
+                self.assert_(i not in sc5)
+
+        self.assertRaises(TypeError, lambda: 3 in 12)
+        self.assertRaises(TypeError, lambda: 3 not in map)
+
+        d = {"one": 1, "two": 2, "three": 3, 1j: 2j}
+        for k in d:
+            self.assert_(k in d)
+            self.assert_(k not in d.itervalues())
+        for v in d.values():
+            self.assert_(v in d.itervalues())
+            self.assert_(v not in d)
+        for k, v in d.iteritems():
+            self.assert_((k, v) in d.iteritems())
+            self.assert_((v, k) not in d.iteritems())
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            for chunk in "abc":
+                f.seek(0, 0)
+                self.assert_(chunk not in f)
+                f.seek(0, 0)
+                self.assert_((chunk + "\n") in f)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test iterators with operator.countOf (PySequence_Count).
+    def test_countOf(self):
+        from operator import countOf
+        self.assertEqual(countOf([1,2,2,3,2,5], 2), 3)
+        self.assertEqual(countOf((1,2,2,3,2,5), 2), 3)
+        self.assertEqual(countOf("122325", "2"), 3)
+        self.assertEqual(countOf("122325", "6"), 0)
+
+        self.assertRaises(TypeError, countOf, 42, 1)
+        self.assertRaises(TypeError, countOf, countOf, countOf)
+
+        d = {"one": 3, "two": 3, "three": 3, 1j: 2j}
+        for k in d:
+            self.assertEqual(countOf(d, k), 1)
+        self.assertEqual(countOf(d.itervalues(), 3), 3)
+        self.assertEqual(countOf(d.itervalues(), 2j), 1)
+        self.assertEqual(countOf(d.itervalues(), 1j), 0)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n" "b\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0):
+                f.seek(0, 0)
+                self.assertEqual(countOf(f, letter + "\n"), count)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+    # Test iterators with operator.indexOf (PySequence_Index).
+    def test_indexOf(self):
+        from operator import indexOf
+        self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3)
+        self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5)
+        self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0)
+        self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6)
+
+        self.assertEqual(indexOf("122325", "2"), 1)
+        self.assertEqual(indexOf("122325", "5"), 5)
+        self.assertRaises(ValueError, indexOf, "122325", "6")
+
+        self.assertRaises(TypeError, indexOf, 42, 1)
+        self.assertRaises(TypeError, indexOf, indexOf, indexOf)
+
+        f = open(TESTFN, "w")
+        try:
+            f.write("a\n" "b\n" "c\n" "d\n" "e\n")
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            fiter = iter(f)
+            self.assertEqual(indexOf(fiter, "b\n"), 1)
+            self.assertEqual(indexOf(fiter, "d\n"), 1)
+            self.assertEqual(indexOf(fiter, "e\n"), 0)
+            self.assertRaises(ValueError, indexOf, fiter, "a\n")
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+        iclass = IteratingSequenceClass(3)
+        for i in range(3):
+            self.assertEqual(indexOf(iclass, i), i)
+        self.assertRaises(ValueError, indexOf, iclass, -1)
+
+    # Test iterators with file.writelines().
+    def test_writelines(self):
+        f = file(TESTFN, "w")
+
+        try:
+            self.assertRaises(TypeError, f.writelines, None)
+            self.assertRaises(TypeError, f.writelines, 42)
+
+            f.writelines(["1\n", "2\n"])
+            f.writelines(("3\n", "4\n"))
+            f.writelines({'5\n': None})
+            f.writelines({})
+
+            # Try a big chunk too.
+            class Iterator:
+                def __init__(self, start, finish):
+                    self.start = start
+                    self.finish = finish
+                    self.i = self.start
+
+                def next(self):
+                    if self.i >= self.finish:
+                        raise StopIteration
+                    result = str(self.i) + '\n'
+                    self.i += 1
+                    return result
+
+                def __iter__(self):
+                    return self
+
+            class Whatever:
+                def __init__(self, start, finish):
+                    self.start = start
+                    self.finish = finish
+
+                def __iter__(self):
+                    return Iterator(self.start, self.finish)
+
+            f.writelines(Whatever(6, 6+2000))
+            f.close()
+
+            f = file(TESTFN)
+            expected = [str(i) + "\n" for i in range(1, 2006)]
+            self.assertEqual(list(f), expected)
+
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+
+    # Test iterators on RHS of unpacking assignments.
+    def test_unpack_iter(self):
+        a, b = 1, 2
+        self.assertEqual((a, b), (1, 2))
+
+        a, b, c = IteratingSequenceClass(3)
+        self.assertEqual((a, b, c), (0, 1, 2))
+
+        try:    # too many values
+            a, b = IteratingSequenceClass(3)
+        except ValueError:
+            pass
+        else:
+            self.fail("should have raised ValueError")
+
+        try:    # not enough values
+            a, b, c = IteratingSequenceClass(2)
+        except ValueError:
+            pass
+        else:
+            self.fail("should have raised ValueError")
+
+        try:    # not iterable
+            a, b, c = len
+        except TypeError:
+            pass
+        else:
+            self.fail("should have raised TypeError")
+
+        a, b, c = {1: 42, 2: 42, 3: 42}.itervalues()
+        self.assertEqual((a, b, c), (42, 42, 42))
+
+        f = open(TESTFN, "w")
+        lines = ("a\n", "bb\n", "ccc\n")
+        try:
+            for line in lines:
+                f.write(line)
+        finally:
+            f.close()
+        f = open(TESTFN, "r")
+        try:
+            a, b, c = f
+            self.assertEqual((a, b, c), lines)
+        finally:
+            f.close()
+            try:
+                unlink(TESTFN)
+            except OSError:
+                pass
+
+        (a, b), (c,) = IteratingSequenceClass(2), {42: 24}
+        self.assertEqual((a, b, c), (0, 1, 42))
+
+        # Test reference count behavior
+
+        class C(object):
+            count = 0
+            def __new__(cls):
+                cls.count += 1
+                return object.__new__(cls)
+            def __del__(self):
+                cls = self.__class__
+                assert cls.count > 0
+                cls.count -= 1
+        x = C()
+        self.assertEqual(C.count, 1)
+        del x
+        self.assertEqual(C.count, 0)
+        l = [C(), C(), C()]
+        self.assertEqual(C.count, 3)
+        try:
+            a, b = iter(l)
+        except ValueError:
+            pass
+        del l
+        self.assertEqual(C.count, 0)
+
+def test_main():
+    run_unittest(TestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_largefile.py b/lib-python/2.2/test/test_largefile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_largefile.py
@@ -0,0 +1,162 @@
+#!python
+
+#----------------------------------------------------------------------
+# test largefile support on system where this makes sense
+#
+#----------------------------------------------------------------------
+
+import test_support
+import os, struct, stat, sys
+
+try:
+    import signal
+    # The default handler for SIGXFSZ is to abort the process.
+    # By ignoring it, system calls exceeding the file size resource
+    # limit will raise IOError instead of crashing the interpreter.
+    oldhandler = signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
+except (ImportError, AttributeError):
+    pass
+
+
+# create >2GB file (2GB = 2147483648 bytes)
+size = 2500000000L
+name = test_support.TESTFN
+
+
+# On Windows and Mac OSX this test consumes large resources; it takes
+# a long time to build the >2GB file and uses >2GB of disk space,
+# so the resource must be enabled to run this test.  If not,
+# nothing after this stanza will be executed.
+if sys.platform[:3] == 'win' or sys.platform == 'darwin':
+    test_support.requires(
+        'largefile',
+        'test requires %s bytes and a long time to run' % str(size))
+else:
+    # Only run if the current filesystem supports large files.
+    # (Skip this test on Windows, since we now always support large files.)
+    f = open(test_support.TESTFN, 'wb')
+    try:
+        # 2**31 == 2147483648
+        f.seek(2147483649L)
+        # Seeking is not enough of a test: you must write and flush, too!
+        f.write("x")
+        f.flush()
+    except (IOError, OverflowError):
+        f.close()
+        os.unlink(test_support.TESTFN)
+        raise test_support.TestSkipped, \
+              "filesystem does not have largefile support"
+    else:
+        f.close()
+
+
+def expect(got_this, expect_this):
+    if test_support.verbose:
+        print '%r =?= %r ...' % (got_this, expect_this),
+    if got_this != expect_this:
+        if test_support.verbose:
+            print 'no'
+        raise test_support.TestFailed, 'got %r, but expected %r' %\
+              (got_this, expect_this)
+    else:
+        if test_support.verbose:
+            print 'yes'
+
+
+# Test that each file function works as expected for a large (i.e. >2GB --
+# should we also have to check >4GB?) file.
+
+if test_support.verbose:
+    print 'create large file via seek (may be sparse file) ...'
+f = open(name, 'wb')
+f.write('z')
+f.seek(0)
+f.seek(size)
+f.write('a')
+f.flush()
+if test_support.verbose:
+    print 'check file size with os.fstat'
+expect(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
+f.close()
+if test_support.verbose:
+    print 'check file size with os.stat'
+expect(os.stat(name)[stat.ST_SIZE], size+1)
+
+if test_support.verbose:
+    print 'play around with seek() and read() with the built largefile'
+f = open(name, 'rb')
+expect(f.tell(), 0)
+expect(f.read(1), 'z')
+expect(f.tell(), 1)
+f.seek(0)
+expect(f.tell(), 0)
+f.seek(0, 0)
+expect(f.tell(), 0)
+f.seek(42)
+expect(f.tell(), 42)
+f.seek(42, 0)
+expect(f.tell(), 42)
+f.seek(42, 1)
+expect(f.tell(), 84)
+f.seek(0, 1)
+expect(f.tell(), 84)
+f.seek(0, 2) # seek from the end
+expect(f.tell(), size + 1 + 0)
+f.seek(-10, 2)
+expect(f.tell(), size + 1 - 10)
+f.seek(-size-1, 2)
+expect(f.tell(), 0)
+f.seek(size)
+expect(f.tell(), size)
+expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
+f.seek(-size-1, 1)
+expect(f.read(1), 'z')
+expect(f.tell(), 1)
+f.close()
+
+if test_support.verbose:
+    print 'play around with os.lseek() with the built largefile'
+f = open(name, 'rb')
+expect(os.lseek(f.fileno(), 0, 0), 0)
+expect(os.lseek(f.fileno(), 42, 0), 42)
+expect(os.lseek(f.fileno(), 42, 1), 84)
+expect(os.lseek(f.fileno(), 0, 1), 84)
+expect(os.lseek(f.fileno(), 0, 2), size+1+0)
+expect(os.lseek(f.fileno(), -10, 2), size+1-10)
+expect(os.lseek(f.fileno(), -size-1, 2), 0)
+expect(os.lseek(f.fileno(), size, 0), size)
+expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
+f.close()
+
+if hasattr(f, 'truncate'):
+    if test_support.verbose:
+        print 'try truncate'
+    f = open(name, 'r+b')
+    f.seek(0, 2)
+    expect(f.tell(), size+1)    # else we've lost track of the true size
+    # Cut it back via seek + truncate with no argument.
+    newsize = size - 10
+    f.seek(newsize)
+    f.truncate()
+    expect(f.tell(), newsize)   # else pointer moved
+    f.seek(0, 2)
+    expect(f.tell(), newsize)   # else wasn't truncated
+    # Ensure that truncate(smaller than true size) shrinks the file.
+    newsize -= 1
+    f.seek(42)
+    f.truncate(newsize)
+    expect(f.tell(), 42)        # else pointer moved
+    f.seek(0, 2)
+    expect(f.tell(), newsize)   # else wasn't truncated
+
+    # XXX truncate(larger than true size) is ill-defined across platforms
+
+    # cut it waaaaay back
+    f.seek(0)
+    f.truncate(1)
+    expect(f.tell(), 0)         # else pointer moved
+    expect(len(f.read()), 1)    # else wasn't truncated
+
+    f.close()
+
+os.unlink(name)
diff --git a/lib-python/2.2/test/test_linuxaudiodev.py b/lib-python/2.2/test/test_linuxaudiodev.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_linuxaudiodev.py
@@ -0,0 +1,89 @@
+from test_support import verbose, findfile, TestFailed, TestSkipped
+
+import errno
+import fcntl
+import linuxaudiodev
+import os
+import sys
+import select
+import sunaudio
+import time
+import audioop
+
+SND_FORMAT_MULAW_8 = 1
+
+def play_sound_file(path):
+    fp = open(path, 'r')
+    size, enc, rate, nchannels, extra = sunaudio.gethdr(fp)
+    data = fp.read()
+    fp.close()
+
+    if enc != SND_FORMAT_MULAW_8:
+        print "Expect .au file with 8-bit mu-law samples"
+        return
+
+    try:
+        a = linuxaudiodev.open('w')
+    except linuxaudiodev.error, msg:
+        if msg[0] in (errno.EACCES, errno.ENODEV, errno.EBUSY):
+            raise TestSkipped, msg
+        raise TestFailed, msg
+
+    # convert the data to 16-bit signed
+    data = audioop.ulaw2lin(data, 2)
+
+    # set the data format
+    if sys.byteorder == 'little':
+        fmt = linuxaudiodev.AFMT_S16_LE
+    else:
+        fmt = linuxaudiodev.AFMT_S16_BE
+
+    # at least check that these methods can be invoked
+    a.bufsize()
+    a.obufcount()
+    a.obuffree()
+    a.getptr()
+    a.fileno()
+
+    # set parameters based on .au file headers
+    a.setparameters(rate, 16, nchannels, fmt)
+    a.write(data)
+    a.flush()
+    a.close()
+
+def test_errors():
+    a = linuxaudiodev.open("w")
+    size = 8
+    fmt = linuxaudiodev.AFMT_U8
+    rate = 8000
+    nchannels = 1
+    try:
+        a.setparameters(-1, size, nchannels, fmt)
+    except ValueError, msg:
+        print msg
+    try:
+        a.setparameters(rate, -2, nchannels, fmt)
+    except ValueError, msg:
+        print msg
+    try:
+        a.setparameters(rate, size, 3, fmt)
+    except ValueError, msg:
+        print msg
+    try:
+        a.setparameters(rate, size, nchannels, 177)
+    except ValueError, msg:
+        print msg
+    try:
+        a.setparameters(rate, size, nchannels, linuxaudiodev.AFMT_U16_LE)
+    except ValueError, msg:
+        print msg
+    try:
+        a.setparameters(rate, 16, nchannels, fmt)
+    except ValueError, msg:
+        print msg
+
+def test():
+    play_sound_file(findfile('audiotest.au'))
+    test_errors()
+
+test()
diff --git a/lib-python/2.2/test/test_locale.py b/lib-python/2.2/test/test_locale.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_locale.py
@@ -0,0 +1,44 @@
from test_support import verbose, TestSkipped
import locale
import sys

if sys.platform == 'darwin':
    raise TestSkipped("Locale support on MacOSX is minimal and cannot be tested")
# Remember the caller's numeric locale so the finally clause at the
# bottom of this script can restore it.
oldlocale = locale.setlocale(locale.LC_NUMERIC)

# Windows spells the US-English locale differently.
tloc = "en_US"
if sys.platform[:3] == "win":
    tloc = "en"

try:
    locale.setlocale(locale.LC_NUMERIC, tloc)
except locale.Error:
    # Raising ImportError makes regrtest report this test as skipped
    # rather than failed when the locale is unavailable.
    raise ImportError, "test locale %s not supported" % tloc
+
def testformat(formatstr, value, grouping = 0, output=None):
    """Check locale.format(formatstr, value) against `output`.

    When `output` is None the call is only checked not to blow up.
    In verbose mode progress is printed (trailing commas suppress the
    newline so the yes/no verdict lands on the same line).
    """
    if verbose:
        if output:
            print "%s %% %s =? %s ..." %\
                (repr(formatstr), repr(value), repr(output)),
        else:
            print "%s %% %s works? ..." % (repr(formatstr), repr(value)),
    result = locale.format(formatstr, value, grouping = grouping)
    if output and result != output:
        if verbose:
            print 'no'
        print "%s %% %s == %s != %s" %\
              (repr(formatstr), repr(value), repr(result), repr(output))
    else:
        if verbose:
            print "yes"
+
# Grouped formatting in the en_US locale: thousands separated by commas.
try:
    testformat("%f", 1024, grouping=1, output='1,024.000000')
    testformat("%f", 102, grouping=1, output='102.000000')
    testformat("%f", -42, grouping=1, output='-42.000000')
    testformat("%+f", -42, grouping=1, output='-42.000000')
    testformat("%20.f", -42, grouping=1, output='                 -42')
    testformat("%+10.f", -4200, grouping=1, output='    -4,200')
    testformat("%-10.f", 4200, grouping=1, output='4,200     ')
finally:
    # Always restore the locale saved at the top of the script.
    locale.setlocale(locale.LC_NUMERIC, oldlocale)
diff --git a/lib-python/2.2/test/test_long.py b/lib-python/2.2/test/test_long.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_long.py
@@ -0,0 +1,410 @@
from test_support import verify, verbose, TestFailed, fcmp
from string import join
from random import random, randint

# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = 15
BASE = 2 ** SHIFT
MASK = BASE - 1

# Max number of base BASE digits to use in test cases.  Doubling
# this will at least quadruple the runtime.
MAXDIGITS = 10

# build some special values
special = map(long, [0, 1, 2, BASE, BASE >> 1])
# Alternating 01/10 bit patterns, 64 bits wide.
special.append(0x5555555555555555L)
special.append(0xaaaaaaaaaaaaaaaaL)
#  some solid strings of one bits
p2 = 4L  # 0 and 1 already added
for i in range(2*SHIFT):
    special.append(p2 - 1)
    p2 = p2 << 1
del p2
# add complements & negations
special = special + map(lambda x: ~x, special) + \
                    map(lambda x: -x, special)
+
+# ------------------------------------------------------------ utilities
+
+# Use check instead of assert so the test still does something
+# under -O.
+
+def check(ok, *args):
+    if not ok:
+        raise TestFailed, join(map(str, args), " ")
+
+# Get quasi-random long consisting of ndigits digits (in base BASE).
+# quasi == the most-significant digit will not be 0, and the number
+# is constructed to contain long strings of 0 and 1 bits.  These are
+# more likely than random bits to provoke digit-boundary errors.
+# The sign of the number is also random.
+
def getran(ndigits):
    """Return a quasi-random long with ndigits base-BASE digits.

    The number is built from long runs of all-0 or all-1 bits (more
    likely than uniform random bits to provoke digit-boundary errors),
    its most significant digit is nonzero, and its sign is random.
    """
    verify(ndigits > 0)
    nbits_hi = ndigits * SHIFT
    nbits_lo = nbits_hi - SHIFT + 1
    answer = 0L
    nbits = 0
    r = int(random() * (SHIFT * 2)) | 1  # force 1 bits to start
    while nbits < nbits_lo:
        # Append a run of (r >> 1) + 1 identical bits; r's low bit
        # decides whether the run is ones or zeros.
        bits = (r >> 1) + 1
        bits = min(bits, nbits_hi - nbits)
        verify(1 <= bits <= SHIFT)
        nbits = nbits + bits
        answer = answer << bits
        if r & 1:
            answer = answer | ((1 << bits) - 1)
        r = int(random() * (SHIFT * 2))
    verify(nbits_lo <= nbits <= nbits_hi)
    if random() < 0.5:
        answer = -answer
    return answer
+
+# Get random long consisting of ndigits random digits (relative to base
+# BASE).  The sign bit is also random.
+
+def getran2(ndigits):
+    answer = 0L
+    for i in range(ndigits):
+        answer = (answer << SHIFT) | randint(0, MASK)
+    if random() < 0.5:
+        answer = -answer
+    return answer
+
+# --------------------------------------------------------------- divmod
+
def test_division_2(x, y):
    """Cross-check divmod against //, %, * for one (x, y) pair."""
    q, r = divmod(x, y)
    q2, r2 = x//y, x%y
    pab, pba = x*y, y*x
    check(pab == pba, "multiplication does not commute for", x, y)
    check(q == q2, "divmod returns different quotient than / for", x, y)
    check(r == r2, "divmod returns different mod than % for", x, y)
    check(x == q*y + r, "x != q*y + r after divmod on", x, y)
    # The remainder must take the sign of the divisor.
    if y > 0:
        check(0 <= r < y, "bad mod from divmod on", x, y)
    else:
        check(y < r <= 0, "bad mod from divmod on", x, y)
+
+def test_division(maxdigits=MAXDIGITS):
+    if verbose:
+        print "long / * % divmod"
+    digits = range(1, maxdigits+1)
+    for lenx in digits:
+        x = getran(lenx)
+        for leny in digits:
+            y = getran(leny) or 1L
+            test_division_2(x, y)
+
+# -------------------------------------------------------------- ~ & | ^
+
def test_bitop_identities_1(x):
    """Single-operand identities for ~, &, |, ^, shifts on x."""
    check(x & 0 == 0, "x & 0 != 0 for", x)
    check(x | 0 == x, "x | 0 != x for", x)
    check(x ^ 0 == x, "x ^ 0 != x for", x)
    check(x & -1 == x, "x & -1 != x for", x)
    check(x | -1 == -1, "x | -1 != -1 for", x)
    check(x ^ -1 == ~x, "x ^ -1 != ~x for", x)
    check(x == ~~x, "x != ~~x for", x)
    check(x & x == x, "x & x != x for", x)
    check(x | x == x, "x | x != x for", x)
    check(x ^ x == 0, "x ^ x != 0 for", x)
    check(x & ~x == 0, "x & ~x != 0 for", x)
    check(x | ~x == -1, "x | ~x != -1 for", x)
    check(x ^ ~x == -1, "x ^ ~x != -1 for", x)
    check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x)
    # Relate shifts to multiplication/division by powers of two.
    for n in range(2*SHIFT):
        p2 = 2L ** n
        check(x << n >> n == x, "x << n >> n != x for", x, n)
        check(x // p2 == x >> n, "x // p2 != x >> n for x n p2", x, n, p2)
        check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2)
        check(x & -p2 == x >> n << n == x & ~(p2 - 1),
            "not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2",
            x, n, p2)
+
def test_bitop_identities_2(x, y):
    """Two-operand identities: commutativity and De Morgan forms."""
    # NOTE(review): the last two failure messages say "==" where "!="
    # is meant; left as-is since messages are runtime output.
    check(x & y == y & x, "x & y != y & x for", x, y)
    check(x | y == y | x, "x | y != y | x for", x, y)
    check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y)
    check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y)
    check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y)
    check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y)
    check(x ^ y == (x | y) & ~(x & y),
         "x ^ y != (x | y) & ~(x & y) for", x, y)
    check(x ^ y == (x & ~y) | (~x & y),
         "x ^ y == (x & ~y) | (~x & y) for", x, y)
    check(x ^ y == (x | y) & (~x | ~y),
         "x ^ y == (x | y) & (~x | ~y) for", x, y)
+
def test_bitop_identities_3(x, y, z):
    """Three-operand identities: associativity and distributivity."""
    check((x & y) & z == x & (y & z),
         "(x & y) & z != x & (y & z) for", x, y, z)
    check((x | y) | z == x | (y | z),
         "(x | y) | z != x | (y | z) for", x, y, z)
    check((x ^ y) ^ z == x ^ (y ^ z),
         "(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z)
    check(x & (y | z) == (x & y) | (x & z),
         "x & (y | z) != (x & y) | (x & z) for", x, y, z)
    check(x | (y & z) == (x | y) & (x | z),
         "x | (y & z) != (x | y) & (x | z) for", x, y, z)
+
def test_bitop_identities(maxdigits=MAXDIGITS):
    """Drive the three identity checkers over special and random longs."""
    if verbose:
        print "long bit-operation identities"
    for x in special:
        test_bitop_identities_1(x)
    digits = range(1, maxdigits+1)
    for lenx in digits:
        x = getran(lenx)
        test_bitop_identities_1(x)
        for leny in digits:
            y = getran(leny)
            test_bitop_identities_2(x, y)
            # Third operand sized between the other two.
            test_bitop_identities_3(x, y, getran((lenx + leny)//2))
+
+# ------------------------------------------------- hex oct repr str atol
+
def slow_format(x, base):
    """Format long x in base 8, 10 or 16 by repeated divmod.

    Independent (slow but obviously correct) reference implementation
    for checking oct()/repr()/hex() output, trailing "L" included.
    """
    if (x, base) == (0, 8):
        # this is an oddball!
        return "0L"
    digits = []
    sign = 0
    if x < 0:
        sign, x = 1, -x
    while x:
        x, r = divmod(x, base)
        digits.append(int(r))
    digits.reverse()
    digits = digits or [0]
    # '-'[:sign] is "-" for negatives, "" otherwise.
    return '-'[:sign] + \
           {8: '0', 10: '', 16: '0x'}[base] + \
           join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \
           "L"
+
def test_format_1(x):
    """Check oct/repr/hex/str of x against slow_format, and that
    string.atol parses each formatted result back to x."""
    from string import atol
    for base, mapper in (8, oct), (10, repr), (16, hex):
        got = mapper(x)
        expected = slow_format(x, base)
        check(got == expected, mapper.__name__, "returned",
              got, "but expected", expected, "for", x)
        # atol with base 0 infers the base from the 0 / 0x prefix.
        check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x)
    # str() has to be checked a little differently since there's no
    # trailing "L"
    got = str(x)
    expected = slow_format(x, 10)[:-1]
    # Bug fix: the failure message previously reused `mapper` left over
    # from the loop (always hex), misreporting str() failures.
    check(got == expected, "str", "returned",
          got, "but expected", expected, "for", x)
+
def test_format(maxdigits=MAXDIGITS):
    """Run the formatting checks over special and random longs."""
    if verbose:
        print "long str/hex/oct/atol"
    for x in special:
        test_format_1(x)
    # Ten random values of every digit length.
    for i in range(10):
        for lenx in range(1, maxdigits+1):
            x = getran(lenx)
            test_format_1(x)
+
+# ----------------------------------------------------------------- misc
+
def test_misc(maxdigits=MAXDIGITS):
    """Check int<->long conversion at the sys.maxint boundaries."""
    if verbose:
        print "long miscellaneous operations"
    import sys

    # check the extremes in int<->long conversion
    hugepos = sys.maxint
    hugeneg = -hugepos - 1
    hugepos_aslong = long(hugepos)
    hugeneg_aslong = long(hugeneg)
    check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint")
    check(hugeneg == hugeneg_aslong,
        "long(-sys.maxint-1) != -sys.maxint-1")

    # long -> int should not fail for hugepos_aslong or hugeneg_aslong
    try:
        check(int(hugepos_aslong) == hugepos,
              "converting sys.maxint to long and back to int fails")
    except OverflowError:
        raise TestFailed, "int(long(sys.maxint)) overflowed!"
    try:
        check(int(hugeneg_aslong) == hugeneg,
              "converting -sys.maxint-1 to long and back to int fails")
    except OverflowError:
        raise TestFailed, "int(long(-sys.maxint-1)) overflowed!"

    # but long -> int should overflow for hugepos+1 and hugeneg-1
    x = hugepos_aslong + 1
    try:
        int(x)
        # Reached only if int(x) did NOT overflow; the ValueError falls
        # through to the bare except below and is reported as a failure.
        raise ValueError
    except OverflowError:
        pass
    except:
        raise TestFailed, "int(long(sys.maxint) + 1) didn't overflow"

    x = hugeneg_aslong - 1
    try:
        int(x)
        raise ValueError  # same trick as above
    except OverflowError:
        pass
    except:
        raise TestFailed, "int(long(-sys.maxint-1) - 1) didn't overflow"
+
+# ----------------------------------- tests of auto int->long conversion
+
+def test_auto_overflow():
+    import math, sys
+
+    if verbose:
+        print "auto-convert int->long on overflow"
+
+    special = [0, 1, 2, 3, sys.maxint-1, sys.maxint, sys.maxint+1]
+    sqrt = int(math.sqrt(sys.maxint))
+    special.extend([sqrt-1, sqrt, sqrt+1])
+    special.extend([-i for i in special])
+
+    def checkit(*args):
+        # Heavy use of nested scopes here!
+        verify(got == expected, "for %r expected %r got %r" %
+                                (args, expected, got))
+
+    for x in special:
+        longx = long(x)
+
+        expected = -longx
+        got = -x
+        checkit('-', x)
+
+        for y in special:
+            longy = long(y)
+
+            expected = longx + longy
+            got = x + y
+            checkit(x, '+', y)
+
+            expected = longx - longy
+            got = x - y
+            checkit(x, '-', y)
+
+            expected = longx * longy
+            got = x * y
+            checkit(x, '*', y)
+
+            if y:
+                expected = longx / longy
+                got = x / y
+                checkit(x, '/', y)
+
+                expected = longx // longy
+                got = x // y
+                checkit(x, '//', y)
+
+                expected = divmod(longx, longy)
+                got = divmod(longx, longy)
+                checkit(x, 'divmod', y)
+
+            if abs(y) < 5 and not (x == 0 and y < 0):
+                expected = longx ** longy
+                got = x ** y
+                checkit(x, '**', y)
+
+                for z in special:
+                    if z != 0 :
+                        if y >= 0:
+                            expected = pow(longx, longy, long(z))
+                            got = pow(x, y, z)
+                            checkit('pow', x, y, '%', z)
+                        else:
+                            try:
+                                pow(longx, longy, long(z))
+                            except TypeError:
+                                pass
+                            else:
+                                raise TestFailed("pow%r should have raised "
+                                "TypeError" % ((longx, longy, long(z))))
+
+# ---------------------------------------- tests of long->float overflow
+
def test_float_overflow():
    """Check that converting astronomically large longs to float (or
    mixing them into float arithmetic) raises OverflowError."""
    import math

    if verbose:
        print "long->float overflow"

    # Small longs must round-trip exactly.
    for x in -2.0, -1.0, 0.0, 1.0, 2.0:
        verify(float(long(x)) == x)

    # shuge is a 5000-digit decimal string; huge is a 30001-bit long.
    shuge = '12345' * 1000
    huge = 1L << 30000
    mhuge = -huge
    namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math}
    for test in ["float(huge)", "float(mhuge)",
                 "complex(huge)", "complex(mhuge)",
                 "complex(huge, 1)", "complex(mhuge, 1)",
                 "complex(1, huge)", "complex(1, mhuge)",
                 "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.",
                 "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.",
                 "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.",
                 "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.",
                 "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.",
                 "1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.",
                 "math.sin(huge)", "math.sin(mhuge)",
                 "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better
                 "math.floor(huge)", "math.floor(mhuge)",
                 "float(shuge) == long(shuge)"]:

        try:
            eval(test, namespace)
        except OverflowError:
            pass
        else:
            raise TestFailed("expected OverflowError from %s" % test)
+
+# ---------------------------------------------- test huge log and log10
+
def test_logs():
    """Check math.log/log10 on huge powers of 10 and on bad inputs."""
    import math

    if verbose:
        print "log and log10"

    LOG10E = math.log10(math.e)

    # Powers of 10 far beyond float range still have computable logs.
    for exp in range(10) + [100, 1000, 10000]:
        value = 10 ** exp
        log10 = math.log10(value)
        verify(fcmp(log10, exp) == 0)

        # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
        # exp/LOG10E
        expected = exp / LOG10E
        log = math.log(value)
        verify(fcmp(log, expected) == 0)

    # Non-positive longs must raise ValueError, however large.
    for bad in -(1L << 10000), -2L, 0L:
        try:
            math.log(bad)
            raise TestFailed("expected ValueError from log(<= 0)")
        except ValueError:
            pass

        try:
            math.log10(bad)
            raise TestFailed("expected ValueError from log10(<= 0)")
        except ValueError:
            pass
+
+# ---------------------------------------------------------------- do it
+
+test_division()
+test_bitop_identities()
+test_format()
+test_misc()
+test_auto_overflow()
+test_float_overflow()
+test_logs()
diff --git a/lib-python/2.2/test/test_long_future.py b/lib-python/2.2/test/test_long_future.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_long_future.py
@@ -0,0 +1,55 @@
+from __future__ import division
+# When true division is the default, get rid of this and add it to
+# test_long.py instead.  In the meantime, it's too obscure to try to
+# trick just part of test_long into using future division.
+
+from test_support import TestFailed, verify, verbose
+
def test_true_division():
    """Check long/long true division (__future__ division is in
    effect for this module): exact results, overflow, underflow and
    division by zero."""
    if verbose:
        print "long true division"
    huge = 1L << 40000
    mhuge = -huge
    verify(huge / huge == 1.0)
    verify(mhuge / mhuge == 1.0)
    verify(huge / mhuge == -1.0)
    verify(mhuge / huge == -1.0)
    verify(1 / huge == 0.0)
    verify(1L / huge == 0.0)
    verify(1 / mhuge == 0.0)
    verify(1L / mhuge == 0.0)
    # Ratios representable as floats must come out exactly.
    verify((666 * huge + (huge >> 1)) / huge == 666.5)
    verify((666 * mhuge + (mhuge >> 1)) / mhuge == 666.5)
    verify((666 * huge + (huge >> 1)) / mhuge == -666.5)
    verify((666 * mhuge + (mhuge >> 1)) / huge == -666.5)
    verify(huge / (huge << 1) == 0.5)
    verify((1000000 * huge) / huge == 1000000)

    namespace = {'huge': huge, 'mhuge': mhuge}

    # Results too large for a float must raise OverflowError.
    for overflow in ["float(huge)", "float(mhuge)",
                     "huge / 1", "huge / 2L", "huge / -1", "huge / -2L",
                     "mhuge / 100", "mhuge / 100L"]:
        try:
            eval(overflow, namespace)
        except OverflowError:
            pass
        else:
            raise TestFailed("expected OverflowError from %r" % overflow)

    # Results too small for a float must underflow to exactly 0.0.
    for underflow in ["1 / huge", "2L / huge", "-1 / huge", "-2L / huge",
                     "100 / mhuge", "100L / mhuge"]:
        result = eval(underflow, namespace)
        if result != 0.0:
            raise TestFailed("expected underflow to 0 from %r" % underflow)

    for zero in ["huge / 0", "huge / 0L",
                 "mhuge / 0", "mhuge / 0L"]:
        try:
            eval(zero, namespace)
        except ZeroDivisionError:
            pass
        else:
            raise TestFailed("expected ZeroDivisionError from %r" % zero)

test_true_division()
diff --git a/lib-python/2.2/test/test_longexp.py b/lib-python/2.2/test/test_longexp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_longexp.py
@@ -0,0 +1,12 @@
import sys
from test_support import TestSkipped

# Number of elements in the eval'd list display; big enough to stress
# the compiler's handling of very long expressions.
REPS = 65580

if sys.platform == 'mac':
    import gestalt
    if gestalt.gestalt('sysv') > 0x9ff:
        raise TestSkipped, 'Triggers pathological malloc slowdown on OSX MacPython'

# Compiling one huge list literal is the whole test.
l = eval("[" + "2," * REPS + "]")
print len(l)
diff --git a/lib-python/2.2/test/test_mailbox.py b/lib-python/2.2/test/test_mailbox.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mailbox.py
@@ -0,0 +1,104 @@
import mailbox
import os
import test_support
import time
import unittest

# cleanup earlier tests
try:
    os.unlink(test_support.TESTFN)
except os.error:
    pass


# Minimal RFC 2822-style message written into every maildir entry.
# Bug fix: the addresses had been mangled to "x at y" (mailing-list
# anti-spam rewriting of the patch); restore real "@" addresses.
DUMMY_MESSAGE = """\
From: some.body@dummy.domain
To: me@my.domain

This is a dummy message.
"""
+
+
class MaildirTestCase(unittest.TestCase):
    """Tests for mailbox.Maildir over a scratch maildir tree."""

    def setUp(self):
        # create a new maildir mailbox to work with:
        self._dir = test_support.TESTFN
        os.mkdir(self._dir)
        os.mkdir(os.path.join(self._dir, "cur"))
        os.mkdir(os.path.join(self._dir, "tmp"))
        os.mkdir(os.path.join(self._dir, "new"))
        self._counter = 1          # stands in for a PID in filenames
        self._msgfiles = []        # every file created, for tearDown

    def tearDown(self):
        # Remove all created messages, then the three subdirs, then
        # the maildir root itself.
        map(os.unlink, self._msgfiles)
        os.rmdir(os.path.join(self._dir, "cur"))
        os.rmdir(os.path.join(self._dir, "tmp"))
        os.rmdir(os.path.join(self._dir, "new"))
        os.rmdir(self._dir)

    def createMessage(self, dir):
        """Deposit one dummy message into subdirectory `dir`.

        Follows the maildir delivery protocol: write into tmp/, then
        link into the destination (copy where os.link is missing).
        """
        t = int(time.time() % 1000000)
        pid = self._counter
        self._counter += 1
        filename = os.extsep.join((str(t), str(pid), "myhostname", "mydomain"))
        tmpname = os.path.join(self._dir, "tmp", filename)
        newname = os.path.join(self._dir, dir, filename)
        fp = open(tmpname, "w")
        self._msgfiles.append(tmpname)
        fp.write(DUMMY_MESSAGE)
        fp.close()
        if hasattr(os, "link"):
            os.link(tmpname, newname)
        else:
            fp = open(newname, "w")
            fp.write(DUMMY_MESSAGE)
            fp.close()
        self._msgfiles.append(newname)

    def test_empty_maildir(self):
        """Test an empty maildir mailbox"""
        # Test for regression on bug #117490:
        # Make sure the boxes attribute actually gets set.
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        self.assert_(hasattr(self.mbox, "boxes"))
        self.assert_(len(self.mbox.boxes) == 0)
        self.assert_(self.mbox.next() is None)
        self.assert_(self.mbox.next() is None)

    def test_nonempty_maildir_cur(self):
        # One message in cur/ is seen exactly once.
        self.createMessage("cur")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        self.assert_(len(self.mbox.boxes) == 1)
        self.assert_(self.mbox.next() is not None)
        self.assert_(self.mbox.next() is None)
        self.assert_(self.mbox.next() is None)

    def test_nonempty_maildir_new(self):
        # One message in new/ is seen exactly once.
        self.createMessage("new")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        self.assert_(len(self.mbox.boxes) == 1)
        self.assert_(self.mbox.next() is not None)
        self.assert_(self.mbox.next() is None)
        self.assert_(self.mbox.next() is None)

    def test_nonempty_maildir_both(self):
        # Messages in both cur/ and new/ are each seen once.
        self.createMessage("cur")
        self.createMessage("new")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        self.assert_(len(self.mbox.boxes) == 2)
        self.assert_(self.mbox.next() is not None)
        self.assert_(self.mbox.next() is not None)
        self.assert_(self.mbox.next() is None)
        self.assert_(self.mbox.next() is None)

    # XXX We still need more tests!
+
+
def test_main():
    # Standard regrtest entry point.
    test_support.run_unittest(MaildirTestCase)


if __name__ == "__main__":
    test_main()
diff --git a/lib-python/2.2/test/test_marshal.py b/lib-python/2.2/test/test_marshal.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_marshal.py
@@ -0,0 +1,44 @@
from test_support import TestFailed
import marshal
import sys

# XXX Much more needed here.

# Test the full range of Python ints.
n = sys.maxint
# Halving n each pass exercises every bit width down to 1, for both
# signs.
while n:
    for expected in (-n, n):
        s = marshal.dumps(expected)
        got = marshal.loads(s)
        if expected != got:
            raise TestFailed("for int %d, marshal string is %r, loaded "
                             "back as %d" % (expected, s, got))
    n = n >> 1
+
+# Simulate int marshaling on a 64-bit box.  This is most interesting if
+# we're running the test on a 32-bit box, of course.
+
def to_little_endian_string(value, nbytes):
    # Serialize the low `nbytes` bytes of `value`, least significant
    # byte first (as a 64-bit marshal 'I' record expects).
    out = ''
    while nbytes > 0:
        out = out + chr(value & 0xff)
        value >>= 8
        nbytes = nbytes - 1
    return out
+
maxint64 = (1L << 63) - 1
minint64 = -maxint64-1

# 'I' is the marshal type code for a 64-bit int record; feeding
# hand-built records checks the 64-bit decode path on any platform.
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
    while base:
        s = 'I' + to_little_endian_string(base, 8)
        got = marshal.loads(s)
        if base != got:
            raise TestFailed("for int %d, simulated marshal string is %r, "
                             "loaded back as %d" % (base, s, got))
        if base == -1:  # a fixed-point for shifting right 1
            base = 0
        else:
            base >>= 1

# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
diff --git a/lib-python/2.2/test/test_math.py b/lib-python/2.2/test/test_math.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_math.py
@@ -0,0 +1,195 @@
# Python test set -- math module
# XXXX Should not do tests around zero only

from test_support import *

# Tolerance for all approximate comparisons below; kept as a string so
# it can be echoed verbatim in the banner.
seps='1e-05'
eps = eval(seps)
print 'math module, testing with eps', seps
import math
+
+def testit(name, value, expected):
+    if abs(value-expected) > eps:
+        raise TestFailed, '%s returned %f, expected %f'%\
+              (name, value, expected)
+
# Spot-check each function at a handful of exact points; each section
# prints its name first so a failure is easy to localize.
print 'constants'
testit('pi', math.pi, 3.1415926)
testit('e', math.e, 2.7182818)

print 'acos'
testit('acos(-1)', math.acos(-1), math.pi)
testit('acos(0)', math.acos(0), math.pi/2)
testit('acos(1)', math.acos(1), 0)

print 'asin'
testit('asin(-1)', math.asin(-1), -math.pi/2)
testit('asin(0)', math.asin(0), 0)
testit('asin(1)', math.asin(1), math.pi/2)

print 'atan'
testit('atan(-1)', math.atan(-1), -math.pi/4)
testit('atan(0)', math.atan(0), 0)
testit('atan(1)', math.atan(1), math.pi/4)

print 'atan2'
testit('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
testit('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
testit('atan2(0, 1)', math.atan2(0, 1), 0)
testit('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
testit('atan2(1, 0)', math.atan2(1, 0), math.pi/2)

print 'ceil'
testit('ceil(0.5)', math.ceil(0.5), 1)
testit('ceil(1.0)', math.ceil(1.0), 1)
testit('ceil(1.5)', math.ceil(1.5), 2)
testit('ceil(-0.5)', math.ceil(-0.5), 0)
testit('ceil(-1.0)', math.ceil(-1.0), -1)
testit('ceil(-1.5)', math.ceil(-1.5), -1)

print 'cos'
testit('cos(-pi/2)', math.cos(-math.pi/2), 0)
testit('cos(0)', math.cos(0), 1)
testit('cos(pi/2)', math.cos(math.pi/2), 0)
testit('cos(pi)', math.cos(math.pi), -1)

print 'cosh'
testit('cosh(0)', math.cosh(0), 1)
testit('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert

print 'exp'
testit('exp(-1)', math.exp(-1), 1/math.e)
testit('exp(0)', math.exp(0), 1)
testit('exp(1)', math.exp(1), math.e)

print 'fabs'
testit('fabs(-1)', math.fabs(-1), 1)
testit('fabs(0)', math.fabs(0), 0)
testit('fabs(1)', math.fabs(1), 1)

print 'floor'
testit('floor(0.5)', math.floor(0.5), 0)
testit('floor(1.0)', math.floor(1.0), 1)
testit('floor(1.5)', math.floor(1.5), 1)
testit('floor(-0.5)', math.floor(-0.5), -1)
testit('floor(-1.0)', math.floor(-1.0), -1)
testit('floor(-1.5)', math.floor(-1.5), -2)

print 'fmod'
testit('fmod(10,1)', math.fmod(10,1), 0)
testit('fmod(10,0.5)', math.fmod(10,0.5), 0)
testit('fmod(10,1.5)', math.fmod(10,1.5), 1)
testit('fmod(-10,1)', math.fmod(-10,1), 0)
testit('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
testit('fmod(-10,1.5)', math.fmod(-10,1.5), -1)

print 'frexp'
def testfrexp(name, (mant, exp), (emant, eexp)):
    # Mantissa is compared within eps; the binary exponent must match
    # exactly.
    if abs(mant-emant) > eps or exp != eexp:
        raise TestFailed, '%s returned %s, expected %s'%\
              (name, `mant, exp`, `emant,eexp`)
+
# frexp / hypot / ldexp / log / log10 spot checks.
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))

print 'hypot'
testit('hypot(0,0)', math.hypot(0,0), 0)
testit('hypot(3,4)', math.hypot(3,4), 5)

print 'ldexp'
testit('ldexp(0,1)', math.ldexp(0,1), 0)
testit('ldexp(1,1)', math.ldexp(1,1), 2)
testit('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
testit('ldexp(-1,1)', math.ldexp(-1,1), -2)

print 'log'
testit('log(1/e)', math.log(1/math.e), -1)
testit('log(1)', math.log(1), 0)
testit('log(e)', math.log(math.e), 1)

print 'log10'
testit('log10(0.1)', math.log10(0.1), -1)
testit('log10(1)', math.log10(1), 0)
testit('log10(10)', math.log10(10), 1)

print 'modf'
+def testmodf(name, (v1, v2), (e1, e2)):
+    if abs(v1-e1) > eps or abs(v2-e2):
+        raise TestFailed, '%s returned %s, expected %s'%\
+              (name, `v1,v2`, `e1,e2`)
+
# modf / pow / sin / sinh / sqrt / tan / tanh spot checks.
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))

print 'pow'
testit('pow(0,1)', math.pow(0,1), 0)
testit('pow(1,0)', math.pow(1,0), 1)
testit('pow(2,1)', math.pow(2,1), 2)
testit('pow(2,-1)', math.pow(2,-1), 0.5)

print 'sin'
testit('sin(0)', math.sin(0), 0)
testit('sin(pi/2)', math.sin(math.pi/2), 1)
testit('sin(-pi/2)', math.sin(-math.pi/2), -1)

print 'sinh'
testit('sinh(0)', math.sinh(0), 0)
testit('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
testit('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)

print 'sqrt'
testit('sqrt(0)', math.sqrt(0), 0)
testit('sqrt(1)', math.sqrt(1), 1)
testit('sqrt(4)', math.sqrt(4), 2)

print 'tan'
testit('tan(0)', math.tan(0), 0)
testit('tan(pi/4)', math.tan(math.pi/4), 1)
testit('tan(-pi/4)', math.tan(-math.pi/4), -1)

print 'tanh'
testit('tanh(0)', math.tanh(0), 0)
testit('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
+
+# RED_FLAG 16-Oct-2000 Tim
+# While 2.0 is more consistent about exceptions than previous releases, it
+# still fails this part of the test on some platforms.  For now, we only
+# *run* test_exceptions() in verbose mode, so that this isn't normally
+# tested.
+
def test_exceptions():
    """Check the exception behavior of exp() and sqrt() at extremes."""
    print 'exceptions'
    try:
        x = math.exp(-1000000000)
    except:
        # mathmodule.c is failing to weed out underflows from libm, or
        # we've got an fp format with huge dynamic range
        raise TestFailed("underflowing exp() should not have raised "
                         "an exception")
    if x != 0:
        raise TestFailed("underflowing exp() should have returned 0")

    # If this fails, probably using a strict IEEE-754 conforming libm, and x
    # is +Inf afterwards.  But Python wants overflows detected by default.
    try:
        x = math.exp(1000000000)
    except OverflowError:
        pass
    else:
        raise TestFailed("overflowing exp() didn't trigger OverflowError")

    # If this fails, it could be a puzzle.  One odd possibility is that
    # mathmodule.c's macros are getting confused while comparing
    # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
    # as a result (and so raising OverflowError instead).
    try:
        x = math.sqrt(-1.0)
    except ValueError:
        pass
    else:
        raise TestFailed("sqrt(-1) didn't raise ValueError")

# Platform-dependent; only exercised in verbose mode (see comment above).
if verbose:
    test_exceptions()
diff --git a/lib-python/2.2/test/test_md5.py b/lib-python/2.2/test/test_md5.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_md5.py
@@ -0,0 +1,30 @@
+# Testing md5 module
+
+import string
+from md5 import md5
+
def hexstr(s):
    # Hex-encode s, two lowercase hex digits per character.
    digits = string.hexdigits
    pieces = []
    for ch in s:
        code = ord(ch)
        pieces.append(digits[(code >> 4) & 0xF])
        pieces.append(digits[code & 0xF])
    return ''.join(pieces)
+
def md5test(s):
    # Format one RFC 1321 test-suite line for string s.
    return 'MD5 ("' + s + '") = ' + hexstr(md5(s).digest())
+
# The seven test vectors from RFC 1321, appendix A.5; output is
# compared against the expected-output file by regrtest.
print 'MD5 test suite:'
print md5test('')
print md5test('a')
print md5test('abc')
print md5test('message digest')
print md5test('abcdefghijklmnopqrstuvwxyz')
print md5test('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
print md5test('12345678901234567890123456789012345678901234567890123456789012345678901234567890')

# hexdigest is new with Python 2.0
m = md5('testing the hexdigest method')
h = m.hexdigest()
if hexstr(m.digest()) != h:
    print 'hexdigest() failed'
diff --git a/lib-python/2.2/test/test_mhlib.py b/lib-python/2.2/test/test_mhlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mhlib.py
@@ -0,0 +1,340 @@
+"""
+   Tests for the mhlib module
+   Nick Mathewson
+"""
+
+### BUG: This suite doesn't currently test the mime functionality of
+###      mhlib.  It should.
+
+import unittest
+from test_support import run_unittest, TESTFN, TestSkipped
+import os, StringIO
+import sys
+import mhlib
+
+if sys.platform.startswith("win") or sys.platform=="riscos":
+    raise TestSkipped("test_mhlib skipped on %s -- "%sys.platform +
+                      "too many Unix assumptions")
+
+_mhroot = TESTFN+"_MH"
+_mhpath = os.path.join(_mhroot, "MH")
+_mhprofile = os.path.join(_mhroot, ".mh_profile")
+
+def normF(f):
+    return os.path.join(*f.split('/'))
+
+def writeFile(fname, contents):
+    dir = os.path.split(fname)[0]
+    if dir and not os.path.exists(dir):
+        mkdirs(dir)
+    f = open(fname, 'w')
+    f.write(contents)
+    f.close()
+
+def readFile(fname):
+    f = open(fname)
+    r = f.read()
+    f.close()
+    return r
+
+def writeProfile(dict):
+    contents = [ "%s: %s\n" % (k, v) for k, v in dict.iteritems() ]
+    writeFile(_mhprofile, "".join(contents))
+
+def writeContext(folder):
+    folder = normF(folder)
+    writeFile(os.path.join(_mhpath, "context"),
+              "Current-Folder: %s\n" % folder)
+
+def writeCurMessage(folder, cur):
+    folder = normF(folder)
+    writeFile(os.path.join(_mhpath, folder, ".mh_sequences"),
+              "cur: %s\n"%cur)
+
+def writeMessage(folder, n, headers, body):
+    folder = normF(folder)
+    headers = "".join([ "%s: %s\n" % (k, v) for k, v in headers.iteritems() ])
+    contents = "%s\n%s\n" % (headers,body)
+    mkdirs(os.path.join(_mhpath, folder))
+    writeFile(os.path.join(_mhpath, folder, str(n)), contents)
+
+def getMH():
+    return mhlib.MH(os.path.abspath(_mhpath), _mhprofile)
+
+def sortLines(s):
+    lines = s.split("\n")
+    lines = [ line.strip() for line in lines if len(line) >= 2 ]
+    lines.sort()
+    return lines
+
+# These next 2 functions are copied from test_glob.py.
+def mkdirs(fname):
+    if os.path.exists(fname) or fname == '':
+        return
+    base, file = os.path.split(fname)
+    mkdirs(base)
+    os.mkdir(fname)
+
+def deltree(fname):
+    if not os.path.exists(fname):
+        return
+    for f in os.listdir(fname):
+        fullname = os.path.join(fname, f)
+        if os.path.isdir(fullname):
+            deltree(fullname)
+        else:
+            try:
+                os.unlink(fullname)
+            except:
+                pass
+    try:
+        os.rmdir(fname)
+    except:
+        pass
+
+class MhlibTests(unittest.TestCase):
+    def setUp(self):
+        deltree(_mhroot)
+        mkdirs(_mhpath)
+        writeProfile({'Path' : os.path.abspath(_mhpath),
+                      'Editor': 'emacs',
+                      'ignored-attribute': 'camping holiday'})
+        # Note: These headers aren't really conformant to RFC822, but
+        #  mhlib shouldn't care about that.
+
+        # An inbox with a couple of messages.
+        writeMessage('inbox', 1,
+                     {'From': 'Mrs. Premise',
+                      'To': 'Mrs. Conclusion',
+                      'Date': '18 July 2001'}, "Hullo, Mrs. Conclusion!\n")
+        writeMessage('inbox', 2,
+                     {'From': 'Mrs. Conclusion',
+                      'To': 'Mrs. Premise',
+                      'Date': '29 July 2001'}, "Hullo, Mrs. Premise!\n")
+
+        # A folder with many messages
+        for i in range(5, 101)+range(101, 201, 2):
+            writeMessage('wide', i,
+                         {'From': 'nowhere', 'Subject': 'message #%s' % i},
+                         "This is message number %s\n" % i)
+
+        # A deeply nested folder
+        def deep(folder, n):
+            writeMessage(folder, n,
+                         {'Subject': 'Message %s/%s' % (folder, n) },
+                         "This is message number %s in %s\n" % (n, folder) )
+        deep('deep/f1', 1)
+        deep('deep/f1', 2)
+        deep('deep/f1', 3)
+        deep('deep/f2', 4)
+        deep('deep/f2', 6)
+        deep('deep', 3)
+        deep('deep/f2/f3', 1)
+        deep('deep/f2/f3', 2)
+
+    def tearDown(self):
+        deltree(_mhroot)
+
+    def test_basic(self):
+        writeContext('inbox')
+        writeCurMessage('inbox', 2)
+        mh = getMH()
+
+        eq = self.assertEquals
+        eq(mh.getprofile('Editor'), 'emacs')
+        eq(mh.getprofile('not-set'), None)
+        eq(mh.getpath(), os.path.abspath(_mhpath))
+        eq(mh.getcontext(), 'inbox')
+
+        mh.setcontext('wide')
+        eq(mh.getcontext(), 'wide')
+        eq(readFile(os.path.join(_mhpath, 'context')),
+           "Current-Folder: wide\n")
+
+        mh.setcontext('inbox')
+
+        inbox = mh.openfolder('inbox')
+        eq(inbox.getfullname(),
+           os.path.join(os.path.abspath(_mhpath), 'inbox'))
+        eq(inbox.getsequencesfilename(),
+           os.path.join(os.path.abspath(_mhpath), 'inbox', '.mh_sequences'))
+        eq(inbox.getmessagefilename(1),
+           os.path.join(os.path.abspath(_mhpath), 'inbox', '1'))
+
+    def test_listfolders(self):
+        mh = getMH()
+        eq = self.assertEquals
+
+        folders = mh.listfolders()
+        folders.sort()
+        eq(folders, ['deep', 'inbox', 'wide'])
+
+        folders = mh.listallfolders()
+        folders.sort()
+        tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
+                                'inbox', 'wide'])
+        tfolders.sort()
+        eq(folders, tfolders)
+
+        folders = mh.listsubfolders('deep')
+        folders.sort()
+        eq(folders, map(normF, ['deep/f1', 'deep/f2']))
+
+        folders = mh.listallsubfolders('deep')
+        folders.sort()
+        eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
+        eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
+
+        eq(mh.listsubfolders('inbox'), [])
+        eq(mh.listallsubfolders('inbox'), [])
+
+    def test_sequence(self):
+        mh = getMH()
+        eq = self.assertEquals
+        writeCurMessage('wide', 55)
+
+        f = mh.openfolder('wide')
+        all = f.listmessages()
+        eq(all, range(5, 101)+range(101, 201, 2))
+        eq(f.getcurrent(), 55)
+        f.setcurrent(99)
+        eq(readFile(os.path.join(_mhpath, 'wide', '.mh_sequences')),
+           'cur: 99\n')
+
+        def seqeq(seq, val):
+            eq(f.parsesequence(seq), val)
+
+        seqeq('5-55', range(5, 56))
+        seqeq('90-108', range(90, 101)+range(101, 109, 2))
+        seqeq('90-108', range(90, 101)+range(101, 109, 2))
+
+        seqeq('10:10', range(10, 20))
+        seqeq('10:+10', range(10, 20))
+        seqeq('101:10', range(101, 121, 2))
+
+        seqeq('cur', [99])
+        seqeq('.', [99])
+        seqeq('prev', [98])
+        seqeq('next', [100])
+        seqeq('cur:-3', [97, 98, 99])
+        seqeq('first-cur', range(5, 100))
+        seqeq('150-last', range(151, 201, 2))
+        seqeq('prev-next', [98, 99, 100])
+
+        lowprimes = [5, 7, 11, 13, 17, 19, 23, 29]
+        lowcompos = [x for x in range(5, 31) if not x in lowprimes ]
+        f.putsequences({'cur': [5],
+                        'lowprime': lowprimes,
+                        'lowcompos': lowcompos})
+        seqs = readFile(os.path.join(_mhpath, 'wide', '.mh_sequences'))
+        seqs = sortLines(seqs)
+        eq(seqs, ["cur: 5",
+                  "lowcompos: 6 8-10 12 14-16 18 20-22 24-28 30",
+                  "lowprime: 5 7 11 13 17 19 23 29"])
+
+        seqeq('lowprime', lowprimes)
+        seqeq('lowprime:1', [5])
+        seqeq('lowprime:2', [5, 7])
+        seqeq('lowprime:-2', [23, 29])
+
+        ## Not supported
+        #seqeq('lowprime:first', [5])
+        #seqeq('lowprime:last', [29])
+        #seqeq('lowprime:prev', [29])
+        #seqeq('lowprime:next', [29])
+
+    def test_modify(self):
+        mh = getMH()
+        eq = self.assertEquals
+
+        mh.makefolder("dummy1")
+        self.assert_("dummy1" in mh.listfolders())
+        path = os.path.join(_mhpath, "dummy1")
+        self.assert_(os.path.exists(path))
+
+        f = mh.openfolder('dummy1')
+        def create(n):
+            msg = "From: foo\nSubject: %s\n\nDummy Message %s\n" % (n,n)
+            f.createmessage(n, StringIO.StringIO(msg))
+
+        create(7)
+        create(8)
+        create(9)
+
+        eq(readFile(f.getmessagefilename(9)),
+           "From: foo\nSubject: 9\n\nDummy Message 9\n")
+
+        eq(f.listmessages(), [7, 8, 9])
+        files = os.listdir(path)
+        files.sort()
+        eq(files, ['7', '8', '9'])
+
+        f.removemessages(['7', '8'])
+        files = os.listdir(path)
+        files.sort()
+        eq(files, [',7', ',8', '9'])
+        eq(f.listmessages(), [9])
+        create(10)
+        create(11)
+        create(12)
+
+        mh.makefolder("dummy2")
+        f2 = mh.openfolder("dummy2")
+        eq(f2.listmessages(), [])
+        f.movemessage(10, f2, 3)
+        f.movemessage(11, f2, 5)
+        eq(f.listmessages(), [9, 12])
+        eq(f2.listmessages(), [3, 5])
+        eq(readFile(f2.getmessagefilename(3)),
+           "From: foo\nSubject: 10\n\nDummy Message 10\n")
+
+        f.copymessage(9, f2, 4)
+        eq(f.listmessages(), [9, 12])
+        eq(readFile(f2.getmessagefilename(4)),
+           "From: foo\nSubject: 9\n\nDummy Message 9\n")
+
+        f.refilemessages([9, 12], f2)
+        eq(f.listmessages(), [])
+        eq(f2.listmessages(), [3, 4, 5, 6, 7])
+        eq(readFile(f2.getmessagefilename(7)),
+           "From: foo\nSubject: 12\n\nDummy Message 12\n")
+        # XXX This should check that _copysequences does the right thing.
+
+        mh.deletefolder('dummy1')
+        mh.deletefolder('dummy2')
+        self.assert_('dummy1' not in mh.listfolders())
+        self.assert_(not os.path.exists(path))
+
+    def test_read(self):
+        mh = getMH()
+        eq = self.assertEquals
+
+        f = mh.openfolder('inbox')
+        msg = f.openmessage(1)
+        # Check some basic stuff from rfc822
+        eq(msg.getheader('From'), "Mrs. Premise")
+        eq(msg.getheader('To'), "Mrs. Conclusion")
+
+        # Okay, we have the right message.  Let's check the stuff from
+        # mhlib.
+        lines = sortLines(msg.getheadertext())
+        eq(lines, ["Date: 18 July 2001",
+                   "From: Mrs. Premise",
+                   "To: Mrs. Conclusion"])
+        lines = sortLines(msg.getheadertext(lambda h: len(h)==4))
+        eq(lines, ["Date: 18 July 2001",
+                   "From: Mrs. Premise"])
+        eq(msg.getbodytext(), "Hullo, Mrs. Conclusion!\n\n")
+        eq(msg.getbodytext(0), "Hullo, Mrs. Conclusion!\n\n")
+
+        # XXXX there should be a better way to reclaim the file handle
+        msg.fp.close()
+        del msg
+
+
+def test_main():
+    run_unittest(MhlibTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_mimetools.py b/lib-python/2.2/test/test_mimetools.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mimetools.py
@@ -0,0 +1,18 @@
+from test_support import TestFailed
+import mimetools
+
+import string,StringIO
+start = string.ascii_letters + "=" + string.digits + "\n"
+for enc in ['7bit','8bit','base64','quoted-printable']:
+    print enc,
+    i = StringIO.StringIO(start)
+    o = StringIO.StringIO()
+    mimetools.encode(i,o,enc)
+    i = StringIO.StringIO(o.getvalue())
+    o = StringIO.StringIO()
+    mimetools.decode(i,o,enc)
+    if o.getvalue()==start:
+        print "PASS"
+    else:
+        print "FAIL"
+        print o.getvalue()
diff --git a/lib-python/2.2/test/test_mimetypes.py b/lib-python/2.2/test/test_mimetypes.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mimetypes.py
@@ -0,0 +1,59 @@
+import mimetypes
+import StringIO
+import unittest
+
+import test_support
+
+# Tell it we don't know about external files:
+mimetypes.knownfiles = []
+
+
+class MimeTypesTestCase(unittest.TestCase):
+    def setUp(self):
+        self.db = mimetypes.MimeTypes()
+
+    def test_default_data(self):
+        self.assertEqual(self.db.guess_type("foo.html"),
+                         ("text/html", None))
+        self.assertEqual(self.db.guess_type("foo.tgz"),
+                         ("application/x-tar", "gzip"))
+        self.assertEqual(self.db.guess_type("foo.tar.gz"),
+                         ("application/x-tar", "gzip"))
+        self.assertEqual(self.db.guess_type("foo.tar.Z"),
+                         ("application/x-tar", "compress"))
+
+    def test_data_urls(self):
+        self.assertEqual(self.db.guess_type("data:,thisIsTextPlain"),
+                         ("text/plain", None))
+        self.assertEqual(self.db.guess_type("data:;base64,thisIsTextPlain"),
+                         ("text/plain", None))
+        self.assertEqual(self.db.guess_type("data:text/x-foo,thisIsTextXFoo"),
+                         ("text/x-foo", None))
+
+    def test_file_parsing(self):
+        sio = StringIO.StringIO("x-application/x-unittest pyunit\n")
+        self.db.readfp(sio)
+        self.assertEqual(self.db.guess_type("foo.pyunit"),
+                         ("x-application/x-unittest", None))
+        self.assertEqual(self.db.guess_extension("x-application/x-unittest"),
+                         ".pyunit")
+
+    def test_non_standard_types(self):
+        # First try strict
+        self.assertEqual(self.db.guess_type('foo.xul', strict=1),
+                         (None, None))
+        self.assertEqual(self.db.guess_extension('image/jpg', strict=1),
+                         None)
+        # And then non-strict
+        self.assertEqual(self.db.guess_type('foo.xul', strict=0),
+                         ('text/xul', None))
+        self.assertEqual(self.db.guess_extension('image/jpg', strict=0),
+                         '.jpg')
+
+
+def test_main():
+    test_support.run_unittest(MimeTypesTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_minidom.py b/lib-python/2.2/test/test_minidom.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_minidom.py
@@ -0,0 +1,649 @@
+# test for xml.dom.minidom
+
+from xml.dom.minidom import parse, Node, Document, parseString
+from xml.dom import HierarchyRequestErr
+import xml.parsers.expat
+
+import os
+import sys
+import traceback
+from test_support import verbose
+
+if __name__ == "__main__":
+    base = sys.argv[0]
+else:
+    base = __file__
+tstfile = os.path.join(os.path.dirname(base), "test"+os.extsep+"xml")
+del base
+
+def confirm(test, testname = "Test"):
+    if not test:
+        print "Failed " + testname
+        raise Exception
+
+Node._debug = 1
+
+def testParseFromFile():
+    from StringIO import StringIO
+    dom = parse(StringIO(open(tstfile).read()))
+    dom.unlink()
+    confirm(isinstance(dom,Document))
+
+def testGetElementsByTagName():
+    dom = parse(tstfile)
+    confirm(dom.getElementsByTagName("LI") == \
+            dom.documentElement.getElementsByTagName("LI"))
+    dom.unlink()
+
+def testInsertBefore():
+    dom = parseString("<doc><foo/></doc>")
+    root = dom.documentElement
+    elem = root.childNodes[0]
+    nelem = dom.createElement("element")
+    root.insertBefore(nelem, elem)
+    confirm(len(root.childNodes) == 2
+            and root.childNodes.length == 2
+            and root.childNodes[0] is nelem
+            and root.childNodes.item(0) is nelem
+            and root.childNodes[1] is elem
+            and root.childNodes.item(1) is elem
+            and root.firstChild is nelem
+            and root.lastChild is elem
+            and root.toxml() == "<doc><element/><foo/></doc>"
+            , "testInsertBefore -- node properly placed in tree")
+    nelem = dom.createElement("element")
+    root.insertBefore(nelem, None)
+    confirm(len(root.childNodes) == 3
+            and root.childNodes.length == 3
+            and root.childNodes[1] is elem
+            and root.childNodes.item(1) is elem
+            and root.childNodes[2] is nelem
+            and root.childNodes.item(2) is nelem
+            and root.lastChild is nelem
+            and nelem.previousSibling is elem
+            and root.toxml() == "<doc><element/><foo/><element/></doc>"
+            , "testInsertBefore -- node properly placed in tree")
+    nelem2 = dom.createElement("bar")
+    root.insertBefore(nelem2, nelem)
+    confirm(len(root.childNodes) == 4
+            and root.childNodes.length == 4
+            and root.childNodes[2] is nelem2
+            and root.childNodes.item(2) is nelem2
+            and root.childNodes[3] is nelem
+            and root.childNodes.item(3) is nelem
+            and nelem2.nextSibling is nelem
+            and nelem.previousSibling is nelem2
+            and root.toxml() == "<doc><element/><foo/><bar/><element/></doc>"
+            , "testInsertBefore -- node properly placed in tree")
+    dom.unlink()
+
+def _create_fragment_test_nodes():
+    dom = parseString("<doc/>")
+    orig = dom.createTextNode("original")
+    c1 = dom.createTextNode("foo")
+    c2 = dom.createTextNode("bar")
+    c3 = dom.createTextNode("bat")
+    dom.documentElement.appendChild(orig)
+    frag = dom.createDocumentFragment()
+    frag.appendChild(c1)
+    frag.appendChild(c2)
+    frag.appendChild(c3)
+    return dom, orig, c1, c2, c3, frag
+
+def testInsertBeforeFragment():
+    dom, orig, c1, c2, c3, frag = _create_fragment_test_nodes()
+    dom.documentElement.insertBefore(frag, None)
+    confirm(tuple(dom.documentElement.childNodes) == (orig, c1, c2, c3),
+            "insertBefore(<fragment>, None)")
+    frag.unlink()
+    dom.unlink()
+    #
+    dom, orig, c1, c2, c3, frag = _create_fragment_test_nodes()
+    dom.documentElement.insertBefore(frag, orig)
+    confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3, orig),
+            "insertBefore(<fragment>, orig)")
+    frag.unlink()
+    dom.unlink()
+
+def testAppendChild():
+    dom = parse(tstfile)
+    dom.documentElement.appendChild(dom.createComment(u"Hello"))
+    confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
+    confirm(dom.documentElement.childNodes[-1].data == "Hello")
+    dom.unlink()
+
+def testAppendChildFragment():
+    dom, orig, c1, c2, c3, frag = _create_fragment_test_nodes()
+    dom.documentElement.appendChild(frag)
+    confirm(tuple(dom.documentElement.childNodes) == (orig, c1, c2, c3),
+            "appendChild(<fragment>)")
+    frag.unlink()
+    dom.unlink()
+
+def testReplaceChildFragment():
+    dom, orig, c1, c2, c3, frag = _create_fragment_test_nodes()
+    dom.documentElement.replaceChild(frag, orig)
+    orig.unlink()
+    confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3),
+            "replaceChild(<fragment>)")
+    frag.unlink()
+    dom.unlink()
+
+def testLegalChildren():
+    dom = Document()
+    elem = dom.createElement('element')
+    text = dom.createTextNode('text')
+
+    try: dom.appendChild(text)
+    except HierarchyRequestErr: pass
+    else:
+        print "dom.appendChild didn't raise HierarchyRequestErr"
+
+    dom.appendChild(elem)
+    try: dom.insertBefore(text, elem)
+    except HierarchyRequestErr: pass
+    else:
+        print "dom.insertBefore didn't raise HierarchyRequestErr"
+
+    try: dom.replaceChild(text, elem)
+    except HierarchyRequestErr: pass
+    else:
+        print "dom.replaceChild didn't raise HierarchyRequestErr"
+
+    nodemap = elem.attributes
+    try: nodemap.setNamedItem(text)
+    except HierarchyRequestErr: pass
+    else:
+        print "NamedNodeMap.setNamedItem didn't raise HierarchyRequestErr"
+
+    try: nodemap.setNamedItemNS(text)
+    except HierarchyRequestErr: pass
+    else:
+        print "NamedNodeMap.setNamedItemNS didn't raise HierarchyRequestErr"
+
+    elem.appendChild(text)
+    dom.unlink()
+
+def testNamedNodeMapSetItem():
+    dom = Document()
+    elem = dom.createElement('element')
+    attrs = elem.attributes
+    attrs["foo"] = "bar"
+    a = attrs.item(0)
+    confirm(a.ownerDocument is dom,
+            "NamedNodeMap.__setitem__() sets ownerDocument")
+    confirm(a.ownerElement is elem,
+            "NamedNodeMap.__setitem__() sets ownerElement")
+    confirm(a.value == "bar",
+            "NamedNodeMap.__setitem__() sets value")
+    confirm(a.nodeValue == "bar",
+            "NamedNodeMap.__setitem__() sets nodeValue")
+    elem.unlink()
+    dom.unlink()
+
+def testNonZero():
+    dom = parse(tstfile)
+    confirm(dom)# should not be zero
+    dom.appendChild(dom.createComment("foo"))
+    confirm(not dom.childNodes[-1].childNodes)
+    dom.unlink()
+
+def testUnlink():
+    dom = parse(tstfile)
+    dom.unlink()
+
+def testElement():
+    dom = Document()
+    dom.appendChild(dom.createElement("abc"))
+    confirm(dom.documentElement)
+    dom.unlink()
+
+def testAAA():
+    dom = parseString("<abc/>")
+    el = dom.documentElement
+    el.setAttribute("spam", "jam2")
+    confirm(el.toxml() == '<abc spam="jam2"/>', "testAAA")
+    a = el.getAttributeNode("spam")
+    confirm(a.ownerDocument is dom,
+            "setAttribute() sets ownerDocument")
+    confirm(a.ownerElement is dom.documentElement,
+            "setAttribute() sets ownerElement")
+    dom.unlink()
+
+def testAAB():
+    dom = parseString("<abc/>")
+    el = dom.documentElement
+    el.setAttribute("spam", "jam")
+    el.setAttribute("spam", "jam2")
+    confirm(el.toxml() == '<abc spam="jam2"/>', "testAAB")
+    dom.unlink()
+
+def testAddAttr():
+    dom = Document()
+    child = dom.appendChild(dom.createElement("abc"))
+
+    child.setAttribute("def", "ghi")
+    confirm(child.getAttribute("def") == "ghi")
+    confirm(child.attributes["def"].value == "ghi")
+
+    child.setAttribute("jkl", "mno")
+    confirm(child.getAttribute("jkl") == "mno")
+    confirm(child.attributes["jkl"].value == "mno")
+
+    confirm(len(child.attributes) == 2)
+
+    child.setAttribute("def", "newval")
+    confirm(child.getAttribute("def") == "newval")
+    confirm(child.attributes["def"].value == "newval")
+
+    confirm(len(child.attributes) == 2)
+    dom.unlink()
+
+def testDeleteAttr():
+    dom = Document()
+    child = dom.appendChild(dom.createElement("abc"))
+
+    confirm(len(child.attributes) == 0)
+    child.setAttribute("def", "ghi")
+    confirm(len(child.attributes) == 1)
+    del child.attributes["def"]
+    confirm(len(child.attributes) == 0)
+    dom.unlink()
+
+def testRemoveAttr():
+    dom = Document()
+    child = dom.appendChild(dom.createElement("abc"))
+
+    child.setAttribute("def", "ghi")
+    confirm(len(child.attributes) == 1)
+    child.removeAttribute("def")
+    confirm(len(child.attributes) == 0)
+
+    dom.unlink()
+
+def testRemoveAttrNS():
+    dom = Document()
+    child = dom.appendChild(
+            dom.createElementNS("http://www.python.org", "python:abc"))
+    child.setAttributeNS("http://www.w3.org", "xmlns:python",
+                                            "http://www.python.org")
+    child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
+    confirm(len(child.attributes) == 2)
+    child.removeAttributeNS("http://www.python.org", "abcattr")
+    confirm(len(child.attributes) == 1)
+
+    dom.unlink()
+
+def testRemoveAttributeNode():
+    dom = Document()
+    child = dom.appendChild(dom.createElement("foo"))
+    child.setAttribute("spam", "jam")
+    confirm(len(child.attributes) == 1)
+    node = child.getAttributeNode("spam")
+    child.removeAttributeNode(node)
+    confirm(len(child.attributes) == 0)
+
+    dom.unlink()
+
+def testChangeAttr():
+    dom = parseString("<abc/>")
+    el = dom.documentElement
+    el.setAttribute("spam", "jam")
+    confirm(len(el.attributes) == 1)
+    el.setAttribute("spam", "bam")
+    confirm(len(el.attributes) == 1)
+    el.attributes["spam"] = "ham"
+    confirm(len(el.attributes) == 1)
+    el.setAttribute("spam2", "bam")
+    confirm(len(el.attributes) == 2)
+    el.attributes[ "spam2"] = "bam2"
+    confirm(len(el.attributes) == 2)
+    dom.unlink()
+
+def testGetAttrList():
+    pass
+
+def testGetAttrValues(): pass
+
+def testGetAttrLength(): pass
+
+def testGetAttribute(): pass
+
+def testGetAttributeNS(): pass
+
+def testGetAttributeNode(): pass
+
+def testGetElementsByTagNameNS():
+    d="""<foo xmlns:minidom="http://pyxml.sf.net/minidom">
+    <minidom:myelem/>
+    </foo>"""
+    dom = parseString(d)
+    elem = dom.getElementsByTagNameNS("http://pyxml.sf.net/minidom","myelem")
+    confirm(len(elem) == 1)
+    dom.unlink()
+
+def testGetEmptyNodeListFromElementsByTagNameNS(): pass
+
+def testElementReprAndStr():
+    dom = Document()
+    el = dom.appendChild(dom.createElement("abc"))
+    string1 = repr(el)
+    string2 = str(el)
+    confirm(string1 == string2)
+    dom.unlink()
+
+# commented out until Fredrik's fix is checked in
+def _testElementReprAndStrUnicode():
+    dom = Document()
+    el = dom.appendChild(dom.createElement(u"abc"))
+    string1 = repr(el)
+    string2 = str(el)
+    confirm(string1 == string2)
+    dom.unlink()
+
+# commented out until Fredrik's fix is checked in
+def _testElementReprAndStrUnicodeNS():
+    dom = Document()
+    el = dom.appendChild(
+        dom.createElementNS(u"http://www.slashdot.org", u"slash:abc"))
+    string1 = repr(el)
+    string2 = str(el)
+    confirm(string1 == string2)
+    confirm(string1.find("slash:abc") != -1)
+    dom.unlink()
+
+def testAttributeRepr():
+    dom = Document()
+    el = dom.appendChild(dom.createElement(u"abc"))
+    node = el.setAttribute("abc", "def")
+    confirm(str(node) == repr(node))
+    dom.unlink()
+
+def testTextNodeRepr(): pass
+
+def testWriteXML():
+    str = '<?xml version="1.0" ?>\n<a b="c"/>'
+    dom = parseString(str)
+    domstr = dom.toxml()
+    dom.unlink()
+    confirm(str == domstr)
+
+def testProcessingInstruction(): pass
+
+def testProcessingInstructionRepr(): pass
+
+def testTextRepr(): pass
+
+def testWriteText(): pass
+
+def testDocumentElement(): pass
+
+def testTooManyDocumentElements():
+    doc = parseString("<doc/>")
+    elem = doc.createElement("extra")
+    try:
+        doc.appendChild(elem)
+    except HierarchyRequestErr:
+        pass
+    else:
+        print "Failed to catch expected exception when" \
+              " adding extra document element."
+    elem.unlink()
+    doc.unlink()
+
+def testCreateElementNS(): pass
+
+def testCreateAttributeNS(): pass
+
+def testParse(): pass
+
+def testParseString(): pass
+
+def testComment(): pass
+
+def testAttrListItem(): pass
+
+def testAttrListItems(): pass
+
+def testAttrListItemNS(): pass
+
+def testAttrListKeys(): pass
+
+def testAttrListKeysNS(): pass
+
+def testAttrListValues(): pass
+
+def testAttrListLength(): pass
+
+def testAttrList__getitem__(): pass
+
+def testAttrList__setitem__(): pass
+
+def testSetAttrValueandNodeValue(): pass
+
+def testParseElement(): pass
+
+def testParseAttributes(): pass
+
+def testParseElementNamespaces(): pass
+
+def testParseAttributeNamespaces(): pass
+
+def testParseProcessingInstructions(): pass
+
+def testChildNodes(): pass
+
+def testFirstChild(): pass
+
+def testHasChildNodes(): pass
+
+def testCloneElementShallow():
+    dom, clone = _setupCloneElement(0)
+    confirm(len(clone.childNodes) == 0
+            and clone.childNodes.length == 0
+            and clone.parentNode is None
+            and clone.toxml() == '<doc attr="value"/>'
+            , "testCloneElementShallow")
+    dom.unlink()
+
+def testCloneElementDeep():
+    dom, clone = _setupCloneElement(1)
+    confirm(len(clone.childNodes) == 1
+            and clone.childNodes.length == 1
+            and clone.parentNode is None
+            and clone.toxml() == '<doc attr="value"><foo/></doc>'
+            , "testCloneElementDeep")
+    dom.unlink()
+
+def _setupCloneElement(deep):
+    dom = parseString("<doc attr='value'><foo/></doc>")
+    root = dom.documentElement
+    clone = root.cloneNode(deep)
+    _testCloneElementCopiesAttributes(
+        root, clone, "testCloneElement" + (deep and "Deep" or "Shallow"))
+    # mutilate the original so shared data is detected
+    root.tagName = root.nodeName = "MODIFIED"
+    root.setAttribute("attr", "NEW VALUE")
+    root.setAttribute("added", "VALUE")
+    return dom, clone
+
+def _testCloneElementCopiesAttributes(e1, e2, test):
+    attrs1 = e1.attributes
+    attrs2 = e2.attributes
+    keys1 = attrs1.keys()
+    keys2 = attrs2.keys()
+    keys1.sort()
+    keys2.sort()
+    confirm(keys1 == keys2, "clone of element has same attribute keys")
+    for i in range(len(keys1)):
+        a1 = attrs1.item(i)
+        a2 = attrs2.item(i)
+        confirm(a1 is not a2
+                and a1.value == a2.value
+                and a1.nodeValue == a2.nodeValue
+                and a1.namespaceURI == a2.namespaceURI
+                and a1.localName == a2.localName
+                , "clone of attribute node has proper attribute values")
+        confirm(a2.ownerElement is e2,
+                "clone of attribute node correctly owned")
+
+
+def testCloneDocumentShallow(): pass
+
+def testCloneDocumentDeep(): pass
+
+def testCloneAttributeShallow(): pass
+
+def testCloneAttributeDeep(): pass
+
+def testClonePIShallow(): pass
+
+def testClonePIDeep(): pass
+
+def testNormalize():
+    doc = parseString("<doc/>")
+    root = doc.documentElement
+    root.appendChild(doc.createTextNode("first"))
+    root.appendChild(doc.createTextNode("second"))
+    confirm(len(root.childNodes) == 2
+            and root.childNodes.length == 2, "testNormalize -- preparation")
+    doc.normalize()
+    confirm(len(root.childNodes) == 1
+            and root.childNodes.length == 1
+            and root.firstChild is root.lastChild
+            and root.firstChild.data == "firstsecond"
+            , "testNormalize -- result")
+    doc.unlink()
+
+    doc = parseString("<doc/>")
+    root = doc.documentElement
+    root.appendChild(doc.createTextNode(""))
+    doc.normalize()
+    confirm(len(root.childNodes) == 0
+            and root.childNodes.length == 0,
+            "testNormalize -- single empty node removed")
+    doc.unlink()
+
+def testSiblings():
+    doc = parseString("<doc><?pi?>text?<elm/></doc>")
+    root = doc.documentElement
+    (pi, text, elm) = root.childNodes
+
+    confirm(pi.nextSibling is text and
+            pi.previousSibling is None and
+            text.nextSibling is elm and
+            text.previousSibling is pi and
+            elm.nextSibling is None and
+            elm.previousSibling is text, "testSiblings")
+
+    doc.unlink()
+
+def testParents():
+    doc = parseString("<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
+    root = doc.documentElement
+    elm1 = root.childNodes[0]
+    (elm2a, elm2b) = elm1.childNodes
+    elm3 = elm2b.childNodes[0]
+
+    confirm(root.parentNode is doc and
+            elm1.parentNode is root and
+            elm2a.parentNode is elm1 and
+            elm2b.parentNode is elm1 and
+            elm3.parentNode is elm2b, "testParents")
+
+    doc.unlink()
+
+def testNodeListItem():
+    doc = parseString("<doc><e/><e/></doc>")
+    children = doc.childNodes
+    docelem = children[0]
+    confirm(children[0] is children.item(0)
+            and children.item(1) is None
+            and docelem.childNodes.item(0) is docelem.childNodes[0]
+            and docelem.childNodes.item(1) is docelem.childNodes[1]
+            and docelem.childNodes.item(0).childNodes.item(0) is None,
+            "test NodeList.item()")
+    doc.unlink()
+
+def testSAX2DOM():
+    from xml.dom import pulldom
+
+    sax2dom = pulldom.SAX2DOM()
+    sax2dom.startDocument()
+    sax2dom.startElement("doc", {})
+    sax2dom.characters("text")
+    sax2dom.startElement("subelm", {})
+    sax2dom.characters("text")
+    sax2dom.endElement("subelm")
+    sax2dom.characters("text")
+    sax2dom.endElement("doc")
+    sax2dom.endDocument()
+
+    doc = sax2dom.document
+    root = doc.documentElement
+    (text1, elm1, text2) = root.childNodes
+    text3 = elm1.childNodes[0]
+
+    confirm(text1.previousSibling is None and
+            text1.nextSibling is elm1 and
+            elm1.previousSibling is text1 and
+            elm1.nextSibling is text2 and
+            text2.previousSibling is elm1 and
+            text2.nextSibling is None and
+            text3.previousSibling is None and
+            text3.nextSibling is None, "testSAX2DOM - siblings")
+
+    confirm(root.parentNode is doc and
+            text1.parentNode is root and
+            elm1.parentNode is root and
+            text2.parentNode is root and
+            text3.parentNode is elm1, "testSAX2DOM - parents")
+
+    doc.unlink()
+
+# --- MAIN PROGRAM
+
+names = globals().keys()
+names.sort()
+
+failed = []
+
+try:
+    Node.allnodes
+except AttributeError:
+    # We don't actually have the minidom from the standard library,
+    # but are picking up the PyXML version from site-packages.
+    def check_allnodes():
+        pass
+else:
+    def check_allnodes():
+        confirm(len(Node.allnodes) == 0,
+                "assertion: len(Node.allnodes) == 0")
+        if len(Node.allnodes):
+            print "Garbage left over:"
+            if verbose:
+                print Node.allnodes.items()[0:10]
+            else:
+                # Don't print specific nodes if repeatable results
+                # are needed
+                print len(Node.allnodes)
+        Node.allnodes = {}
+
+for name in names:
+    if name.startswith("test"):
+        func = globals()[name]
+        try:
+            func()
+            check_allnodes()
+        except:
+            failed.append(name)
+            print "Test Failed: ", name
+            sys.stdout.flush()
+            traceback.print_exception(*sys.exc_info())
+            print `sys.exc_info()[1]`
+            Node.allnodes = {}
+
+if failed:
+    print "\n\n\n**** Check for failures in these tests:"
+    for name in failed:
+        print "  " + name
diff --git a/lib-python/2.2/test/test_mmap.py b/lib-python/2.2/test/test_mmap.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mmap.py
@@ -0,0 +1,317 @@
+from test.test_support import verify, vereq, TESTFN
+import mmap
+import os, re
+
+PAGESIZE = mmap.PAGESIZE
+
+def test_both():
+    "Test mmap module on Unix systems and Windows"
+
+    # Create a file to be mmap'ed.
+    if os.path.exists(TESTFN):
+        os.unlink(TESTFN)
+    f = open(TESTFN, 'w+')
+
+    try:    # unlink TESTFN no matter what
+        # Write 2 pages worth of data to the file
+        f.write('\0'* PAGESIZE)
+        f.write('foo')
+        f.write('\0'* (PAGESIZE-3) )
+        f.flush()
+        m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
+        f.close()
+
+        # Simple sanity checks
+
+        print type(m)  # SF bug 128713:  segfaulted on Linux
+        print '  Position of foo:', m.find('foo') / float(PAGESIZE), 'pages'
+        vereq(m.find('foo'), PAGESIZE)
+
+        print '  Length of file:', len(m) / float(PAGESIZE), 'pages'
+        vereq(len(m), 2*PAGESIZE)
+
+        print '  Contents of byte 0:', repr(m[0])
+        vereq(m[0], '\0')
+        print '  Contents of first 3 bytes:', repr(m[0:3])
+        vereq(m[0:3], '\0\0\0')
+
+        # Modify the file's content
+        print "\n  Modifying file's content..."
+        m[0] = '3'
+        m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'
+
+        # Check that the modification worked
+        print '  Contents of byte 0:', repr(m[0])
+        vereq(m[0], '3')
+        print '  Contents of first 3 bytes:', repr(m[0:3])
+        vereq(m[0:3], '3\0\0')
+        print '  Contents of second page:',  repr(m[PAGESIZE-1 : PAGESIZE + 7])
+        vereq(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')
+
+        m.flush()
+
+        # Test doing a regular expression match in an mmap'ed file
+        match = re.search('[A-Za-z]+', m)
+        if match is None:
+            print '  ERROR: regex match on mmap failed!'
+        else:
+            start, end = match.span(0)
+            length = end - start
+
+            print '  Regex match on mmap (page start, length of match):',
+            print start / float(PAGESIZE), length
+
+            vereq(start, PAGESIZE)
+            vereq(end, PAGESIZE + 6)
+
+        # test seeking around (try to overflow the seek implementation)
+        m.seek(0,0)
+        print '  Seek to zeroth byte'
+        vereq(m.tell(), 0)
+        m.seek(42,1)
+        print '  Seek to 42nd byte'
+        vereq(m.tell(), 42)
+        m.seek(0,2)
+        print '  Seek to last byte'
+        vereq(m.tell(), len(m))
+
+        print '  Try to seek to negative position...'
+        try:
+            m.seek(-1)
+        except ValueError:
+            pass
+        else:
+            verify(0, 'expected a ValueError but did not get it')
+
+        print '  Try to seek beyond end of mmap...'
+        try:
+            m.seek(1,2)
+        except ValueError:
+            pass
+        else:
+            verify(0, 'expected a ValueError but did not get it')
+
+        print '  Try to seek to negative position...'
+        try:
+            m.seek(-len(m)-1,2)
+        except ValueError:
+            pass
+        else:
+            verify(0, 'expected a ValueError but did not get it')
+
+        # Try resizing map
+        print '  Attempting resize()'
+        try:
+            m.resize(512)
+        except SystemError:
+            # resize() not supported
+            # No messages are printed, since the output of this test suite
+            # would then be different across platforms.
+            pass
+        else:
+            # resize() is supported
+            verify(len(m) == 512,
+                    "len(m) is %d, but expecting 512" % (len(m),) )
+            # Check that we can no longer seek beyond the new size.
+            try:
+                m.seek(513,0)
+            except ValueError:
+                pass
+            else:
+                verify(0, 'Could seek beyond the new size')
+
+        m.close()
+
+    finally:
+        try:
+            f.close()
+        except OSError:
+            pass
+        try:
+            os.unlink(TESTFN)
+        except OSError:
+            pass
+
+    # Test for "access" keyword parameter
+    try:
+        mapsize = 10
+        print "  Creating", mapsize, "byte test data file."
+        open(TESTFN, "wb").write("a"*mapsize)
+        print "  Opening mmap with access=ACCESS_READ"
+        f = open(TESTFN, "rb")
+        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
+        verify(m[:] == 'a'*mapsize, "Readonly memory map data incorrect.")
+
+        print "  Ensuring that readonly mmap can't be slice assigned."
+        try:
+            m[:] = 'b'*mapsize
+        except TypeError:
+            pass
+        else:
+            verify(0, "Able to write to readonly memory map")
+
+        print "  Ensuring that readonly mmap can't be item assigned."
+        try:
+            m[0] = 'b'
+        except TypeError:
+            pass
+        else:
+            verify(0, "Able to write to readonly memory map")
+
+        print "  Ensuring that readonly mmap can't be write() to."
+        try:
+            m.seek(0,0)
+            m.write('abc')
+        except TypeError:
+            pass
+        else:
+            verify(0, "Able to write to readonly memory map")
+
+        print "  Ensuring that readonly mmap can't be write_byte() to."
+        try:
+            m.seek(0,0)
+            m.write_byte('d')
+        except TypeError:
+            pass
+        else:
+            verify(0, "Able to write to readonly memory map")
+
+        print "  Ensuring that readonly mmap can't be resized."
+        try:
+            m.resize(2*mapsize)
+        except SystemError:   # resize is not universally supported
+            pass
+        except TypeError:
+            pass
+        else:
+            verify(0, "Able to resize readonly memory map")
+        del m, f
+        verify(open(TESTFN, "rb").read() == 'a'*mapsize,
+               "Readonly memory map data file was modified")
+
+        print "  Opening mmap with size too big"
+        import sys
+        f = open(TESTFN, "r+b")
+        try:
+            m = mmap.mmap(f.fileno(), mapsize+1)
+        except ValueError:
+            # we do not expect a ValueError on Windows
+            # CAUTION:  This also changes the size of the file on disk, and
+            # later tests assume that the length hasn't changed.  We need to
+            # repair that.
+            if sys.platform.startswith('win'):
+                verify(0, "Opening mmap with size+1 should work on Windows.")
+        else:
+            # we expect a ValueError on Unix, but not on Windows
+            if not sys.platform.startswith('win'):
+                verify(0, "Opening mmap with size+1 should raise ValueError.")
+            m.close()
+        f.close()
+        if sys.platform.startswith('win'):
+            # Repair damage from the resizing test.
+            f = open(TESTFN, 'r+b')
+            f.truncate(mapsize)
+            f.close()
+
+        print "  Opening mmap with access=ACCESS_WRITE"
+        f = open(TESTFN, "r+b")
+        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
+        print "  Modifying write-through memory map."
+        m[:] = 'c'*mapsize
+        verify(m[:] == 'c'*mapsize,
+               "Write-through memory map memory not updated properly.")
+        m.flush()
+        m.close()
+        f.close()
+        f = open(TESTFN, 'rb')
+        stuff = f.read()
+        f.close()
+        verify(stuff == 'c'*mapsize,
+               "Write-through memory map data file not updated properly.")
+
+        print "  Opening mmap with access=ACCESS_COPY"
+        f = open(TESTFN, "r+b")
+        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
+        print "  Modifying copy-on-write memory map."
+        m[:] = 'd'*mapsize
+        verify(m[:] == 'd' * mapsize,
+               "Copy-on-write memory map data not written correctly.")
+        m.flush()
+        verify(open(TESTFN, "rb").read() == 'c'*mapsize,
+               "Copy-on-write test data file should not be modified.")
+        try:
+            print "  Ensuring copy-on-write maps cannot be resized."
+            m.resize(2*mapsize)
+        except TypeError:
+            pass
+        else:
+            verify(0, "Copy-on-write mmap resize did not raise exception.")
+        del m, f
+        try:
+            print "  Ensuring invalid access parameter raises exception."
+            f = open(TESTFN, "r+b")
+            m = mmap.mmap(f.fileno(), mapsize, access=4)
+        except ValueError:
+            pass
+        else:
+            verify(0, "Invalid access code should have raised exception.")
+
+        if os.name == "posix":
+            # Try incompatible flags, prot and access parameters.
+            f = open(TESTFN, "r+b")
+            try:
+                m = mmap.mmap(f.fileno(), mapsize, flags=mmap.MAP_PRIVATE,
+                              prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
+            except ValueError:
+                pass
+            else:
+                verify(0, "Incompatible parameters should raise ValueError.")
+            f.close()
+    finally:
+        try:
+            os.unlink(TESTFN)
+        except OSError:
+            pass
+
+    # Do a tougher .find() test.  SF bug 515943 pointed out that, in 2.2,
+    # searching for data with embedded \0 bytes didn't work.
+    f = open(TESTFN, 'w+')
+
+    try:    # unlink TESTFN no matter what
+        data = 'aabaac\x00deef\x00\x00aa\x00'
+        n = len(data)
+        f.write(data)
+        f.flush()
+        m = mmap.mmap(f.fileno(), n)
+        f.close()
+
+        for start in range(n+1):
+            for finish in range(start, n+1):
+                slice = data[start : finish]
+                vereq(m.find(slice), data.find(slice))
+                vereq(m.find(slice + 'x'), -1)
+        m.close()
+
+    finally:
+        os.unlink(TESTFN)
+
+    # make sure a double close doesn't crash on Solaris (Bug# 665913)
+    f = open(TESTFN, 'w+')
+
+    try:    # unlink TESTFN no matter what
+        f.write(2**16 * 'a') # Arbitrary character
+        f.close()
+
+        f = open(TESTFN)
+        mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
+        mf.close()
+        mf.close()
+        f.close()
+
+    finally:
+        os.unlink(TESTFN)
+
+
+    print ' Test passed'
+
+test_both()
diff --git a/lib-python/2.2/test/test_multifile.py b/lib-python/2.2/test/test_multifile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_multifile.py
@@ -0,0 +1,66 @@
+import mimetools
+import multifile
+import cStringIO
+
+msg = """Mime-Version: 1.0
+Content-Type: multipart/mixed;
+        boundary="=====================_590453667==_"
+X-OriginalArrivalTime: 05 Feb 2002 03:43:23.0310 (UTC) FILETIME=[42D88CE0:01C1ADF7]
+
+--=====================_590453667==_
+Content-Type: multipart/alternative;
+        boundary="=====================_590453677==_.ALT"
+
+--=====================_590453677==_.ALT
+Content-Type: text/plain; charset="us-ascii"; format=flowed
+
+test A
+--=====================_590453677==_.ALT
+Content-Type: text/html; charset="us-ascii"
+
+<html>
+<b>test B</font></b></html>
+
+--=====================_590453677==_.ALT--
+
+--=====================_590453667==_
+Content-Type: text/plain; charset="us-ascii"
+Content-Disposition: attachment; filename="att.txt"
+
+Attached Content.
+Attached Content.
+Attached Content.
+Attached Content.
+
+--=====================_590453667==_--
+
+"""
+
+boundaries = 0
+linecount = 0
+
+def getMIMEMsg(mf):
+    global boundaries, linecount
+    msg = mimetools.Message(mf)
+
+    #print "TYPE: %s" % msg.gettype()
+    if msg.getmaintype() == 'multipart':
+        boundary = msg.getparam("boundary")
+        boundaries += 1
+
+        mf.push(boundary)
+        while mf.next():
+            getMIMEMsg(mf)
+        mf.pop()
+    else:
+        lines = mf.readlines()
+        linecount += len(lines)
+
+def main():
+    f = cStringIO.StringIO(msg)
+    getMIMEMsg(multifile.MultiFile(f))
+    assert boundaries == 2
+    assert linecount == 9
+
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/2.2/test/test_mutants.py b/lib-python/2.2/test/test_mutants.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_mutants.py
@@ -0,0 +1,285 @@
+from test_support import verbose, TESTFN
+import random
+import os
+
+# From SF bug #422121:  Insecurities in dict comparison.
+
+# Safety of code doing comparisons has been an historical Python weak spot.
+# The problem is that comparison of structures written in C *naturally*
+# wants to hold on to things like the size of the container, or "the
+# biggest" containee so far, across a traversal of the container; but
+# code to do containee comparisons can call back into Python and mutate
+# the container in arbitrary ways while the C loop is in midstream.  If the
+# C code isn't extremely paranoid about digging things out of memory on
+# each trip, and artificially boosting refcounts for the duration, anything
+# from infinite loops to OS crashes can result (yes, I use Windows <wink>).
+#
+# The other problem is that code designed to provoke a weakness is usually
+# white-box code, and so catches only the particular vulnerabilities the
+# author knew to protect against.  For example, Python's list.sort() code
+# went thru many iterations as one "new" vulnerability after another was
+# discovered.
+#
+# So the dict comparison test here uses a black-box approach instead,
+# generating dicts of various sizes at random, and performing random
+# mutations on them at random times.  This proved very effective,
+# triggering at least six distinct failure modes the first 20 times I
+# ran it.  Indeed, at the start, the driver never got beyond 6 iterations
+# before the test died.
+
+# The dicts are global to make it easy to mutate them from within functions.
+dict1 = {}
+dict2 = {}
+
+# The current set of keys in dict1 and dict2.  These are materialized as
+# lists to make it easy to pick a dict key at random.
+dict1keys = []
+dict2keys = []
+
+# Global flag telling maybe_mutate() whether to *consider* mutating.
+mutate = 0
+
+# If global mutate is true, consider mutating a dict.  May or may not
+# mutate a dict even if mutate is true.  If it does decide to mutate a
+# dict, it picks one of {dict1, dict2} at random, and deletes a random
+# entry from it; or, more rarely, adds a random element.
+
+def maybe_mutate():
+    global mutate
+    if not mutate:
+        return
+    if random.random() < 0.5:
+        return
+
+    if random.random() < 0.5:
+        target, keys = dict1, dict1keys
+    else:
+        target, keys = dict2, dict2keys
+
+    if random.random() < 0.2:
+        # Insert a new key.
+        mutate = 0   # disable mutation until key inserted
+        while 1:
+            newkey = Horrid(random.randrange(100))
+            if newkey not in target:
+                break
+        target[newkey] = Horrid(random.randrange(100))
+        keys.append(newkey)
+        mutate = 1
+
+    elif keys:
+        # Delete a key at random.
+        i = random.randrange(len(keys))
+        key = keys[i]
+        del target[key]
+        # CAUTION:  don't use keys.remove(key) here.  Or do <wink>.  The
+        # point is that .remove() would trigger more comparisons, and so
+        # also more calls to this routine.  We're mutating often enough
+        # without that.
+        del keys[i]
+
+# A horrid class that triggers random mutations of dict1 and dict2 when
+# instances are compared.
+
+class Horrid:
+    def __init__(self, i):
+        # Comparison outcomes are determined by the value of i.
+        self.i = i
+
+        # An artificial hashcode is selected at random so that we don't
+        # have any systematic relationship between comparison outcomes
+        # (based on self.i and other.i) and relative position within the
+        # hash vector (based on hashcode).
+        self.hashcode = random.randrange(1000000000)
+
+    def __hash__(self):
+        return self.hashcode
+
+    def __cmp__(self, other):
+        maybe_mutate()   # The point of the test.
+        return cmp(self.i, other.i)
+
+    def __repr__(self):
+        return "Horrid(%d)" % self.i
+
+# Fill dict d with numentries (Horrid(i), Horrid(j)) key-value pairs,
+# where i and j are selected at random from the candidates list.
+# Return d.keys() after filling.
+
+def fill_dict(d, candidates, numentries):
+    d.clear()
+    for i in xrange(numentries):
+        d[Horrid(random.choice(candidates))] = \
+            Horrid(random.choice(candidates))
+    return d.keys()
+
+# Test one pair of randomly generated dicts, each with n entries.
+# Note that dict comparison is trivial if they don't have the same number
+# of entries (then the "shorter" dict is instantly considered to be the
+# smaller one, without even looking at the entries).
+
+def test_one(n):
+    global mutate, dict1, dict2, dict1keys, dict2keys
+
+    # Fill the dicts without mutating them.
+    mutate = 0
+    dict1keys = fill_dict(dict1, range(n), n)
+    dict2keys = fill_dict(dict2, range(n), n)
+
+    # Enable mutation, then compare the dicts so long as they have the
+    # same size.
+    mutate = 1
+    if verbose:
+        print "trying w/ lengths", len(dict1), len(dict2),
+    while dict1 and len(dict1) == len(dict2):
+        if verbose:
+            print ".",
+        c = cmp(dict1, dict2)
+    if verbose:
+        print
+
+# Run test_one n times.  At the start (before the bugs were fixed), 20
+# consecutive runs of this test each blew up on or before the sixth time
+# test_one was run.  So n doesn't have to be large to get an interesting
+# test.
+# OTOH, calling with large n is also interesting, to ensure that the fixed
+# code doesn't hold on to refcounts *too* long (in which case memory would
+# leak).
+
+def test(n):
+    for i in xrange(n):
+        test_one(random.randrange(1, 100))
+
+# See last comment block for clues about good values for n.
+test(100)
+
+##########################################################################
+# Another segfault bug, distilled by Michael Hudson from a c.l.py post.
+
+class Child:
+    def __init__(self, parent):
+        self.__dict__['parent'] = parent
+    def __getattr__(self, attr):
+        self.parent.a = 1
+        self.parent.b = 1
+        self.parent.c = 1
+        self.parent.d = 1
+        self.parent.e = 1
+        self.parent.f = 1
+        self.parent.g = 1
+        self.parent.h = 1
+        self.parent.i = 1
+        return getattr(self.parent, attr)
+
+class Parent:
+    def __init__(self):
+        self.a = Child(self)
+
+# Hard to say what this will print!  May vary from time to time.  But
+# we're specifically trying to test the tp_print slot here, and this is
+# the clearest way to do it.  We print the result to a temp file so that
+# the expected-output file doesn't need to change.
+
+f = open(TESTFN, "w")
+print >> f, Parent().__dict__
+f.close()
+os.unlink(TESTFN)
+
+##########################################################################
+# And another core-dumper from Michael Hudson.
+
+dict = {}
+
+# Force dict to malloc its table.
+for i in range(1, 10):
+    dict[i] = i
+
+f = open(TESTFN, "w")
+
+class Machiavelli:
+    def __repr__(self):
+        dict.clear()
+
+        # Michael sez:  "doesn't crash without this.  don't know why."
+        # Tim sez:  "luck of the draw; crashes with or without for me."
+        print >> f
+
+        return `"machiavelli"`
+
+    def __hash__(self):
+        return 0
+
+dict[Machiavelli()] = Machiavelli()
+
+print >> f, str(dict)
+f.close()
+os.unlink(TESTFN)
+del f, dict
+
+
+##########################################################################
+# And another core-dumper from Michael Hudson.
+
+dict = {}
+
+# let's force dict to malloc its table
+for i in range(1, 10):
+    dict[i] = i
+
+class Machiavelli2:
+    def __eq__(self, other):
+        dict.clear()
+        return 1
+
+    def __hash__(self):
+        return 0
+
+dict[Machiavelli2()] = Machiavelli2()
+
+try:
+    dict[Machiavelli2()]
+except KeyError:
+    pass
+
+del dict
+
+##########################################################################
+# And another core-dumper from Michael Hudson.
+
+dict = {}
+
+# let's force dict to malloc its table
+for i in range(1, 10):
+    dict[i] = i
+
+class Machiavelli3:
+    def __init__(self, id):
+        self.id = id
+
+    def __eq__(self, other):
+        if self.id == other.id:
+            dict.clear()
+            return 1
+        else:
+            return 0
+
+    def __repr__(self):
+        return "%s(%s)"%(self.__class__.__name__, self.id)
+
+    def __hash__(self):
+        return 0
+
+dict[Machiavelli3(1)] = Machiavelli3(0)
+dict[Machiavelli3(2)] = Machiavelli3(0)
+
+f = open(TESTFN, "w")
+try:
+    try:
+        print >> f, dict[Machiavelli3(2)]
+    except KeyError:
+        pass
+finally:
+    f.close()
+    os.unlink(TESTFN)
+
+del dict
diff --git a/lib-python/2.2/test/test_netrc.py b/lib-python/2.2/test/test_netrc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_netrc.py
@@ -0,0 +1,42 @@
+
+import netrc, os, tempfile, test_support, unittest
+
+TEST_NETRC = """
+machine foo login log1 password pass1 account acct1
+
+macdef macro1
+line1
+line2
+
+macdef macro2
+line3
+line4
+
+default login log2 password pass2
+
+"""
+
+temp_filename = tempfile.mktemp()
+
+class NetrcTestCase(unittest.TestCase):
+
+    def setUp (self):
+        fp = open(temp_filename, 'wt')
+        fp.write(TEST_NETRC)
+        fp.close()
+        self.netrc = netrc.netrc(temp_filename)
+
+    def tearDown (self):
+        del self.netrc
+        os.unlink(temp_filename)
+
+    def test_case_1(self):
+        self.assert_(self.netrc.macros == {'macro1':['line1\n', 'line2\n'],
+                                           'macro2':['line3\n', 'line4\n']}
+                                           )
+        self.assert_(self.netrc.hosts['foo'] == ('log1', 'acct1', 'pass1'))
+        self.assert_(self.netrc.hosts['default'] == ('log2', None, 'pass2'))
+
+
+if __name__ == "__main__":
+    test_support.run_unittest(NetrcTestCase)
diff --git a/lib-python/2.2/test/test_new.py b/lib-python/2.2/test/test_new.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_new.py
@@ -0,0 +1,108 @@
+from test_support import verbose, verify
+import sys
+import new
+
+class Eggs:
+    def get_yolks(self):
+        return self.yolks
+
+print 'new.module()'
+m = new.module('Spam')
+if verbose:
+    print m
+m.Eggs = Eggs
+sys.modules['Spam'] = m
+import Spam
+
+def get_more_yolks(self):
+    return self.yolks + 3
+
+print 'new.classobj()'
+C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})
+if verbose:
+    print C
+print 'new.instance()'
+c = new.instance(C, {'yolks': 3})
+if verbose:
+    print c
+o = new.instance(C)
+verify(o.__dict__ == {},
+       "new __dict__ should be empty")
+del o
+o = new.instance(C, None)
+verify(o.__dict__ == {},
+       "new __dict__ should be empty")
+del o
+
+def break_yolks(self):
+    self.yolks = self.yolks - 2
+print 'new.instancemethod()'
+im = new.instancemethod(break_yolks, c, C)
+if verbose:
+    print im
+
+verify(c.get_yolks() == 3 and c.get_more_yolks() == 6,
+       'Broken call of hand-crafted class instance')
+im()
+verify(c.get_yolks() == 1 and c.get_more_yolks() == 4,
+       'Broken call of hand-crafted instance method')
+
+# It's unclear what the semantics should be for a code object compiled at
+# module scope, but bound and run in a function.  In CPython, `c' is global
+# (by accident?) while in Jython, `c' is local.  The intent of the test
+# clearly is to make `c' global, so let's be explicit about it.
+codestr = '''
+global c
+a = 1
+b = 2
+c = a + b
+'''
+
+ccode = compile(codestr, '<string>', 'exec')
+# Jython doesn't have a __builtins__, so use a portable alternative
+import __builtin__
+g = {'c': 0, '__builtins__': __builtin__}
+# this test could be more robust
+print 'new.function()'
+func = new.function(ccode, g)
+if verbose:
+    print func
+func()
+verify(g['c'] == 3,
+       'Could not create a proper function object')
+
+# test the various extended flavors of function.new
+def f(x):
+    def g(y):
+        return x + y
+    return g
+g = f(4)
+new.function(f.func_code, {}, "blah")
+g2 = new.function(g.func_code, {}, "blah", (2,), g.func_closure)
+verify(g2() == 6)
+g3 = new.function(g.func_code, {}, "blah", None, g.func_closure)
+verify(g3(5) == 9)
+def test_closure(func, closure, exc):
+    try:
+        new.function(func.func_code, {}, "", None, closure)
+    except exc:
+        pass
+    else:
+        print "corrupt closure accepted"
+
+test_closure(g, None, TypeError) # invalid closure
+test_closure(g, (1,), TypeError) # non-cell in closure
+test_closure(g, (1, 1), ValueError) # closure is wrong size
+test_closure(f, g.func_closure, ValueError) # no closure needed
+
+print 'new.code()'
+# bogus test of new.code()
+# Note: Jython will never have new.code()
+if hasattr(new, 'code'):
+    d = new.code(3, 3, 3, 3, codestr, (), (), (),
+                 "<string>", "<name>", 1, "", (), ())
+    # test backwards-compatibility version with no freevars or cellvars
+    d = new.code(3, 3, 3, 3, codestr, (), (), (),
+                 "<string>", "<name>", 1, "")
+    if verbose:
+        print d
diff --git a/lib-python/2.2/test/test_nis.py b/lib-python/2.2/test/test_nis.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_nis.py
@@ -0,0 +1,32 @@
+from test_support import verbose, TestFailed, TestSkipped
+import nis
+
+print 'nis.maps()'
+try:
+    maps = nis.maps()
+except nis.error, msg:
+    # NIS is probably not active, so this test isn't useful
+    if verbose:
+        raise TestFailed, msg
+    # only do this if running under the regression suite
+    raise TestSkipped, msg
+
+done = 0
+for nismap in maps:
+    if verbose:
+        print nismap
+    mapping = nis.cat(nismap)
+    for k, v in mapping.items():
+        if verbose:
+            print '    ', k, v
+        if not k:
+            continue
+        if nis.match(k, nismap) != v:
+            print "NIS match failed for key `%s' in map `%s'" % (k, nismap)
+        else:
+            # just test the one key, otherwise this test could take a
+            # very long time
+            done = 1
+            break
+    if done:
+        break
diff --git a/lib-python/2.2/test/test_ntpath.py b/lib-python/2.2/test/test_ntpath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_ntpath.py
@@ -0,0 +1,114 @@
+import ntpath
+from test_support import verbose, TestFailed
+import os
+
+errors = 0
+
+def tester(fn, wantResult):
+    global errors
+    fn = fn.replace("\\", "\\\\")
+    gotResult = eval(fn)
+    if wantResult != gotResult:
+        print "error!"
+        print "evaluated: " + str(fn)
+        print "should be: " + str(wantResult)
+        print " returned: " + str(gotResult)
+        print ""
+        errors = errors + 1
+
+tester('ntpath.splitdrive("c:\\foo\\bar")',
+       ('c:', '\\foo\\bar'))
+tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")',
+       ('\\\\conky\\mountpoint', '\\foo\\bar'))
+tester('ntpath.splitdrive("c:/foo/bar")',
+       ('c:', '/foo/bar'))
+tester('ntpath.splitunc("//conky/mountpoint/foo/bar")',
+       ('//conky/mountpoint', '/foo/bar'))
+
+tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
+tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
+       ('\\\\conky\\mountpoint\\foo', 'bar'))
+
+tester('ntpath.split("c:\\")', ('c:\\', ''))
+tester('ntpath.split("\\\\conky\\mountpoint\\")',
+       ('\\\\conky\\mountpoint', ''))
+
+tester('ntpath.split("c:/")', ('c:/', ''))
+tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
+
+tester('ntpath.isabs("c:\\")', 1)
+tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
+tester('ntpath.isabs("\\foo")', 1)
+tester('ntpath.isabs("\\foo\\bar")', 1)
+
+tester('ntpath.abspath("C:\\")', "C:\\")
+
+tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
+       "/home/swen")
+tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
+       "\\home\\swen\\")
+tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
+       "/home/swen/spam")
+
+tester('ntpath.join("")', '')
+tester('ntpath.join("", "", "")', '')
+tester('ntpath.join("a")', 'a')
+tester('ntpath.join("/a")', '/a')
+tester('ntpath.join("\\a")', '\\a')
+tester('ntpath.join("a:")', 'a:')
+tester('ntpath.join("a:", "b")', 'a:b')
+tester('ntpath.join("a:", "/b")', 'a:/b')
+tester('ntpath.join("a:", "\\b")', 'a:\\b')
+tester('ntpath.join("a", "/b")', '/b')
+tester('ntpath.join("a", "\\b")', '\\b')
+tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
+tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
+tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
+tester('ntpath.join("a", "b", "\\c")', '\\c')
+tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
+tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
+tester("ntpath.join('c:', '/a')", 'c:/a')
+tester("ntpath.join('c:/', '/a')", 'c:/a')
+tester("ntpath.join('c:/a', '/b')", '/b')
+tester("ntpath.join('c:', 'd:/')", 'd:/')
+tester("ntpath.join('c:/', 'd:/')", 'd:/')
+tester("ntpath.join('c:/', 'd:/a/b')", 'd:/a/b')
+
+tester("ntpath.join('')", '')
+tester("ntpath.join('', '', '', '', '')", '')
+tester("ntpath.join('a')", 'a')
+tester("ntpath.join('', 'a')", 'a')
+tester("ntpath.join('', '', '', '', 'a')", 'a')
+tester("ntpath.join('a', '')", 'a\\')
+tester("ntpath.join('a', '', '', '', '')", 'a\\')
+tester("ntpath.join('a\\', '')", 'a\\')
+tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
+
+tester("ntpath.normpath('A//////././//.//B')", r'A\B')
+tester("ntpath.normpath('A/./B')", r'A\B')
+tester("ntpath.normpath('A/foo/../B')", r'A\B')
+tester("ntpath.normpath('C:A//B')", r'C:A\B')
+tester("ntpath.normpath('D:A/./B')", r'D:A\B')
+tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')
+
+# Next 3 seem dubious, and especially the 3rd, but normpath is possibly
+# trying to leave UNC paths alone without actually knowing anything about
+# them.
+tester("ntpath.normpath('C:///A//B')", r'C:\\\A\B')
+tester("ntpath.normpath('D:///A/./B')", r'D:\\\A\B')
+tester("ntpath.normpath('e:///A/foo/../B')", r'e:\\\A\B')
+
+tester("ntpath.normpath('..')", r'..')
+tester("ntpath.normpath('.')", r'.')
+tester("ntpath.normpath('')", r'.')
+tester("ntpath.normpath('/')", '\\')
+tester("ntpath.normpath('c:/')", 'c:\\')
+tester("ntpath.normpath('/../.././..')", '\\')
+tester("ntpath.normpath('c:/../../..')", 'c:\\')
+tester("ntpath.normpath('../.././..')", r'..\..\..')
+tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
+
+if errors:
+    raise TestFailed(str(errors) + " errors.")
+elif verbose:
+    print "No errors.  Thank your lucky stars."
diff --git a/lib-python/2.2/test/test_opcodes.py b/lib-python/2.2/test/test_opcodes.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_opcodes.py
@@ -0,0 +1,101 @@
+# Python test set -- part 2, opcodes
+
+from test_support import *
+
+
+print '2. Opcodes'
+print 'XXX Not yet fully implemented'
+
+print '2.1 try inside for loop'
+n = 0
+for i in range(10):
+    n = n+i
+    try: 1/0
+    except NameError: pass
+    except ZeroDivisionError: pass
+    except TypeError: pass
+    try: pass
+    except: pass
+    try: pass
+    finally: pass
+    n = n+i
+if n != 90:
+    raise TestFailed, 'try inside for'
+
+
+print '2.2 raise class exceptions'
+
+class AClass: pass
+class BClass(AClass): pass
+class CClass: pass
+class DClass(AClass):
+    def __init__(self, ignore):
+        pass
+
+try: raise AClass()
+except: pass
+
+try: raise AClass()
+except AClass: pass
+
+try: raise BClass()
+except AClass: pass
+
+try: raise BClass()
+except CClass: raise TestFailed
+except: pass
+
+a = AClass()
+b = BClass()
+
+try: raise AClass, b
+except BClass, v:
+    if v != b: raise TestFailed, "v!=b"
+else: raise TestFailed, "no exception"
+
+try: raise b
+except AClass, v:
+    if v != b: raise TestFailed, "v!=b AClass"
+
+# not enough arguments
+try:  raise BClass, a
+except TypeError: pass
+
+try:  raise DClass, a
+except DClass, v:
+    if not isinstance(v, DClass):
+        raise TestFailed, "v not DClass"
+
+print '2.3 comparing function objects'
+
+f = eval('lambda: None')
+g = eval('lambda: None')
+if f == g: raise TestFailed, "functions should not be same"
+
+f = eval('lambda a: a')
+g = eval('lambda a: a')
+if f == g: raise TestFailed, "functions should not be same"
+
+f = eval('lambda a=1: a')
+g = eval('lambda a=1: a')
+if f == g: raise TestFailed, "functions should not be same"
+
+f = eval('lambda: 0')
+g = eval('lambda: 1')
+if f == g: raise TestFailed
+
+f = eval('lambda: None')
+g = eval('lambda a: None')
+if f == g: raise TestFailed
+
+f = eval('lambda a: None')
+g = eval('lambda b: None')
+if f == g: raise TestFailed
+
+f = eval('lambda a: None')
+g = eval('lambda a=None: None')
+if f == g: raise TestFailed
+
+f = eval('lambda a=0: None')
+g = eval('lambda a=1: None')
+if f == g: raise TestFailed
diff --git a/lib-python/2.2/test/test_openpty.py b/lib-python/2.2/test/test_openpty.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_openpty.py
@@ -0,0 +1,21 @@
+# Test to see if openpty works. (But don't worry if it isn't available.)
+
+import os
+from test_support import verbose, TestFailed, TestSkipped
+
+try:
+    if verbose:
+        print "Calling os.openpty()"
+    master, slave = os.openpty()
+    if verbose:
+        print "(master, slave) = (%d, %d)"%(master, slave)
+except AttributeError:
+    raise TestSkipped, "No openpty() available."
+
+if not os.isatty(master):
+    raise TestFailed, "Master-end of pty is not a terminal."
+if not os.isatty(slave):
+    raise TestFailed, "Slave-end of pty is not a terminal."
+
+os.write(slave, 'Ping!')
+print os.read(master, 1024)
diff --git a/lib-python/2.2/test/test_operations.py b/lib-python/2.2/test/test_operations.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_operations.py
@@ -0,0 +1,52 @@
+# Python test set -- part 3, built-in operations.
+
+
+print '3. Operations'
+print 'XXX Mostly not yet implemented'
+
+
+print '3.1 Dictionary lookups succeed even if __cmp__() raises an exception'
+
+# SourceForge bug #112558:
+# http://sourceforge.net/bugs/?func=detailbug&bug_id=112558&group_id=5470
+
+class BadDictKey:
+    already_printed_raising_error = 0
+
+    def __hash__(self):
+        return hash(self.__class__)
+
+    def __cmp__(self, other):
+        if isinstance(other, self.__class__):
+            if not BadDictKey.already_printed_raising_error:
+                # How many times __cmp__ gets called depends on the hash
+                # code and the internals of the dict implementation; we
+                # know it will be called at least once, but that's it.
+                # already_printed_raising_error makes sure the expected-
+                # output file prints the msg at most once.
+                BadDictKey.already_printed_raising_error = 1
+                print "raising error"
+            raise RuntimeError, "gotcha"
+        return other
+
+d = {}
+x1 = BadDictKey()
+x2 = BadDictKey()
+d[x1] = 1
+d[x2] = 2
+print "No exception passed through."
+
+# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
+# This version got an assert failure in debug build, infinite loop in
+# release build.  Unfortunately, provoking this kind of stuff requires
+# a mix of inserts and deletes hitting exactly the right hash codes in
+# exactly the right order, and I can't think of a randomized approach
+# that would be *likely* to hit a failing case in reasonable time.
+
+d = {}
+for i in range(5):
+    d[i] = i
+for i in range(5):
+    del d[i]
+for i in range(5, 9):  # i==8 was the problem
+    d[i] = i
diff --git a/lib-python/2.2/test/test_operator.py b/lib-python/2.2/test/test_operator.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_operator.py
@@ -0,0 +1,218 @@
+import operator
+import unittest
+
+import test_support
+
+
+class OperatorTestCase(unittest.TestCase):
+    def test_lt(self):
+        self.failIf(operator.lt(1, 0))
+        self.failIf(operator.lt(1, 0.0))
+        self.failIf(operator.lt(1, 1))
+        self.failIf(operator.lt(1, 1.0))
+        self.failUnless(operator.lt(1, 2))
+        self.failUnless(operator.lt(1, 2.0))
+
+    def test_le(self):
+        self.failIf(operator.le(1, 0))
+        self.failIf(operator.le(1, 0.0))
+        self.failUnless(operator.le(1, 1))
+        self.failUnless(operator.le(1, 1.0))
+        self.failUnless(operator.le(1, 2))
+        self.failUnless(operator.le(1, 2.0))
+
+    def test_eq(self):
+        self.failIf(operator.eq(1, 0))
+        self.failIf(operator.eq(1, 0.0))
+        self.failUnless(operator.eq(1, 1))
+        self.failUnless(operator.eq(1, 1.0))
+        self.failIf(operator.eq(1, 2))
+        self.failIf(operator.eq(1, 2.0))
+
+    def test_ne(self):
+        self.failUnless(operator.ne(1, 0))
+        self.failUnless(operator.ne(1, 0.0))
+        self.failIf(operator.ne(1, 1))
+        self.failIf(operator.ne(1, 1.0))
+        self.failUnless(operator.ne(1, 2))
+        self.failUnless(operator.ne(1, 2.0))
+
+    def test_ge(self):
+        self.failUnless(operator.ge(1, 0))
+        self.failUnless(operator.ge(1, 0.0))
+        self.failUnless(operator.ge(1, 1))
+        self.failUnless(operator.ge(1, 1.0))
+        self.failIf(operator.ge(1, 2))
+        self.failIf(operator.ge(1, 2.0))
+
+    def test_gt(self):
+        self.failUnless(operator.gt(1, 0))
+        self.failUnless(operator.gt(1, 0.0))
+        self.failIf(operator.gt(1, 1))
+        self.failIf(operator.gt(1, 1.0))
+        self.failIf(operator.gt(1, 2))
+        self.failIf(operator.gt(1, 2.0))
+
+    def test_abs(self):
+        self.failUnless(operator.abs(-1) == 1)
+        self.failUnless(operator.abs(1) == 1)
+
+    def test_add(self):
+        self.failUnless(operator.add(3, 4) == 7)
+
+    def test_bitwise_and(self):
+        self.failUnless(operator.and_(0xf, 0xa) == 0xa)
+
+    def test_concat(self):
+        self.failUnless(operator.concat('py', 'thon') == 'python')
+        self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
+
+    def test_countOf(self):
+        self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
+        self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
+
+    def test_delitem(self):
+        a = [4, 3, 2, 1]
+        self.failUnless(operator.delitem(a, 1) is None)
+        self.assert_(a == [4, 2, 1])
+
+    def test_delslice(self):
+        a = range(10)
+        self.failUnless(operator.delslice(a, 2, 8) is None)
+        self.assert_(a == [0, 1, 8, 9])
+
+    def test_div(self):
+        self.failUnless(operator.floordiv(5, 2) == 2)
+
+    def test_floordiv(self):
+        self.failUnless(operator.floordiv(5, 2) == 2)
+
+    def test_truediv(self):
+        self.failUnless(operator.truediv(5, 2) == 2.5)
+
+    def test_getitem(self):
+        a = range(10)
+        self.failUnless(operator.getitem(a, 2) == 2)
+
+    def test_getslice(self):
+        a = range(10)
+        self.failUnless(operator.getslice(a, 4, 6) == [4, 5])
+
+    def test_indexOf(self):
+        self.failUnless(operator.indexOf([4, 3, 2, 1], 3) == 1)
+        self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
+
+    def test_invert(self):
+        self.failUnless(operator.inv(4) == -5)
+
+    def test_isCallable(self):
+        class C:
+            pass
+        def check(self, o, v):
+            self.assert_(operator.isCallable(o) == callable(o) == v)
+        check(self, 4, 0)
+        check(self, operator.isCallable, 1)
+        check(self, C, 1)
+        check(self, C(), 0)
+
+    def test_isMappingType(self):
+        self.failIf(operator.isMappingType(1))
+        self.failIf(operator.isMappingType(operator.isMappingType))
+        self.failUnless(operator.isMappingType(operator.__dict__))
+        self.failUnless(operator.isMappingType({}))
+
+    def test_isNumberType(self):
+        self.failUnless(operator.isNumberType(8))
+        self.failUnless(operator.isNumberType(8j))
+        self.failUnless(operator.isNumberType(8L))
+        self.failUnless(operator.isNumberType(8.3))
+        self.failIf(operator.isNumberType(dir()))
+
+    def test_isSequenceType(self):
+        self.failUnless(operator.isSequenceType(dir()))
+        self.failUnless(operator.isSequenceType(()))
+        self.failUnless(operator.isSequenceType(xrange(10)))
+        self.failUnless(operator.isSequenceType('yeahbuddy'))
+        self.failIf(operator.isSequenceType(3))
+
+    def test_lshift(self):
+        self.failUnless(operator.lshift(5, 1) == 10)
+        self.failUnless(operator.lshift(5, 0) == 5)
+        self.assertRaises(ValueError, operator.lshift, 2, -1)
+
+    def test_mod(self):
+        self.failUnless(operator.mod(5, 2) == 1)
+
+    def test_mul(self):
+        self.failUnless(operator.mul(5, 2) == 10)
+
+    def test_neg(self):
+        self.failUnless(operator.neg(5) == -5)
+        self.failUnless(operator.neg(-5) == 5)
+        self.failUnless(operator.neg(0) == 0)
+        self.failUnless(operator.neg(-0) == 0)
+
+    def test_bitwise_or(self):
+        self.failUnless(operator.or_(0xa, 0x5) == 0xf)
+
+    def test_pos(self):
+        self.failUnless(operator.pos(5) == 5)
+        self.failUnless(operator.pos(-5) == -5)
+        self.failUnless(operator.pos(0) == 0)
+        self.failUnless(operator.pos(-0) == 0)
+
+    def test_repeat(self):
+        a = range(3)
+        self.failUnless(operator.repeat(a, 2) == a+a)
+        self.failUnless(operator.repeat(a, 1) == a)
+        self.failUnless(operator.repeat(a, 0) == [])
+        a = (1, 2, 3)
+        self.failUnless(operator.repeat(a, 2) == a+a)
+        self.failUnless(operator.repeat(a, 1) == a)
+        self.failUnless(operator.repeat(a, 0) == ())
+        a = '123'
+        self.failUnless(operator.repeat(a, 2) == a+a)
+        self.failUnless(operator.repeat(a, 1) == a)
+        self.failUnless(operator.repeat(a, 0) == '')
+
+    def test_rshift(self):
+        self.failUnless(operator.rshift(5, 1) == 2)
+        self.failUnless(operator.rshift(5, 0) == 5)
+        self.assertRaises(ValueError, operator.rshift, 2, -1)
+
+    def test_contains(self):
+        self.failUnless(operator.contains(range(4), 2))
+        self.failIf(operator.contains(range(4), 5))
+        self.failUnless(operator.sequenceIncludes(range(4), 2))
+        self.failIf(operator.sequenceIncludes(range(4), 5))
+
+    def test_setitem(self):
+        a = range(3)
+        self.failUnless(operator.setitem(a, 0, 2) is None)
+        self.assert_(a == [2, 1, 2])
+        self.assertRaises(IndexError, operator.setitem, a, 4, 2)
+
+    def test_setslice(self):
+        a = range(4)
+        self.failUnless(operator.setslice(a, 1, 3, [2, 1]) is None)
+        self.assert_(a == [0, 2, 1, 3])
+
+    def test_sub(self):
+        self.failUnless(operator.sub(5, 2) == 3)
+
+    def test_truth(self):
+        self.failUnless(operator.truth(5))
+        self.failUnless(operator.truth([0]))
+        self.failIf(operator.truth(0))
+        self.failIf(operator.truth([]))
+
+    def test_bitwise_xor(self):
+        self.failUnless(operator.xor(0xb, 0xc) == 0x7)
+
+
+def test_main():
+    test_support.run_unittest(OperatorTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_os.py b/lib-python/2.2/test/test_os.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_os.py
@@ -0,0 +1,187 @@
+# As a test suite for the os module, this is woefully inadequate, but this
+# does add tests for a few functions which have been determined to be
+# more portable than they had been thought to be.
+
+import os
+import unittest
+import warnings
+
+warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
+warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
+
+from test_support import TESTFN, run_unittest
+
+class TemporaryFileTests(unittest.TestCase):
+    def setUp(self):
+        self.files = []
+        os.mkdir(TESTFN)
+
+    def tearDown(self):
+        for name in self.files:
+            os.unlink(name)
+        os.rmdir(TESTFN)
+
+    def check_tempfile(self, name):
+        # make sure it doesn't already exist:
+        self.failIf(os.path.exists(name),
+                    "file already exists for temporary file")
+        # make sure we can create the file
+        open(name, "w")
+        self.files.append(name)
+
+    def test_tempnam(self):
+        if not hasattr(os, "tempnam"):
+            return
+        warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
+                                "test_os")
+        self.check_tempfile(os.tempnam())
+
+        name = os.tempnam(TESTFN)
+        self.check_tempfile(name)
+
+        name = os.tempnam(TESTFN, "pfx")
+        self.assert_(os.path.basename(name)[:3] == "pfx")
+        self.check_tempfile(name)
+
+    def test_tmpfile(self):
+        if not hasattr(os, "tmpfile"):
+            return
+        fp = os.tmpfile()
+        fp.write("foobar")
+        fp.seek(0,0)
+        s = fp.read()
+        fp.close()
+        self.assert_(s == "foobar")
+
+    def test_tmpnam(self):
+        if not hasattr(os, "tmpnam"):
+            return
+        warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
+                                "test_os")
+        self.check_tempfile(os.tmpnam())
+
+# Test attributes on return values from os.*stat* family.
+class StatAttributeTests(unittest.TestCase):
+    def setUp(self):
+        os.mkdir(TESTFN)
+        self.fname = os.path.join(TESTFN, "f1")
+        f = open(self.fname, 'wb')
+        f.write("ABC")
+        f.close()
+
+    def tearDown(self):
+        os.unlink(self.fname)
+        os.rmdir(TESTFN)
+
+    def test_stat_attributes(self):
+        if not hasattr(os, "stat"):
+            return
+
+        import stat
+        result = os.stat(self.fname)
+
+        # Make sure direct access works
+        self.assertEquals(result[stat.ST_SIZE], 3)
+        self.assertEquals(result.st_size, 3)
+
+        import sys
+
+        # Make sure all the attributes are there
+        members = dir(result)
+        for name in dir(stat):
+            if name[:3] == 'ST_':
+                attr = name.lower()
+                self.assertEquals(getattr(result, attr),
+                                  result[getattr(stat, name)])
+                self.assert_(attr in members)
+
+        try:
+            result[200]
+            self.fail("No exception thrown")
+        except IndexError:
+            pass
+
+        # Make sure that assignment fails
+        try:
+            result.st_mode = 1
+            self.fail("No exception thrown")
+        except TypeError:
+            pass
+
+        try:
+            result.st_rdev = 1
+            self.fail("No exception thrown")
+        except (AttributeError, TypeError):
+            pass
+
+        try:
+            result.parrot = 1
+            self.fail("No exception thrown")
+        except AttributeError:
+            pass
+
+        # Use the stat_result constructor with a too-short tuple.
+        try:
+            result2 = os.stat_result((10,))
+            self.fail("No exception thrown")
+        except TypeError:
+            pass
+
+        # Use the constructor with a too-long tuple.
+        try:
+            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
+        except TypeError:
+            pass
+
+
+    def test_statvfs_attributes(self):
+        if not hasattr(os, "statvfs"):
+            return
+
+        import statvfs
+        result = os.statvfs(self.fname)
+
+        # Make sure direct access works
+        self.assertEquals(result.f_bfree, result[statvfs.F_BFREE])
+
+        # Make sure all the attributes are there
+        members = dir(result)
+        for name in dir(statvfs):
+            if name[:2] == 'F_':
+                attr = name.lower()
+                self.assertEquals(getattr(result, attr),
+                                  result[getattr(statvfs, name)])
+                self.assert_(attr in members)
+
+        # Make sure that assignment really fails
+        try:
+            result.f_bfree = 1
+            self.fail("No exception thrown")
+        except TypeError:
+            pass
+
+        try:
+            result.parrot = 1
+            self.fail("No exception thrown")
+        except AttributeError:
+            pass
+
+        # Use the constructor with a too-short tuple.
+        try:
+            result2 = os.statvfs_result((10,))
+            self.fail("No exception thrown")
+        except TypeError:
+            pass
+
+        # Use the constructor with a too-long tuple.
+        try:
+            result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
+        except TypeError:
+            pass
+
+def test_main():
+    run_unittest(TemporaryFileTests)
+    run_unittest(StatAttributeTests)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_parser.py b/lib-python/2.2/test/test_parser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_parser.py
@@ -0,0 +1,383 @@
+import parser
+import test_support
+import unittest
+
+#
+#  First, we test that we can generate trees from valid source fragments,
+#  and that these valid trees are indeed allowed by the tree-loading side
+#  of the parser module.
+#
+
+class RoundtripLegalSyntaxTestCase(unittest.TestCase):
+    def roundtrip(self, f, s):
+        st1 = f(s)
+        t = st1.totuple()
+        try:
+            st2 = parser.sequence2st(t)
+        except parser.ParserError:
+            self.fail("could not roundtrip %r" % s)
+
+        self.assertEquals(t, st2.totuple(),
+                          "could not re-generate syntax tree")
+
+    def check_expr(self, s):
+        self.roundtrip(parser.expr, s)
+
+    def check_suite(self, s):
+        self.roundtrip(parser.suite, s)
+
+    def test_yield_statement(self):
+        self.check_suite("from __future__ import generators\n"
+                         "def f(): yield 1")
+        self.check_suite("from __future__ import generators\n"
+                         "def f(): return; yield 1")
+        self.check_suite("from __future__ import generators\n"
+                         "def f(): yield 1; return")
+        self.check_suite("from __future__ import generators\n"
+                         "def f():\n"
+                         "    for x in range(30):\n"
+                         "        yield x\n")
+
+    def test_expressions(self):
+        self.check_expr("foo(1)")
+        self.check_expr("[1, 2, 3]")
+        self.check_expr("[x**3 for x in range(20)]")
+        self.check_expr("[x**3 for x in range(20) if x % 3]")
+        self.check_expr("foo(*args)")
+        self.check_expr("foo(*args, **kw)")
+        self.check_expr("foo(**kw)")
+        self.check_expr("foo(key=value)")
+        self.check_expr("foo(key=value, *args)")
+        self.check_expr("foo(key=value, *args, **kw)")
+        self.check_expr("foo(key=value, **kw)")
+        self.check_expr("foo(a, b, c, *args)")
+        self.check_expr("foo(a, b, c, *args, **kw)")
+        self.check_expr("foo(a, b, c, **kw)")
+        self.check_expr("foo + bar")
+        self.check_expr("foo - bar")
+        self.check_expr("foo * bar")
+        self.check_expr("foo / bar")
+        self.check_expr("foo // bar")
+        self.check_expr("lambda: 0")
+        self.check_expr("lambda x: 0")
+        self.check_expr("lambda *y: 0")
+        self.check_expr("lambda *y, **z: 0")
+        self.check_expr("lambda **z: 0")
+        self.check_expr("lambda x, y: 0")
+        self.check_expr("lambda foo=bar: 0")
+        self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
+        self.check_expr("lambda foo=bar, **z: 0")
+        self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
+        self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
+        self.check_expr("lambda x, *y, **z: 0")
+
+    def test_print(self):
+        self.check_suite("print")
+        self.check_suite("print 1")
+        self.check_suite("print 1,")
+        self.check_suite("print >>fp")
+        self.check_suite("print >>fp, 1")
+        self.check_suite("print >>fp, 1,")
+
+    def test_simple_expression(self):
+        # expr_stmt
+        self.check_suite("a")
+
+    def test_simple_assignments(self):
+        self.check_suite("a = b")
+        self.check_suite("a = b = c = d = e")
+
+    def test_simple_augmented_assignments(self):
+        self.check_suite("a += b")
+        self.check_suite("a -= b")
+        self.check_suite("a *= b")
+        self.check_suite("a /= b")
+        self.check_suite("a //= b")
+        self.check_suite("a %= b")
+        self.check_suite("a &= b")
+        self.check_suite("a |= b")
+        self.check_suite("a ^= b")
+        self.check_suite("a <<= b")
+        self.check_suite("a >>= b")
+        self.check_suite("a **= b")
+
+    def test_function_defs(self):
+        self.check_suite("def f(): pass")
+        self.check_suite("def f(*args): pass")
+        self.check_suite("def f(*args, **kw): pass")
+        self.check_suite("def f(**kw): pass")
+        self.check_suite("def f(foo=bar): pass")
+        self.check_suite("def f(foo=bar, *args): pass")
+        self.check_suite("def f(foo=bar, *args, **kw): pass")
+        self.check_suite("def f(foo=bar, **kw): pass")
+
+        self.check_suite("def f(a, b): pass")
+        self.check_suite("def f(a, b, *args): pass")
+        self.check_suite("def f(a, b, *args, **kw): pass")
+        self.check_suite("def f(a, b, **kw): pass")
+        self.check_suite("def f(a, b, foo=bar): pass")
+        self.check_suite("def f(a, b, foo=bar, *args): pass")
+        self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
+        self.check_suite("def f(a, b, foo=bar, **kw): pass")
+
+    def test_import_from_statement(self):
+        self.check_suite("from sys.path import *")
+        self.check_suite("from sys.path import dirname")
+        self.check_suite("from sys.path import dirname as my_dirname")
+        self.check_suite("from sys.path import dirname, basename")
+        self.check_suite(
+            "from sys.path import dirname as my_dirname, basename")
+        self.check_suite(
+            "from sys.path import dirname, basename as my_basename")
+
+    def test_basic_import_statement(self):
+        self.check_suite("import sys")
+        self.check_suite("import sys as system")
+        self.check_suite("import sys, math")
+        self.check_suite("import sys as system, math")
+        self.check_suite("import sys, math as my_math")
+
+    def test_assert(self):
+        self.check_suite("assert alo < ahi and blo < bhi\n")
+
+#
+#  Second, we take *invalid* trees and make sure we get ParserError
+#  rejections for them.
+#
+
+class IllegalSyntaxTestCase(unittest.TestCase):
+    def check_bad_tree(self, tree, label):
+        try:
+            parser.sequence2st(tree)
+        except parser.ParserError:
+            pass
+        else:
+            self.fail("did not detect invalid tree for %r" % label)
+
+    def test_junk(self):
+        # not even remotely valid:
+        self.check_bad_tree((1, 2, 3), "<junk>")
+
+    def test_illegal_yield_1(self):
+        """Illegal yield statement: def f(): return 1; yield 1"""
+        tree = \
+        (257,
+         (264,
+          (285,
+           (259,
+            (1, 'def'),
+            (1, 'f'),
+            (260, (7, '('), (8, ')')),
+            (11, ':'),
+            (291,
+             (4, ''),
+             (5, ''),
+             (264,
+              (265,
+               (266,
+                (272,
+                 (275,
+                  (1, 'return'),
+                  (313,
+                   (292,
+                    (293,
+                     (294,
+                      (295,
+                       (297,
+                        (298,
+                         (299,
+                          (300,
+                           (301,
+                            (302, (303, (304, (305, (2, '1')))))))))))))))))),
+               (264,
+                (265,
+                 (266,
+                  (272,
+                   (276,
+                    (1, 'yield'),
+                    (313,
+                     (292,
+                      (293,
+                       (294,
+                        (295,
+                         (297,
+                          (298,
+                           (299,
+                            (300,
+                             (301,
+                              (302,
+                               (303, (304, (305, (2, '1')))))))))))))))))),
+                 (4, ''))),
+               (6, ''))))),
+           (4, ''),
+           (0, ''))))
+        self.check_bad_tree(tree, "def f():\n  return 1\n  yield 1")
+
+    def test_illegal_yield_2(self):
+        """Illegal return in generator: def f(): return 1; yield 1"""
+        tree = \
+        (257,
+         (264,
+          (265,
+           (266,
+            (278,
+             (1, 'from'),
+             (281, (1, '__future__')),
+             (1, 'import'),
+             (279, (1, 'generators')))),
+           (4, ''))),
+         (264,
+          (285,
+           (259,
+            (1, 'def'),
+            (1, 'f'),
+            (260, (7, '('), (8, ')')),
+            (11, ':'),
+            (291,
+             (4, ''),
+             (5, ''),
+             (264,
+              (265,
+               (266,
+                (272,
+                 (275,
+                  (1, 'return'),
+                  (313,
+                   (292,
+                    (293,
+                     (294,
+                      (295,
+                       (297,
+                        (298,
+                         (299,
+                          (300,
+                           (301,
+                            (302, (303, (304, (305, (2, '1')))))))))))))))))),
+               (264,
+                (265,
+                 (266,
+                  (272,
+                   (276,
+                    (1, 'yield'),
+                    (313,
+                     (292,
+                      (293,
+                       (294,
+                        (295,
+                         (297,
+                          (298,
+                           (299,
+                            (300,
+                             (301,
+                              (302,
+                               (303, (304, (305, (2, '1')))))))))))))))))),
+                 (4, ''))),
+               (6, ''))))),
+           (4, ''),
+           (0, ''))))
+        self.check_bad_tree(tree, "def f():\n  return 1\n  yield 1")
+
+    def test_print_chevron_comma(self):
+        """Illegal input: print >>fp,"""
+        tree = \
+        (257,
+         (264,
+          (265,
+           (266,
+            (268,
+             (1, 'print'),
+             (35, '>>'),
+             (290,
+              (291,
+               (292,
+                (293,
+                 (295,
+                  (296,
+                   (297,
+                    (298, (299, (300, (301, (302, (303, (1, 'fp')))))))))))))),
+             (12, ','))),
+           (4, ''))),
+         (0, ''))
+        self.check_bad_tree(tree, "print >>fp,")
+
+    def test_a_comma_comma_c(self):
+        """Illegal input: a,,c"""
+        tree = \
+        (258,
+         (311,
+          (290,
+           (291,
+            (292,
+             (293,
+              (295,
+               (296,
+                (297,
+                 (298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
+          (12, ','),
+          (12, ','),
+          (290,
+           (291,
+            (292,
+             (293,
+              (295,
+               (296,
+                (297,
+                 (298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
+         (4, ''),
+         (0, ''))
+        self.check_bad_tree(tree, "a,,c")
+
+    def test_illegal_operator(self):
+        """Illegal input: a $= b"""
+        tree = \
+        (257,
+         (264,
+          (265,
+           (266,
+            (267,
+             (312,
+              (291,
+               (292,
+                (293,
+                 (294,
+                  (296,
+                   (297,
+                    (298,
+                     (299,
+                      (300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
+             (268, (37, '$=')),
+             (312,
+              (291,
+               (292,
+                (293,
+                 (294,
+                  (296,
+                   (297,
+                    (298,
+                     (299,
+                      (300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
+           (4, ''))),
+         (0, ''))
+        self.check_bad_tree(tree, "a $= b")
+
+    def test_malformed_global(self):
+        # doesn't have the 'global' keyword in the AST
+        tree = (257,
+                (264,
+                 (265,
+                  (266,
+                   (282, (1, 'foo'))), (4, ''))),
+                (4, ''),
+                (0, '')) 
+        self.check_bad_tree(tree, "malformed global ast")
+
+def test_main():
+    loader = unittest.TestLoader()
+    suite = unittest.TestSuite()
+    suite.addTest(loader.loadTestsFromTestCase(RoundtripLegalSyntaxTestCase))
+    suite.addTest(loader.loadTestsFromTestCase(IllegalSyntaxTestCase))
+    test_support.run_suite(suite)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_pep247.py b/lib-python/2.2/test/test_pep247.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pep247.py
@@ -0,0 +1,50 @@
+#
+# Test suite to check compliance with PEP 247, the standard API for
+# hashing algorithms.
+#
+
+import md5, sha, hmac
+
+def check_hash_module(module, key=None):
+    assert hasattr(module, 'digest_size'), "Must have digest_size"
+    assert (module.digest_size is None or
+            module.digest_size > 0), "digest_size must be None or positive"
+
+    if key is not None:
+        obj1 = module.new(key)
+        obj2 = module.new(key, "string")
+
+        h1 = module.new(key, "string").digest()
+        obj3 = module.new(key) ; obj3.update("string") ; h2 = obj3.digest()
+        assert h1 == h2, "Hashes must match"
+
+    else:
+        obj1 = module.new()
+        obj2 = module.new("string")
+
+        h1 = module.new("string").digest()
+        obj3 = module.new() ; obj3.update("string") ; h2 = obj3.digest()
+        assert h1 == h2, "Hashes must match"
+
+    assert hasattr(obj1, 'digest_size'), "Objects must have digest_size attr"
+    if module.digest_size is not None:
+        assert obj1.digest_size == module.digest_size, "digest_size must match"
+    assert obj1.digest_size == len(h1), "digest_size must match actual size"
+    obj1.update("string")
+    obj_copy = obj1.copy()
+    assert obj1.digest() == obj_copy.digest(), "Copied objects must match"
+    assert obj1.hexdigest() == obj_copy.hexdigest(), \
+           "Copied objects must match"
+    digest, hexdigest = obj1.digest(), obj1.hexdigest()
+    hd2 = ""
+    for byte in digest:
+        hd2 += "%02x" % ord(byte)
+    assert hd2 == hexdigest, "hexdigest doesn't appear correct"
+
+    print 'Module', module.__name__, 'seems to comply with PEP 247'
+
+
+if __name__ == '__main__':
+    check_hash_module(md5)
+    check_hash_module(sha)
+    check_hash_module(hmac, key='abc')
diff --git a/lib-python/2.2/test/test_pickle.py b/lib-python/2.2/test/test_pickle.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pickle.py
@@ -0,0 +1,40 @@
+import pickle
+import test_support
+import unittest
+from cStringIO import StringIO
+from pickletester import AbstractPickleTests, AbstractPickleModuleTests
+
+class PickleTests(AbstractPickleTests, AbstractPickleModuleTests):
+
+    def setUp(self):
+        self.dumps = pickle.dumps
+        self.loads = pickle.loads
+
+    module = pickle
+    error = KeyError
+
+class PicklerTests(AbstractPickleTests):
+
+    error = KeyError
+
+    def dumps(self, arg, bin=0):
+        f = StringIO()
+        p = pickle.Pickler(f, bin)
+        p.dump(arg)
+        f.seek(0)
+        return f.read()
+
+    def loads(self, buf):
+        f = StringIO(buf)
+        u = pickle.Unpickler(f)
+        return u.load()
+
+def test_main():
+    loader = unittest.TestLoader()
+    suite = unittest.TestSuite()
+    suite.addTest(loader.loadTestsFromTestCase(PickleTests))
+    suite.addTest(loader.loadTestsFromTestCase(PicklerTests))
+    test_support.run_suite(suite)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_pkg.py b/lib-python/2.2/test/test_pkg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pkg.py
@@ -0,0 +1,259 @@
+# Test packages (dotted-name import)
+
+import sys, os, tempfile, traceback
+from os import mkdir, rmdir, extsep          # Can't test if these fail
+del mkdir, rmdir
+from test_support import verify, verbose, TestFailed
+
+# Helpers to create and destroy hierarchies.
+
+def mkhier(root, descr):
+    mkdir(root)
+    for name, contents in descr:
+        comps = name.split()
+        fullname = root
+        for c in comps:
+            fullname = os.path.join(fullname, c)
+        if contents is None:
+            mkdir(fullname)
+        else:
+            if verbose: print "write", fullname
+            f = open(fullname, "w")
+            f.write(contents)
+            if contents and contents[-1] != '\n':
+                f.write('\n')
+            f.close()
+
+def mkdir(x):
+    if verbose: print "mkdir", x
+    os.mkdir(x)
+
+def cleanout(root):
+    names = os.listdir(root)
+    for name in names:
+        fullname = os.path.join(root, name)
+        if os.path.isdir(fullname) and not os.path.islink(fullname):
+            cleanout(fullname)
+        else:
+            os.remove(fullname)
+    rmdir(root)
+
+def rmdir(x):
+    if verbose: print "rmdir", x
+    os.rmdir(x)
+
+def fixdir(lst):
+    try:
+        lst.remove('__builtins__')
+    except ValueError:
+        pass
+    return lst
+
+# Helper to run a test
+
+def runtest(hier, code):
+    root = tempfile.mktemp()
+    mkhier(root, hier)
+    savepath = sys.path[:]
+    codefile = tempfile.mktemp()
+    f = open(codefile, "w")
+    f.write(code)
+    f.close()
+    try:
+        sys.path.insert(0, root)
+        if verbose: print "sys.path =", sys.path
+        try:
+            execfile(codefile, globals(), {})
+        except:
+            traceback.print_exc(file=sys.stdout)
+    finally:
+        sys.path[:] = savepath
+        try:
+            cleanout(root)
+        except (os.error, IOError):
+            pass
+        os.remove(codefile)
+
+# Test descriptions
+
+tests = [
+    ("t1", [("t1", None), ("t1 __init__"+os.extsep+"py", "")], "import t1"),
+
+    ("t2", [
+    ("t2", None),
+    ("t2 __init__"+os.extsep+"py", "'doc for t2'; print __name__, 'loading'"),
+    ("t2 sub", None),
+    ("t2 sub __init__"+os.extsep+"py", ""),
+    ("t2 sub subsub", None),
+    ("t2 sub subsub __init__"+os.extsep+"py", "print __name__, 'loading'; spam = 1"),
+    ],
+"""
+import t2
+print t2.__doc__
+import t2.sub
+import t2.sub.subsub
+print t2.__name__, t2.sub.__name__, t2.sub.subsub.__name__
+import t2
+from t2 import *
+print dir()
+from t2 import sub
+from t2.sub import subsub
+from t2.sub.subsub import spam
+print sub.__name__, subsub.__name__
+print sub.subsub.__name__
+print dir()
+import t2.sub
+import t2.sub.subsub
+print t2.__name__, t2.sub.__name__, t2.sub.subsub.__name__
+from t2 import *
+print dir()
+"""),
+
+    ("t3", [
+    ("t3", None),
+    ("t3 __init__"+os.extsep+"py", "print __name__, 'loading'"),
+    ("t3 sub", None),
+    ("t3 sub __init__"+os.extsep+"py", ""),
+    ("t3 sub subsub", None),
+    ("t3 sub subsub __init__"+os.extsep+"py", "print __name__, 'loading'; spam = 1"),
+    ],
+"""
+import t3.sub.subsub
+print t3.__name__, t3.sub.__name__, t3.sub.subsub.__name__
+reload(t3)
+reload(t3.sub)
+reload(t3.sub.subsub)
+"""),
+
+    ("t4", [
+    ("t4"+os.extsep+"py", "print 'THIS SHOULD NOT BE PRINTED (t4"+os.extsep+"py)'"),
+    ("t4", None),
+    ("t4 __init__"+os.extsep+"py", "print __name__, 'loading'"),
+    ("t4 sub"+os.extsep+"py", "print 'THIS SHOULD NOT BE PRINTED (sub"+os.extsep+"py)'"),
+    ("t4 sub", None),
+    ("t4 sub __init__"+os.extsep+"py", ""),
+    ("t4 sub subsub"+os.extsep+"py", "print 'THIS SHOULD NOT BE PRINTED (subsub"+os.extsep+"py)'"),
+    ("t4 sub subsub", None),
+    ("t4 sub subsub __init__"+os.extsep+"py", "print __name__, 'loading'; spam = 1"),
+    ],
+"""
+from t4.sub.subsub import *
+print "t4.sub.subsub.spam =", spam
+"""),
+
+    ("t5", [
+    ("t5", None),
+    ("t5 __init__"+os.extsep+"py", "import t5.foo"),
+    ("t5 string"+os.extsep+"py", "print __name__, 'loading'; spam = 1"),
+    ("t5 foo"+os.extsep+"py",
+     "print __name__, 'loading'; import string; print string.spam"),
+     ],
+"""
+import t5
+from t5 import *
+print dir()
+import t5
+print fixdir(dir(t5))
+print fixdir(dir(t5.foo))
+print fixdir(dir(t5.string))
+"""),
+
+    ("t6", [
+    ("t6", None),
+    ("t6 __init__"+os.extsep+"py", "__all__ = ['spam', 'ham', 'eggs']"),
+    ("t6 spam"+os.extsep+"py", "print __name__, 'loading'"),
+    ("t6 ham"+os.extsep+"py", "print __name__, 'loading'"),
+    ("t6 eggs"+os.extsep+"py", "print __name__, 'loading'"),
+    ],
+"""
+import t6
+print fixdir(dir(t6))
+from t6 import *
+print fixdir(dir(t6))
+print dir()
+"""),
+
+    ("t7", [
+    ("t7"+os.extsep+"py", "print 'Importing t7"+os.extsep+"py'"),
+    ("t7", None),
+    ("t7 __init__"+os.extsep+"py", "print __name__, 'loading'"),
+    ("t7 sub"+os.extsep+"py", "print 'THIS SHOULD NOT BE PRINTED (sub"+os.extsep+"py)'"),
+    ("t7 sub", None),
+    ("t7 sub __init__"+os.extsep+"py", ""),
+    ("t7 sub subsub"+os.extsep+"py", "print 'THIS SHOULD NOT BE PRINTED (subsub"+os.extsep+"py)'"),
+    ("t7 sub subsub", None),
+    ("t7 sub subsub __init__"+os.extsep+"py", "print __name__, 'loading'; spam = 1"),
+    ],
+"""
+t7, sub, subsub = None, None, None
+import t7 as tas
+print fixdir(dir(tas))
+verify(not t7)
+from t7 import sub as subpar
+print fixdir(dir(subpar))
+verify(not t7 and not sub)
+from t7.sub import subsub as subsubsub
+print fixdir(dir(subsubsub))
+verify(not t7 and not sub and not subsub)
+from t7.sub.subsub import spam as ham
+print "t7.sub.subsub.spam =", ham
+verify(not t7 and not sub and not subsub)
+"""),
+
+]
+
+nontests = [
+    ("x5", [], ("import a" + ".a"*400)),
+    ("x6", [], ("import a" + ".a"*499)),
+    ("x7", [], ("import a" + ".a"*500)),
+    ("x8", [], ("import a" + ".a"*1100)),
+    ("x9", [], ("import " + "a"*400)),
+    ("x10", [], ("import " + "a"*500)),
+    ("x11", [], ("import " + "a"*998)),
+    ("x12", [], ("import " + "a"*999)),
+    ("x13", [], ("import " + "a"*999)),
+    ("x14", [], ("import " + "a"*2000)),
+]
+
+"""XXX Things to test
+
+import package without __init__
+import package with __init__
+__init__ importing submodule
+__init__ importing global module
+__init__ defining variables
+submodule importing other submodule
+submodule importing global module
+submodule import submodule via global name
+from package import submodule
+from package import subpackage
+from package import variable (defined in __init__)
+from package import * (defined in __init__)
+"""
+
+# Run the tests
+
+args = []
+if __name__ == '__main__':
+    args = sys.argv[1:]
+    if args and args[0] == '-q':
+        verbose = 0
+        del args[0]
+
+for name, hier, code in tests:
+    if args and name not in args:
+        print "skipping test", name
+        continue
+    print "running test", name
+    runtest(hier, code)
+
+# Test
+import sys
+import imp
+try:
+    import sys.imp
+except ImportError:
+    # This is what we expect
+    pass
+else:
+    raise TestFailed, "No ImportError exception on 'import sys.imp'"
diff --git a/lib-python/2.2/test/test_pkgimport.py b/lib-python/2.2/test/test_pkgimport.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pkgimport.py
@@ -0,0 +1,84 @@
+import os, sys, string, random, tempfile, unittest
+
+from test_support import run_unittest
+
+class TestImport(unittest.TestCase):
+
+    def __init__(self, *args, **kw):
+        self.package_name = 'PACKAGE_'
+        while sys.modules.has_key(self.package_name):
+            self.package_name += random.choose(string.letters)
+        self.module_name = self.package_name + '.foo'
+        unittest.TestCase.__init__(self, *args, **kw)
+
+    def remove_modules(self):
+        for module_name in (self.package_name, self.module_name):
+            if sys.modules.has_key(module_name):
+                del sys.modules[module_name]
+
+    def setUp(self):
+        self.test_dir = tempfile.mktemp()
+        os.mkdir(self.test_dir)
+        sys.path.append(self.test_dir)
+        self.package_dir = os.path.join(self.test_dir,
+                                        self.package_name)
+        os.mkdir(self.package_dir)
+        open(os.path.join(self.package_dir, '__init__'+os.extsep+'py'), 'w')
+        self.module_path = os.path.join(self.package_dir, 'foo'+os.extsep+'py')
+
+    def tearDown(self):
+        for file in os.listdir(self.package_dir):
+            os.remove(os.path.join(self.package_dir, file))
+        os.rmdir(self.package_dir)
+        os.rmdir(self.test_dir)
+        self.assertNotEqual(sys.path.count(self.test_dir), 0)
+        sys.path.remove(self.test_dir)
+        self.remove_modules()
+
+    def rewrite_file(self, contents):
+        for extension in "co":
+            compiled_path = self.module_path + extension
+            if os.path.exists(compiled_path):
+                os.remove(compiled_path)
+        f = open(self.module_path, 'w')
+        f.write(contents)
+        f.close()
+
+    def test_package_import__semantics(self):
+
+        # Generate a couple of broken modules to try importing.
+
+        # ...try loading the module when there's a SyntaxError
+        self.rewrite_file('for')
+        try: __import__(self.module_name)
+        except SyntaxError: pass
+        else: raise RuntimeError, 'Failed to induce SyntaxError'
+        self.assert_(not sys.modules.has_key(self.module_name) and
+                     not hasattr(sys.modules[self.package_name], 'foo'))
+
+        # ...make up a variable name that isn't bound in __builtins__
+        var = 'a'
+        while var in dir(__builtins__):
+            var += random.choose(string.letters)
+
+        # ...make a module that just contains that
+        self.rewrite_file(var)
+
+        try: __import__(self.module_name)
+        except NameError: pass
+        else: raise RuntimeError, 'Failed to induce NameError.'
+        module = __import__(self.module_name).foo
+
+        # ...now  change  the module  so  that  the NameError  doesn't
+        # happen
+        self.rewrite_file('%s = 1' % var)
+        reload(module)
+        self.assertEqual(getattr(module, var), 1)
+
+
+def test_main():
+    run_unittest(TestImport)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_poll.py b/lib-python/2.2/test/test_poll.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_poll.py
@@ -0,0 +1,172 @@
+# Test case for the os.poll() function
+
+import sys, os, select, random
+from test_support import verify, verbose, TestSkipped, TESTFN
+
+try:
+    select.poll
+except AttributeError:
+    raise TestSkipped, "select.poll not defined -- skipping test_poll"
+
+
+def find_ready_matching(ready, flag):
+    match = []
+    for fd, mode in ready:
+        if mode & flag:
+            match.append(fd)
+    return match
+
+def test_poll1():
+    """Basic functional test of poll object
+
+    Create a bunch of pipe and test that poll works with them.
+    """
+    print 'Running poll test 1'
+    p = select.poll()
+
+    NUM_PIPES = 12
+    MSG = " This is a test."
+    MSG_LEN = len(MSG)
+    readers = []
+    writers = []
+    r2w = {}
+    w2r = {}
+
+    for i in range(NUM_PIPES):
+        rd, wr = os.pipe()
+        p.register(rd, select.POLLIN)
+        p.register(wr, select.POLLOUT)
+        readers.append(rd)
+        writers.append(wr)
+        r2w[rd] = wr
+        w2r[wr] = rd
+
+    while writers:
+        ready = p.poll()
+        ready_writers = find_ready_matching(ready, select.POLLOUT)
+        if not ready_writers:
+            raise RuntimeError, "no pipes ready for writing"
+        wr = random.choice(ready_writers)
+        os.write(wr, MSG)
+
+        ready = p.poll()
+        ready_readers = find_ready_matching(ready, select.POLLIN)
+        if not ready_readers:
+            raise RuntimeError, "no pipes ready for reading"
+        rd = random.choice(ready_readers)
+        buf = os.read(rd, MSG_LEN)
+        verify(len(buf) == MSG_LEN)
+        print buf
+        os.close(r2w[rd]) ; os.close( rd )
+        p.unregister( r2w[rd] )
+        p.unregister( rd )
+        writers.remove(r2w[rd])
+
+    poll_unit_tests()
+    print 'Poll test 1 complete'
+
+def poll_unit_tests():
+    # returns NVAL for invalid file descriptor
+    FD = 42
+    try:
+        os.close(FD)
+    except OSError:
+        pass
+    p = select.poll()
+    p.register(FD)
+    r = p.poll()
+    verify(r[0] == (FD, select.POLLNVAL))
+
+    f = open(TESTFN, 'w')
+    fd = f.fileno()
+    p = select.poll()
+    p.register(f)
+    r = p.poll()
+    verify(r[0][0] == fd)
+    f.close()
+    r = p.poll()
+    verify(r[0] == (fd, select.POLLNVAL))
+    os.unlink(TESTFN)
+
+    # type error for invalid arguments
+    p = select.poll()
+    try:
+        p.register(p)
+    except TypeError:
+        pass
+    else:
+        print "Bogus register call did not raise TypeError"
+    try:
+        p.unregister(p)
+    except TypeError:
+        pass
+    else:
+        print "Bogus unregister call did not raise TypeError"
+
+    # can't unregister non-existent object
+    p = select.poll()
+    try:
+        p.unregister(3)
+    except KeyError:
+        pass
+    else:
+        print "Bogus unregister call did not raise KeyError"
+
+    # Test error cases
+    pollster = select.poll()
+    class Nope:
+        pass
+
+    class Almost:
+        def fileno(self):
+            return 'fileno'
+
+    try:
+        pollster.register( Nope(), 0 )
+    except TypeError: pass
+    else: print 'expected TypeError exception, not raised'
+
+    try:
+        pollster.register( Almost(), 0 )
+    except TypeError: pass
+    else: print 'expected TypeError exception, not raised'
+
+
+# Another test case for poll().  This is copied from the test case for
+# select(), modified to use poll() instead.
+
+def test_poll2():
+    print 'Running poll test 2'
+    cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
+    p = os.popen(cmd, 'r')
+    pollster = select.poll()
+    pollster.register( p, select.POLLIN )
+    for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
+        if verbose:
+            print 'timeout =', tout
+        fdlist = pollster.poll(tout)
+        if (fdlist == []):
+            continue
+        fd, flags = fdlist[0]
+        if flags & select.POLLHUP:
+            line = p.readline()
+            if line != "":
+                print 'error: pipe seems to be closed, but still returns data'
+            continue
+
+        elif flags & select.POLLIN:
+            line = p.readline()
+            if verbose:
+                print `line`
+            if not line:
+                if verbose:
+                    print 'EOF'
+                break
+            continue
+        else:
+            print 'Unexpected return value from select.poll:', fdlist
+    p.close()
+    print 'Poll test 2 complete'
+
+test_poll1()
+test_poll2()
diff --git a/lib-python/2.2/test/test_popen2.py b/lib-python/2.2/test/test_popen2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_popen2.py
@@ -0,0 +1,72 @@
+#! /usr/bin/env python
+"""Test script for popen2.py
+   Christian Tismer
+"""
+
+import os
+import sys
+from test_support import TestSkipped
+
+# popen2 contains its own testing routine
+# which is especially useful to see if open files
+# like stdin can be read successfully by a forked
+# subprocess.
+
+def main():
+    print "Test popen2 module:"
+    if sys.platform[:4] == 'beos' and __name__ != '__main__':
+        #  Locks get messed up or something.  Generally we're supposed
+        #  to avoid mixing "posix" fork & exec with native threads, and
+        #  they may be right about that after all.
+        raise TestSkipped, "popen2() doesn't work during import on BeOS"
+    try:
+        from os import popen
+    except ImportError:
+        # if we don't have os.popen, check that
+        # we have os.fork.  if not, skip the test
+        # (by raising an ImportError)
+        from os import fork
+    import popen2
+    popen2._test()
+
+
+def _test():
+    # same test as popen2._test(), but using the os.popen*() API
+    print "Testing os module:"
+    import popen2
+    cmd  = "cat"
+    teststr = "ab cd\n"
+    if os.name == "nt":
+        cmd = "more"
+    # "more" doesn't act the same way across Windows flavors,
+    # sometimes adding an extra newline at the start or the
+    # end.  So we strip whitespace off both ends for comparison.
+    expected = teststr.strip()
+    print "testing popen2..."
+    w, r = os.popen2(cmd)
+    w.write(teststr)
+    w.close()
+    got = r.read()
+    if got.strip() != expected:
+        raise ValueError("wrote %s read %s" % (`teststr`, `got`))
+    print "testing popen3..."
+    try:
+        w, r, e = os.popen3([cmd])
+    except:
+        w, r, e = os.popen3(cmd)
+    w.write(teststr)
+    w.close()
+    got = r.read()
+    if got.strip() != expected:
+        raise ValueError("wrote %s read %s" % (`teststr`, `got`))
+    got = e.read()
+    if got:
+        raise ValueError("unexected %s on stderr" % `got`)
+    for inst in popen2._active[:]:
+        inst.wait()
+    if popen2._active:
+        raise ValueError("_active not empty")
+    print "All OK"
+
+main()
+_test()
diff --git a/lib-python/2.2/test/test_posixpath.py b/lib-python/2.2/test/test_posixpath.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_posixpath.py
@@ -0,0 +1,40 @@
+import posixpath
+
+errors = 0
+
+def tester(fn, wantResult):
+    gotResult = eval(fn)
+    if wantResult != gotResult:
+        print "error!"
+        print "evaluated: " + str(fn)
+        print "should be: " + str(wantResult)
+        print " returned: " + str(gotResult)
+        print ""
+        global errors
+        errors = errors + 1
+
+tester('posixpath.splitdrive("/foo/bar")', ('', '/foo/bar'))
+
+tester('posixpath.split("/foo/bar")', ('/foo', 'bar'))
+tester('posixpath.split("/")', ('/', ''))
+tester('posixpath.split("foo")', ('', 'foo'))
+
+tester('posixpath.splitext("foo.ext")', ('foo', '.ext'))
+tester('posixpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
+
+tester('posixpath.isabs("/")', 1)
+tester('posixpath.isabs("/foo")', 1)
+tester('posixpath.isabs("/foo/bar")', 1)
+tester('posixpath.isabs("foo/bar")', 0)
+
+tester('posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
+       "/home/swen")
+tester('posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"])',
+       "/home/swen/")
+tester('posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
+       "/home/swen/spam")
+
+if errors:
+    print str(errors) + " errors."
+else:
+    print "No errors.  Thank your lucky stars."
diff --git a/lib-python/2.2/test/test_pow.py b/lib-python/2.2/test/test_pow.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pow.py
@@ -0,0 +1,125 @@
+import sys
+import test_support
+
+
+def powtest(type):
+    if type != float:
+        print "    Testing 2-argument pow() function..."
+        for i in range(-1000, 1000):
+            if pow(type(i), 0) != 1:
+                raise ValueError, 'pow('+str(i)+',0) != 1'
+            if pow(type(i), 1) != type(i):
+                raise ValueError, 'pow('+str(i)+',1) != '+str(i)
+            if pow(type(0), 1) != type(0):
+                raise ValueError, 'pow(0,'+str(i)+') != 0'
+            if pow(type(1), 1) != type(1):
+                raise ValueError, 'pow(1,'+str(i)+') != 1'
+
+        for i in range(-100, 100):
+            if pow(type(i), 3) != i*i*i:
+                raise ValueError, 'pow('+str(i)+',3) != '+str(i*i*i)
+
+        pow2 = 1
+        for i in range(0,31):
+            if pow(2, i) != pow2:
+                raise ValueError, 'pow(2,'+str(i)+') != '+str(pow2)
+            if i != 30 : pow2 = pow2*2
+
+        for othertype in int, long:
+            for i in range(-10, 0) + range(1, 10):
+                ii = type(i)
+                for j in range(1, 11):
+                    jj = -othertype(j)
+                    try:
+                        pow(ii, jj)
+                    except ValueError:
+                        raise ValueError, "pow(%s, %s) failed" % (ii, jj)
+
+    for othertype in int, long, float:
+        for i in range(1, 100):
+            zero = type(0)
+            exp = -othertype(i/10.0)
+            if exp == 0:
+                continue
+            try:
+                pow(zero, exp)
+            except ZeroDivisionError:
+                pass # taking zero to any negative exponent should fail
+            else:
+                raise ValueError, "pow(%s, %s) did not fail" % (zero, exp)
+
+    print "    Testing 3-argument pow() function..."
+    il, ih = -20, 20
+    jl, jh = -5,   5
+    kl, kh = -10, 10
+    compare = cmp
+    if type == float:
+        il = 1
+        compare = test_support.fcmp
+    elif type == int:
+        jl = 0
+    elif type == long:
+        jl, jh = 0, 15
+    for i in range(il, ih+1):
+        for j in range(jl, jh+1):
+            for k in range(kl, kh+1):
+                if k != 0:
+                    if type == float or j < 0:
+                        try:
+                            pow(type(i),j,k)
+                        except TypeError:
+                            pass
+                        else:
+                            raise TestFailed("expected TypeError from "
+                                "pow%r" % ((type(i), j, k)))
+                        continue
+                    if compare(pow(type(i),j,k), pow(type(i),j)% type(k)):
+                        raise ValueError, "pow(" +str(i)+ "," +str(j)+ \
+                             "," +str(k)+ ") != pow(" +str(i)+ "," + \
+                             str(j)+ ") % " +str(k)
+
+
+print 'Testing integer mode...'
+powtest(int)
+print 'Testing long integer mode...'
+powtest(long)
+print 'Testing floating point mode...'
+powtest(float)
+
+# Other tests-- not very systematic
+
+print 'The number in both columns should match.'
+print `pow(3,3) % 8`, `pow(3,3,8)`
+print `pow(3,3) % -8`, `pow(3,3,-8)`
+print `pow(3,2) % -2`, `pow(3,2,-2)`
+print `pow(-3,3) % 8`, `pow(-3,3,8)`
+print `pow(-3,3) % -8`, `pow(-3,3,-8)`
+print `pow(5,2) % -8`, `pow(5,2,-8)`
+print
+
+print `pow(3L,3L) % 8`, `pow(3L,3L,8)`
+print `pow(3L,3L) % -8`, `pow(3L,3L,-8)`
+print `pow(3L,2) % -2`, `pow(3L,2,-2)`
+print `pow(-3L,3L) % 8`, `pow(-3L,3L,8)`
+print `pow(-3L,3L) % -8`, `pow(-3L,3L,-8)`
+print `pow(5L,2) % -8`, `pow(5L,2,-8)`
+print
+
+print
+
+for i in range(-10, 11):
+    for j in range(0, 6):
+        for k in range(-7, 11):
+            if j >= 0 and k != 0:
+                o = pow(i,j) % k
+                n = pow(i,j,k)
+                if o != n: print 'Integer mismatch:', i,j,k
+            if j >= 0 and k != 0:
+                o = pow(long(i),j) % k
+                n = pow(long(i),j,k)
+                if o != n: print 'Integer mismatch:', i,j,k
+
+class TestRpow:
+    def __rpow__(self, other):
+        return None
+None ** TestRpow()      # Won't fail when __rpow__ invoked.  SF bug #643260.
diff --git a/lib-python/2.2/test/test_pprint.py b/lib-python/2.2/test/test_pprint.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pprint.py
@@ -0,0 +1,104 @@
+import pprint
+import unittest
+
+import test_support
+
+try:
+    uni = unicode
+except NameError:
+    def uni(x):return x
+
+
+class QueryTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.a = range(100)
+        self.b = range(200)
+        self.a[-12] = self.b
+
+    def test_basic(self):
+        """Verify .isrecursive() and .isreadable() w/o recursion."""
+        verify = self.assert_
+        for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, uni("yaddayadda"),
+                     self.a, self.b):
+            verify(not pprint.isrecursive(safe),
+                   "expected not isrecursive for " + `safe`)
+            verify(pprint.isreadable(safe),
+                   "expected isreadable for " + `safe`)
+
+    def test_knotted(self):
+        """Verify .isrecursive() and .isreadable() w/ recursion."""
+        # Tie a knot.
+        self.b[67] = self.a
+        # Messy dict.
+        self.d = {}
+        self.d[0] = self.d[1] = self.d[2] = self.d
+
+        verify = self.assert_
+
+        for icky in self.a, self.b, self.d, (self.d, self.d):
+            verify(pprint.isrecursive(icky), "expected isrecursive")
+            verify(not pprint.isreadable(icky),  "expected not isreadable")
+
+        # Break the cycles.
+        self.d.clear()
+        del self.a[:]
+        del self.b[:]
+
+        for safe in self.a, self.b, self.d, (self.d, self.d):
+            verify(not pprint.isrecursive(safe),
+                   "expected not isrecursive for " + `safe`)
+            verify(pprint.isreadable(safe),
+                   "expected isreadable for " + `safe`)
+
+    def test_unreadable(self):
+        """Not recursive but not readable anyway."""
+        verify = self.assert_
+        for unreadable in type(3), pprint, pprint.isrecursive:
+            verify(not pprint.isrecursive(unreadable),
+                   "expected not isrecursive for " + `unreadable`)
+            verify(not pprint.isreadable(unreadable),
+                   "expected not isreadable for " + `unreadable`)
+
+    def test_same_as_repr(self):
+        "Simple objects and small containers that should be same as repr()."
+        verify = self.assert_
+        for simple in (0, 0L, 0+0j, 0.0, "", uni(""), (), [], {}, verify, pprint,
+                       -6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
+                       (1,2), [3,4], {5: 6, 7: 8},
+                       {"xy\tab\n": (3,), 5: [[]], (): {}},
+                       range(10, -11, -1)
+                      ):
+            native = repr(simple)
+            for function in "pformat", "saferepr":
+                f = getattr(pprint, function)
+                got = f(simple)
+                verify(native == got, "expected %s got %s from pprint.%s" %
+                                      (native, got, function))
+
+
+    def test_basic_line_wrap(self):
+        """verify basic line-wrapping operation"""
+        o = {'RPM_cal': 0,
+             'RPM_cal2': 48059,
+             'Speed_cal': 0,
+             'controldesk_runtime_us': 0,
+             'main_code_runtime_us': 0,
+             'read_io_runtime_us': 0,
+             'write_io_runtime_us': 43690}
+        exp = """\
+{'RPM_cal': 0,
+ 'RPM_cal2': 48059,
+ 'Speed_cal': 0,
+ 'controldesk_runtime_us': 0,
+ 'main_code_runtime_us': 0,
+ 'read_io_runtime_us': 0,
+ 'write_io_runtime_us': 43690}"""
+        self.assertEqual(pprint.pformat(o), exp)
+
+def test_main():
+    test_support.run_unittest(QueryTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_profile.py b/lib-python/2.2/test/test_profile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_profile.py
@@ -0,0 +1,86 @@
+"""Test suite for the profile module."""
+
+import profile
+
+# In order to have reproducible time, we simulate a timer in the global
+# variable 'ticks', which represents simulated time in milliseconds.
+# (We can't use a helper function increment the timer since it would be
+# included in the profile and would appear to consume all the time.)
+ticks = 0
+
+def test_main():
+    global ticks
+    ticks = 0
+    prof = profile.Profile(timer)
+    prof.runctx("testfunc()", globals(), globals())
+    prof.print_stats()
+
+def timer():
+    return ticks*0.001
+
+def testfunc():
+    # 1 call
+    # 1000 ticks total: 400 ticks local, 600 ticks in subfunctions
+    global ticks
+    ticks += 199
+    helper()                            # 300
+    helper()                            # 300
+    ticks += 201
+
+def helper():
+    # 2 calls
+    # 300 ticks total: 40 ticks local, 260 ticks in subfunctions
+    global ticks
+    ticks += 1
+    helper1()                           # 30
+    ticks += 3
+    helper1()                           # 30
+    ticks += 6
+    helper2()                           # 50
+    ticks += 5
+    helper2()                           # 50
+    ticks += 4
+    helper2()                           # 50
+    ticks += 7
+    helper2()                           # 50
+    ticks += 14
+
+def helper1():
+    # 4 calls
+    # 30 ticks total: 29 ticks local, 1 tick in subfunctions
+    global ticks
+    ticks += 10
+    hasattr(C(), "foo")
+    ticks += 19
+
+def helper2():
+    # 8 calls
+    # 50 ticks local: 39 ticks local, 11 ticks in subfunctions
+    global ticks
+    ticks += 11
+    hasattr(C(), "bar")                 # 1
+    ticks += 13
+    subhelper()                         # 10
+    ticks += 15
+
+def subhelper():
+    # 8 calls
+    # 10 ticks total: 8 ticks local, 2 ticks in subfunctions
+    global ticks
+    ticks += 2
+    for i in range(2):
+        try:
+            C().foo                     # 1 x 2
+        except AttributeError:
+            ticks += 3                  # 3 x 2
+
+class C:
+    def __getattr__(self, name):
+        # 28 calls
+        # 1 tick, local
+        global ticks
+        ticks += 1
+        raise AttributeError
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_profilehooks.py b/lib-python/2.2/test/test_profilehooks.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_profilehooks.py
@@ -0,0 +1,360 @@
+from __future__ import generators
+
+from test_support import TestFailed
+
+import pprint
+import sys
+import unittest
+
+import test_support
+
+
+class HookWatcher:
+    """Profile-callback target that logs (frameno, event, ident) tuples."""
+
+    def __init__(self):
+        self.frames = []
+        self.events = []
+
+    def callback(self, frame, event, arg):
+        # Signature required by sys.setprofile().
+        self.add_event(event, frame)
+
+    def add_event(self, event, frame=None):
+        """Add an event to the log."""
+        if frame is None:
+            frame = sys._getframe(1)
+
+        # Number frames by first appearance so expected logs are stable.
+        try:
+            frameno = self.frames.index(frame)
+        except ValueError:
+            frameno = len(self.frames)
+            self.frames.append(frame)
+
+        self.events.append((frameno, event, ident(frame)))
+
+    def get_events(self):
+        """Remove calls to add_event()."""
+        disallowed = [ident(self.add_event.im_func), ident(ident)]
+        self.frames = None
+
+        return [item for item in self.events if item[2] not in disallowed]
+
+
+class ProfileSimulator(HookWatcher):
+    """HookWatcher that also maintains a call stack and rejects any
+    'exception' event, which a profiler must never receive."""
+
+    def __init__(self, testcase):
+        self.testcase = testcase
+        self.stack = []
+        HookWatcher.__init__(self)
+
+    def callback(self, frame, event, arg):
+        # Callback registered with sys.setprofile()/sys.settrace()
+        self.dispatch[event](self, frame)
+
+    def trace_call(self, frame):
+        self.add_event('call', frame)
+        self.stack.append(frame)
+
+    def trace_return(self, frame):
+        self.add_event('return', frame)
+        self.stack.pop()
+
+    def trace_exception(self, frame):
+        self.testcase.fail(
+            "the profiler should never receive exception events")
+
+    # Event-name -> unbound handler table used by callback().
+    dispatch = {
+        'call': trace_call,
+        'exception': trace_exception,
+        'return': trace_return,
+        }
+
+
+class TestCaseBase(unittest.TestCase):
+    """Shared harness: profile a callable and diff events against expected."""
+
+    def check_events(self, callable, expected):
+        events = capture_events(callable, self.new_watcher())
+        if events != expected:
+            self.fail("Expected events:\n%s\nReceived events:\n%s"
+                      % (pprint.pformat(expected), pprint.pformat(events)))
+
+
+class ProfileHookTestCase(TestCaseBase):
+    """Checks the exact event stream a raw sys.setprofile callback sees.
+
+    A profiler gets only 'call'/'return' pairs: exceptions that propagate
+    out of a frame still show up as a 'return' for that frame.
+    """
+
+    def new_watcher(self):
+        return HookWatcher()
+
+    def test_simple(self):
+        def f(p):
+            pass
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_exception(self):
+        def f(p):
+            1/0
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_caught_exception(self):
+        def f(p):
+            try: 1/0
+            except: pass
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_caught_nested_exception(self):
+        def f(p):
+            try: 1/0
+            except: pass
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_nested_exception(self):
+        def f(p):
+            1/0
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              # This isn't what I expected:
+                              # (0, 'exception', protect_ident),
+                              # I expected this again:
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_exception_in_except_clause(self):
+        def f(p):
+            1/0
+        def g(p):
+            try:
+                f(p)
+            except:
+                try: f(p)
+                except: pass
+        f_ident = ident(f)
+        g_ident = ident(g)
+        self.check_events(g, [(1, 'call', g_ident),
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (3, 'call', f_ident),
+                              (3, 'return', f_ident),
+                              (1, 'return', g_ident),
+                              ])
+
+    # NOTE: 'propogation' typo is upstream CPython's; renaming the method
+    # would change the test id, so it is preserved here.
+    def test_exception_propogation(self):
+        def f(p):
+            1/0
+        def g(p):
+            try: f(p)
+            finally: p.add_event("falling through")
+        f_ident = ident(f)
+        g_ident = ident(g)
+        self.check_events(g, [(1, 'call', g_ident),
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (1, 'falling through', g_ident),
+                              (1, 'return', g_ident),
+                              ])
+
+    def test_raise_twice(self):
+        def f(p):
+            try: 1/0
+            except: 1/0
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_raise_reraise(self):
+        def f(p):
+            try: 1/0
+            except: raise
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_raise(self):
+        def f(p):
+            raise Exception()
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_distant_exception(self):
+        def f():
+            1/0
+        def g():
+            f()
+        def h():
+            g()
+        def i():
+            h()
+        def j(p):
+            i()
+        f_ident = ident(f)
+        g_ident = ident(g)
+        h_ident = ident(h)
+        i_ident = ident(i)
+        j_ident = ident(j)
+        self.check_events(j, [(1, 'call', j_ident),
+                              (2, 'call', i_ident),
+                              (3, 'call', h_ident),
+                              (4, 'call', g_ident),
+                              (5, 'call', f_ident),
+                              (5, 'return', f_ident),
+                              (4, 'return', g_ident),
+                              (3, 'return', h_ident),
+                              (2, 'return', i_ident),
+                              (1, 'return', j_ident),
+                              ])
+
+    def test_generator(self):
+        def f():
+            for i in range(2):
+                yield i
+        def g(p):
+            for i in f():
+                pass
+        f_ident = ident(f)
+        g_ident = ident(g)
+        self.check_events(g, [(1, 'call', g_ident),
+                              # call the iterator twice to generate values
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              # once more; returns end-of-iteration without
+                              # actually raising an exception
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (1, 'return', g_ident),
+                              ])
+
+    def test_stop_iteration(self):
+        def f():
+            for i in range(2):
+                yield i
+            raise StopIteration
+        def g(p):
+            for i in f():
+                pass
+        f_ident = ident(f)
+        g_ident = ident(g)
+        self.check_events(g, [(1, 'call', g_ident),
+                              # call the iterator twice to generate values
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              # once more to hit the raise:
+                              (2, 'call', f_ident),
+                              (2, 'return', f_ident),
+                              (1, 'return', g_ident),
+                              ])
+
+
+class ProfileSimulatorTestCase(TestCaseBase):
+    """Same scenarios as ProfileHookTestCase, but through ProfileSimulator,
+    which additionally fails the test on any 'exception' event."""
+
+    def new_watcher(self):
+        return ProfileSimulator(self)
+
+    def test_simple(self):
+        def f(p):
+            pass
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_basic_exception(self):
+        def f(p):
+            1/0
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_caught_exception(self):
+        def f(p):
+            try: 1/0
+            except: pass
+        f_ident = ident(f)
+        self.check_events(f, [(1, 'call', f_ident),
+                              (1, 'return', f_ident),
+                              ])
+
+    def test_distant_exception(self):
+        def f():
+            1/0
+        def g():
+            f()
+        def h():
+            g()
+        def i():
+            h()
+        def j(p):
+            i()
+        f_ident = ident(f)
+        g_ident = ident(g)
+        h_ident = ident(h)
+        i_ident = ident(i)
+        j_ident = ident(j)
+        self.check_events(j, [(1, 'call', j_ident),
+                              (2, 'call', i_ident),
+                              (3, 'call', h_ident),
+                              (4, 'call', g_ident),
+                              (5, 'call', f_ident),
+                              (5, 'return', f_ident),
+                              (4, 'return', g_ident),
+                              (3, 'return', h_ident),
+                              (2, 'return', i_ident),
+                              (1, 'return', j_ident),
+                              ])
+
+
+def ident(function):
+    """Return (co_firstlineno, co_name) for a function or a frame object."""
+    if hasattr(function, "f_code"):
+        code = function.f_code
+    else:
+        code = function.func_code
+    return code.co_firstlineno, code.co_name
+
+
+def protect(f, p):
+    """Call f(p), swallowing any exception so the profiler run completes."""
+    try: f(p)
+    except: pass
+
+protect_ident = ident(protect)
+
+
+def capture_events(callable, p=None):
+    """Profile protect(callable, p) and return the events it generated,
+    excluding the outermost protect() call/return pair."""
+    # sys.setprofile() with no argument must raise TypeError.
+    try: sys.setprofile()
+    except TypeError: pass
+    else: raise TestFailed, 'sys.setprofile() did not raise TypeError'
+
+    if p is None:
+        p = HookWatcher()
+    sys.setprofile(p.callback)
+    protect(callable, p)
+    sys.setprofile(None)
+    return p.get_events()[1:-1]
+
+
+def show_events(callable):
+    """Debug helper: pretty-print the event stream for a callable."""
+    import pprint
+    pprint.pprint(capture_events(callable))
+
+
+def test_main():
+    """unittest entry point used by regrtest."""
+    loader = unittest.TestLoader()
+    suite = unittest.TestSuite()
+    suite.addTest(loader.loadTestsFromTestCase(ProfileHookTestCase))
+    suite.addTest(loader.loadTestsFromTestCase(ProfileSimulatorTestCase))
+    test_support.run_suite(suite)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_pty.py b/lib-python/2.2/test/test_pty.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pty.py
@@ -0,0 +1,98 @@
+# Sanity tests for the pty module: master/slave I/O, then pty.fork().
+import pty, os, sys
+from test_support import verbose, TestFailed, TestSkipped
+
+TEST_STRING_1 = "I wish to buy a fish license.\n"
+TEST_STRING_2 = "For my pet fish, Eric.\n"
+
+if verbose:
+    def debug(msg):
+        print msg
+else:
+    def debug(msg):
+        pass
+
+# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
+# because pty code is not too portable.
+
+try:
+    debug("Calling master_open()")
+    master_fd, slave_name = pty.master_open()
+    debug("Got master_fd '%d', slave_name '%s'"%(master_fd, slave_name))
+    debug("Calling slave_open(%s)"%`slave_name`)
+    slave_fd = pty.slave_open(slave_name)
+    debug("Got slave_fd '%d'"%slave_fd)
+except OSError:
+    # " An optional feature could not be imported " ... ?
+    raise TestSkipped, "Pseudo-terminals (seemingly) not functional."
+
+if not os.isatty(slave_fd):
+    raise TestFailed, "slave_fd is not a tty"
+
+# IRIX apparently turns \n into \r\n. Allow that, but avoid allowing other
+# differences (like extra whitespace, trailing garbage, etc.)
+
+debug("Writing to slave_fd")
+os.write(slave_fd, TEST_STRING_1)
+s1 = os.read(master_fd, 1024)
+sys.stdout.write(s1.replace("\r\n", "\n"))
+
+debug("Writing chunked output")
+os.write(slave_fd, TEST_STRING_2[:5])
+os.write(slave_fd, TEST_STRING_2[5:])
+s2 = os.read(master_fd, 1024)
+sys.stdout.write(s2.replace("\r\n", "\n"))
+
+os.close(slave_fd)
+os.close(master_fd)
+
+# basic pty passed.
+
+debug("calling pty.fork()")
+pid, master_fd = pty.fork()
+if pid == pty.CHILD:
+    # The child communicates its result back via os._exit status codes
+    # (1-4), decoded by the parent below.
+    # stdout should be connected to a tty.
+    if not os.isatty(1):
+        debug("Child's fd 1 is not a tty?!")
+        os._exit(3)
+
+    # After pty.fork(), the child should already be a session leader.
+    # (on those systems that have that concept.)
+    debug("In child, calling os.setsid()")
+    try:
+        os.setsid()
+    except OSError:
+        # Good, we already were session leader
+        debug("Good: OSError was raised.")
+        pass
+    except AttributeError:
+        # Have pty, but not setsid() ?
+        debug("No setsid() available ?")
+        pass
+    except:
+        # We don't want this error to propagate, escaping the call to
+        # os._exit() and causing very peculiar behavior in the calling
+        # regrtest.py !
+        # Note: could add traceback printing here.
+        debug("An unexpected error was raised.")
+        os._exit(1)
+    else:
+        debug("os.setsid() succeeded! (bad!)")
+        os._exit(2)
+    os._exit(4)
+else:
+    debug("Waiting for child (%d) to finish."%pid)
+    (pid, status) = os.waitpid(pid, 0)
+    res = status >> 8  # exit status is in the high byte of waitpid status
+    debug("Child (%d) exited with status %d (%d)."%(pid, res, status))
+    if res == 1:
+        raise TestFailed, "Child raised an unexpected exception in os.setsid()"
+    elif res == 2:
+        raise TestFailed, "pty.fork() failed to make child a session leader."
+    elif res == 3:
+        raise TestFailed, "Child spawned by pty.fork() did not have a tty as stdout"
+    elif res != 4:
+        raise TestFailed, "pty.fork() failed for unknown reasons."
+
+os.close(master_fd)
+
+# pty.fork() passed.
diff --git a/lib-python/2.2/test/test_pwd.py b/lib-python/2.2/test/test_pwd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pwd.py
@@ -0,0 +1,71 @@
+# Exercises pwd.getpwall()/getpwuid()/getpwnam() round-trips, then error
+# cases with fabricated non-existent names and uids (output-comparison test).
+from test_support import verbose
+import pwd
+
+print 'pwd.getpwall()'
+entries = pwd.getpwall()
+
+for e in entries:
+    name = e[0]
+    uid = e[2]
+    if verbose:
+        print name, uid
+    print 'pwd.getpwuid()'
+    dbuid = pwd.getpwuid(uid)
+    if dbuid[0] != name:
+        print 'Mismatch in pwd.getpwuid()'
+    print 'pwd.getpwnam()'
+    dbname = pwd.getpwnam(name)
+    if dbname[2] != uid:
+        print 'Mismatch in pwd.getpwnam()'
+    else:
+        print 'name matches uid'
+    break
+
+# try to get some errors
+bynames = {}
+byuids = {}
+for n, p, u, g, gecos, d, s in entries:
+    bynames[n] = u
+    byuids[u] = n
+
+allnames = bynames.keys()
+namei = 0
+fakename = allnames[namei]
+# Perturb a real name, one character at a time, until it no longer exists.
+while bynames.has_key(fakename):
+    chars = map(None, fakename)         # py2 idiom: string -> char list
+    for i in range(len(chars)):
+        if chars[i] == 'z':
+            chars[i] = 'A'
+            break
+        elif chars[i] == 'Z':
+            continue
+        else:
+            chars[i] = chr(ord(chars[i]) + 1)
+            break
+    else:
+        namei = namei + 1
+        try:
+            fakename = allnames[namei]
+        except IndexError:
+            # should never happen... if so, just forget it
+            break
+    fakename = ''.join(map(None, chars))
+
+try:
+    pwd.getpwnam(fakename)
+except KeyError:
+    print 'caught expected exception'
+else:
+    print 'fakename', fakename, 'did not except pwd.getpwnam()'
+
+# Choose a non-existent uid.
+fakeuid = 4127
+while byuids.has_key(fakeuid):
+    fakeuid = (fakeuid * 3) % 0x10000
+
+try:
+    pwd.getpwuid(fakeuid)
+except KeyError:
+    print 'caught expected exception'
+else:
+    print 'fakeuid', fakeuid, 'did not except pwd.getpwuid()'
diff --git a/lib-python/2.2/test/test_pyclbr.py b/lib-python/2.2/test/test_pyclbr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pyclbr.py
@@ -0,0 +1,154 @@
+'''
+   Test cases for pyclbr.py
+   Nick Mathewson
+'''
+from test_support import run_unittest
+import unittest, sys
+from types import ClassType, FunctionType, MethodType
+import pyclbr
+
+# This next line triggers an error on old versions of pyclbr.
+
+from commands import getstatus
+
+# Here we test the python class browser code.
+#
+# The main function in this suite, 'testModule', compares the output
+# of pyclbr with the introspected members of a module.  Because pyclbr
+# is imperfect (as designed), testModule is called with a set of
+# members to ignore.
+
+class PyclbrTest(unittest.TestCase):
+    """Compare pyclbr.readmodule_ex output against real module introspection,
+    ignoring the identifiers each caller knows pyclbr gets wrong."""
+
+    def assertListEq(self, l1, l2, ignore):
+        ''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
+        for p1, p2 in (l1, l2), (l2, l1):
+            for item in p1:
+                ok = (item in p2) or (item in ignore)
+                if not ok:
+                    self.fail("%r missing" % item)
+
+
+    def assertHasattr(self, obj, attr, ignore):
+        ''' succeed iff hasattr(obj,attr) or attr in ignore. '''
+        if attr in ignore: return
+        if not hasattr(obj, attr): print "???", attr
+        self.failUnless(hasattr(obj, attr))
+
+
+    def assertHaskey(self, obj, key, ignore):
+        ''' succeed iff obj.has_key(key) or key in ignore. '''
+        if key in ignore: return
+        if not obj.has_key(key): print "***",key
+        self.failUnless(obj.has_key(key))
+
+    def assertEquals(self, a, b, ignore=None):
+        ''' succeed iff a == b or a in ignore or b in ignore '''
+        if (ignore == None) or (a in ignore) or (b in ignore): return
+
+        unittest.TestCase.assertEquals(self, a, b)
+
+    def checkModule(self, moduleName, module=None, ignore=()):
+        ''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
+            to the actual module object, module.  Any identifiers in
+            ignore are ignored.   If no module is provided, the appropriate
+            module is loaded with __import__.'''
+
+        if module == None:
+            module = __import__(moduleName, globals(), {}, [])
+
+        dict = pyclbr.readmodule_ex(moduleName)
+
+        # Make sure the toplevel functions and classes are the same.
+        for name, value in dict.items():
+            if name in ignore:
+                continue
+            self.assertHasattr(module, name, ignore)
+            py_item = getattr(module, name)
+            if isinstance(value, pyclbr.Function):
+                self.assertEquals(type(py_item), FunctionType)
+            else:
+                self.assertEquals(type(py_item), ClassType)
+                real_bases = [base.__name__ for base in py_item.__bases__]
+                pyclbr_bases = [ getattr(base, 'name', base)
+                                 for base in value.super ]
+
+                self.assertListEq(real_bases, pyclbr_bases, ignore)
+
+                actualMethods = []
+                for m in py_item.__dict__.keys():
+                    if type(getattr(py_item, m)) == MethodType:
+                        actualMethods.append(m)
+                foundMethods = []
+                for m in value.methods.keys():
+                    # Undo private-name mangling for __-prefixed methods.
+                    if m[:2] == '__' and m[-2:] != '__':
+                        foundMethods.append('_'+name+m)
+                    else:
+                        foundMethods.append(m)
+
+                self.assertListEq(foundMethods, actualMethods, ignore)
+                self.assertEquals(py_item.__module__, value.module)
+
+                self.assertEquals(py_item.__name__, value.name, ignore)
+                # can't check file or lineno
+
+        # Now check for missing stuff.
+        for name in dir(module):
+            item = getattr(module, name)
+            if type(item) in (ClassType, FunctionType):
+                self.assertHaskey(dict, name, ignore)
+
+    def test_easy(self):
+        self.checkModule('pyclbr')
+        self.checkModule('doctest',
+                         ignore=['_isclass',
+                                 '_isfunction',
+                                 '_ismodule',
+                                 '_classify_class_attrs'])
+        self.checkModule('rfc822', ignore=["get"])
+        self.checkModule('xmllib')
+        self.checkModule('difflib')
+
+    def test_others(self):
+        cm = self.checkModule
+
+        # these are about the 20 longest modules.
+
+        cm('random', ignore=('_verify',)) # deleted
+
+        cm('cgi', ignore=('f', 'g',       # nested declarations
+                          'log'))         # set with =, not def
+
+        cm('mhlib', ignore=('do',          # nested declaration
+                            'bisect'))     # imported method, set with =
+
+        cm('urllib', ignore=('getproxies_environment', # set with =
+                             'getproxies_registry',    # set with =
+                             'open_https'))            # not on all platforms
+
+        cm('pickle', ignore=('g',))       # deleted declaration
+
+        cm('aifc', ignore=('openfp',))    # set with =
+
+        cm('Cookie', ignore=('__str__', 'Cookie')) # set with =
+
+        cm('sre_parse', ignore=('literal', # nested def
+                                'makedict', 'dump' # from sre_constants
+                                ))
+
+        cm('test.test_pyclbr',
+           module=sys.modules[__name__])
+
+        # pydoc doesn't work because of string issues
+        # cm('pydoc', pydoc)
+
+        # pdb plays too many dynamic games
+        # cm('pdb', pdb)
+
+def test_main():
+    """unittest entry point used by regrtest."""
+    run_unittest(PyclbrTest)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_pyexpat.py b/lib-python/2.2/test/test_pyexpat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_pyexpat.py
@@ -0,0 +1,202 @@
+# Very simple test - Parse a file and print what happens
+
+# XXX TypeErrors on calling handlers, or on bad return values from a
+# handler, are obscure and unhelpful.
+
+import pyexpat
+from xml.parsers import expat
+
+from test_support import sortdict
+
+class Outputter:
+    """Expat handler sink: prints every parser event for output comparison."""
+
+    def StartElementHandler(self, name, attrs):
+        print 'Start element:\n\t', repr(name), sortdict(attrs)
+
+    def EndElementHandler(self, name):
+        print 'End element:\n\t', repr(name)
+
+    def CharacterDataHandler(self, data):
+        data = data.strip()
+        if data:
+            print 'Character data:'
+            print '\t', repr(data)
+
+    def ProcessingInstructionHandler(self, target, data):
+        print 'PI:\n\t', repr(target), repr(data)
+
+    def StartNamespaceDeclHandler(self, prefix, uri):
+        print 'NS decl:\n\t', repr(prefix), repr(uri)
+
+    def EndNamespaceDeclHandler(self, prefix):
+        print 'End of NS decl:\n\t', repr(prefix)
+
+    def StartCdataSectionHandler(self):
+        print 'Start of CDATA section'
+
+    def EndCdataSectionHandler(self):
+        print 'End of CDATA section'
+
+    def CommentHandler(self, text):
+        print 'Comment:\n\t', repr(text)
+
+    def NotationDeclHandler(self, *args):
+        name, base, sysid, pubid = args
+        print 'Notation declared:', args
+
+    def UnparsedEntityDeclHandler(self, *args):
+        entityName, base, systemId, publicId, notationName = args
+        print 'Unparsed entity decl:\n\t', args
+
+    def NotStandaloneHandler(self, userData):
+        print 'Not standalone'
+        return 1
+
+    def ExternalEntityRefHandler(self, *args):
+        context, base, sysId, pubId = args
+        print 'External entity ref:', args[1:]
+        return 1
+
+    def DefaultHandler(self, userData):
+        pass
+
+    def DefaultHandlerExpand(self, userData):
+        pass
+
+
+def confirm(ok):
+    """Print "OK."/"Not OK." so the comparison output records the result."""
+    if ok:
+        print "OK."
+    else:
+        print "Not OK."
+
+# Drive expat over sample data three ways (UTF-8, Unicode, ParseFile),
+# then check namespace_separator argument validation.
+out = Outputter()
+parser = expat.ParserCreate(namespace_separator='!')
+
+# Test getting/setting returns_unicode
+parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
+parser.returns_unicode = 1; confirm(parser.returns_unicode == 1)
+parser.returns_unicode = 2; confirm(parser.returns_unicode == 1)
+parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
+
+# Test getting/setting ordered_attributes
+parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)
+parser.ordered_attributes = 1; confirm(parser.ordered_attributes == 1)
+parser.ordered_attributes = 2; confirm(parser.ordered_attributes == 1)
+parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)
+
+# Test getting/setting specified_attributes
+parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
+parser.specified_attributes = 1; confirm(parser.specified_attributes == 1)
+parser.specified_attributes = 2; confirm(parser.specified_attributes == 1)
+parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
+
+HANDLER_NAMES = [
+    'StartElementHandler', 'EndElementHandler',
+    'CharacterDataHandler', 'ProcessingInstructionHandler',
+    'UnparsedEntityDeclHandler', 'NotationDeclHandler',
+    'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
+    'CommentHandler', 'StartCdataSectionHandler',
+    'EndCdataSectionHandler',
+    'DefaultHandler', 'DefaultHandlerExpand',
+    #'NotStandaloneHandler',
+    'ExternalEntityRefHandler'
+    ]
+for name in HANDLER_NAMES:
+    setattr(parser, name, getattr(out, name))
+
+data = '''\
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<?xml-stylesheet href="stylesheet.css"?>
+<!-- comment data -->
+<!DOCTYPE quotations SYSTEM "quotations.dtd" [
+<!ELEMENT root ANY>
+<!NOTATION notation SYSTEM "notation.jpeg">
+<!ENTITY acirc "&#226;">
+<!ENTITY external_entity SYSTEM "entity.file">
+<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
+%unparsed_entity;
+]>
+
+<root attr1="value1" attr2="value2&#8000;">
+<myns:subelement xmlns:myns="http://www.python.org/namespace">
+     Contents of subelements
+</myns:subelement>
+<sub2><![CDATA[contents of CDATA section]]></sub2>
+&external_entity;
+</root>
+'''
+
+# Produce UTF-8 output
+parser.returns_unicode = 0
+try:
+    parser.Parse(data, 1)
+except expat.error:
+    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
+    print '** Line', parser.ErrorLineNumber
+    print '** Column', parser.ErrorColumnNumber
+    print '** Byte', parser.ErrorByteIndex
+
+# Try the parse again, this time producing Unicode output
+parser = expat.ParserCreate(namespace_separator='!')
+parser.returns_unicode = 1
+
+for name in HANDLER_NAMES:
+    setattr(parser, name, getattr(out, name))
+try:
+    parser.Parse(data, 1)
+except expat.error:
+    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
+    print '** Line', parser.ErrorLineNumber
+    print '** Column', parser.ErrorColumnNumber
+    print '** Byte', parser.ErrorByteIndex
+
+# Try parsing a file
+parser = expat.ParserCreate(namespace_separator='!')
+parser.returns_unicode = 1
+
+for name in HANDLER_NAMES:
+    setattr(parser, name, getattr(out, name))
+import StringIO
+file = StringIO.StringIO(data)
+try:
+    parser.ParseFile(file)
+except expat.error:
+    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
+    print '** Line', parser.ErrorLineNumber
+    print '** Column', parser.ErrorColumnNumber
+    print '** Byte', parser.ErrorByteIndex
+
+
+# Tests that make sure we get errors when the namespace_separator value
+# is illegal, and that we don't for good values:
+print
+print "Testing constructor for proper handling of namespace_separator values:"
+expat.ParserCreate()
+expat.ParserCreate(namespace_separator=None)
+expat.ParserCreate(namespace_separator=' ')
+print "Legal values tested o.k."
+try:
+    expat.ParserCreate(namespace_separator=42)
+except TypeError, e:
+    print "Caught expected TypeError:"
+    print e
+else:
+    print "Failed to catch expected TypeError."
+
+try:
+    expat.ParserCreate(namespace_separator='too long')
+except ValueError, e:
+    print "Caught expected ValueError:"
+    print e
+else:
+    print "Failed to catch expected ValueError."
+
+# ParserCreate() needs to accept a namespace_separator of zero length
+# to satisfy the requirements of RDF applications that are required
+# to simply glue together the namespace URI and the localname.  Though
+# considered a wart of the RDF specifications, it needs to be supported.
+#
+# See XML-SIG mailing list thread starting with
+# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
+#
+expat.ParserCreate(namespace_separator='') # zero-length separator must be accepted
diff --git a/lib-python/2.2/test/test_queue.py b/lib-python/2.2/test/test_queue.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_queue.py
@@ -0,0 +1,158 @@
+# Some simple Queue module tests, plus some failure conditions
+# to ensure the Queue locks remain stable
+import Queue
+import sys
+import threading
+import time
+
+from test_support import verify, TestFailed, verbose
+
+queue_size = 5
+
+# Execute a function that blocks, and in a separate thread, a function that
+# triggers the release.  Returns the result of the blocking function.
+class _TriggerThread(threading.Thread):
+    def __init__(self, fn, args):
+        self.fn = fn
+        self.args = args
+        self.startedEvent = threading.Event()
+        threading.Thread.__init__(self)
+    def run(self):
+        time.sleep(.1)
+        self.startedEvent.set()
+        self.fn(*self.args)
+
+def _doBlockingTest( block_func, block_args, trigger_func, trigger_args):
+    t = _TriggerThread(trigger_func, trigger_args)
+    t.start()
+    try:
+        return block_func(*block_args)
+    finally:
+        # If we unblocked before our thread made the call, we failed!
+        if not t.startedEvent.isSet():
+            raise TestFailed("blocking function '%r' appeared not to block" % (block_func,))
+        t.join(1) # make sure the thread terminates
+        if t.isAlive():
+            raise TestFailed("trigger function '%r' appeared to not return" % (trigger_func,))
+
+# A Queue subclass that can provoke failure at a moment's notice :)
+class FailingQueueException(Exception):
+    pass
+
+class FailingQueue(Queue.Queue):
+    def __init__(self, *args):
+        self.fail_next_put = False
+        self.fail_next_get = False
+        Queue.Queue.__init__(self, *args)
+    def _put(self, item):
+        if self.fail_next_put:
+            self.fail_next_put = False
+            raise FailingQueueException, "You Lose"
+        return Queue.Queue._put(self, item)
+    def _get(self):
+        if self.fail_next_get:
+            self.fail_next_get = False
+            raise FailingQueueException, "You Lose"
+        return Queue.Queue._get(self)
+
+def FailingQueueTest(q):
+    if not q.empty():
+        raise RuntimeError, "Call this function with an empty queue"
+    for i in range(queue_size-1):
+        q.put(i)
+    q.fail_next_put = True
+    # Test a failing non-blocking put.
+    try:
+        q.put("oops", block=0)
+        raise TestFailed("The queue didn't fail when it should have")
+    except FailingQueueException:
+        pass
+    q.put("last")
+    verify(q.full(), "Queue should be full")
+    q.fail_next_put = True
+    # Test a failing blocking put
+    try:
+        _doBlockingTest( q.put, ("full",), q.get, ())
+        raise TestFailed("The queue didn't fail when it should have")
+    except FailingQueueException:
+        pass
+    # Check the Queue isn't damaged.
+    # put failed, but get succeeded - re-add
+    q.put("last")
+    verify(q.full(), "Queue should be full")
+    q.get()
+    verify(not q.full(), "Queue should not be full")
+    q.put("last")
+    verify(q.full(), "Queue should be full")
+    # Test a blocking put
+    _doBlockingTest( q.put, ("full",), q.get, ())
+    # Empty it
+    for i in range(queue_size):
+        q.get()
+    verify(q.empty(), "Queue should be empty")
+    q.put("first")
+    q.fail_next_get = True
+    try:
+        q.get()
+        raise TestFailed("The queue didn't fail when it should have")
+    except FailingQueueException:
+        pass
+    verify(not q.empty(), "Queue should not be empty")
+    q.get()
+    verify(q.empty(), "Queue should be empty")
+    q.fail_next_get = True
+    try:
+        _doBlockingTest( q.get, (), q.put, ('empty',))
+        raise TestFailed("The queue didn't fail when it should have")
+    except FailingQueueException:
+        pass
+    # put succeeded, but get failed.
+    verify(not q.empty(), "Queue should not be empty")
+    q.get()
+    verify(q.empty(), "Queue should be empty")
+
+def SimpleQueueTest(q):
+    if not q.empty():
+        raise RuntimeError, "Call this function with an empty queue"
+    # I guess we better check things actually queue correctly a little :)
+    q.put(111)
+    q.put(222)
+    verify(q.get()==111 and q.get()==222, "Didn't seem to queue the correct data!")
+    for i in range(queue_size-1):
+        q.put(i)
+    verify(not q.full(), "Queue should not be full")
+    q.put("last")
+    verify(q.full(), "Queue should be full")
+    try:
+        q.put("full", block=0)
+        raise TestFailed("Didn't appear to block with a full queue")
+    except Queue.Full:
+        pass
+    # Test a blocking put
+    _doBlockingTest( q.put, ("full",), q.get, ())
+    # Empty it
+    for i in range(queue_size):
+        q.get()
+    verify(q.empty(), "Queue should be empty")
+    try:
+        q.get(block=0)
+        raise TestFailed("Didn't appear to block with an empty queue")
+    except Queue.Empty:
+        pass
+    # Test a blocking get
+    _doBlockingTest( q.get, (), q.put, ('empty',))
+
+def test():
+    q=Queue.Queue(queue_size)
+    # Do it a couple of times on the same queue
+    SimpleQueueTest(q)
+    SimpleQueueTest(q)
+    if verbose:
+        print "Simple Queue tests seemed to work"
+    q = FailingQueue(queue_size)
+    FailingQueueTest(q)
+    FailingQueueTest(q)
+    if verbose:
+        print "Failing Queue tests seemed to work"
+
+test()
diff --git a/lib-python/2.2/test/test_quopri.py b/lib-python/2.2/test/test_quopri.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_quopri.py
@@ -0,0 +1,157 @@
+import test_support
+import unittest
+
+from cStringIO import StringIO
+from quopri import *
+
+
+
+ENCSAMPLE = """\
+Here's a bunch of special=20
+
+=A1=A2=A3=A4=A5=A6=A7=A8=A9
+=AA=AB=AC=AD=AE=AF=B0=B1=B2=B3
+=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE
+=BF=C0=C1=C2=C3=C4=C5=C6
+=C7=C8=C9=CA=CB=CC=CD=CE=CF
+=D0=D1=D2=D3=D4=D5=D6=D7
+=D8=D9=DA=DB=DC=DD=DE=DF
+=E0=E1=E2=E3=E4=E5=E6=E7
+=E8=E9=EA=EB=EC=ED=EE=EF
+=F0=F1=F2=F3=F4=F5=F6=F7
+=F8=F9=FA=FB=FC=FD=FE=FF
+
+characters... have fun!
+"""
+
+# First line ends with a space
+DECSAMPLE = "Here's a bunch of special \n" + \
+"""\
+
+\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9
+\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3
+\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe
+\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6
+\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf
+\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7
+\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf
+\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7
+\xe8\xe9\xea\xeb\xec\xed\xee\xef
+\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7
+\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff
+
+characters... have fun!
+"""
+
+
+
+class QuopriTestCase(unittest.TestCase):
+    # Each entry is a tuple of (plaintext, encoded string).  These strings are
+    # used in the "quotetabs=0" tests.
+    STRINGS = (
+        # Some normal strings
+        ('hello', 'hello'),
+        ('''hello
+        there
+        world''', '''hello
+        there
+        world'''),
+        ('''hello
+        there
+        world
+''', '''hello
+        there
+        world
+'''),
+        ('\201\202\203', '=81=82=83'),
+        # Add some trailing MUST QUOTE strings
+        ('hello ', 'hello=20'),
+        ('hello\t', 'hello=09'),
+        # Some long lines.  First, a single line of 108 characters
+        ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+         '''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
+        # A line of exactly 76 characters, no soft line break should be needed
+        ('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
+        'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
+        # A line of 77 characters, forcing a soft line break at position 75,
+        # and a second line of exactly 2 characters (because the soft line
+        # break `=' sign counts against the line length limit).
+        ('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
+         '''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
+zz'''),
+        # A line of 151 characters, forcing a soft line break at position 75,
+        # with a second line of exactly 76 characters and no trailing =
+        ('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
+         '''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
+        # A string containing a hard line break, in which the first line is
+        # 151 characters and the second line is exactly 76 characters.  This
+        # should leave us with three lines, of which the first has a soft line
+        # break while the second and third do not.
+        ('''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
+         '''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
+yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
+        # Now some really complex stuff ;)
+        (DECSAMPLE, ENCSAMPLE),
+        )
+
+    # These are used in the "quotetabs=1" tests.
+    ESTRINGS = (
+        ('hello world', 'hello=20world'),
+        ('hello\tworld', 'hello=09world'),
+        )
+
+    # These are used in the "header=1" tests.
+    HSTRINGS = (
+        ('hello world', 'hello_world'),
+        ('hello_world', 'hello=5Fworld'),
+        )
+
+    def test_encodestring(self):
+        for p, e in self.STRINGS:
+            self.assert_(encodestring(p) == e)
+
+    def test_decodestring(self):
+        for p, e in self.STRINGS:
+            self.assert_(decodestring(e) == p)
+
+    def test_idempotent_string(self):
+        for p, e in self.STRINGS:
+            self.assert_(decodestring(encodestring(e)) == e)
+
+    def test_encode(self):
+        for p, e in self.STRINGS:
+            infp = StringIO(p)
+            outfp = StringIO()
+            encode(infp, outfp, quotetabs=0)
+            self.assert_(outfp.getvalue() == e)
+
+    def test_decode(self):
+        for p, e in self.STRINGS:
+            infp = StringIO(e)
+            outfp = StringIO()
+            decode(infp, outfp)
+            self.assert_(outfp.getvalue() == p)
+
+    def test_embedded_ws(self):
+        for p, e in self.ESTRINGS:
+            self.assert_(encodestring(p, quotetabs=1) == e)
+            self.assert_(decodestring(e) == p)
+
+    def test_encode_header(self):
+        for p, e in self.HSTRINGS:
+            self.assert_(encodestring(p, header = 1) == e)
+
+    def test_decode_header(self):
+        for p, e in self.HSTRINGS:
+            self.assert_(decodestring(e, header = 1) == p)
+
+def test_main():
+    test_support.run_unittest(QuopriTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_random.py b/lib-python/2.2/test/test_random.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_random.py
@@ -0,0 +1,19 @@
+import test_support
+import random
+
+# Ensure that the seed() method initializes all the hidden state.  In
+# particular, through 2.2.1 it failed to reset a piece of state used by
+# (and only by) the .gauss() method.
+
+for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
+    for seeder in random.seed, random.whseed:
+        seeder(seed)
+        x1 = random.random()
+        y1 = random.gauss(0, 1)
+
+        seeder(seed)
+        x2 = random.random()
+        y2 = random.gauss(0, 1)
+
+        test_support.vereq(x1, x2)
+        test_support.vereq(y1, y2)
diff --git a/lib-python/2.2/test/test_re.py b/lib-python/2.2/test/test_re.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_re.py
@@ -0,0 +1,392 @@
+import sys
+sys.path = ['.'] + sys.path
+
+from test_support import verify, verbose, TestFailed
+import re
+import sys, os, traceback
+
+# Misc tests from Tim Peters' re.doc
+
+if verbose:
+    print 'Running tests on re.search and re.match'
+
+try:
+    verify(re.search('x*', 'axx').span(0) == (0, 0))
+    verify(re.search('x*', 'axx').span() == (0, 0))
+    verify(re.search('x+', 'axx').span(0) == (1, 3))
+    verify(re.search('x+', 'axx').span() == (1, 3))
+    verify(re.search('x', 'aaa') is None)
+except:
+    raise TestFailed, "re.search"
+
+try:
+    verify(re.match('a*', 'xxx').span(0) == (0, 0))
+    verify(re.match('a*', 'xxx').span() == (0, 0))
+    verify(re.match('x*', 'xxxa').span(0) == (0, 3))
+    verify(re.match('x*', 'xxxa').span() == (0, 3))
+    verify(re.match('a+', 'xxx') is None)
+except:
+    raise TestFailed, "re.search"
+
+if verbose:
+    print 'Running tests on re.sub'
+
+try:
+    verify(re.sub("(?i)b+", "x", "bbbb BBBB") == 'x x')
+
+    def bump_num(matchobj):
+        int_value = int(matchobj.group(0))
+        return str(int_value + 1)
+
+    verify(re.sub(r'\d+', bump_num, '08.2 -2 23x99y') == '9.3 -3 24x100y')
+    verify(re.sub(r'\d+', bump_num, '08.2 -2 23x99y', 3) == '9.3 -3 23x99y')
+
+    verify(re.sub('.', lambda m: r"\n", 'x') == '\\n')
+    verify(re.sub('.', r"\n", 'x') == '\n')
+
+    s = r"\1\1"
+    verify(re.sub('(.)', s, 'x') == 'xx')
+    verify(re.sub('(.)', re.escape(s), 'x') == s)
+    verify(re.sub('(.)', lambda m: s, 'x') == s)
+
+    verify(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx') == 'xxxx')
+    verify(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx') == 'xxxx')
+    verify(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx') == 'xxxx')
+    verify(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx') == 'xxxx')
+
+    verify(re.sub('a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a') == '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
+    verify(re.sub('a', '\t\n\v\r\f\a', 'a') == '\t\n\v\r\f\a')
+    verify(re.sub('a', '\t\n\v\r\f\a', 'a') == (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
+
+    verify(re.sub('^\s*', 'X', 'test') == 'Xtest')
+
+    # Test for sub() on escaped characters, see SF bug #449000
+    verify(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n') == 'abc\ndef\n')
+    verify(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n') == 'abc\ndef\n')
+    verify(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n') == 'abc\ndef\n')
+    verify(re.sub('\r\n', '\n', 'abc\r\ndef\r\n') == 'abc\ndef\n')
+except AssertionError:
+    raise TestFailed, "re.sub"
+
+
+try:
+    verify(re.sub('a', 'b', 'aaaaa') == 'bbbbb')
+    verify(re.sub('a', 'b', 'aaaaa', 1) == 'baaaa')
+except AssertionError:
+    raise TestFailed, "qualified re.sub"
+
+if verbose:
+    print 'Running tests on symbolic references'
+
+try:
+    re.sub('(?P<a>x)', '\g<a', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)', '\g<', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)', '\g', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)', '\g<a a>', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)', '\g<1a1>', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)', '\g<ab>', 'xx')
+except IndexError, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+try:
+    re.sub('(?P<a>x)|(?P<b>y)', '\\2', 'xx')
+except re.error, reason:
+    pass
+else:
+    raise TestFailed, "symbolic reference"
+
+if verbose:
+    print 'Running tests on re.subn'
+
+try:
+    verify(re.subn("(?i)b+", "x", "bbbb BBBB") == ('x x', 2))
+    verify(re.subn("b+", "x", "bbbb BBBB") == ('x BBBB', 1))
+    verify(re.subn("b+", "x", "xyz") == ('xyz', 0))
+    verify(re.subn("b*", "x", "xyz") == ('xxxyxzx', 4))
+    verify(re.subn("b*", "x", "xyz", 2) == ('xxxyz', 2))
+except AssertionError:
+    raise TestFailed, "re.subn"
+
+if verbose:
+    print 'Running tests on re.split'
+
+try:
+    verify(re.split(":", ":a:b::c") == ['', 'a', 'b', '', 'c'])
+    verify(re.split(":*", ":a:b::c") == ['', 'a', 'b', 'c'])
+    verify(re.split("(:*)", ":a:b::c") == ['', ':', 'a', ':', 'b', '::', 'c'])
+    verify(re.split("(?::*)", ":a:b::c") == ['', 'a', 'b', 'c'])
+    verify(re.split("(:)*", ":a:b::c") == ['', ':', 'a', ':', 'b', ':', 'c'])
+    verify(re.split("([b:]+)", ":a:b::c") == ['', ':', 'a', ':b::', 'c'])
+    verify(re.split("(b)|(:+)", ":a:b::c") == \
+           ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c'] )
+    verify(re.split("(?:b)|(?::+)", ":a:b::c") == ['', 'a', '', '', 'c'])
+except AssertionError:
+    raise TestFailed, "re.split"
+
+try:
+    verify(re.split(":", ":a:b::c", 2) == ['', 'a', 'b::c'])
+    verify(re.split(':', 'a:b:c:d', 2) == ['a', 'b', 'c:d'])
+
+    verify(re.split("(:)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c'])
+    verify(re.split("(:*)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c'])
+except AssertionError:
+    raise TestFailed, "qualified re.split"
+
+if verbose:
+    print "Running tests on re.findall"
+
+try:
+    verify(re.findall(":+", "abc") == [])
+    verify(re.findall(":+", "a:b::c:::d") == [":", "::", ":::"])
+    verify(re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"])
+    verify(re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""),
+                                                   (":", ":"),
+                                                   (":", "::")] )
+except AssertionError:
+    raise TestFailed, "re.findall"
+
+if verbose:
+    print "Running tests on re.match"
+
+try:
+    # No groups at all
+    m = re.match('a', 'a') ; verify(m.groups() == ())
+    # A single group
+    m = re.match('(a)', 'a') ; verify(m.groups() == ('a',))
+
+    pat = re.compile('((a)|(b))(c)?')
+    verify(pat.match('a').groups() == ('a', 'a', None, None))
+    verify(pat.match('b').groups() == ('b', None, 'b', None))
+    verify(pat.match('ac').groups() == ('a', 'a', None, 'c'))
+    verify(pat.match('bc').groups() == ('b', None, 'b', 'c'))
+    verify(pat.match('bc').groups("") == ('b', "", 'b', 'c'))
+except AssertionError:
+    raise TestFailed, "match .groups() method"
+
+try:
+    # A single group
+    m = re.match('(a)', 'a')
+    verify(m.group(0) == 'a')
+    verify(m.group(0) == 'a')
+    verify(m.group(1) == 'a')
+    verify(m.group(1, 1) == ('a', 'a'))
+
+    pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
+    verify(pat.match('a').group(1, 2, 3) == ('a', None, None))
+    verify(pat.match('b').group('a1', 'b2', 'c3') == (None, 'b', None))
+    verify(pat.match('ac').group(1, 'b2', 3) == ('a', None, 'c'))
+except AssertionError:
+    raise TestFailed, "match .group() method"
+
+if verbose:
+    print "Running tests on re.escape"
+
+try:
+    p=""
+    for i in range(0, 256):
+        p = p + chr(i)
+        verify(re.match(re.escape(chr(i)), chr(i)) is not None)
+        verify(re.match(re.escape(chr(i)), chr(i)).span() == (0,1))
+
+    pat=re.compile( re.escape(p) )
+    verify(pat.match(p) is not None)
+    verify(pat.match(p).span() == (0,256))
+except AssertionError:
+    raise TestFailed, "re.escape"
+
+
+if verbose:
+    print 'Pickling a RegexObject instance'
+
+import pickle
+pat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
+s = pickle.dumps(pat)
+pat = pickle.loads(s)
+
+try:
+    verify(re.I == re.IGNORECASE)
+    verify(re.L == re.LOCALE)
+    verify(re.M == re.MULTILINE)
+    verify(re.S == re.DOTALL)
+    verify(re.X == re.VERBOSE)
+except AssertionError:
+    raise TestFailed, 're module constants'
+
+for flags in [re.I, re.M, re.X, re.S, re.L]:
+    try:
+        r = re.compile('^pattern$', flags)
+    except:
+        print 'Exception raised on flag', flags
+
+if verbose:
+    print 'Test engine limitations'
+
+# Try nasty case that overflows the straightforward recursive
+# implementation of repeated groups.
+try:
+    verify(re.match('(x)*', 50000*'x').span() == (0, 50000))
+except RuntimeError, v:
+    print v
+
+from re_tests import *
+
+if verbose:
+    print 'Running re_tests test suite'
+else:
+    # To save time, only run the first and last 10 tests
+    #tests = tests[:10] + tests[-10:]
+    pass
+
+for t in tests:
+    sys.stdout.flush()
+    pattern = s = outcome = repl = expected = None
+    if len(t) == 5:
+        pattern, s, outcome, repl, expected = t
+    elif len(t) == 3:
+        pattern, s, outcome = t
+    else:
+        raise ValueError, ('Test tuples should have 3 or 5 fields', t)
+
+    try:
+        obj = re.compile(pattern)
+    except re.error:
+        if outcome == SYNTAX_ERROR: pass  # Expected a syntax error
+        else:
+            print '=== Syntax error:', t
+    except KeyboardInterrupt: raise KeyboardInterrupt
+    except:
+        print '*** Unexpected error ***', t
+        if verbose:
+            traceback.print_exc(file=sys.stdout)
+    else:
+        try:
+            result = obj.search(s)
+        except re.error, msg:
+            print '=== Unexpected exception', t, repr(msg)
+        if outcome == SYNTAX_ERROR:
+            # This should have been a syntax error; forget it.
+            pass
+        elif outcome == FAIL:
+            if result is None: pass   # No match, as expected
+            else: print '=== Succeeded incorrectly', t
+        elif outcome == SUCCEED:
+            if result is not None:
+                # Matched, as expected, so now we compute the
+                # result string and compare it to our expected result.
+                start, end = result.span(0)
+                vardict={'found': result.group(0),
+                         'groups': result.group(),
+                         'flags': result.re.flags}
+                for i in range(1, 100):
+                    try:
+                        gi = result.group(i)
+                        # Special hack because else the string concat fails:
+                        if gi is None:
+                            gi = "None"
+                    except IndexError:
+                        gi = "Error"
+                    vardict['g%d' % i] = gi
+                for i in result.re.groupindex.keys():
+                    try:
+                        gi = result.group(i)
+                        if gi is None:
+                            gi = "None"
+                    except IndexError:
+                        gi = "Error"
+                    vardict[i] = gi
+                repl = eval(repl, vardict)
+                if repl != expected:
+                    print '=== grouping error', t,
+                    print repr(repl) + ' should be ' + repr(expected)
+            else:
+                print '=== Failed incorrectly', t
+
+            # Try the match on a unicode string, and check that it
+            # still succeeds.
+            try:
+                result = obj.search(unicode(s, "latin-1"))
+                if result is None:
+                    print '=== Fails on unicode match', t
+            except NameError:
+                continue # 1.5.2
+            except TypeError:
+                continue # unicode test case
+
+            # Try the match on a unicode pattern, and check that it
+            # still succeeds.
+            obj=re.compile(unicode(pattern, "latin-1"))
+            result = obj.search(s)
+            if result is None:
+                print '=== Fails on unicode pattern match', t
+
+            # Try the match with the search area limited to the extent
+            # of the match and see if it still succeeds.  \B will
+            # break (because it won't match at the end or start of a
+            # string), so we'll ignore patterns that feature it.
+
+            if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
+                           and result is not None:
+                obj = re.compile(pattern)
+                result = obj.search(s, result.start(0), result.end(0) + 1)
+                if result is None:
+                    print '=== Failed on range-limited match', t
+
+            # Try the match with IGNORECASE enabled, and check that it
+            # still succeeds.
+            obj = re.compile(pattern, re.IGNORECASE)
+            result = obj.search(s)
+            if result is None:
+                print '=== Fails on case-insensitive match', t
+
+            # Try the match with LOCALE enabled, and check that it
+            # still succeeds.
+            obj = re.compile(pattern, re.LOCALE)
+            result = obj.search(s)
+            if result is None:
+                print '=== Fails on locale-sensitive match', t
+
+            # Try the match with UNICODE locale enabled, and check
+            # that it still succeeds.
+            obj = re.compile(pattern, re.UNICODE)
+            result = obj.search(s)
+            if result is None:
+                print '=== Fails on unicode-sensitive match', t
diff --git a/lib-python/2.2/test/test_regex.py b/lib-python/2.2/test/test_regex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_regex.py
@@ -0,0 +1,113 @@
+from test_support import verbose, sortdict
+import warnings
+warnings.filterwarnings("ignore", "the regex module is deprecated",
+                        DeprecationWarning, __name__)
+import regex
+from regex_syntax import *
+
+re = 'a+b+c+'
+print 'no match:', regex.match(re, 'hello aaaabcccc world')
+print 'successful search:', regex.search(re, 'hello aaaabcccc world')
+try:
+    cre = regex.compile('\(' + re)
+except regex.error:
+    print 'caught expected exception'
+else:
+    print 'expected regex.error not raised'
+
+print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
+prev = regex.set_syntax(RE_SYNTAX_AWK)
+print 'successful awk syntax:', regex.search('(a+)|(b+)', 'cdb')
+regex.set_syntax(prev)
+print 'failed awk syntax:', regex.search('(a+)|(b+)', 'cdb')
+
+re = '\(<one>[0-9]+\) *\(<two>[0-9]+\)'
+print 'matching with group names and compile()'
+cre = regex.compile(re)
+print cre.match('801 999')
+try:
+    print cre.group('one')
+except regex.error:
+    print 'caught expected exception'
+else:
+    print 'expected regex.error not raised'
+
+print 'matching with group names and symcomp()'
+cre = regex.symcomp(re)
+print cre.match('801 999')
+print cre.group(0)
+print cre.group('one')
+print cre.group(1, 2)
+print cre.group('one', 'two')
+print 'realpat:', cre.realpat
+print 'groupindex:', sortdict(cre.groupindex)
+
+re = 'world'
+cre = regex.compile(re)
+print 'not case folded search:', cre.search('HELLO WORLD')
+cre = regex.compile(re, regex.casefold)
+print 'case folded search:', cre.search('HELLO WORLD')
+
+print '__members__:', cre.__members__
+print 'regs:', cre.regs
+print 'last:', cre.last
+print 'translate:', len(cre.translate)
+print 'givenpat:', cre.givenpat
+
+print 'match with pos:', cre.match('hello world', 7)
+print 'search with pos:', cre.search('hello world there world', 7)
+print 'bogus group:', cre.group(0, 1, 3)
+try:
+    print 'no name:', cre.group('one')
+except regex.error:
+    print 'caught expected exception'
+else:
+    print 'expected regex.error not raised'
+
+from regex_tests import *
+if verbose: print 'Running regex_tests test suite'
+
+for t in tests:
+    pattern=s=outcome=repl=expected=None
+    if len(t)==5:
+        pattern, s, outcome, repl, expected = t
+    elif len(t)==3:
+        pattern, s, outcome = t
+    else:
+        raise ValueError, ('Test tuples should have 3 or 5 fields',t)
+
+    try:
+        obj=regex.compile(pattern)
+    except regex.error:
+        if outcome==SYNTAX_ERROR: pass    # Expected a syntax error
+        else:
+            # Regex syntax errors aren't yet reported, so for
+            # the official test suite they'll be quietly ignored.
+            pass
+            #print '=== Syntax error:', t
+    else:
+        try:
+            result=obj.search(s)
+        except regex.error, msg:
+            print '=== Unexpected exception', t, repr(msg)
+        if outcome==SYNTAX_ERROR:
+            # This should have been a syntax error; forget it.
+            pass
+        elif outcome==FAIL:
+            if result==-1: pass   # No match, as expected
+            else: print '=== Succeeded incorrectly', t
+        elif outcome==SUCCEED:
+            if result!=-1:
+                # Matched, as expected, so now we compute the
+                # result string and compare it to our expected result.
+                start, end = obj.regs[0]
+                found=s[start:end]
+                groups=obj.group(1,2,3,4,5,6,7,8,9,10)
+                vardict=vars()
+                for i in range(len(groups)):
+                    vardict['g'+str(i+1)]=str(groups[i])
+                repl=eval(repl)
+                if repl!=expected:
+                    print '=== grouping error', t, repr(repl)+' should be '+repr(expected)
+            else:
+                print '=== Failed incorrectly', t
diff --git a/lib-python/2.2/test/test_repr.py b/lib-python/2.2/test/test_repr.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_repr.py
@@ -0,0 +1,275 @@
+"""
+  Test cases for the repr module
+  Nick Mathewson
+"""
+
+import sys
+import os
+import unittest
+
+from test_support import run_unittest
+from repr import repr as r # Don't shadow builtin repr
+
+
+def nestedTuple(nesting):
+    t = ()
+    for i in range(nesting):
+        t = (t,)
+    return t
+
+class ReprTests(unittest.TestCase):
+
+    def test_string(self):
+        eq = self.assertEquals
+        eq(r("abc"), "'abc'")
+        eq(r("abcdefghijklmnop"),"'abcdefghijklmnop'")
+
+        s = "a"*30+"b"*30
+        expected = `s`[:13] + "..." + `s`[-14:]
+        eq(r(s), expected)
+
+        eq(r("\"'"), repr("\"'"))
+        s = "\""*30+"'"*100
+        expected = `s`[:13] + "..." + `s`[-14:]
+        eq(r(s), expected)
+
+    def test_container(self):
+        eq = self.assertEquals
+        # Tuples give up after 6 elements
+        eq(r(()), "()")
+        eq(r((1,)), "(1,)")
+        eq(r((1, 2, 3)), "(1, 2, 3)")
+        eq(r((1, 2, 3, 4, 5, 6)), "(1, 2, 3, 4, 5, 6)")
+        eq(r((1, 2, 3, 4, 5, 6, 7)), "(1, 2, 3, 4, 5, 6, ...)")
+
+        # Lists give up after 6 as well
+        eq(r([]), "[]")
+        eq(r([1]), "[1]")
+        eq(r([1, 2, 3]), "[1, 2, 3]")
+        eq(r([1, 2, 3, 4, 5, 6]), "[1, 2, 3, 4, 5, 6]")
+        eq(r([1, 2, 3, 4, 5, 6, 7]), "[1, 2, 3, 4, 5, 6, ...]")
+
+        # Dictionaries give up after 4.
+        eq(r({}), "{}")
+        d = {'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}
+        eq(r(d), "{'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}")
+        d['arthur'] = 1
+        eq(r(d), "{'alice': 1, 'arthur': 1, 'bob': 2, 'charles': 3, ...}")
+
+    def test_numbers(self):
+        eq = self.assertEquals
+        eq(r(123), repr(123))
+        eq(r(123L), repr(123L))
+        eq(r(1.0/3), repr(1.0/3))
+
+        n = 10L**100
+        expected = `n`[:18] + "..." + `n`[-19:]
+        eq(r(n), expected)
+
+    def test_instance(self):
+        eq = self.assertEquals
+        i1 = ClassWithRepr("a")
+        eq(r(i1), repr(i1))
+
+        i2 = ClassWithRepr("x"*1000)
+        expected = `i2`[:13] + "..." + `i2`[-14:]
+        eq(r(i2), expected)
+
+        i3 = ClassWithFailingRepr()
+        eq(r(i3), ("<ClassWithFailingRepr instance at %x>"%id(i3)))
+
+        s = r(ClassWithFailingRepr)
+        self.failUnless(s.startswith("<class "))
+        self.failUnless(s.endswith(">"))
+        self.failUnless(s.find("...") == 8)
+
+    def test_file(self):
+        fp = open(unittest.__file__)
+        self.failUnless(repr(fp).startswith(
+            "<open file '%s', mode 'r' at 0x" % unittest.__file__))
+        fp.close()
+        self.failUnless(repr(fp).startswith(
+            "<closed file '%s', mode 'r' at 0x" % unittest.__file__))
+
+    def test_lambda(self):
+        self.failUnless(repr(lambda x: x).startswith(
+            "<function <lambda"))
+        # XXX anonymous functions?  see func_repr
+
+    def test_builtin_function(self):
+        eq = self.assertEquals
+        # Functions
+        eq(repr(hash), '<built-in function hash>')
+        # Methods
+        self.failUnless(repr(''.split).startswith(
+            '<built-in method split of str object at 0x'))
+
+    def test_xrange(self):
+        eq = self.assertEquals
+        eq(repr(xrange(1)), 'xrange(1)')
+        eq(repr(xrange(1, 2)), 'xrange(1, 2)')
+        eq(repr(xrange(1, 2, 3)), 'xrange(1, 4, 3)')
+        # Turn off warnings for deprecated multiplication
+        import warnings
+        warnings.filterwarnings('ignore', category=DeprecationWarning,
+                                module=ReprTests.__module__)
+        eq(repr(xrange(1) * 3), '(xrange(1) * 3)')
+
+    def test_nesting(self):
+        eq = self.assertEquals
+        # everything is meant to give up after 6 levels.
+        eq(r([[[[[[[]]]]]]]), "[[[[[[[]]]]]]]")
+        eq(r([[[[[[[[]]]]]]]]), "[[[[[[[...]]]]]]]")
+
+        eq(r(nestedTuple(6)), "(((((((),),),),),),)")
+        eq(r(nestedTuple(7)), "(((((((...),),),),),),)")
+
+        eq(r({ nestedTuple(5) : nestedTuple(5) }),
+           "{((((((),),),),),): ((((((),),),),),)}")
+        eq(r({ nestedTuple(6) : nestedTuple(6) }),
+           "{((((((...),),),),),): ((((((...),),),),),)}")
+
+        eq(r([[[[[[{}]]]]]]), "[[[[[[{}]]]]]]")
+        eq(r([[[[[[[{}]]]]]]]), "[[[[[[[...]]]]]]]")
+
+    def test_buffer(self):
+        # XXX doesn't test buffers with no b_base or read-write buffers (see
+        # bufferobject.c).  The test is fairly incomplete too.  Sigh.
+        x = buffer('foo')
+        self.failUnless(repr(x).startswith('<read-only buffer for 0x'))
+
+    def test_cell(self):
+        # XXX Hmm? How to get at a cell object?
+        pass
+
+    def test_descriptors(self):
+        eq = self.assertEquals
+        # method descriptors
+        eq(repr(dict.items), "<method 'items' of 'dict' objects>")
+        # XXX member descriptors
+        # XXX attribute descriptors
+        # XXX slot descriptors
+        # static and class methods
+        class C:
+            def foo(cls): pass
+        x = staticmethod(C.foo)
+        self.failUnless(repr(x).startswith('<staticmethod object at 0x'))
+        x = classmethod(C.foo)
+        self.failUnless(repr(x).startswith('<classmethod object at 0x'))
+
+def touch(path, text=''):
+    fp = open(path, 'w')
+    fp.write(text)
+    fp.close()
+
+def zap(actions, dirname, names):
+    for name in names:
+        actions.append(os.path.join(dirname, name))
+
+class LongReprTest(unittest.TestCase):
+    def setUp(self):
+        longname = 'areallylongpackageandmodulenametotestreprtruncation'
+        self.pkgname = os.path.join(longname)
+        self.subpkgname = os.path.join(longname, longname)
+        # Make the package and subpackage
+        os.mkdir(self.pkgname)
+        touch(os.path.join(self.pkgname, '__init__'+os.extsep+'py'))
+        os.mkdir(self.subpkgname)
+        touch(os.path.join(self.subpkgname, '__init__'+os.extsep+'py'))
+        # Remember where we are
+        self.here = os.getcwd()
+        sys.path.insert(0, self.here)
+
+    def tearDown(self):
+        actions = []
+        os.path.walk(self.pkgname, zap, actions)
+        actions.append(self.pkgname)
+        actions.sort()
+        actions.reverse()
+        for p in actions:
+            if os.path.isdir(p):
+                os.rmdir(p)
+            else:
+                os.remove(p)
+        del sys.path[0]
+
+    def test_module(self):
+        eq = self.assertEquals
+        touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py'))
+        from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation
+        eq(repr(areallylongpackageandmodulenametotestreprtruncation),
+           "<module 'areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation' from '%s'>" % areallylongpackageandmodulenametotestreprtruncation.__file__)
+
+    def test_type(self):
+        eq = self.assertEquals
+        touch(os.path.join(self.subpkgname, 'foo'+os.extsep+'py'), '''\
+class foo(object):
+    pass
+''')
+        from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import foo
+        eq(repr(foo.foo),
+               "<class 'areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation.foo.foo'>")
+
+    def test_object(self):
+        # XXX Test the repr of a type with a really long tp_name but with no
+        # tp_repr.  WIBNI we had ::Inline? :)
+        pass
+
+    def test_class(self):
+        touch(os.path.join(self.subpkgname, 'bar'+os.extsep+'py'), '''\
+class bar:
+    pass
+''')
+        from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import bar
+        self.failUnless(repr(bar.bar).startswith(
+            "<class areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation.bar.bar at 0x"))
+
+    def test_instance(self):
+        touch(os.path.join(self.subpkgname, 'baz'+os.extsep+'py'), '''\
+class baz:
+    pass
+''')
+        from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import baz
+        ibaz = baz.baz()
+        self.failUnless(repr(ibaz).startswith(
+            "<areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation.baz.baz instance at 0x"))
+
+    def test_method(self):
+        eq = self.assertEquals
+        touch(os.path.join(self.subpkgname, 'qux'+os.extsep+'py'), '''\
+class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
+    def amethod(self): pass
+''')
+        from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import qux
+        # Unbound methods first
+        eq(repr(qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod),
+        '<unbound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod>')
+        # Bound method next
+        iqux = qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa()
+        self.failUnless(repr(iqux.amethod).startswith(
+            '<bound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod of <areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation.qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa instance at 0x'))
+
+    def test_builtin_function(self):
+        # XXX test built-in functions and methods with really long names
+        pass
+
+class ClassWithRepr:
+    def __init__(self, s):
+        self.s = s
+    def __repr__(self):
+        return "ClassWithLongRepr(%r)" % self.s
+
+
+class ClassWithFailingRepr:
+    def __repr__(self):
+        raise Exception("This should be caught by Repr.repr_instance")
+
+
+def test_main():
+    run_unittest(ReprTests)
+    if os.name != 'mac':
+        run_unittest(LongReprTest)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_rfc822.py b/lib-python/2.2/test/test_rfc822.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_rfc822.py
@@ -0,0 +1,211 @@
+import rfc822
+import sys
+import test_support
+import unittest
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+
+class MessageTestCase(unittest.TestCase):
+    def create_message(self, msg):
+        return rfc822.Message(StringIO(msg))
+
+    def test_get(self):
+        msg = self.create_message(
+            'To: "last, first" <userid at foo.net>\n\ntest\n')
+        self.assert_(msg.get("to") == '"last, first" <userid at foo.net>')
+        self.assert_(msg.get("TO") == '"last, first" <userid at foo.net>')
+        self.assert_(msg.get("No-Such-Header") is None)
+        self.assert_(msg.get("No-Such-Header", "No-Such-Value")
+                     == "No-Such-Value")
+
+    def test_setdefault(self):
+        msg = self.create_message(
+            'To: "last, first" <userid at foo.net>\n\ntest\n')
+        self.assert_(not msg.has_key("New-Header"))
+        self.assert_(msg.setdefault("New-Header", "New-Value") == "New-Value")
+        self.assert_(msg.setdefault("New-Header", "Different-Value")
+                     == "New-Value")
+        self.assert_(msg["new-header"] == "New-Value")
+
+        self.assert_(msg.setdefault("Another-Header") == "")
+        self.assert_(msg["another-header"] == "")
+
+    def check(self, msg, results):
+        """Check addresses and the date."""
+        m = self.create_message(msg)
+        i = 0
+        for n, a in m.getaddrlist('to') + m.getaddrlist('cc'):
+            try:
+                mn, ma = results[i][0], results[i][1]
+            except IndexError:
+                print 'extra parsed address:', repr(n), repr(a)
+                continue
+            i = i + 1
+            if mn == n and ma == a:
+                pass
+            else:
+                print 'not found:', repr(n), repr(a)
+
+        out = m.getdate('date')
+        if out:
+            self.assertEqual(out,
+                             (1999, 1, 13, 23, 57, 35, 0, 0, 0),
+                             "date conversion failed")
+
+
+    # Note: all test cases must have the same date (in various formats),
+    # or no date!
+
+    def test_basic(self):
+        self.check(
+            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
+            'From:    Guido van Rossum <guido at CNRI.Reston.VA.US>\n'
+            'To:      "Guido van\n'
+            '\t : Rossum" <guido at python.org>\n'
+            'Subject: test2\n'
+            '\n'
+            'test2\n',
+            [('Guido van\n\t : Rossum', 'guido at python.org')])
+
+        self.check(
+            'From: Barry <bwarsaw at python.org\n'
+            'To: guido at python.org (Guido: the Barbarian)\n'
+            'Subject: nonsense\n'
+            'Date: Wednesday, January 13 1999 23:57:35 -0500\n'
+            '\n'
+            'test',
+            [('Guido: the Barbarian', 'guido at python.org')])
+
+        self.check(
+            'From: Barry <bwarsaw at python.org\n'
+            'To: guido at python.org (Guido: the Barbarian)\n'
+            'Cc: "Guido: the Madman" <guido at python.org>\n'
+            'Date:  13-Jan-1999 23:57:35 EST\n'
+            '\n'
+            'test',
+            [('Guido: the Barbarian', 'guido at python.org'),
+             ('Guido: the Madman', 'guido at python.org')
+             ])
+
+        self.check(
+            'To: "The monster with\n'
+            '     the very long name: Guido" <guido at python.org>\n'
+            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
+            '\n'
+            'test',
+            [('The monster with\n     the very long name: Guido',
+              'guido at python.org')])
+
+        self.check(
+            'To: "Amit J. Patel" <amitp at Theory.Stanford.EDU>\n'
+            'CC: Mike Fletcher <mfletch at vrtelecom.com>,\n'
+            '        "\'string-sig at python.org\'" <string-sig at python.org>\n'
+            'Cc: fooz at bat.com, bart at toof.com\n'
+            'Cc: goit at lip.com\n'
+            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
+            '\n'
+            'test',
+            [('Amit J. Patel', 'amitp at Theory.Stanford.EDU'),
+             ('Mike Fletcher', 'mfletch at vrtelecom.com'),
+             ("'string-sig at python.org'", 'string-sig at python.org'),
+             ('', 'fooz at bat.com'),
+             ('', 'bart at toof.com'),
+             ('', 'goit at lip.com'),
+             ])
+
+        self.check(
+            'To: Some One <someone at dom.ain>\n'
+            'From: Anudder Persin <subuddy.else at dom.ain>\n'
+            'Date:\n'
+            '\n'
+            'test',
+            [('Some One', 'someone at dom.ain')])
+
+        self.check(
+            'To: person at dom.ain (User J. Person)\n\n',
+            [('User J. Person', 'person at dom.ain')])
+
+    def test_twisted(self):
+        # This one is just twisted.  I don't know what the proper
+        # result should be, but it shouldn't be to infloop, which is
+        # what used to happen!
+        self.check(
+            'To: <[smtp:dd47 at mail.xxx.edu]_at_hmhq at hdq-mdm1-imgout.companay.com>\n'
+            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
+            '\n'
+            'test',
+            [('', ''),
+             ('', 'dd47 at mail.xxx.edu'),
+             ('', '_at_hmhq at hdq-mdm1-imgout.companay.com'),
+             ])
+
+    def test_commas_in_full_name(self):
+        # This exercises the old commas-in-a-full-name bug, which
+        # should be doing the right thing in recent versions of the
+        # module.
+        self.check(
+            'To: "last, first" <userid at foo.net>\n'
+            '\n'
+            'test',
+            [('last, first', 'userid at foo.net')])
+
+    def test_quoted_name(self):
+        self.check(
+            'To: (Comment stuff) "Quoted name"@somewhere.com\n'
+            '\n'
+            'test',
+            [('Comment stuff', '"Quoted name"@somewhere.com')])
+
+    def test_bogus_to_header(self):
+        self.check(
+            'To: :\n'
+            'Cc: goit at lip.com\n'
+            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
+            '\n'
+            'test',
+            [('', 'goit at lip.com')])
+
+    def test_addr_ipquad(self):
+        self.check(
+            'To: guido@[132.151.1.21]\n'
+            '\n'
+            'foo',
+            [('', 'guido@[132.151.1.21]')])
+
+    def test_rfc2822_phrases(self):
+        # RFC 2822 (the update to RFC 822) specifies that dots in phrases are
+        # obsolete syntax, which conforming programs MUST recognize but NEVER
+        # generate (see Section 4.1, Miscellaneous obsolete tokens).  This is a
+        # departure from RFC 822 which did not allow dots in non-quoted
+        # phrases.
+        self.check('To: User J. Person <person at dom.ain>\n\n',
+                   [('User J. Person', 'person at dom.ain')])
+
+    # This takes too long to add to the test suite
+##    def test_an_excrutiatingly_long_address_field(self):
+##        OBSCENELY_LONG_HEADER_MULTIPLIER = 10000
+##        oneaddr = ('Person' * 10) + '@' + ('.'.join(['dom']*10)) + '.com'
+##        addr = ', '.join([oneaddr] * OBSCENELY_LONG_HEADER_MULTIPLIER)
+##        lst = rfc822.AddrlistClass(addr).getaddrlist()
+##        self.assertEqual(len(lst), OBSCENELY_LONG_HEADER_MULTIPLIER)
+
+
+    def test_parseaddr(self):
+        eq = self.assertEqual
+        eq(rfc822.parseaddr('<>'), ('', ''))
+        eq(rfc822.parseaddr('aperson at dom.ain'), ('', 'aperson at dom.ain'))
+        eq(rfc822.parseaddr('bperson at dom.ain (Bea A. Person)'),
+           ('Bea A. Person', 'bperson at dom.ain'))
+        eq(rfc822.parseaddr('Cynthia Person <cperson at dom.ain>'),
+           ('Cynthia Person', 'cperson at dom.ain'))
+
+def test_main():
+    test_support.run_unittest(MessageTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_rgbimg.py b/lib-python/2.2/test/test_rgbimg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_rgbimg.py
@@ -0,0 +1,63 @@
+# Testing rgbimg module
+
+import rgbimg, os, uu
+
+from test_support import verbose, unlink, findfile
+
+class error(Exception):
+    pass
+
+print 'RGBimg test suite:'
+
+def testimg(rgb_file, raw_file):
+    rgb_file = findfile(rgb_file)
+    raw_file = findfile(raw_file)
+    width, height = rgbimg.sizeofimage(rgb_file)
+    rgb = rgbimg.longimagedata(rgb_file)
+    if len(rgb) != width * height * 4:
+        raise error, 'bad image length'
+    raw = open(raw_file, 'rb').read()
+    if rgb != raw:
+        raise error, \
+              'images don\'t match for '+rgb_file+' and '+raw_file
+    for depth in [1, 3, 4]:
+        rgbimg.longstoimage(rgb, width, height, depth, '@.rgb')
+    os.unlink('@.rgb')
+
+table = [
+    ('testrgb'+os.extsep+'uue', 'test'+os.extsep+'rgb'),
+    ('testimg'+os.extsep+'uue', 'test'+os.extsep+'rawimg'),
+    ('testimgr'+os.extsep+'uue', 'test'+os.extsep+'rawimg'+os.extsep+'rev'),
+    ]
+for source, target in table:
+    source = findfile(source)
+    target = findfile(target)
+    if verbose:
+        print "uudecoding", source, "->", target, "..."
+    uu.decode(source, target)
+
+if verbose:
+    print "testing..."
+
+ttob = rgbimg.ttob(0)
+if ttob != 0:
+    raise error, 'ttob should start out as zero'
+
+testimg('test'+os.extsep+'rgb', 'test'+os.extsep+'rawimg')
+
+ttob = rgbimg.ttob(1)
+if ttob != 0:
+    raise error, 'ttob should be zero'
+
+testimg('test'+os.extsep+'rgb', 'test'+os.extsep+'rawimg'+os.extsep+'rev')
+
+ttob = rgbimg.ttob(0)
+if ttob != 1:
+    raise error, 'ttob should be one'
+
+ttob = rgbimg.ttob(0)
+if ttob != 0:
+    raise error, 'ttob should be zero'
+
+for source, target in table:
+    unlink(findfile(target))
diff --git a/lib-python/2.2/test/test_richcmp.py b/lib-python/2.2/test/test_richcmp.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_richcmp.py
@@ -0,0 +1,261 @@
+# Tests for rich comparisons
+
+from test_support import TestFailed, verify, verbose
+
+class Number:
+
+    def __init__(self, x):
+        self.x = x
+
+    def __lt__(self, other):
+        return self.x < other
+
+    def __le__(self, other):
+        return self.x <= other
+
+    def __eq__(self, other):
+        return self.x == other
+
+    def __ne__(self, other):
+        return self.x != other
+
+    def __gt__(self, other):
+        return self.x > other
+
+    def __ge__(self, other):
+        return self.x >= other
+
+    def __cmp__(self, other):
+        raise TestFailed, "Number.__cmp__() should not be called"
+
+    def __repr__(self):
+        return "Number(%s)" % repr(self.x)
+
+class Vector:
+
+    def __init__(self, data):
+        self.data = data
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, i):
+        return self.data[i]
+
+    def __setitem__(self, i, v):
+        self.data[i] = v
+
+    def __hash__(self):
+        raise TypeError, "Vectors cannot be hashed"
+
+    def __nonzero__(self):
+        raise TypeError, "Vectors cannot be used in Boolean contexts"
+
+    def __cmp__(self, other):
+        raise TestFailed, "Vector.__cmp__() should not be called"
+
+    def __repr__(self):
+        return "Vector(%s)" % repr(self.data)
+
+    def __lt__(self, other):
+        return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
+
+    def __le__(self, other):
+        return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
+
+    def __eq__(self, other):
+        return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
+
+    def __ne__(self, other):
+        return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
+
+    def __gt__(self, other):
+        return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
+
+    def __ge__(self, other):
+        return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
+
+    def __cast(self, other):
+        if isinstance(other, Vector):
+            other = other.data
+        if len(self.data) != len(other):
+            raise ValueError, "Cannot compare vectors of different length"
+        return other
+
+operators = "<", "<=", "==", "!=", ">", ">="
+opmap = {}
+for op in operators:
+    opmap[op] = eval("lambda a, b: a %s b" % op)
+
+def testvector():
+    a = Vector(range(2))
+    b = Vector(range(3))
+    for op in operators:
+        try:
+            opmap[op](a, b)
+        except ValueError:
+            pass
+        else:
+            raise TestFailed, "a %s b for different length should fail" % op
+    a = Vector(range(5))
+    b = Vector(5 * [2])
+    for op in operators:
+        print "%23s %-2s %-23s -> %s" % (a, op, b, opmap[op](a, b))
+        print "%23s %-2s %-23s -> %s" % (a, op, b.data, opmap[op](a, b.data))
+        print "%23s %-2s %-23s -> %s" % (a.data, op, b, opmap[op](a.data, b))
+        try:
+            if opmap[op](a, b):
+                raise TestFailed, "a %s b shouldn't be true" % op
+            else:
+                raise TestFailed, "a %s b shouldn't be false" % op
+        except TypeError:
+            pass
+
+def testop(a, b, op):
+    try:
+        ax = a.x
+    except AttributeError:
+        ax = a
+    try:
+        bx = b.x
+    except AttributeError:
+        bx = b
+    opfunc = opmap[op]
+    realoutcome = opfunc(ax, bx)
+    testoutcome = opfunc(a, b)
+    if realoutcome != testoutcome:
+        print "Error for", a, op, b, ": expected", realoutcome,
+        print "but got", testoutcome
+##    else:
+##        print a, op, b, "-->", testoutcome # and "true" or "false"
+
+def testit(a, b):
+    testop(a, b, "<")
+    testop(a, b, "<=")
+    testop(a, b, "==")
+    testop(a, b, "!=")
+    testop(a, b, ">")
+    testop(a, b, ">=")
+
+def basic():
+    for a in range(3):
+        for b in range(3):
+            testit(Number(a), Number(b))
+            testit(a, Number(b))
+            testit(Number(a), b)
+
+def tabulate(c1=Number, c2=Number):
+    for op in operators:
+        opfunc = opmap[op]
+        print
+        print "operator:", op
+        print
+        print "%9s" % "",
+        for b in range(3):
+            b = c2(b)
+            print "| %9s" % b,
+        print "|"
+        print '----------+-' * 4
+        for a in range(3):
+            a = c1(a)
+            print "%9s" % a,
+            for b in range(3):
+                b = c2(b)
+                print "| %9s" % opfunc(a, b),
+            print "|"
+        print '----------+-' * 4
+    print
+    print '*' * 50
+
+def misbehavin():
+    class Misb:
+        def __lt__(self, other): return 0
+        def __gt__(self, other): return 0
+        def __eq__(self, other): return 0
+        def __le__(self, other): raise TestFailed, "This shouldn't happen"
+        def __ge__(self, other): raise TestFailed, "This shouldn't happen"
+        def __ne__(self, other): raise TestFailed, "This shouldn't happen"
+        def __cmp__(self, other): raise RuntimeError, "expected"
+    a = Misb()
+    b = Misb()
+    verify((a<b) == 0)
+    verify((a==b) == 0)
+    verify((a>b) == 0)
+    try:
+        print cmp(a, b)
+    except RuntimeError:
+        pass
+    else:
+        raise TestFailed, "cmp(Misb(), Misb()) didn't raise RuntimeError"
+
+def recursion():
+    from UserList import UserList
+    a = UserList(); a.append(a)
+    b = UserList(); b.append(b)
+    def check(s, a=a, b=b):
+        if verbose:
+            print "check", s
+        try:
+            if not eval(s):
+                raise TestFailed, s + " was false but expected to be true"
+        except RuntimeError, msg:
+            raise TestFailed, str(msg)
+    if verbose:
+        print "recursion tests: a=%s, b=%s" % (a, b)
+    check('a==b')
+    check('not a!=b')
+    a.append(1)
+    if verbose:
+        print "recursion tests: a=%s, b=%s" % (a, b)
+    check('a!=b')
+    check('not a==b')
+    b.append(0)
+    if verbose:
+        print "recursion tests: a=%s, b=%s" % (a, b)
+    check('a!=b')
+    check('not a==b')
+    a[1] = -1
+    if verbose:
+        print "recursion tests: a=%s, b=%s" % (a, b)
+    check('a!=b')
+    check('not a==b')
+    if verbose: print "recursion tests ok"
+
+def dicts():
+    # Verify that __eq__ and __ne__ work for dicts even if the keys and
+    # values don't support anything other than __eq__ and __ne__.  Complex
+    # numbers are a fine example of that.
+    import random
+    imag1a = {}
+    for i in range(50):
+        imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
+    items = imag1a.items()
+    random.shuffle(items)
+    imag1b = {}
+    for k, v in items:
+        imag1b[k] = v
+    imag2 = imag1b.copy()
+    imag2[k] = v + 1.0
+    verify(imag1a == imag1a, "imag1a == imag1a should have worked")
+    verify(imag1a == imag1b, "imag1a == imag1b should have worked")
+    verify(imag2 == imag2, "imag2 == imag2 should have worked")
+    verify(imag1a != imag2, "imag1a != imag2 should have worked")
+    for op in "<", "<=", ">", ">=":
+        try:
+            eval("imag1a %s imag2" % op)
+        except TypeError:
+            pass
+        else:
+            raise TestFailed("expected TypeError from imag1a %s imag2" % op)
+
+def main():
+    basic()
+    tabulate()
+    tabulate(c1=int)
+    tabulate(c2=int)
+    testvector()
+    misbehavin()
+    recursion()
+    dicts()
+
+main()
diff --git a/lib-python/2.2/test/test_rotor.py b/lib-python/2.2/test/test_rotor.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_rotor.py
@@ -0,0 +1,28 @@
+import rotor
+
+r = rotor.newrotor("you'll never guess this")
+r = rotor.newrotor("you'll never guess this", 12)
+
+A = 'spam and eggs'
+B = 'cheese shop'
+
+a = r.encrypt(A)
+print `a`
+b = r.encryptmore(B)
+print `b`
+
+A1 = r.decrypt(a)
+print A1
+if A1 != A:
+    print 'decrypt failed'
+
+B1 = r.decryptmore(b)
+print B1
+if B1 != B:
+    print 'decryptmore failed'
+
+try:
+    r.setkey()
+except TypeError:
+    pass
+r.setkey('you guessed it!')
diff --git a/lib-python/2.2/test/test_sax.py b/lib-python/2.2/test/test_sax.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sax.py
@@ -0,0 +1,707 @@
+# regression test for SAX 2.0
+# $Id$
+
+from xml.sax import make_parser, ContentHandler, \
+                    SAXException, SAXReaderNotAvailable, SAXParseException
+try:
+    make_parser()
+except SAXReaderNotAvailable:
+    # don't try to test this module if we cannot create a parser
+    raise ImportError("no XML parsers available")
+from xml.sax.saxutils import XMLGenerator, escape, quoteattr, XMLFilterBase
+from xml.sax.expatreader import create_parser
+from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
+from cStringIO import StringIO
+from test_support import verify, verbose, TestFailed, findfile
+import os
+
+# ===== Utilities
+
+tests = 0
+fails = 0
+
+def confirm(outcome, name):
+    global tests, fails
+
+    tests = tests + 1
+    if outcome:
+        print "Passed", name
+    else:
+        print "Failed", name
+        fails = fails + 1
+
+def test_make_parser2():
+    try:
+        # Creating parsers several times in a row should succeed.
+        # Testing this because there have been failures of this kind
+        # before.
+        from xml.sax import make_parser
+        p = make_parser()
+        from xml.sax import make_parser
+        p = make_parser()
+        from xml.sax import make_parser
+        p = make_parser()
+        from xml.sax import make_parser
+        p = make_parser()
+        from xml.sax import make_parser
+        p = make_parser()
+        from xml.sax import make_parser
+        p = make_parser()
+    except:
+        return 0
+    else:
+        return p
+
+
+# ===========================================================================
+#
+#   saxutils tests
+#
+# ===========================================================================
+
+# ===== escape
+
+def test_escape_basic():
+    return escape("Donald Duck & Co") == "Donald Duck &amp; Co"
+
+def test_escape_all():
+    return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;"
+
+def test_escape_extra():
+    return escape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg"
+
+# ===== quoteattr
+
+def test_quoteattr_basic():
+    return quoteattr("Donald Duck & Co") == '"Donald Duck &amp; Co"'
+
+def test_single_quoteattr():
+    return (quoteattr('Includes "double" quotes')
+            == '\'Includes "double" quotes\'')
+
+def test_double_quoteattr():
+    return (quoteattr("Includes 'single' quotes")
+            == "\"Includes 'single' quotes\"")
+
+def test_single_double_quoteattr():
+    return (quoteattr("Includes 'single' and \"double\" quotes")
+            == "\"Includes 'single' and &quot;double&quot; quotes\"")
+
+# ===== make_parser
+
+def test_make_parser():
+    try:
+        # Creating a parser should succeed - it should fall back
+        # to the expatreader
+        p = make_parser(['xml.parsers.no_such_parser'])
+    except:
+        return 0
+    else:
+        return p
+
+
+# ===== XMLGenerator
+
+start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
+
+def test_xmlgen_basic():
+    result = StringIO()
+    gen = XMLGenerator(result)
+    gen.startDocument()
+    gen.startElement("doc", {})
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start + "<doc></doc>"
+
+def test_xmlgen_content():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.startElement("doc", {})
+    gen.characters("huhei")
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start + "<doc>huhei</doc>"
+
+def test_xmlgen_pi():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.processingInstruction("test", "data")
+    gen.startElement("doc", {})
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start + "<?test data?><doc></doc>"
+
+def test_xmlgen_content_escape():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.startElement("doc", {})
+    gen.characters("<huhei&")
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start + "<doc>&lt;huhei&amp;</doc>"
+
+def test_xmlgen_attr_escape():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.startElement("doc", {"a": '"'})
+    gen.startElement("e", {"a": "'"})
+    gen.endElement("e")
+    gen.startElement("e", {"a": "'\""})
+    gen.endElement("e")
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start \
+           + "<doc a='\"'><e a=\"'\"></e><e a=\"'&quot;\"></e></doc>"
+
+def test_xmlgen_ignorable():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.startElement("doc", {})
+    gen.ignorableWhitespace(" ")
+    gen.endElement("doc")
+    gen.endDocument()
+
+    return result.getvalue() == start + "<doc> </doc>"
+
+ns_uri = "http://www.python.org/xml-ns/saxtest/"
+
+def test_xmlgen_ns():
+    result = StringIO()
+    gen = XMLGenerator(result)
+
+    gen.startDocument()
+    gen.startPrefixMapping("ns1", ns_uri)
+    gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
+    # add an unqualified name
+    gen.startElementNS((None, "udoc"), None, {})
+    gen.endElementNS((None, "udoc"), None)
+    gen.endElementNS((ns_uri, "doc"), "ns1:doc")
+    gen.endPrefixMapping("ns1")
+    gen.endDocument()
+
+    return result.getvalue() == start + \
+           ('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
+                                         ns_uri)
+
+# ===== XMLFilterBase
+
+def test_filter_basic():
+    result = StringIO()
+    gen = XMLGenerator(result)
+    filter = XMLFilterBase()
+    filter.setContentHandler(gen)
+
+    filter.startDocument()
+    filter.startElement("doc", {})
+    filter.characters("content")
+    filter.ignorableWhitespace(" ")
+    filter.endElement("doc")
+    filter.endDocument()
+
+    return result.getvalue() == start + "<doc>content </doc>"
+
+# ===========================================================================
+#
+#   expatreader tests
+#
+# ===========================================================================
+
+# ===== XMLReader support
+
+def test_expat_file():
+    parser = create_parser()
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+
+    parser.setContentHandler(xmlgen)
+    parser.parse(open(findfile("test"+os.extsep+"xml")))
+
+    return result.getvalue() == xml_test_out
+
+# ===== DTDHandler support
+
+class TestDTDHandler:
+
+    def __init__(self):
+        self._notations = []
+        self._entities  = []
+
+    def notationDecl(self, name, publicId, systemId):
+        self._notations.append((name, publicId, systemId))
+
+    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
+        self._entities.append((name, publicId, systemId, ndata))
+
+def test_expat_dtdhandler():
+    parser = create_parser()
+    handler = TestDTDHandler()
+    parser.setDTDHandler(handler)
+
+    parser.feed('<!DOCTYPE doc [\n')
+    parser.feed('  <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
+    parser.feed('  <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
+    parser.feed(']>\n')
+    parser.feed('<doc></doc>')
+    parser.close()
+
+    return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \
+           handler._entities == [("img", None, "expat.gif", "GIF")]
+
+# ===== EntityResolver support
+
+class TestEntityResolver:
+
+    def resolveEntity(self, publicId, systemId):
+        inpsrc = InputSource()
+        inpsrc.setByteStream(StringIO("<entity/>"))
+        return inpsrc
+
+def test_expat_entityresolver():
+    parser = create_parser()
+    parser.setEntityResolver(TestEntityResolver())
+    result = StringIO()
+    parser.setContentHandler(XMLGenerator(result))
+
+    parser.feed('<!DOCTYPE doc [\n')
+    parser.feed('  <!ENTITY test SYSTEM "whatever">\n')
+    parser.feed(']>\n')
+    parser.feed('<doc>&test;</doc>')
+    parser.close()
+
+    return result.getvalue() == start + "<doc><entity></entity></doc>"
+
+# ===== Attributes support
+
+class AttrGatherer(ContentHandler):
+
+    def startElement(self, name, attrs):
+        self._attrs = attrs
+
+    def startElementNS(self, name, qname, attrs):
+        self._attrs = attrs
+
+def test_expat_attrs_empty():
+    parser = create_parser()
+    gather = AttrGatherer()
+    parser.setContentHandler(gather)
+
+    parser.feed("<doc/>")
+    parser.close()
+
+    return verify_empty_attrs(gather._attrs)
+
+def test_expat_attrs_wattr():
+    parser = create_parser()
+    gather = AttrGatherer()
+    parser.setContentHandler(gather)
+
+    parser.feed("<doc attr='val'/>")
+    parser.close()
+
+    return verify_attrs_wattr(gather._attrs)
+
+def test_expat_nsattrs_empty():
+    parser = create_parser(1)
+    gather = AttrGatherer()
+    parser.setContentHandler(gather)
+
+    parser.feed("<doc/>")
+    parser.close()
+
+    return verify_empty_nsattrs(gather._attrs)
+
+def test_expat_nsattrs_wattr():
+    parser = create_parser(1)
+    gather = AttrGatherer()
+    parser.setContentHandler(gather)
+
+    parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
+    parser.close()
+
+    attrs = gather._attrs
+
+    return attrs.getLength() == 1 and \
+           attrs.getNames() == [(ns_uri, "attr")] and \
+           attrs.getQNames() == ["ns:attr"] and \
+           len(attrs) == 1 and \
+           attrs.has_key((ns_uri, "attr")) and \
+           attrs.keys() == [(ns_uri, "attr")] and \
+           attrs.get((ns_uri, "attr")) == "val" and \
+           attrs.get((ns_uri, "attr"), 25) == "val" and \
+           attrs.items() == [((ns_uri, "attr"), "val")] and \
+           attrs.values() == ["val"] and \
+           attrs.getValue((ns_uri, "attr")) == "val" and \
+           attrs[(ns_uri, "attr")] == "val"
+
+class ElemGatherer(ContentHandler):
+    def __init__(self):
+        self.events = []
+    def startElementNS(self, pair, qname, attrs):
+        self.events.append(('start', pair, qname))
+    def endElementNS(self, pair, qname):
+        self.events.append(('end', pair, qname))
+
+def check_expat_nsdecl(text, expected):
+    parser = create_parser(1)
+    handler = ElemGatherer()
+    parser.setContentHandler(handler)
+    parser.feed(text)
+    parser.close()
+    if verbose and handler.events != expected:
+        from pprint import pprint
+        print "Expected:"
+        pprint(expected)
+        print "Received:"
+        pprint(handler.events)
+    return handler.events == expected
+
+def test_expat_nsdecl_single():
+    return check_expat_nsdecl(
+        "<abc xmlns='http://xml.python.org/'></abc>", [
+            ("start", ("http://xml.python.org/", "abc"), "abc"),
+            ("end", ("http://xml.python.org/", "abc"), "abc"),
+            ])
+
+def test_expat_nsdecl_pair_same():
+    # XXX This shows where xml.sax.expatreader can use the wrong
+    # prefix when more than one is in scope for a particular URI.
+    # We still want to exercise this code since previous versions got
+    # the namespace handling wrong in more severe ways (exceptions
+    # that should not have happened).
+    return check_expat_nsdecl(
+        "<abc xmlns='http://xml.python.org/'"
+        "     xmlns:foo='http://xml.python.org/'>"
+        "<foo:def/>"
+        "<ghi/>"
+        "</abc>", [
+            ("start", ("http://xml.python.org/", "abc"), "foo:abc"),
+            ("start", ("http://xml.python.org/", "def"), "foo:def"),
+            ("end", ("http://xml.python.org/", "def"), "foo:def"),
+            ("start", ("http://xml.python.org/", "ghi"), "foo:ghi"),
+            ("end", ("http://xml.python.org/", "ghi"), "foo:ghi"),
+            ("end", ("http://xml.python.org/", "abc"), "foo:abc"),
+            ])
+
+def test_expat_nsdecl_pair_diff():
+    return check_expat_nsdecl(
+        "<abc xmlns='http://xml.python.org/1'"
+        "     xmlns:foo='http://xml.python.org/2'>"
+        "<foo:def/>"
+        "<ghi/>"
+        "</abc>", [
+            ("start", ("http://xml.python.org/1", "abc"), "abc"),
+            ("start", ("http://xml.python.org/2", "def"), "foo:def"),
+            ("end", ("http://xml.python.org/2", "def"), "foo:def"),
+            ("start", ("http://xml.python.org/1", "ghi"), "ghi"),
+            ("end", ("http://xml.python.org/1", "ghi"), "ghi"),
+            ("end", ("http://xml.python.org/1", "abc"), "abc"),
+            ])
+
+# ===== InputSource support
+
+xml_test_out = open(findfile("test"+os.extsep+"xml"+os.extsep+"out")).read()
+
+def test_expat_inpsource_filename():
+    parser = create_parser()
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+
+    parser.setContentHandler(xmlgen)
+    parser.parse(findfile("test"+os.extsep+"xml"))
+
+    return result.getvalue() == xml_test_out
+
+def test_expat_inpsource_sysid():
+    parser = create_parser()
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+
+    parser.setContentHandler(xmlgen)
+    parser.parse(InputSource(findfile("test"+os.extsep+"xml")))
+
+    return result.getvalue() == xml_test_out
+
+def test_expat_inpsource_stream():
+    parser = create_parser()
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+
+    parser.setContentHandler(xmlgen)
+    inpsrc = InputSource()
+    inpsrc.setByteStream(open(findfile("test"+os.extsep+"xml")))
+    parser.parse(inpsrc)
+
+    return result.getvalue() == xml_test_out
+
+# ===== IncrementalParser support
+
+def test_expat_incremental():
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+    parser = create_parser()
+    parser.setContentHandler(xmlgen)
+
+    parser.feed("<doc>")
+    parser.feed("</doc>")
+    parser.close()
+
+    return result.getvalue() == start + "<doc></doc>"
+
+def test_expat_incremental_reset():
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+    parser = create_parser()
+    parser.setContentHandler(xmlgen)
+
+    parser.feed("<doc>")
+    parser.feed("text")
+
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+    parser.setContentHandler(xmlgen)
+    parser.reset()
+
+    parser.feed("<doc>")
+    parser.feed("text")
+    parser.feed("</doc>")
+    parser.close()
+
+    return result.getvalue() == start + "<doc>text</doc>"
+
+# ===== Locator support
+
+def test_expat_locator_noinfo():
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+    parser = create_parser()
+    parser.setContentHandler(xmlgen)
+
+    parser.feed("<doc>")
+    parser.feed("</doc>")
+    parser.close()
+
+    return parser.getSystemId() is None and \
+           parser.getPublicId() is None and \
+           parser.getLineNumber() == 1
+
+def test_expat_locator_withinfo():
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+    parser = create_parser()
+    parser.setContentHandler(xmlgen)
+    parser.parse(findfile("test.xml"))
+
+    return parser.getSystemId() == findfile("test.xml") and \
+           parser.getPublicId() is None
+
+
+# ===========================================================================
+#
+#   error reporting
+#
+# ===========================================================================
+
+def test_expat_inpsource_location():
+    parser = create_parser()
+    parser.setContentHandler(ContentHandler()) # do nothing
+    source = InputSource()
+    source.setByteStream(StringIO("<foo bar foobar>"))   #ill-formed
+    name = "a file name"
+    source.setSystemId(name)
+    try:
+        parser.parse(source)
+    except SAXException, e:
+        return e.getSystemId() == name
+
+def test_expat_incomplete():
+    parser = create_parser()
+    parser.setContentHandler(ContentHandler()) # do nothing
+    try:
+        parser.parse(StringIO("<foo>"))
+    except SAXParseException:
+        return 1 # ok, error found
+    else:
+        return 0
+
+
+# ===========================================================================
+#
+#   xmlreader tests
+#
+# ===========================================================================
+
+# ===== AttributesImpl
+
+def verify_empty_attrs(attrs):
+    try:
+        attrs.getValue("attr")
+        gvk = 0
+    except KeyError:
+        gvk = 1
+
+    try:
+        attrs.getValueByQName("attr")
+        gvqk = 0
+    except KeyError:
+        gvqk = 1
+
+    try:
+        attrs.getNameByQName("attr")
+        gnqk = 0
+    except KeyError:
+        gnqk = 1
+
+    try:
+        attrs.getQNameByName("attr")
+        gqnk = 0
+    except KeyError:
+        gqnk = 1
+
+    try:
+        attrs["attr"]
+        gik = 0
+    except KeyError:
+        gik = 1
+
+    return attrs.getLength() == 0 and \
+           attrs.getNames() == [] and \
+           attrs.getQNames() == [] and \
+           len(attrs) == 0 and \
+           not attrs.has_key("attr") and \
+           attrs.keys() == [] and \
+           attrs.get("attrs") is None and \
+           attrs.get("attrs", 25) == 25 and \
+           attrs.items() == [] and \
+           attrs.values() == [] and \
+           gvk and gvqk and gnqk and gik and gqnk
+
+def verify_attrs_wattr(attrs):
+    return attrs.getLength() == 1 and \
+           attrs.getNames() == ["attr"] and \
+           attrs.getQNames() == ["attr"] and \
+           len(attrs) == 1 and \
+           attrs.has_key("attr") and \
+           attrs.keys() == ["attr"] and \
+           attrs.get("attr") == "val" and \
+           attrs.get("attr", 25) == "val" and \
+           attrs.items() == [("attr", "val")] and \
+           attrs.values() == ["val"] and \
+           attrs.getValue("attr") == "val" and \
+           attrs.getValueByQName("attr") == "val" and \
+           attrs.getNameByQName("attr") == "attr" and \
+           attrs["attr"] == "val" and \
+           attrs.getQNameByName("attr") == "attr"
+
+def test_attrs_empty():
+    return verify_empty_attrs(AttributesImpl({}))
+
+def test_attrs_wattr():
+    return verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
+
+# ===== AttributesImpl
+
+def verify_empty_nsattrs(attrs):
+    try:
+        attrs.getValue((ns_uri, "attr"))
+        gvk = 0
+    except KeyError:
+        gvk = 1
+
+    try:
+        attrs.getValueByQName("ns:attr")
+        gvqk = 0
+    except KeyError:
+        gvqk = 1
+
+    try:
+        attrs.getNameByQName("ns:attr")
+        gnqk = 0
+    except KeyError:
+        gnqk = 1
+
+    try:
+        attrs.getQNameByName((ns_uri, "attr"))
+        gqnk = 0
+    except KeyError:
+        gqnk = 1
+
+    try:
+        attrs[(ns_uri, "attr")]
+        gik = 0
+    except KeyError:
+        gik = 1
+
+    return attrs.getLength() == 0 and \
+           attrs.getNames() == [] and \
+           attrs.getQNames() == [] and \
+           len(attrs) == 0 and \
+           not attrs.has_key((ns_uri, "attr")) and \
+           attrs.keys() == [] and \
+           attrs.get((ns_uri, "attr")) is None and \
+           attrs.get((ns_uri, "attr"), 25) == 25 and \
+           attrs.items() == [] and \
+           attrs.values() == [] and \
+           gvk and gvqk and gnqk and gik and gqnk
+
+def test_nsattrs_empty():
+    return verify_empty_nsattrs(AttributesNSImpl({}, {}))
+
+def test_nsattrs_wattr():
+    attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
+                             {(ns_uri, "attr") : "ns:attr"})
+
+    return attrs.getLength() == 1 and \
+           attrs.getNames() == [(ns_uri, "attr")] and \
+           attrs.getQNames() == ["ns:attr"] and \
+           len(attrs) == 1 and \
+           attrs.has_key((ns_uri, "attr")) and \
+           attrs.keys() == [(ns_uri, "attr")] and \
+           attrs.get((ns_uri, "attr")) == "val" and \
+           attrs.get((ns_uri, "attr"), 25) == "val" and \
+           attrs.items() == [((ns_uri, "attr"), "val")] and \
+           attrs.values() == ["val"] and \
+           attrs.getValue((ns_uri, "attr")) == "val" and \
+           attrs.getValueByQName("ns:attr") == "val" and \
+           attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \
+           attrs[(ns_uri, "attr")] == "val" and \
+           attrs.getQNameByName((ns_uri, "attr")) == "ns:attr"
+
+
+# ===== Main program
+
+def make_test_output():
+    parser = create_parser()
+    result = StringIO()
+    xmlgen = XMLGenerator(result)
+
+    parser.setContentHandler(xmlgen)
+    parser.parse(findfile("test"+os.extsep+"xml"))
+
+    outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
+    outf.write(result.getvalue())
+    outf.close()
+
+items = locals().items()
+items.sort()
+for (name, value) in items:
+    if name[ : 5] == "test_":
+        confirm(value(), name)
+
+print "%d tests, %d failures" % (tests, fails)
+if fails != 0:
+    raise TestFailed, "%d of %d tests failed" % (fails, tests)
diff --git a/lib-python/2.2/test/test_scope.py b/lib-python/2.2/test/test_scope.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_scope.py
@@ -0,0 +1,524 @@
+from test_support import verify, TestFailed, check_syntax
+
+import warnings
+warnings.filterwarnings("ignore", r"import \*", SyntaxWarning, "<string>")
+
+print "1. simple nesting"
+
+def make_adder(x):
+    def adder(y):
+        return x + y
+    return adder
+
+inc = make_adder(1)
+plus10 = make_adder(10)
+
+verify(inc(1) == 2)
+verify(plus10(-2) == 8)
+
+print "2. extra nesting"
+
+def make_adder2(x):
+    def extra(): # check freevars passing through non-use scopes
+        def adder(y):
+            return x + y
+        return adder
+    return extra()
+
+inc = make_adder2(1)
+plus10 = make_adder2(10)
+
+verify(inc(1) == 2)
+verify(plus10(-2) == 8)
+
+print "3. simple nesting + rebinding"
+
+def make_adder3(x):
+    def adder(y):
+        return x + y
+    x = x + 1 # check tracking of assignment to x in defining scope
+    return adder
+
+inc = make_adder3(0)
+plus10 = make_adder3(9)
+
+verify(inc(1) == 2)
+verify(plus10(-2) == 8)
+
+print "4. nesting with global but no free"
+
+def make_adder4(): # XXX add exta level of indirection
+    def nest():
+        def nest():
+            def adder(y):
+                return global_x + y # check that plain old globals work
+            return adder
+        return nest()
+    return nest()
+
+global_x = 1
+adder = make_adder4()
+verify(adder(1) == 2)
+
+global_x = 10
+verify(adder(-2) == 8)
+
+print "5. nesting through class"
+
+def make_adder5(x):
+    class Adder:
+        def __call__(self, y):
+            return x + y
+    return Adder()
+
+inc = make_adder5(1)
+plus10 = make_adder5(10)
+
+verify(inc(1) == 2)
+verify(plus10(-2) == 8)
+
+print "6. nesting plus free ref to global"
+
+def make_adder6(x):
+    global global_nest_x
+    def adder(y):
+        return global_nest_x + y
+    global_nest_x = x
+    return adder
+
+inc = make_adder6(1)
+plus10 = make_adder6(10)
+
+verify(inc(1) == 11) # there's only one global
+verify(plus10(-2) == 8)
+
+print "7. nearest enclosing scope"
+
+def f(x):
+    def g(y):
+        x = 42 # check that this masks binding in f()
+        def h(z):
+            return x + z
+        return h
+    return g(2)
+
+test_func = f(10)
+verify(test_func(5) == 47)
+
+print "8. mixed freevars and cellvars"
+
+def identity(x):
+    return x
+
+def f(x, y, z):
+    def g(a, b, c):
+        a = a + x # 3
+        def h():
+            # z * (4 + 9)
+            # 3 * 13
+            return identity(z * (b + y))
+        y = c + z # 9
+        return h
+    return g
+
+g = f(1, 2, 3)
+h = g(2, 4, 6)
+verify(h() == 39)
+
+print "9. free variable in method"
+
+def test():
+    method_and_var = "var"
+    class Test:
+        def method_and_var(self):
+            return "method"
+        def test(self):
+            return method_and_var
+        def actual_global(self):
+            return str("global")
+        def str(self):
+            return str(self)
+    return Test()
+
+t = test()
+verify(t.test() == "var")
+verify(t.method_and_var() == "method")
+verify(t.actual_global() == "global")
+
+method_and_var = "var"
+class Test:
+    # this class is not nested, so the rules are different
+    def method_and_var(self):
+        return "method"
+    def test(self):
+        return method_and_var
+    def actual_global(self):
+        return str("global")
+    def str(self):
+        return str(self)
+
+t = Test()
+verify(t.test() == "var")
+verify(t.method_and_var() == "method")
+verify(t.actual_global() == "global")
+
+print "10. recursion"
+
+def f(x):
+    def fact(n):
+        if n == 0:
+            return 1
+        else:
+            return n * fact(n - 1)
+    if x >= 0:
+        return fact(x)
+    else:
+        raise ValueError, "x must be >= 0"
+
+verify(f(6) == 720)
+
+
+print "11. unoptimized namespaces"
+
+check_syntax("""\
+def unoptimized_clash1(strip):
+    def f(s):
+        from string import *
+        return strip(s) # ambiguity: free or local
+    return f
+""")
+
+check_syntax("""\
+def unoptimized_clash2():
+    from string import *
+    def f(s):
+        return strip(s) # ambiguity: global or local
+    return f
+""")
+
+check_syntax("""\
+def unoptimized_clash2():
+    from string import *
+    def g():
+        def f(s):
+            return strip(s) # ambiguity: global or local
+        return f
+""")
+
+# XXX could allow this for exec with const argument, but what's the point
+check_syntax("""\
+def error(y):
+    exec "a = 1"
+    def f(x):
+        return x + y
+    return f
+""")
+
+check_syntax("""\
+def f(x):
+    def g():
+        return x
+    del x # can't del name
+""")
+
+check_syntax("""\
+def f():
+    def g():
+         from string import *
+         return strip # global or local?
+""")
+
+# and verify a few cases that should work
+
+exec """
+def noproblem1():
+    from string import *
+    f = lambda x:x
+
+def noproblem2():
+    from string import *
+    def f(x):
+        return x + 1
+
+def noproblem3():
+    from string import *
+    def f(x):
+        global y
+        y = x
+"""
+
+print "12. lambdas"
+
+f1 = lambda x: lambda y: x + y
+inc = f1(1)
+plus10 = f1(10)
+verify(inc(1) == 2)
+verify(plus10(5) == 15)
+
+f2 = lambda x: (lambda : lambda y: x + y)()
+inc = f2(1)
+plus10 = f2(10)
+verify(inc(1) == 2)
+verify(plus10(5) == 15)
+
+f3 = lambda x: lambda y: global_x + y
+global_x = 1
+inc = f3(None)
+verify(inc(2) == 3)
+
+f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
+g = f8(1, 2, 3)
+h = g(2, 4, 6)
+verify(h() == 18)
+
+print "13. UnboundLocal"
+
+def errorInOuter():
+    print y
+    def inner():
+        return y
+    y = 1
+
+def errorInInner():
+    def inner():
+        return y
+    inner()
+    y = 1
+
+try:
+    errorInOuter()
+except UnboundLocalError:
+    pass
+else:
+    raise TestFailed
+
+try:
+    errorInInner()
+except NameError:
+    pass
+else:
+    raise TestFailed
+
+print "14. complex definitions"
+
+def makeReturner(*lst):
+    def returner():
+        return lst
+    return returner
+
+verify(makeReturner(1,2,3)() == (1,2,3))
+
+def makeReturner2(**kwargs):
+    def returner():
+        return kwargs
+    return returner
+
+verify(makeReturner2(a=11)()['a'] == 11)
+
+def makeAddPair((a, b)):
+    def addPair((c, d)):
+        return (a + c, b + d)
+    return addPair
+
+verify(makeAddPair((1, 2))((100, 200)) == (101,202))
+
+print "15. scope of global statements"
+# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
+
+# I
+x = 7
+def f():
+    x = 1
+    def g():
+        global x
+        def i():
+            def h():
+                return x
+            return h()
+        return i()
+    return g()
+verify(f() == 7)
+verify(x == 7)
+
+# II
+x = 7
+def f():
+    x = 1
+    def g():
+        x = 2
+        def i():
+            def h():
+                return x
+            return h()
+        return i()
+    return g()
+verify(f() == 2)
+verify(x == 7)
+
+# III
+x = 7
+def f():
+    x = 1
+    def g():
+        global x
+        x = 2
+        def i():
+            def h():
+                return x
+            return h()
+        return i()
+    return g()
+verify(f() == 2)
+verify(x == 2)
+
+# IV
+x = 7
+def f():
+    x = 3
+    def g():
+        global x
+        x = 2
+        def i():
+            def h():
+                return x
+            return h()
+        return i()
+    return g()
+verify(f() == 2)
+verify(x == 2)
+
+print "16. check leaks"
+
+class Foo:
+    count = 0
+
+    def __init__(self):
+        Foo.count += 1
+
+    def __del__(self):
+        Foo.count -= 1
+
+def f1():
+    x = Foo()
+    def f2():
+        return x
+    f2()
+
+for i in range(100):
+    f1()
+
+verify(Foo.count == 0)
+
+print "17. class and global"
+
+def test(x):
+    class Foo:
+        global x
+        def __call__(self, y):
+            return x + y
+    return Foo()
+
+x = 0
+verify(test(6)(2) == 8)
+x = -1
+verify(test(3)(2) == 5)
+
+print "18. verify that locals() works"
+
+def f(x):
+    def g(y):
+        def h(z):
+            return y + z
+        w = x + y
+        y += 3
+        return locals()
+    return g
+
+d = f(2)(4)
+verify(d.has_key('h'))
+del d['h']
+verify(d == {'x': 2, 'y': 7, 'w': 6})
+
+print "19. var is bound and free in class"
+
+def f(x):
+    class C:
+        def m(self):
+            return x
+        a = x
+    return C
+
+inst = f(3)()
+verify(inst.a == inst.m())
+
+print "20. interaction with trace function"
+
+import sys
+def tracer(a,b,c):
+    return tracer
+
+def adaptgetter(name, klass, getter):
+    kind, des = getter
+    if kind == 1:       # AV happens when stepping from this line to next
+        if des == "":
+            des = "_%s__%s" % (klass.__name__, name)
+        return lambda obj: getattr(obj, des)
+
+class TestClass:
+    pass
+
+sys.settrace(tracer)
+adaptgetter("foo", TestClass, (1, ""))
+sys.settrace(None)
+
+try: sys.settrace()
+except TypeError: pass
+else: raise TestFailed, 'sys.settrace() did not raise TypeError'
+
+print "20. eval and exec with free variables"
+
+def f(x):
+    return lambda: x + 1
+
+g = f(3)
+try:
+    eval(g.func_code)
+except TypeError:
+    pass
+else:
+    print "eval() should have failed, because code contained free vars"
+
+try:
+    exec g.func_code
+except TypeError:
+    pass
+else:
+    print "exec should have failed, because code contained free vars"
+
+print "21. list comprehension with local variables"
+
+try:
+    print bad
+except NameError:
+    pass
+else:
+    print "bad should not be defined"
+
+def x():
+    [bad for s in 'a b' for bad in s.split()]
+
+x()
+try:
+    print bad
+except NameError:
+    pass
+
+print "22. eval with free variables"
+
+def f(x):
+    def g():
+        x
+        eval("x + 1")
+    return g
+
+f(4)()
diff --git a/lib-python/2.2/test/test_select.py b/lib-python/2.2/test/test_select.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_select.py
@@ -0,0 +1,62 @@
+# Testing select module
+from test_support import verbose
+import select
+import os
+
+# test some known error conditions
+try:
+    rfd, wfd, xfd = select.select(1, 2, 3)
+except TypeError:
+    pass
+else:
+    print 'expected TypeError exception not raised'
+
+class Nope:
+    pass
+
+class Almost:
+    def fileno(self):
+        return 'fileno'
+
+try:
+    rfd, wfd, xfd = select.select([Nope()], [], [])
+except TypeError:
+    pass
+else:
+    print 'expected TypeError exception not raised'
+
+try:
+    rfd, wfd, xfd = select.select([Almost()], [], [])
+except TypeError:
+    pass
+else:
+    print 'expected TypeError exception not raised'
+
+
+def test():
+    import sys
+    if sys.platform[:3] in ('win', 'mac', 'os2'):
+        if verbose:
+            print "Can't test select easily on", sys.platform
+        return
+    cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
+    p = os.popen(cmd, 'r')
+    for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
+        if verbose:
+            print 'timeout =', tout
+        rfd, wfd, xfd = select.select([p], [], [], tout)
+        if (rfd, wfd, xfd) == ([], [], []):
+            continue
+        if (rfd, wfd, xfd) == ([p], [], []):
+            line = p.readline()
+            if verbose:
+                print `line`
+            if not line:
+                if verbose:
+                    print 'EOF'
+                break
+            continue
+        print 'Unexpected return values from select():', rfd, wfd, xfd
+    p.close()
+
+test()
diff --git a/lib-python/2.2/test/test_sgmllib.py b/lib-python/2.2/test/test_sgmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sgmllib.py
@@ -0,0 +1,314 @@
+import pprint
+import sgmllib
+import test_support
+import unittest
+
+
+class EventCollector(sgmllib.SGMLParser):
+
+    def __init__(self):
+        self.events = []
+        self.append = self.events.append
+        sgmllib.SGMLParser.__init__(self)
+
+    def get_events(self):
+        # Normalize the list of events so that buffer artefacts don't
+        # separate runs of contiguous characters.
+        L = []
+        prevtype = None
+        for event in self.events:
+            type = event[0]
+            if type == prevtype == "data":
+                L[-1] = ("data", L[-1][1] + event[1])
+            else:
+                L.append(event)
+            prevtype = type
+        self.events = L
+        return L
+
+    # structure markup
+
+    def unknown_starttag(self, tag, attrs):
+        self.append(("starttag", tag, attrs))
+
+    def unknown_endtag(self, tag):
+        self.append(("endtag", tag))
+
+    # all other markup
+
+    def handle_comment(self, data):
+        self.append(("comment", data))
+
+    def handle_charref(self, data):
+        self.append(("charref", data))
+
+    def handle_data(self, data):
+        self.append(("data", data))
+
+    def handle_decl(self, decl):
+        self.append(("decl", decl))
+
+    def handle_entityref(self, data):
+        self.append(("entityref", data))
+
+    def handle_pi(self, data):
+        self.append(("pi", data))
+
+    def unknown_decl(self, decl):
+        self.append(("unknown decl", decl))
+
+
+class CDATAEventCollector(EventCollector):
+    def start_cdata(self, attrs):
+        self.append(("starttag", "cdata", attrs))
+        self.setliteral()
+
+
+class SGMLParserTestCase(unittest.TestCase):
+
+    collector = EventCollector
+
+    def get_events(self, source):
+        parser = self.collector()
+        try:
+            for s in source:
+                parser.feed(s)
+            parser.close()
+        except:
+            #self.events = parser.events
+            raise
+        return parser.get_events()
+
+    def check_events(self, source, expected_events):
+        try:
+            events = self.get_events(source)
+        except:
+            import sys
+            #print >>sys.stderr, pprint.pformat(self.events)
+            raise
+        if events != expected_events:
+            self.fail("received events did not match expected events\n"
+                      "Expected:\n" + pprint.pformat(expected_events) +
+                      "\nReceived:\n" + pprint.pformat(events))
+
+    def check_parse_error(self, source):
+        parser = EventCollector()
+        try:
+            parser.feed(source)
+            parser.close()
+        except sgmllib.SGMLParseError:
+            pass
+        else:
+            self.fail("expected SGMLParseError for %r\nReceived:\n%s"
+                      % (source, pprint.pformat(parser.get_events())))
+
+    def test_doctype_decl_internal(self):
+        inside = """\
+DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
+             SYSTEM 'http://www.w3.org/TR/html401/strict.dtd' [
+  <!ELEMENT html - O EMPTY>
+  <!ATTLIST html
+      version CDATA #IMPLIED
+      profile CDATA 'DublinCore'>
+  <!NOTATION datatype SYSTEM 'http://xml.python.org/notations/python-module'>
+  <!ENTITY myEntity 'internal parsed entity'>
+  <!ENTITY anEntity SYSTEM 'http://xml.python.org/entities/something.xml'>
+  <!ENTITY % paramEntity 'name|name|name'>
+  %paramEntity;
+  <!-- comment -->
+]"""
+        self.check_events(["<!%s>" % inside], [
+            ("decl", inside),
+            ])
+
+    def test_doctype_decl_external(self):
+        inside = "DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'"
+        self.check_events("<!%s>" % inside, [
+            ("decl", inside),
+            ])
+
+    def test_underscore_in_attrname(self):
+        # SF bug #436621
+        """Make sure attribute names with underscores are accepted"""
+        self.check_events("<a has_under _under>", [
+            ("starttag", "a", [("has_under", "has_under"),
+                               ("_under", "_under")]),
+            ])
+
+    def test_underscore_in_tagname(self):
+        # SF bug #436621
+        """Make sure tag names with underscores are accepted"""
+        self.check_events("<has_under></has_under>", [
+            ("starttag", "has_under", []),
+            ("endtag", "has_under"),
+            ])
+
+    def test_quotes_in_unquoted_attrs(self):
+        # SF bug #436621
+        """Be sure quotes in unquoted attributes are made part of the value"""
+        self.check_events("<a href=foo'bar\"baz>", [
+            ("starttag", "a", [("href", "foo'bar\"baz")]),
+            ])
+
+    def test_xhtml_empty_tag(self):
+        """Handling of XHTML-style empty start tags"""
+        self.check_events("<br />text<i></i>", [
+            ("starttag", "br", []),
+            ("data", "text"),
+            ("starttag", "i", []),
+            ("endtag", "i"),
+            ])
+
+    def test_processing_instruction_only(self):
+        self.check_events("<?processing instruction>", [
+            ("pi", "processing instruction"),
+            ])
+
+    def test_bad_nesting(self):
+        self.check_events("<a><b></a></b>", [
+            ("starttag", "a", []),
+            ("starttag", "b", []),
+            ("endtag", "a"),
+            ("endtag", "b"),
+            ])
+
+    def test_bare_ampersands(self):
+        self.check_events("this text & contains & ampersands &", [
+            ("data", "this text & contains & ampersands &"),
+            ])
+
+    def test_bare_pointy_brackets(self):
+        self.check_events("this < text > contains < bare>pointy< brackets", [
+            ("data", "this < text > contains < bare>pointy< brackets"),
+            ])
+
+    def test_attr_syntax(self):
+        output = [
+          ("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", "e")])
+          ]
+        self.check_events("""<a b='v' c="v" d=v e>""", output)
+        self.check_events("""<a  b = 'v' c = "v" d = v e>""", output)
+        self.check_events("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
+        self.check_events("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
+
+    def test_attr_values(self):
+        self.check_events("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
+                        [("starttag", "a", [("b", "xxx\n\txxx"),
+                                            ("c", "yyy\t\nyyy"),
+                                            ("d", "\txyz\n")])
+                         ])
+        self.check_events("""<a b='' c="">""", [
+            ("starttag", "a", [("b", ""), ("c", "")]),
+            ])
+
+    def test_attr_funky_names(self):
+        self.check_events("""<a a.b='v' c:d=v e-f=v>""", [
+            ("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
+            ])
+
+    def test_illegal_declarations(self):
+        s = 'abc<!spacer type="block" height="25">def'
+        self.check_events(s, [
+            ("data", "abc"),
+            ("unknown decl", 'spacer type="block" height="25"'),
+            ("data", "def"),
+            ])
+
+    def test_weird_starttags(self):
+        self.check_events("<a<a>", [
+            ("starttag", "a", []),
+            ("starttag", "a", []),
+            ])
+        self.check_events("</a<a>", [
+            ("endtag", "a"),
+            ("starttag", "a", []),
+            ])
+
+    def test_declaration_junk_chars(self):
+        self.check_parse_error("<!DOCTYPE foo $ >")
+
+    def test_get_starttag_text(self):
+        s = """<foobar   \n   one="1"\ttwo=2   >"""
+        self.check_events(s, [
+            ("starttag", "foobar", [("one", "1"), ("two", "2")]),
+            ])
+
+    def test_cdata_content(self):
+        s = ("<cdata> <!-- not a comment --> &not-an-entity-ref; </cdata>"
+             "<notcdata> <!-- comment --> </notcdata>")
+        self.collector = CDATAEventCollector
+        self.check_events(s, [
+            ("starttag", "cdata", []),
+            ("data", " <!-- not a comment --> &not-an-entity-ref; "),
+            ("endtag", "cdata"),
+            ("starttag", "notcdata", []),
+            ("data", " "),
+            ("comment", " comment "),
+            ("data", " "),
+            ("endtag", "notcdata"),
+            ])
+        s = """<cdata> <not a='start tag'> </cdata>"""
+        self.check_events(s, [
+            ("starttag", "cdata", []),
+            ("data", " <not a='start tag'> "),
+            ("endtag", "cdata"),
+            ])
+
+    def test_illegal_declarations(self):
+        s = 'abc<!spacer type="block" height="25">def'
+        self.check_events(s, [
+            ("data", "abc"),
+            ("unknown decl", 'spacer type="block" height="25"'),
+            ("data", "def"),
+            ])
+
+    # XXX These tests have been disabled by prefixing their names with
+    # an underscore.  The first two exercise outstanding bugs in the
+    # sgmllib module, and the third exhibits questionable behavior
+    # that needs to be carefully considered before changing it.
+
+    def _test_starttag_end_boundary(self):
+        self.check_events("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
+        self.check_events("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
+
+    def _test_buffer_artefacts(self):
+        output = [("starttag", "a", [("b", "<")])]
+        self.check_events(["<a b='<'>"], output)
+        self.check_events(["<a ", "b='<'>"], output)
+        self.check_events(["<a b", "='<'>"], output)
+        self.check_events(["<a b=", "'<'>"], output)
+        self.check_events(["<a b='<", "'>"], output)
+        self.check_events(["<a b='<'", ">"], output)
+
+        output = [("starttag", "a", [("b", ">")])]
+        self.check_events(["<a b='>'>"], output)
+        self.check_events(["<a ", "b='>'>"], output)
+        self.check_events(["<a b", "='>'>"], output)
+        self.check_events(["<a b=", "'>'>"], output)
+        self.check_events(["<a b='>", "'>"], output)
+        self.check_events(["<a b='>'", ">"], output)
+
+    def _test_starttag_junk_chars(self):
+        self.check_parse_error("<")
+        self.check_parse_error("<>")
+        self.check_parse_error("</$>")
+        self.check_parse_error("</")
+        self.check_parse_error("</a")
+        self.check_parse_error("<$")
+        self.check_parse_error("<$>")
+        self.check_parse_error("<!")
+        self.check_parse_error("<a $>")
+        self.check_parse_error("<a")
+        self.check_parse_error("<a foo='bar'")
+        self.check_parse_error("<a foo='bar")
+        self.check_parse_error("<a foo='>'")
+        self.check_parse_error("<a foo='>")
+        self.check_parse_error("<a foo=>")
+
+
+def test_main():
+    test_support.run_unittest(SGMLParserTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_sha.py b/lib-python/2.2/test/test_sha.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sha.py
@@ -0,0 +1,52 @@
+# Testing sha module (NIST's Secure Hash Algorithm)
+
+# use the three examples from Federal Information Processing Standards
+# Publication 180-1, Secure Hash Standard,  1995 April 17
+# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
+
+import sha
+import unittest
+from test import test_support
+
+
+class SHATestCase(unittest.TestCase):
+    def check(self, data, digest):
+        # Check digest matches the expected value
+        obj = sha.new(data)
+        computed = obj.hexdigest()
+        self.assert_(computed == digest)
+
+        # Verify that the value doesn't change between two consecutive
+        # digest operations.
+        computed_again = obj.hexdigest()
+        self.assert_(computed == computed_again)
+
+        # Check hexdigest() output matches digest()'s output
+        digest = obj.digest()
+        hexd = ""
+        for c in digest:
+            hexd += '%02x' % ord(c)
+        self.assert_(computed == hexd)
+
+    def test_case_1(self):
+        self.check("abc",
+                   "a9993e364706816aba3e25717850c26c9cd0d89d")
+
+    def test_case_2(self):
+        self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+                   "84983e441c3bd26ebaae4aa1f95129e5e54670f1")
+
+    def test_case_3(self):
+        self.check("a" * 1000000,
+                   "34aa973cd4c4daa4f61eeb2bdbad27316534016f")
+
+    def test_case_4(self):
+        self.check(chr(0xAA) * 80,
+                   '4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
+
+def test_main():
+    test_support.run_unittest(SHATestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_signal.py b/lib-python/2.2/test/test_signal.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_signal.py
@@ -0,0 +1,66 @@
+# Test the signal module
+from test_support import verbose, TestSkipped
+import signal
+import os
+import sys
+
+if sys.platform[:3] in ('win', 'os2') or sys.platform=='riscos':
+    raise TestSkipped, "Can't test signal on %s" % sys.platform
+
+if verbose:
+    x = '-x'
+else:
+    x = '+x'
+pid = os.getpid()
+
+# Shell script that will send us asynchronous signals
+script = """
+ (
+        set %(x)s
+        sleep 2
+        kill -5 %(pid)d
+        sleep 2
+        kill -2 %(pid)d
+        sleep 2
+        kill -3 %(pid)d
+ ) &
+""" % vars()
+
+def handlerA(*args):
+    if verbose:
+        print "handlerA", args
+
+HandlerBCalled = "HandlerBCalled"       # Exception
+
+def handlerB(*args):
+    if verbose:
+        print "handlerB", args
+    raise HandlerBCalled, args
+
+signal.alarm(20)                        # Entire test lasts at most 20 sec.
+signal.signal(5, handlerA)
+signal.signal(2, handlerB)
+signal.signal(3, signal.SIG_IGN)
+signal.signal(signal.SIGALRM, signal.default_int_handler)
+
+os.system(script)
+
+print "starting pause() loop..."
+
+try:
+    while 1:
+        if verbose:
+            print "call pause()..."
+        try:
+            signal.pause()
+            if verbose:
+                print "pause() returned"
+        except HandlerBCalled:
+            if verbose:
+                print "HandlerBCalled exception caught"
+            else:
+                pass
+
+except KeyboardInterrupt:
+    if verbose:
+        print "KeyboardInterrupt (assume the alarm() went off)"
diff --git a/lib-python/2.2/test/test_socket.py b/lib-python/2.2/test/test_socket.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_socket.py
@@ -0,0 +1,170 @@
+# Not tested:
+#       socket.fromfd()
+#       sktobj.getsockopt()
+#       sktobj.recvfrom()
+#       sktobj.sendto()
+#       sktobj.setblocking()
+#       sktobj.setsockopt()
+#       sktobj.shutdown()
+
+
+from test_support import verbose, TestFailed
+import socket
+import os
+import time
+
+def missing_ok(str):
+    try:
+        getattr(socket, str)
+    except AttributeError:
+        pass
+
+try: raise socket.error
+except socket.error: print "socket.error"
+
+socket.AF_INET
+
+socket.SOCK_STREAM
+socket.SOCK_DGRAM
+socket.SOCK_RAW
+socket.SOCK_RDM
+socket.SOCK_SEQPACKET
+
+for optional in ("AF_UNIX",
+
+                 "SO_DEBUG", "SO_ACCEPTCONN", "SO_REUSEADDR", "SO_KEEPALIVE",
+                 "SO_DONTROUTE", "SO_BROADCAST", "SO_USELOOPBACK", "SO_LINGER",
+                 "SO_OOBINLINE", "SO_REUSEPORT", "SO_SNDBUF", "SO_RCVBUF",
+                 "SO_SNDLOWAT", "SO_RCVLOWAT", "SO_SNDTIMEO", "SO_RCVTIMEO",
+                 "SO_ERROR", "SO_TYPE", "SOMAXCONN",
+
+                 "MSG_OOB", "MSG_PEEK", "MSG_DONTROUTE", "MSG_EOR",
+                 "MSG_TRUNC", "MSG_CTRUNC", "MSG_WAITALL", "MSG_BTAG",
+                 "MSG_ETAG",
+
+                 "SOL_SOCKET",
+
+                 "IPPROTO_IP", "IPPROTO_ICMP", "IPPROTO_IGMP",
+                 "IPPROTO_GGP", "IPPROTO_TCP", "IPPROTO_EGP",
+                 "IPPROTO_PUP", "IPPROTO_UDP", "IPPROTO_IDP",
+                 "IPPROTO_HELLO", "IPPROTO_ND", "IPPROTO_TP",
+                 "IPPROTO_XTP", "IPPROTO_EON", "IPPROTO_BIP",
+                 "IPPROTO_RAW", "IPPROTO_MAX",
+
+                 "IPPORT_RESERVED", "IPPORT_USERRESERVED",
+
+                 "INADDR_ANY", "INADDR_BROADCAST", "INADDR_LOOPBACK",
+                 "INADDR_UNSPEC_GROUP", "INADDR_ALLHOSTS_GROUP",
+                 "INADDR_MAX_LOCAL_GROUP", "INADDR_NONE",
+
+                 "IP_OPTIONS", "IP_HDRINCL", "IP_TOS", "IP_TTL",
+                 "IP_RECVOPTS", "IP_RECVRETOPTS", "IP_RECVDSTADDR",
+                 "IP_RETOPTS", "IP_MULTICAST_IF", "IP_MULTICAST_TTL",
+                 "IP_MULTICAST_LOOP", "IP_ADD_MEMBERSHIP",
+                 "IP_DROP_MEMBERSHIP",
+                 ):
+    missing_ok(optional)
+
+socktype = socket.SocketType
+hostname = socket.gethostname()
+ip = socket.gethostbyname(hostname)
+hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
+all_host_names = [hname] + aliases
+
+if verbose:
+    print hostname
+    print ip
+    print hname, aliases, ipaddrs
+    print all_host_names
+
+for name in all_host_names:
+    if name.find('.'):
+        break
+else:
+    print 'FQDN not found'
+
+if hasattr(socket, 'getservbyname'):
+    # try a few protocols - not everyone has telnet enabled
+    class Found(Exception): pass
+    try:
+        for proto in ("telnet", "ssh", "www", "ftp"):
+            for how in ("tcp", "udp"):
+                try:
+                    socket.getservbyname(proto, how)
+                    raise Found
+                except socket.error:
+                    pass
+    except Found:
+        pass
+    else:
+        print "socket.error", "socket.getservbyname failed"
+
+import sys
+if not sys.platform.startswith('java'):
+    try:
+        # On some versions, this loses a reference
+        orig = sys.getrefcount(__name__)
+        socket.getnameinfo(__name__,0)
+    except SystemError:
+        if sys.getrefcount(__name__) <> orig:
+            raise TestFailed,"socket.getnameinfo loses a reference"
+
+try:
+    # On some versions, this crashes the interpreter.
+    socket.getnameinfo(('x', 0, 0, 0), 0)
+except socket.error:
+    pass
+
+canfork = hasattr(os, 'fork')
+try:
+    PORT = 50007
+    if not canfork or os.fork():
+        # parent is server
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.bind(("127.0.0.1", PORT))
+        s.listen(1)
+        if verbose:
+            print 'parent accepting'
+        if canfork:
+            conn, addr = s.accept()
+            if verbose:
+                print 'connected by', addr
+            # couple of interesting tests while we've got a live socket
+            f = conn.fileno()
+            if verbose:
+                print 'fileno:', f
+            p = conn.getpeername()
+            if verbose:
+                print 'peer:', p
+            n = conn.getsockname()
+            if verbose:
+                print 'sockname:', n
+            f = conn.makefile()
+            if verbose:
+                print 'file obj:', f
+            while 1:
+                data = conn.recv(1024)
+                if not data:
+                    break
+                if verbose:
+                    print 'received:', data
+                conn.sendall(data)
+            conn.close()
+    else:
+        try:
+            # child is client
+            time.sleep(5)
+            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            if verbose:
+                print 'child connecting'
+            s.connect(("127.0.0.1", PORT))
+            msg = 'socket test'
+            s.send(msg)
+            data = s.recv(1024)
+            if msg != data:
+                print 'parent/client mismatch'
+            s.close()
+        finally:
+            os._exit(1)
+except socket.error, msg:
+    raise TestFailed, msg
diff --git a/lib-python/2.2/test/test_socket_ssl.py b/lib-python/2.2/test/test_socket_ssl.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_socket_ssl.py
@@ -0,0 +1,27 @@
+# Test just the SSL support in the socket module, in a moderately bogus way.
+
+import test_support
+
+# Optionally test SSL support.  This currently requires the 'network' resource
+# as given on the regrtest command line.  If not available, nothing after this
+# line will be executed.
+test_support.requires('network')
+
+import socket
+if not hasattr(socket, "ssl"):
+    raise test_support.TestSkipped("socket module has no ssl support")
+
+import urllib
+
+socket.RAND_status()
+try:
+    socket.RAND_egd(1)
+except TypeError:
+    pass
+else:
+    print "didn't raise TypeError"
+socket.RAND_add("this is a random string", 75.0)
+
+f = urllib.urlopen('https://sf.net')
+buf = f.read()
+f.close()
diff --git a/lib-python/2.2/test/test_socketserver.py b/lib-python/2.2/test/test_socketserver.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_socketserver.py
@@ -0,0 +1,165 @@
+# Test suite for SocketServer.py
+
+import test_support
+from test_support import verbose, verify, TESTFN, TestSkipped
+test_support.requires('network')
+
+from SocketServer import *
+import socket
+import select
+import time
+import threading
+import os
+
+NREQ = 3
+DELAY = 0.5
+
+class MyMixinHandler:
+    def handle(self):
+        time.sleep(DELAY)
+        line = self.rfile.readline()
+        time.sleep(DELAY)
+        self.wfile.write(line)
+
+class MyStreamHandler(MyMixinHandler, StreamRequestHandler):
+    pass
+
+class MyDatagramHandler(MyMixinHandler, DatagramRequestHandler):
+    pass
+
+class MyMixinServer:
+    def serve_a_few(self):
+        for i in range(NREQ):
+            self.handle_request()
+    def handle_error(self, request, client_address):
+        self.close_request(request)
+        self.server_close()
+        raise
+
+teststring = "hello world\n"
+
+def receive(sock, n, timeout=20):
+    r, w, x = select.select([sock], [], [], timeout)
+    if sock in r:
+        return sock.recv(n)
+    else:
+        raise RuntimeError, "timed out on %s" % `sock`
+
+def testdgram(proto, addr):
+    s = socket.socket(proto, socket.SOCK_DGRAM)
+    s.sendto(teststring, addr)
+    buf = data = receive(s, 100)
+    while data and '\n' not in buf:
+        data = receive(s, 100)
+        buf += data
+    verify(buf == teststring)
+    s.close()
+
+def teststream(proto, addr):
+    s = socket.socket(proto, socket.SOCK_STREAM)
+    s.connect(addr)
+    s.sendall(teststring)
+    buf = data = receive(s, 100)
+    while data and '\n' not in buf:
+        data = receive(s, 100)
+        buf += data
+    verify(buf == teststring)
+    s.close()
+
+class ServerThread(threading.Thread):
+    def __init__(self, addr, svrcls, hdlrcls):
+        threading.Thread.__init__(self)
+        self.__addr = addr
+        self.__svrcls = svrcls
+        self.__hdlrcls = hdlrcls
+    def run(self):
+        class svrcls(MyMixinServer, self.__svrcls):
+            pass
+        if verbose: print "thread: creating server"
+        svr = svrcls(self.__addr, self.__hdlrcls)
+        if verbose: print "thread: serving three times"
+        svr.serve_a_few()
+        if verbose: print "thread: done"
+
+seed = 0
+def pickport():
+    global seed
+    seed += 1
+    return 10000 + (os.getpid() % 1000)*10 + seed
+
+host = "localhost"
+testfiles = []
+def pickaddr(proto):
+    if proto == socket.AF_INET:
+        return (host, pickport())
+    else:
+        fn = TESTFN + str(pickport())
+        testfiles.append(fn)
+        return fn
+
+def cleanup():
+    for fn in testfiles:
+        try:
+            os.remove(fn)
+        except os.error:
+            pass
+    testfiles[:] = []
+
+def testloop(proto, servers, hdlrcls, testfunc):
+    for svrcls in servers:
+        addr = pickaddr(proto)
+        if verbose:
+            print "ADDR =", addr
+            print "CLASS =", svrcls
+        t = ServerThread(addr, svrcls, hdlrcls)
+        if verbose: print "server created"
+        t.start()
+        if verbose: print "server running"
+        for i in range(NREQ):
+            time.sleep(DELAY)
+            if verbose: print "test client", i
+            testfunc(proto, addr)
+        if verbose: print "waiting for server"
+        t.join()
+        if verbose: print "done"
+
+tcpservers = [TCPServer, ThreadingTCPServer]
+if hasattr(os, 'fork'):
+    tcpservers.append(ForkingTCPServer)
+udpservers = [UDPServer, ThreadingUDPServer]
+if hasattr(os, 'fork'):
+    udpservers.append(ForkingUDPServer)
+
+if not hasattr(socket, 'AF_UNIX'):
+    streamservers = []
+    dgramservers = []
+else:
+    class ForkingUnixStreamServer(ForkingMixIn, UnixStreamServer): pass
+    streamservers = [UnixStreamServer, ThreadingUnixStreamServer,
+                     ForkingUnixStreamServer]
+    class ForkingUnixDatagramServer(ForkingMixIn, UnixDatagramServer): pass
+    dgramservers = [UnixDatagramServer, ThreadingUnixDatagramServer,
+                    ForkingUnixDatagramServer]
+
+def testall():
+    testloop(socket.AF_INET, tcpservers, MyStreamHandler, teststream)
+    testloop(socket.AF_INET, udpservers, MyDatagramHandler, testdgram)
+    if hasattr(socket, 'AF_UNIX'):
+        testloop(socket.AF_UNIX, streamservers, MyStreamHandler, teststream)
+        # Alas, on Linux (at least) recvfrom() doesn't return a meaningful
+        # client address so this cannot work:
+        ##testloop(socket.AF_UNIX, dgramservers, MyDatagramHandler, testdgram)
+
+def test_main():
+    import imp
+    if imp.lock_held():
+        # If the import lock is held, the threads will hang.
+        raise TestSkipped("can't run when import lock is held")
+
+    try:
+        testall()
+    finally:
+        cleanup()
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_sre.py b/lib-python/2.2/test/test_sre.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sre.py
@@ -0,0 +1,434 @@
+# SRE test harness for the Python regression suite
+
+# this is based on test_re.py, but uses a test function instead
+# of all those asserts
+
+import sys
+sys.path=['.']+sys.path
+
+from test_support import verbose, TestFailed, have_unicode
+import sre
+import sys, os, string, traceback
+
+#
+# test support
+
+def test(expression, result, exception=None):
+    try:
+        r = eval(expression)
+    except:
+        if exception:
+            if not isinstance(sys.exc_value, exception):
+                print expression, "FAILED"
+                # display name, not actual value
+                if exception is sre.error:
+                    print "expected", "sre.error"
+                else:
+                    print "expected", exception.__name__
+                print "got", sys.exc_type.__name__, str(sys.exc_value)
+        else:
+            print expression, "FAILED"
+            traceback.print_exc(file=sys.stdout)
+    else:
+        if exception:
+            print expression, "FAILED"
+            if exception is sre.error:
+                print "expected", "sre.error"
+            else:
+                print "expected", exception.__name__
+            print "got result", repr(r)
+        else:
+            if r != result:
+                print expression, "FAILED"
+                print "expected", repr(result)
+                print "got result", repr(r)
+
+if verbose:
+    print 'Running tests on character literals'
+
+for i in [0, 8, 16, 32, 64, 127, 128, 255]:
+    test(r"""sre.match(r"\%03o" % i, chr(i)) is not None""", 1)
+    test(r"""sre.match(r"\%03o0" % i, chr(i)+"0") is not None""", 1)
+    test(r"""sre.match(r"\%03o8" % i, chr(i)+"8") is not None""", 1)
+    test(r"""sre.match(r"\x%02x" % i, chr(i)) is not None""", 1)
+    test(r"""sre.match(r"\x%02x0" % i, chr(i)+"0") is not None""", 1)
+    test(r"""sre.match(r"\x%02xz" % i, chr(i)+"z") is not None""", 1)
+test(r"""sre.match("\911", "")""", None, sre.error)
+
+#
+# Misc tests from Tim Peters' re.doc
+
+if verbose:
+    print 'Running tests on sre.search and sre.match'
+
+test(r"""sre.search(r'x*', 'axx').span(0)""", (0, 0))
+test(r"""sre.search(r'x*', 'axx').span()""", (0, 0))
+test(r"""sre.search(r'x+', 'axx').span(0)""", (1, 3))
+test(r"""sre.search(r'x+', 'axx').span()""", (1, 3))
+test(r"""sre.search(r'x', 'aaa')""", None)
+
+test(r"""sre.match(r'a*', 'xxx').span(0)""", (0, 0))
+test(r"""sre.match(r'a*', 'xxx').span()""", (0, 0))
+test(r"""sre.match(r'x*', 'xxxa').span(0)""", (0, 3))
+test(r"""sre.match(r'x*', 'xxxa').span()""", (0, 3))
+test(r"""sre.match(r'a+', 'xxx')""", None)
+
+# bug 113254
+test(r"""sre.match(r'(a)|(b)', 'b').start(1)""", -1)
+test(r"""sre.match(r'(a)|(b)', 'b').end(1)""", -1)
+test(r"""sre.match(r'(a)|(b)', 'b').span(1)""", (-1, -1))
+
+# bug 612074
+pat=u"["+sre.escape(u"\u2039")+u"]"
+test(r"""sre.compile(pat) and 1""", 1, None)
+
+if verbose:
+    print 'Running tests on sre.sub'
+
+test(r"""sre.sub(r"(?i)b+", "x", "bbbb BBBB")""", 'x x')
+
+def bump_num(matchobj):
+    int_value = int(matchobj.group(0))
+    return str(int_value + 1)
+
+test(r"""sre.sub(r'\d+', bump_num, '08.2 -2 23x99y')""", '9.3 -3 24x100y')
+test(r"""sre.sub(r'\d+', bump_num, '08.2 -2 23x99y', 3)""", '9.3 -3 23x99y')
+
+test(r"""sre.sub(r'.', lambda m: r"\n", 'x')""", '\\n')
+test(r"""sre.sub(r'.', r"\n", 'x')""", '\n')
+
+s = r"\1\1"
+
+test(r"""sre.sub(r'(.)', s, 'x')""", 'xx')
+test(r"""sre.sub(r'(.)', sre.escape(s), 'x')""", s)
+test(r"""sre.sub(r'(.)', lambda m: s, 'x')""", s)
+
+test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<a>', 'xx')""", 'xxxx')
+test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<1>', 'xx')""", 'xxxx')
+test(r"""sre.sub(r'(?P<unk>x)', '\g<unk>\g<unk>', 'xx')""", 'xxxx')
+test(r"""sre.sub(r'(?P<unk>x)', '\g<1>\g<1>', 'xx')""", 'xxxx')
+
+# bug 449964: fails for group followed by other escape
+test(r"""sre.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx')""", 'xx\bxx\b')
+
+test(r"""sre.sub(r'a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a')""", '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
+test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", '\t\n\v\r\f\a')
+test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
+
+test(r"""sre.sub(r'^\s*', 'X', 'test')""", 'Xtest')
+
+# qualified sub
+test(r"""sre.sub(r'a', 'b', 'aaaaa')""", 'bbbbb')
+test(r"""sre.sub(r'a', 'b', 'aaaaa', 1)""", 'baaaa')
+
+# bug 114660
+test(r"""sre.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello  there')""", 'hello there')
+
+# Test for sub() on escaped characters, see SF bug #449000
+test(r"""sre.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n')
+test(r"""sre.sub('\r\n', r'\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n')
+test(r"""sre.sub(r'\r\n', '\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n')
+test(r"""sre.sub('\r\n', '\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n')
+
+# Test for empty sub() behaviour, see SF bug #462270
+test(r"""sre.sub('x*', '-', 'abxd')""", '-a-b-d-')
+test(r"""sre.sub('x+', '-', 'abxd')""", 'ab-d')
+
+if verbose:
+    print 'Running tests on symbolic references'
+
+test(r"""sre.sub(r'(?P<a>x)', '\g<a', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)', '\g<', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)', '\g', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)', '\g<a a>', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)', '\g<1a1>', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)', '\g<ab>', 'xx')""", None, IndexError)
+test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')""", None, sre.error)
+test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\\2', 'xx')""", None, sre.error)
+
+if verbose:
+    print 'Running tests on sre.subn'
+
+test(r"""sre.subn(r"(?i)b+", "x", "bbbb BBBB")""", ('x x', 2))
+test(r"""sre.subn(r"b+", "x", "bbbb BBBB")""", ('x BBBB', 1))
+test(r"""sre.subn(r"b+", "x", "xyz")""", ('xyz', 0))
+test(r"""sre.subn(r"b*", "x", "xyz")""", ('xxxyxzx', 4))
+test(r"""sre.subn(r"b*", "x", "xyz", 2)""", ('xxxyz', 2))
+
+if verbose:
+    print 'Running tests on sre.split'
+
+test(r"""sre.split(r":", ":a:b::c")""", ['', 'a', 'b', '', 'c'])
+test(r"""sre.split(r":+", ":a:b:::")""", ['', 'a', 'b', ''])
+test(r"""sre.split(r":*", ":a:b::c")""", ['', 'a', 'b', 'c'])
+test(r"""sre.split(r"(:*)", ":a:b::c")""", ['', ':', 'a', ':', 'b', '::', 'c'])
+test(r"""sre.split(r"(?::*)", ":a:b::c")""", ['', 'a', 'b', 'c'])
+test(r"""sre.split(r"(:)*", ":a:b::c")""", ['', ':', 'a', ':', 'b', ':', 'c'])
+test(r"""sre.split(r"([b:]+)", ":a:b::c")""", ['', ':', 'a', ':b::', 'c'])
+test(r"""sre.split(r"(b)|(:+)", ":a:b::c")""",
+     ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c'])
+test(r"""sre.split(r"(?:b)|(?::+)", ":a:b::c")""", ['', 'a', '', '', 'c'])
+
+test(r"""sre.split(r":", ":a:b::c", 2)""", ['', 'a', 'b::c'])
+test(r"""sre.split(r':', 'a:b:c:d', 2)""", ['a', 'b', 'c:d'])
+
+test(r"""sre.split(r"(:)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c'])
+test(r"""sre.split(r"(:*)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c'])
+
+if verbose:
+    print "Running tests on sre.findall"
+
+test(r"""sre.findall(r":+", "abc")""", [])
+test(r"""sre.findall(r":+", "a:b::c:::d")""", [":", "::", ":::"])
+test(r"""sre.findall(r"(:+)", "a:b::c:::d")""", [":", "::", ":::"])
+test(r"""sre.findall(r"(:)(:*)", "a:b::c:::d")""",
+     [(":", ""), (":", ":"), (":", "::")])
+test(r"""sre.findall(r"(a)|(b)", "abc")""", [("a", ""), ("", "b")])
+
+# bug 117612
+test(r"""sre.findall(r"(a|(b))", "aba")""", [("a", ""),("b", "b"),("a", "")])
+
+if sys.hexversion >= 0x02020000:
+    if verbose:
+        print "Running tests on sre.finditer"
+    def fixup(seq):
+        # convert iterator to list
+        if not hasattr(seq, "next") or not hasattr(seq, "__iter__"):
+            print "finditer returned", type(seq)
+        return map(lambda item: item.group(0), seq)
+    # sanity
+    test(r"""fixup(sre.finditer(r":+", "a:b::c:::d"))""", [":", "::", ":::"])
+
+if verbose:
+    print "Running tests on sre.match"
+
+test(r"""sre.match(r'a', 'a').groups()""", ())
+test(r"""sre.match(r'(a)', 'a').groups()""", ('a',))
+test(r"""sre.match(r'(a)', 'a').group(0)""", 'a')
+test(r"""sre.match(r'(a)', 'a').group(1)""", 'a')
+test(r"""sre.match(r'(a)', 'a').group(1, 1)""", ('a', 'a'))
+
+pat = sre.compile(r'((a)|(b))(c)?')
+test(r"""pat.match('a').groups()""", ('a', 'a', None, None))
+test(r"""pat.match('b').groups()""", ('b', None, 'b', None))
+test(r"""pat.match('ac').groups()""", ('a', 'a', None, 'c'))
+test(r"""pat.match('bc').groups()""", ('b', None, 'b', 'c'))
+test(r"""pat.match('bc').groups("")""", ('b', "", 'b', 'c'))
+
+pat = sre.compile(r'(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
+test(r"""pat.match('a').group(1, 2, 3)""", ('a', None, None))
+test(r"""pat.match('b').group('a1', 'b2', 'c3')""", (None, 'b', None))
+test(r"""pat.match('ac').group(1, 'b2', 3)""", ('a', None, 'c'))
+
+# bug 448951 (similar to 429357, but with single char match)
+# (Also test greedy matches.)
+for op in '','?','*':
+    test(r"""sre.match(r'((.%s):)?z', 'z').groups()"""%op, (None, None))
+    test(r"""sre.match(r'((.%s):)?z', 'a:z').groups()"""%op, ('a:', 'a'))
+
+if verbose:
+    print "Running tests on sre.escape"
+
+p = ""
+for i in range(0, 256):
+    p = p + chr(i)
+    test(r"""sre.match(sre.escape(chr(i)), chr(i)) is not None""", 1)
+    test(r"""sre.match(sre.escape(chr(i)), chr(i)).span()""", (0,1))
+
+pat = sre.compile(sre.escape(p))
+test(r"""pat.match(p) is not None""", 1)
+test(r"""pat.match(p).span()""", (0,256))
+
+if verbose:
+    print 'Running tests on sre.Scanner'
+
+def s_ident(scanner, token): return token
+def s_operator(scanner, token): return "op%s" % token
+def s_float(scanner, token): return float(token)
+def s_int(scanner, token): return int(token)
+
+scanner = sre.Scanner([
+    (r"[a-zA-Z_]\w*", s_ident),
+    (r"\d+\.\d*", s_float),
+    (r"\d+", s_int),
+    (r"=|\+|-|\*|/", s_operator),
+    (r"\s+", None),
+    ])
+
+# sanity check
+test('scanner.scan("sum = 3*foo + 312.50 + bar")',
+     (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], ''))
+
+if verbose:
+    print 'Pickling a SRE_Pattern instance'
+
+try:
+    import pickle
+    pat = sre.compile(r'a(?:b|(c|e){1,2}?|d)+?(.)')
+    s = pickle.dumps(pat)
+    pat = pickle.loads(s)
+except:
+    print TestFailed, 're module pickle'
+
+try:
+    import cPickle
+    pat = sre.compile(r'a(?:b|(c|e){1,2}?|d)+?(.)')
+    s = cPickle.dumps(pat)
+    pat = cPickle.loads(s)
+except:
+    print TestFailed, 're module cPickle'
+
+# constants
+test(r"""sre.I""", sre.IGNORECASE)
+test(r"""sre.L""", sre.LOCALE)
+test(r"""sre.M""", sre.MULTILINE)
+test(r"""sre.S""", sre.DOTALL)
+test(r"""sre.X""", sre.VERBOSE)
+test(r"""sre.T""", sre.TEMPLATE)
+test(r"""sre.U""", sre.UNICODE)
+
+for flags in [sre.I, sre.M, sre.X, sre.S, sre.L, sre.T, sre.U]:
+    try:
+        r = sre.compile('^pattern$', flags)
+    except:
+        print 'Exception raised on flag', flags
+
+if verbose:
+    print 'Test engine limitations'
+
+# Try nasty case that overflows the straightforward recursive
+# implementation of repeated groups.
+test("sre.match('(x)*', 50000*'x').span()", (0, 50000), RuntimeError)
+test("sre.match(r'(x)*y', 50000*'x'+'y').span()", (0, 50001), RuntimeError)
+test("sre.match(r'(x)*?y', 50000*'x'+'y').span()", (0, 50001), RuntimeError)
+
+from re_tests import *
+
+if verbose:
+    print 'Running re_tests test suite'
+else:
+    # To save time, only run the first and last 10 tests
+    #tests = tests[:10] + tests[-10:]
+    pass
+
+for t in tests:
+    sys.stdout.flush()
+    pattern=s=outcome=repl=expected=None
+    if len(t)==5:
+        pattern, s, outcome, repl, expected = t
+    elif len(t)==3:
+        pattern, s, outcome = t
+    else:
+        raise ValueError, ('Test tuples should have 3 or 5 fields',t)
+
+    try:
+        obj=sre.compile(pattern)
+    except sre.error:
+        if outcome==SYNTAX_ERROR: pass  # Expected a syntax error
+        else:
+            print '=== Syntax error:', t
+    except KeyboardInterrupt: raise KeyboardInterrupt
+    except:
+        print '*** Unexpected error ***', t
+        if verbose:
+            traceback.print_exc(file=sys.stdout)
+    else:
+        try:
+            result=obj.search(s)
+        except (sre.error), msg:
+            print '=== Unexpected exception', t, repr(msg)
+        if outcome==SYNTAX_ERROR:
+            print '=== Compiled incorrectly', t
+        elif outcome==FAIL:
+            if result is None: pass   # No match, as expected
+            else: print '=== Succeeded incorrectly', t
+        elif outcome==SUCCEED:
+            if result is not None:
+                # Matched, as expected, so now we compute the
+                # result string and compare it to our expected result.
+                start, end = result.span(0)
+                vardict={'found': result.group(0),
+                         'groups': result.group(),
+                         'flags': result.re.flags}
+                for i in range(1, 100):
+                    try:
+                        gi = result.group(i)
+                        # Special hack because else the string concat fails:
+                        if gi is None:
+                            gi = "None"
+                    except IndexError:
+                        gi = "Error"
+                    vardict['g%d' % i] = gi
+                for i in result.re.groupindex.keys():
+                    try:
+                        gi = result.group(i)
+                        if gi is None:
+                            gi = "None"
+                    except IndexError:
+                        gi = "Error"
+                    vardict[i] = gi
+                repl=eval(repl, vardict)
+                if repl!=expected:
+                    print '=== grouping error', t,
+                    print repr(repl)+' should be '+repr(expected)
+            else:
+                print '=== Failed incorrectly', t
+                continue
+
+            # Try the match on a unicode string, and check that it
+            # still succeeds.
+            try:
+                u = unicode(s, "latin-1")
+            except NameError:
+                pass
+            except TypeError:
+                continue # skip unicode test strings
+            else:
+                result=obj.search(u)
+                if result==None:
+                    print '=== Fails on unicode match', t
+
+            # Try the match on a unicode pattern, and check that it
+            # still succeeds.
+            try:
+                u = unicode(pattern, "latin-1")
+            except NameError:
+                pass
+            else:
+                obj=sre.compile(u)
+                result=obj.search(s)
+                if result==None:
+                    print '=== Fails on unicode pattern match', t
+
+            # Try the match with the search area limited to the extent
+            # of the match and see if it still succeeds.  \B will
+            # break (because it won't match at the end or start of a
+            # string), so we'll ignore patterns that feature it.
+
+            if pattern[:2]!='\\B' and pattern[-2:]!='\\B':
+                obj=sre.compile(pattern)
+                result=obj.search(s, result.start(0), result.end(0)+1)
+                if result==None:
+                    print '=== Failed on range-limited match', t
+
+            # Try the match with IGNORECASE enabled, and check that it
+            # still succeeds.
+            obj=sre.compile(pattern, sre.IGNORECASE)
+            result=obj.search(s)
+            if result==None:
+                print '=== Fails on case-insensitive match', t
+
+            # Try the match with LOCALE enabled, and check that it
+            # still succeeds.
+            obj=sre.compile(pattern, sre.LOCALE)
+            result=obj.search(s)
+            if result==None:
+                print '=== Fails on locale-sensitive match', t
+
+            # Try the match with UNICODE locale enabled, and check
+            # that it still succeeds.
+            if have_unicode:
+                obj=sre.compile(pattern, sre.UNICODE)
+                result=obj.search(s)
+                if result==None:
+                    print '=== Fails on unicode-sensitive match', t
diff --git a/lib-python/2.2/test/test_strftime.py b/lib-python/2.2/test/test_strftime.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_strftime.py
@@ -0,0 +1,146 @@
+#! /usr/bin/env python
+
+# Sanity checker for time.strftime
+
+import time, calendar, sys, os, re
+from test_support import verbose
+
+def main():
+    global verbose
+    # For C Python, these tests expect C locale, so we try to set that
+    # explicitly.  For Jython, Finn says we need to be in the US locale; my
+    # understanding is that this is the closest Java gets to C's "C" locale.
+    # Jython ought to supply an _locale module which Does The Right Thing, but
+    # this is the best we can do given today's state of affairs.
+    try:
+        import java
+        java.util.Locale.setDefault(java.util.Locale.US)
+    except ImportError:
+        # Can't do this first because it will succeed, even in Jython
+        import locale
+        locale.setlocale(locale.LC_TIME, 'C')
+    now = time.time()
+    strftest(now)
+    verbose = 0
+    # Try a bunch of dates and times,  chosen to vary through time of
+    # day and daylight saving time
+    for j in range(-5, 5):
+        for i in range(25):
+            strftest(now + (i + j*100)*23*3603)
+
+def strftest(now):
+    if verbose:
+        print "strftime test for", time.ctime(now)
+    nowsecs = str(long(now))[:-1]
+    gmt = time.gmtime(now)
+    now = time.localtime(now)
+
+    if now[3] < 12: ampm='(AM|am)'
+    else: ampm='(PM|pm)'
+
+    jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6))
+
+    try:
+        if now[8]: tz = time.tzname[1]
+        else: tz = time.tzname[0]
+    except AttributeError:
+        tz = ''
+
+    if now[3] > 12: clock12 = now[3] - 12
+    elif now[3] > 0: clock12 = now[3]
+    else: clock12 = 12
+
+    expectations = (
+        ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
+        ('%A', calendar.day_name[now[6]], 'full weekday name'),
+        ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
+        ('%B', calendar.month_name[now[1]], 'full month name'),
+        # %c see below
+        ('%d', '%02d' % now[2], 'day of month as number (00-31)'),
+        ('%H', '%02d' % now[3], 'hour (00-23)'),
+        ('%I', '%02d' % clock12, 'hour (01-12)'),
+        ('%j', '%03d' % now[7], 'julian day (001-366)'),
+        ('%m', '%02d' % now[1], 'month as number (01-12)'),
+        ('%M', '%02d' % now[4], 'minute, (00-59)'),
+        ('%p', ampm, 'AM or PM as appropriate'),
+        ('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
+        ('%U', '%02d' % ((now[7] + jan1[6])//7),
+         'week number of the year (Sun 1st)'),
+        ('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
+        ('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)//7),
+         'week number of the year (Mon 1st)'),
+        # %x see below
+        ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
+        ('%y', '%02d' % (now[0]%100), 'year without century'),
+        ('%Y', '%d' % now[0], 'year with century'),
+        # %Z see below
+        ('%%', '%', 'single percent sign'),
+        )
+
+    nonstandard_expectations = (
+        # These are standard but don't have predictable output
+        ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
+        ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
+         '%m/%d/%y %H:%M:%S'),
+        ('%Z', '%s' % tz, 'time zone name'),
+
+        # These are some platform specific extensions
+        ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
+        ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
+        ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
+        ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
+        ('%n', '\n', 'newline character'),
+        ('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm),
+         '%I:%M:%S %p'),
+        ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
+        ('%s', nowsecs, 'seconds since the Epoch in UCT'),
+        ('%t', '\t', 'tab character'),
+        ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
+        ('%3y', '%03d' % (now[0]%100),
+         'year without century rendered using fieldwidth'),
+        )
+
+    if verbose:
+        print "Strftime test, platform: %s, Python version: %s" % \
+              (sys.platform, sys.version.split()[0])
+
+    for e in expectations:
+        try:
+            result = time.strftime(e[0], now)
+        except ValueError, error:
+            print "Standard '%s' format gave error:" % e[0], error
+            continue
+        if re.match(e[1], result): continue
+        if not result or result[0] == '%':
+            print "Does not support standard '%s' format (%s)" % (e[0], e[2])
+        else:
+            print "Conflict for %s (%s):" % (e[0], e[2])
+            print "  Expected %s, but got %s" % (e[1], result)
+
+    for e in nonstandard_expectations:
+        try:
+            result = time.strftime(e[0], now)
+        except ValueError, result:
+            if verbose:
+                print "Error for nonstandard '%s' format (%s): %s" % \
+                      (e[0], e[2], str(result))
+            continue
+        if re.match(e[1], result):
+            if verbose:
+                print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
+        elif not result or result[0] == '%':
+            if verbose:
+                print "Does not appear to support '%s' format (%s)" % (e[0],
+                                                                       e[2])
+        else:
+            if verbose:
+                print "Conflict for nonstandard '%s' format (%s):" % (e[0],
+                                                                      e[2])
+                print "  Expected %s, but got %s" % (e[1], result)
+
+def fixasctime(s):
+    if s[8] == ' ':
+        s = s[:8] + '0' + s[9:]
+    return s
+
+main()
diff --git a/lib-python/2.2/test/test_string.py b/lib-python/2.2/test/test_string.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_string.py
@@ -0,0 +1,83 @@
+from test_support import verbose, TestSkipped
+import string_tests
+import string, sys
+
+# XXX: kludge... short circuit if strings don't have methods
+try:
+    ''.join
+except AttributeError:
+    raise TestSkipped
+
+def test(name, input, output, *args):
+    if verbose:
+        print 'string.%s%s =? %s... ' % (name, (input,) + args, output),
+    try:
+        # Prefer string methods over string module functions
+        try:
+            f = getattr(input, name)
+            value = apply(f, args)
+        except AttributeError:
+            f = getattr(string, name)
+            value = apply(f, (input,) + args)
+    except:
+        value = sys.exc_type
+        f = name
+    if value == output:
+        # if the original is returned make sure that
+        # this doesn't happen with subclasses
+        if value is input:
+            class ssub(str):
+                def __repr__(self):
+                    return 'ssub(%r)' % str.__repr__(self)
+            input = ssub(input)
+            try:
+                f = getattr(input, name)
+                value = apply(f, args)
+            except AttributeError:
+                f = getattr(string, name)
+                value = apply(f, (input,) + args)
+            if value is input:
+                if verbose:
+                    print 'no'
+                print '*',f, `input`, `output`, `value`
+                return
+    if value != output:
+        if verbose:
+            print 'no'
+        print f, `input`, `output`, `value`
+    else:
+        if verbose:
+            print 'yes'
+
+string_tests.run_module_tests(test)
+string_tests.run_method_tests(test)
+
+string.whitespace
+string.lowercase
+string.uppercase
+
+# Float formatting
+for prec in range(100):
+    formatstring = u'%%.%if' % prec
+    value = 0.01
+    for x in range(60):
+        value = value * 3.141592655 / 3.0 * 10.0
+        #print 'Overflow check for x=%i and prec=%i:' % \
+        #      (x, prec),
+        try:
+            result = formatstring % value
+        except OverflowError:
+            # The formatfloat() code in stringobject.c and
+            # unicodeobject.c uses a 120 byte buffer and switches from
+            # 'f' formatting to 'g' at precision 50, so we expect
+            # OverflowErrors for the ranges x < 50 and prec >= 67.
+            if x >= 50 or \
+               prec < 67:
+                print '*** unexpected OverflowError for x=%i and prec=%i' % (x, prec)
+            else:
+                #print 'OverflowError'
+                pass
+        else:
+            #print result
+            pass
+    
diff --git a/lib-python/2.2/test/test_strop.py b/lib-python/2.2/test/test_strop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_strop.py
@@ -0,0 +1,133 @@
+import warnings
+warnings.filterwarnings("ignore", "", DeprecationWarning, __name__)
+warnings.filterwarnings("ignore", "", DeprecationWarning, "unittest")
+import strop
+import test_support
+import unittest
+
+
+class StropFunctionTestCase(unittest.TestCase):
+
+    def test_atoi(self):
+        self.assert_(strop.atoi(" 1 ") == 1)
+        self.assertRaises(ValueError, strop.atoi, " 1x")
+        self.assertRaises(ValueError, strop.atoi, " x1 ")
+
+    def test_atol(self):
+        self.assert_(strop.atol(" 1 ") == 1L)
+        self.assertRaises(ValueError, strop.atol, " 1x")
+        self.assertRaises(ValueError, strop.atol, " x1 ")
+
+    def test_atof(self):
+        self.assert_(strop.atof(" 1 ") == 1.0)
+        self.assertRaises(ValueError, strop.atof, " 1x")
+        self.assertRaises(ValueError, strop.atof, " x1 ")
+
+    def test_capitalize(self):
+        self.assert_(strop.capitalize(" hello ") == " hello ")
+        self.assert_(strop.capitalize("hello ") == "Hello ")
+
+    def test_find(self):
+        self.assert_(strop.find("abcdefghiabc", "abc") == 0)
+        self.assert_(strop.find("abcdefghiabc", "abc", 1) == 9)
+        self.assert_(strop.find("abcdefghiabc", "def", 4) == -1)
+
+    def test_rfind(self):
+        self.assert_(strop.rfind("abcdefghiabc", "abc") == 9)
+
+    def test_lower(self):
+        self.assert_(strop.lower("HeLLo") == "hello")
+
+    def test_upper(self):
+        self.assert_(strop.upper("HeLLo") == "HELLO")
+
+    def test_swapcase(self):
+        self.assert_(strop.swapcase("HeLLo cOmpUteRs") == "hEllO CoMPuTErS")
+
+    def test_strip(self):
+        self.assert_(strop.strip(" \t\n hello \t\n ") == "hello")
+
+    def test_lstrip(self):
+        self.assert_(strop.lstrip(" \t\n hello \t\n ") == "hello \t\n ")
+
+    def test_rstrip(self):
+        self.assert_(strop.rstrip(" \t\n hello \t\n ") == " \t\n hello")
+
+    def test_replace(self):
+        replace = strop.replace
+        self.assert_(replace("one!two!three!", '!', '@', 1)
+                     == "one at two!three!")
+        self.assert_(replace("one!two!three!", '!', '@', 2)
+                     == "one at two@three!")
+        self.assert_(replace("one!two!three!", '!', '@', 3)
+                     == "one at two@three@")
+        self.assert_(replace("one!two!three!", '!', '@', 4)
+                     == "one at two@three@")
+
+        # CAUTION: a replace count of 0 means infinity only to strop,
+        # not to the string .replace() method or to the
+        # string.replace() function.
+
+        self.assert_(replace("one!two!three!", '!', '@', 0)
+                     == "one at two@three@")
+        self.assert_(replace("one!two!three!", '!', '@')
+                     == "one at two@three@")
+        self.assert_(replace("one!two!three!", 'x', '@')
+                     == "one!two!three!")
+        self.assert_(replace("one!two!three!", 'x', '@', 2)
+                     == "one!two!three!")
+
+    def test_split(self):
+        split = strop.split
+        self.assert_(split("this is the split function")
+                     == ['this', 'is', 'the', 'split', 'function'])
+        self.assert_(split("a|b|c|d", '|') == ['a', 'b', 'c', 'd'])
+        self.assert_(split("a|b|c|d", '|', 2) == ['a', 'b', 'c|d'])
+        self.assert_(split("a b c d", None, 1) == ['a', 'b c d'])
+        self.assert_(split("a b c d", None, 2) == ['a', 'b', 'c d'])
+        self.assert_(split("a b c d", None, 3) == ['a', 'b', 'c', 'd'])
+        self.assert_(split("a b c d", None, 4) == ['a', 'b', 'c', 'd'])
+        self.assert_(split("a b c d", None, 0) == ['a', 'b', 'c', 'd'])
+        self.assert_(split("a  b  c  d", None, 2) ==  ['a', 'b', 'c  d'])
+
+    def test_join(self):
+        self.assert_(strop.join(['a', 'b', 'c', 'd']) == 'a b c d')
+        self.assert_(strop.join(('a', 'b', 'c', 'd'), '') == 'abcd')
+        self.assert_(strop.join(Sequence()) == 'w x y z')
+
+        # try a few long ones
+        self.assert_(strop.join(['x' * 100] * 100, ':')
+                     == (('x' * 100) + ":") * 99 + "x" * 100)
+        self.assert_(strop.join(('x' * 100,) * 100, ':')
+                     == (('x' * 100) + ":") * 99 + "x" * 100)
+
+    def test_maketrans(self):
+        self.assert_(strop.maketrans("abc", "xyz") == transtable)
+        self.assertRaises(ValueError, strop.maketrans, "abc", "xyzq")
+
+    def test_translate(self):
+        self.assert_(strop.translate("xyzabcdef", transtable, "def")
+                     == "xyzxyz")
+
+    def test_data_attributes(self):
+        strop.lowercase
+        strop.uppercase
+        strop.whitespace
+
+
+transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
+
+
+# join() now works with any sequence type.
+class Sequence:
+    def __init__(self): self.seq = 'wxyz'
+    def __len__(self): return len(self.seq)
+    def __getitem__(self, i): return self.seq[i]
+
+
+def test_main():
+    test_support.run_unittest(StropFunctionTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_struct.py b/lib-python/2.2/test/test_struct.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_struct.py
@@ -0,0 +1,441 @@
+from test_support import TestFailed, verbose, verify
+import struct
+## import pdb
+
+import sys
+ISBIGENDIAN = sys.byteorder == "big"
+del sys
+verify((struct.pack('=i', 1)[0] == chr(0)) == ISBIGENDIAN,
+       "bigendian determination appears wrong")
+
+def string_reverse(s):
+    chars = list(s)
+    chars.reverse()
+    return "".join(chars)
+
+def bigendian_to_native(value):
+    if ISBIGENDIAN:
+        return value
+    else:
+        return string_reverse(value)
+
+def simple_err(func, *args):
+    try:
+        apply(func, args)
+    except struct.error:
+        pass
+    else:
+        raise TestFailed, "%s%s did not raise struct.error" % (
+            func.__name__, args)
+##      pdb.set_trace()
+
+def any_err(func, *args):
+    try:
+        apply(func, args)
+    except (struct.error, OverflowError, TypeError):
+        pass
+    else:
+        raise TestFailed, "%s%s did not raise error" % (
+            func.__name__, args)
+##      pdb.set_trace()
+
+
+simple_err(struct.calcsize, 'Z')
+
+sz = struct.calcsize('i')
+if sz * 3 != struct.calcsize('iii'):
+    raise TestFailed, 'inconsistent sizes'
+
+fmt = 'cbxxxxxxhhhhiillffd'
+fmt3 = '3c3b18x12h6i6l6f3d'
+sz = struct.calcsize(fmt)
+sz3 = struct.calcsize(fmt3)
+if sz * 3 != sz3:
+    raise TestFailed, 'inconsistent sizes (3*%s -> 3*%d = %d, %s -> %d)' % (
+        `fmt`, sz, 3*sz, `fmt3`, sz3)
+
+simple_err(struct.pack, 'iii', 3)
+simple_err(struct.pack, 'i', 3, 3, 3)
+simple_err(struct.pack, 'i', 'foo')
+simple_err(struct.unpack, 'd', 'flap')
+s = struct.pack('ii', 1, 2)
+simple_err(struct.unpack, 'iii', s)
+simple_err(struct.unpack, 'i', s)
+
+c = 'a'
+b = 1
+h = 255
+i = 65535
+l = 65536
+f = 3.1415
+d = 3.1415
+
+for prefix in ('', '@', '<', '>', '=', '!'):
+    for format in ('xcbhilfd', 'xcBHILfd'):
+        format = prefix + format
+        if verbose:
+            print "trying:", format
+        s = struct.pack(format, c, b, h, i, l, f, d)
+        cp, bp, hp, ip, lp, fp, dp = struct.unpack(format, s)
+        if (cp != c or bp != b or hp != h or ip != i or lp != l or
+            int(100 * fp) != int(100 * f) or int(100 * dp) != int(100 * d)):
+            # ^^^ calculate only to two decimal places
+            raise TestFailed, "unpack/pack not transitive (%s, %s)" % (
+                str(format), str((cp, bp, hp, ip, lp, fp, dp)))
+
+# Test some of the new features in detail
+
+# (format, argument, big-endian result, little-endian result, asymmetric)
+tests = [
+    ('c', 'a', 'a', 'a', 0),
+    ('xc', 'a', '\0a', '\0a', 0),
+    ('cx', 'a', 'a\0', 'a\0', 0),
+    ('s', 'a', 'a', 'a', 0),
+    ('0s', 'helloworld', '', '', 1),
+    ('1s', 'helloworld', 'h', 'h', 1),
+    ('9s', 'helloworld', 'helloworl', 'helloworl', 1),
+    ('10s', 'helloworld', 'helloworld', 'helloworld', 0),
+    ('11s', 'helloworld', 'helloworld\0', 'helloworld\0', 1),
+    ('20s', 'helloworld', 'helloworld'+10*'\0', 'helloworld'+10*'\0', 1),
+    ('b', 7, '\7', '\7', 0),
+    ('b', -7, '\371', '\371', 0),
+    ('B', 7, '\7', '\7', 0),
+    ('B', 249, '\371', '\371', 0),
+    ('h', 700, '\002\274', '\274\002', 0),
+    ('h', -700, '\375D', 'D\375', 0),
+    ('H', 700, '\002\274', '\274\002', 0),
+    ('H', 0x10000-700, '\375D', 'D\375', 0),
+    ('i', 70000000, '\004,\035\200', '\200\035,\004', 0),
+    ('i', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
+    ('I', 70000000L, '\004,\035\200', '\200\035,\004', 0),
+    ('I', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
+    ('l', 70000000, '\004,\035\200', '\200\035,\004', 0),
+    ('l', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
+    ('L', 70000000L, '\004,\035\200', '\200\035,\004', 0),
+    ('L', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
+    ('f', 2.0, '@\000\000\000', '\000\000\000@', 0),
+    ('d', 2.0, '@\000\000\000\000\000\000\000',
+               '\000\000\000\000\000\000\000@', 0),
+    ('f', -2.0, '\300\000\000\000', '\000\000\000\300', 0),
+    ('d', -2.0, '\300\000\000\000\000\000\000\000',
+               '\000\000\000\000\000\000\000\300', 0),
+]
+
+for fmt, arg, big, lil, asy in tests:
+    if verbose:
+        print `fmt`, `arg`, `big`, `lil`
+    for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
+                        ('='+fmt, ISBIGENDIAN and big or lil)]:
+        res = struct.pack(xfmt, arg)
+        if res != exp:
+            raise TestFailed, "pack(%s, %s) -> %s # expected %s" % (
+                `fmt`, `arg`, `res`, `exp`)
+        n = struct.calcsize(xfmt)
+        if n != len(res):
+            raise TestFailed, "calcsize(%s) -> %d # expected %d" % (
+                `xfmt`, n, len(res))
+        rev = struct.unpack(xfmt, res)[0]
+        if rev != arg and not asy:
+            raise TestFailed, "unpack(%s, %s) -> (%s,) # expected (%s,)" % (
+                `fmt`, `res`, `rev`, `arg`)
+
+###########################################################################
+# Simple native q/Q tests.
+
+has_native_qQ = 1
+try:
+    struct.pack("q", 5)
+except struct.error:
+    has_native_qQ = 0
+
+if verbose:
+    print "Platform has native q/Q?", has_native_qQ and "Yes." or "No."
+
+any_err(struct.pack, "Q", -1)   # can't pack -1 as unsigned regardless
+simple_err(struct.pack, "q", "a")  # can't pack string as 'q' regardless
+simple_err(struct.pack, "Q", "a")  # ditto, but 'Q'
+
+def test_native_qQ():
+    bytes = struct.calcsize('q')
+    # The expected values here are in big-endian format, primarily because
+    # I'm on a little-endian machine and so this is the clearest way (for
+    # me) to force the code to get exercised.
+    for format, input, expected in (
+            ('q', -1, '\xff' * bytes),
+            ('q', 0, '\x00' * bytes),
+            ('Q', 0, '\x00' * bytes),
+            ('q', 1L, '\x00' * (bytes-1) + '\x01'),
+            ('Q', (1L << (8*bytes))-1, '\xff' * bytes),
+            ('q', (1L << (8*bytes-1))-1, '\x7f' + '\xff' * (bytes - 1))):
+        got = struct.pack(format, input)
+        native_expected = bigendian_to_native(expected)
+        verify(got == native_expected,
+               "%r-pack of %r gave %r, not %r" %
+                    (format, input, got, native_expected))
+        retrieved = struct.unpack(format, got)[0]
+        verify(retrieved == input,
+               "%r-unpack of %r gave %r, not %r" %
+                    (format, got, retrieved, input))
+
+if has_native_qQ:
+    test_native_qQ()
+
+###########################################################################
+# Standard integer tests (bBhHiIlLqQ).
+
+import binascii
+
+class IntTester:
+
+    # XXX Most std integer modes fail to test for out-of-range.
+    # The "i" and "l" codes appear to range-check OK on 32-bit boxes, but
+    # fail to check correctly on some 64-bit ones (Tru64 Unix + Compaq C
+    # reported by Mark Favas).
+    BUGGY_RANGE_CHECK = "bBhHiIlL"
+
+    def __init__(self, formatpair, bytesize):
+        assert len(formatpair) == 2
+        self.formatpair = formatpair
+        for direction in "<>!=":
+            for code in formatpair:
+                format = direction + code
+                verify(struct.calcsize(format) == bytesize)
+        self.bytesize = bytesize
+        self.bitsize = bytesize * 8
+        self.signed_code, self.unsigned_code = formatpair
+        self.unsigned_min = 0
+        self.unsigned_max = 2L**self.bitsize - 1
+        self.signed_min = -(2L**(self.bitsize-1))
+        self.signed_max = 2L**(self.bitsize-1) - 1
+
+    def test_one(self, x, pack=struct.pack,
+                          unpack=struct.unpack,
+                          unhexlify=binascii.unhexlify):
+        if verbose:
+            print "trying std", self.formatpair, "on", x, "==", hex(x)
+
+        # Try signed.
+        code = self.signed_code
+        if self.signed_min <= x <= self.signed_max:
+            # Try big-endian.
+            expected = long(x)
+            if x < 0:
+                expected += 1L << self.bitsize
+                assert expected > 0
+            expected = hex(expected)[2:-1] # chop "0x" and trailing 'L'
+            if len(expected) & 1:
+                expected = "0" + expected
+            expected = unhexlify(expected)
+            expected = "\x00" * (self.bytesize - len(expected)) + expected
+
+            # Pack work?
+            format = ">" + code
+            got = pack(format, x)
+            verify(got == expected,
+                   "'%s'-pack of %r gave %r, not %r" %
+                    (format, x, got, expected))
+
+            # Unpack work?
+            retrieved = unpack(format, got)[0]
+            verify(x == retrieved,
+                   "'%s'-unpack of %r gave %r, not %r" %
+                    (format, got, retrieved, x))
+
+            # Adding any byte should cause a "too big" error.
+            any_err(unpack, format, '\x01' + got)
+
+            # Try little-endian.
+            format = "<" + code
+            expected = string_reverse(expected)
+
+            # Pack work?
+            got = pack(format, x)
+            verify(got == expected,
+                   "'%s'-pack of %r gave %r, not %r" %
+                    (format, x, got, expected))
+
+            # Unpack work?
+            retrieved = unpack(format, got)[0]
+            verify(x == retrieved,
+                   "'%s'-unpack of %r gave %r, not %r" %
+                    (format, got, retrieved, x))
+
+            # Adding any byte should cause a "too big" error.
+            any_err(unpack, format, '\x01' + got)
+
+        else:
+            # x is out of range -- verify pack realizes that.
+            if code in self.BUGGY_RANGE_CHECK:
+                if verbose:
+                    print "Skipping buggy range check for code", code
+            else:
+                any_err(pack, ">" + code, x)
+                any_err(pack, "<" + code, x)
+
+        # Much the same for unsigned.
+        code = self.unsigned_code
+        if self.unsigned_min <= x <= self.unsigned_max:
+            # Try big-endian.
+            format = ">" + code
+            expected = long(x)
+            expected = hex(expected)[2:-1] # chop "0x" and trailing 'L'
+            if len(expected) & 1:
+                expected = "0" + expected
+            expected = unhexlify(expected)
+            expected = "\x00" * (self.bytesize - len(expected)) + expected
+
+            # Pack work?
+            got = pack(format, x)
+            verify(got == expected,
+                   "'%s'-pack of %r gave %r, not %r" %
+                    (format, x, got, expected))
+
+            # Unpack work?
+            retrieved = unpack(format, got)[0]
+            verify(x == retrieved,
+                   "'%s'-unpack of %r gave %r, not %r" %
+                    (format, got, retrieved, x))
+
+            # Adding any byte should cause a "too big" error.
+            any_err(unpack, format, '\x01' + got)
+
+            # Try little-endian.
+            format = "<" + code
+            expected = string_reverse(expected)
+
+            # Pack work?
+            got = pack(format, x)
+            verify(got == expected,
+                   "'%s'-pack of %r gave %r, not %r" %
+                    (format, x, got, expected))
+
+            # Unpack work?
+            retrieved = unpack(format, got)[0]
+            verify(x == retrieved,
+                   "'%s'-unpack of %r gave %r, not %r" %
+                    (format, got, retrieved, x))
+
+            # Adding any byte should cause a "too big" error.
+            any_err(unpack, format, '\x01' + got)
+
+        else:
+            # x is out of range -- verify pack realizes that.
+            if code in self.BUGGY_RANGE_CHECK:
+                if verbose:
+                    print "Skipping buggy range check for code", code
+            else:
+                any_err(pack, ">" + code, x)
+                any_err(pack, "<" + code, x)
+
+    def run(self):
+        from random import randrange
+
+        # Create all interesting powers of 2.
+        values = []
+        for exp in range(self.bitsize + 3):
+            values.append(1L << exp)
+
+        # Add some random values.
+        for i in range(self.bitsize):
+            val = 0L
+            for j in range(self.bytesize):
+                val = (val << 8) | randrange(256)
+            values.append(val)
+
+        # Try all those, and their negations, and +-1 from them.  Note
+        # that this tests all power-of-2 boundaries in range, and a few out
+        # of range, plus +-(2**n +- 1).
+        for base in values:
+            for val in -base, base:
+                for incr in -1, 0, 1:
+                    x = val + incr
+                    try:
+                        x = int(x)
+                    except OverflowError:
+                        pass
+                    self.test_one(x)
+
+        # Some error cases.
+        for direction in "<>":
+            for code in self.formatpair:
+                for badobject in "a string", 3+42j, randrange:
+                    any_err(struct.pack, direction + code, badobject)
+
+for args in [("bB", 1),
+             ("hH", 2),
+             ("iI", 4),
+             ("lL", 4),
+             ("qQ", 8)]:
+    t = IntTester(*args)
+    t.run()
+
+
+###########################################################################
+# The p ("Pascal string") code.
+
+def test_p_code():
+    for code, input, expected, expectedback in [
+            ('p','abc', '\x00', ''),
+            ('1p', 'abc', '\x00', ''),
+            ('2p', 'abc', '\x01a', 'a'),
+            ('3p', 'abc', '\x02ab', 'ab'),
+            ('4p', 'abc', '\x03abc', 'abc'),
+            ('5p', 'abc', '\x03abc\x00', 'abc'),
+            ('6p', 'abc', '\x03abc\x00\x00', 'abc'),
+            ('1000p', 'x'*1000, '\xff' + 'x'*999, 'x'*255)]:
+        got = struct.pack(code, input)
+        if got != expected:
+            raise TestFailed("pack(%r, %r) == %r but expected %r" %
+                             (code, input, got, expected))
+        (got,) = struct.unpack(code, got)
+        if got != expectedback:
+            raise TestFailed("unpack(%r, %r) == %r but expected %r" %
+                             (code, input, got, expectedback))
+
+test_p_code()
+
+
+###########################################################################
+# SF bug 705836.  "<f" and ">f" had a severe rounding bug, where a carry
+# from the low-order discarded bits could propagate into the exponent
+# field, causing the result to be wrong by a factor of 2.
+
+def test_705836():
+    import math
+
+    for base in range(1, 33):
+        # smaller <- largest representable float less than base.
+        delta = 0.5
+        while base - delta / 2.0 != base:
+            delta /= 2.0
+        smaller = base - delta
+        # Packing this rounds away a solid string of trailing 1 bits.
+        packed = struct.pack("<f", smaller)
+        unpacked = struct.unpack("<f", packed)[0]
+        # This failed at base = 2, 4, and 32, with unpacked = 1, 2, and
+        # 16, respectively.
+        verify(base == unpacked)
+        bigpacked = struct.pack(">f", smaller)
+        verify(bigpacked == string_reverse(packed),
+               ">f pack should be byte-reversal of <f pack")
+        unpacked = struct.unpack(">f", bigpacked)[0]
+        verify(base == unpacked)
+
+    # Largest finite IEEE single.
+    big = (1 << 24) - 1
+    big = math.ldexp(big, 127 - 23)
+    packed = struct.pack(">f", big)
+    unpacked = struct.unpack(">f", packed)[0]
+    verify(big == unpacked)
+
+    # The same, but tack on a 1 bit so it rounds up to infinity.
+    big = (1 << 25) - 1
+    big = math.ldexp(big, 127 - 24)
+    try:
+        packed = struct.pack(">f", big)
+    except OverflowError:
+        pass
+    else:
+        TestFailed("expected OverflowError")
+
+test_705836()
diff --git a/lib-python/2.2/test/test_structseq.py b/lib-python/2.2/test/test_structseq.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_structseq.py
@@ -0,0 +1,28 @@
+from test_support import vereq
+
+import time
+
+t = time.gmtime()
+astuple = tuple(t)
+vereq(len(t), len(astuple))
+vereq(t, astuple)
+
+# Check that slicing works the same way; at one point, slicing t[i:j] with
+# 0 < i < j could produce NULLs in the result.
+for i in range(-len(t), len(t)):
+    for j in range(-len(t), len(t)):
+        vereq(t[i:j], astuple[i:j])
+
+# Devious code could crash structseqs' contructors
+class C:
+    def __getitem__(self, i):
+        raise IndexError
+    def __len__(self):
+        return 9
+
+try:
+    repr(time.struct_time(C()))
+except:
+    pass
+
+# XXX more needed
diff --git a/lib-python/2.2/test/test_sunaudiodev.py b/lib-python/2.2/test/test_sunaudiodev.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sunaudiodev.py
@@ -0,0 +1,28 @@
+from test_support import verbose, findfile, TestFailed, TestSkipped
+import sunaudiodev
+import os
+
+try:
+    audiodev = os.environ["AUDIODEV"]
+except KeyError:
+    audiodev = "/dev/audio"
+
+if not os.path.exists(audiodev):
+    raise TestSkipped("no audio device found!")
+
+def play_sound_file(path):
+    fp = open(path, 'r')
+    data = fp.read()
+    fp.close()
+    try:
+        a = sunaudiodev.open('w')
+    except sunaudiodev.error, msg:
+        raise TestFailed, msg
+    else:
+        a.write(data)
+        a.close()
+
+def test():
+    play_sound_file(findfile('audiotest.au'))
+
+test()
diff --git a/lib-python/2.2/test/test_sundry.py b/lib-python/2.2/test/test_sundry.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_sundry.py
@@ -0,0 +1,102 @@
+"""Do a minimal test of all the modules that aren't otherwise tested."""
+
+import warnings
+warnings.filterwarnings('ignore', '', DeprecationWarning, 'posixfile')
+
+from test_support import verbose
+
+import BaseHTTPServer
+import CGIHTTPServer
+import Queue
+import SimpleHTTPServer
+import SocketServer
+import aifc
+import anydbm
+import audiodev
+import bdb
+import cmd
+import code
+import codeop
+import colorsys
+import commands
+import compileall
+try:
+    import curses   # not available on Windows
+except ImportError:
+    if verbose:
+        print "skipping curses"
+import dircache
+import dis
+import distutils
+import doctest
+import dumbdbm
+import encodings
+import filecmp
+import fnmatch
+import formatter
+import fpformat
+import ftplib
+import getpass
+import glob
+import gopherlib
+import htmlentitydefs
+import htmllib
+import httplib
+import imaplib
+import imghdr
+import imputil
+import keyword
+#import knee
+import macpath
+import macurl2path
+import mailcap
+import mhlib
+import mimetypes
+import mimify
+import multifile
+import mutex
+import nntplib
+import nturl2path
+import pdb
+import pipes
+#import poplib
+import posixfile
+import pre
+import profile
+import pstats
+import py_compile
+import pyclbr
+#import reconvert
+import repr
+try:
+    import rlcompleter   # not available on Windows
+except ImportError:
+    if verbose:
+        print "skipping rlcompleter"
+import robotparser
+import sched
+import sgmllib
+import shelve
+import shlex
+import shutil
+import smtplib
+import sndhdr
+import statcache
+import statvfs
+import stringold
+import sunau
+import sunaudio
+import symbol
+import tabnanny
+import telnetlib
+import test
+import toaiff
+#import tzparse
+import urllib2
+# Can't test the "user" module -- if the user has a ~/.pythonrc.py, it
+# can screw up all sorts of things (esp. if it prints!).
+#import user
+import webbrowser
+import whichdb
+import xdrlib
+import xml
diff --git a/lib-python/2.2/test/test_support.py b/lib-python/2.2/test/test_support.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_support.py
@@ -0,0 +1,233 @@
+"""Supporting definitions for the Python regression test."""
+
+import sys
+
+class Error(Exception):
+    """Base class for regression test exceptions."""
+
+class TestFailed(Error):
+    """Test failed."""
+
+class TestSkipped(Error):
+    """Test skipped.
+
+    This can be raised to indicate that a test was deliberatly
+    skipped, but not because a feature wasn't available.  For
+    example, if some resource can't be used, such as the network
+    appears to be unavailable, this should be raised instead of
+    TestFailed.
+    """
+
+verbose = 1              # Flag set to 0 by regrtest.py
+use_resources = None       # Flag set to [] by regrtest.py
+
+# _original_stdout is meant to hold stdout at the time regrtest began.
+# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
+# The point is to have some flavor of stdout the user can actually see.
+_original_stdout = None
+def record_original_stdout(stdout):
+    global _original_stdout
+    _original_stdout = stdout
+
+def get_original_stdout():
+    return _original_stdout or sys.stdout
+
+def unload(name):
+    try:
+        del sys.modules[name]
+    except KeyError:
+        pass
+
+def forget(modname):
+    unload(modname)
+    import os
+    for dirname in sys.path:
+        try:
+            os.unlink(os.path.join(dirname, modname + '.pyc'))
+        except os.error:
+            pass
+
+def requires(resource, msg=None):
+    if use_resources is not None and resource not in use_resources:
+        if msg is None:
+            msg = "Use of the `%s' resource not enabled" % resource
+        raise TestSkipped(msg)
+
+FUZZ = 1e-6
+
+def fcmp(x, y): # fuzzy comparison function
+    if type(x) == type(0.0) or type(y) == type(0.0):
+        try:
+            x, y = coerce(x, y)
+            fuzz = (abs(x) + abs(y)) * FUZZ
+            if abs(x-y) <= fuzz:
+                return 0
+        except:
+            pass
+    elif type(x) == type(y) and type(x) in (type(()), type([])):
+        for i in range(min(len(x), len(y))):
+            outcome = fcmp(x[i], y[i])
+            if outcome != 0:
+                return outcome
+        return cmp(len(x), len(y))
+    return cmp(x, y)
+
+try:
+    unicode
+    have_unicode = 1
+except NameError:
+    have_unicode = 0
+
+is_jython = sys.platform.startswith('java')
+
+import os
+# Filename used for testing
+if os.name == 'java':
+    # Jython disallows @ in module names
+    TESTFN = '$test'
+elif os.name != 'riscos':
+    TESTFN = '@test'
+    # Unicode name only used if TEST_FN_ENCODING exists for the platform.
+    if have_unicode:
+        TESTFN_UNICODE=unicode("@test-\xe0\xf2", "latin-1") # 2 latin characters.
+        if os.name=="nt":
+            TESTFN_ENCODING="mbcs"
+else:
+    TESTFN = 'test'
+
+# Make sure we can write to TESTFN, try in /tmp if we can't
+fp = None
+try:
+    fp = open(TESTFN, 'w+')
+except IOError:
+    TMP_TESTFN = os.path.join('/tmp', TESTFN)
+    try:
+        fp = open(TMP_TESTFN, 'w+')
+        TESTFN = TMP_TESTFN
+        del TMP_TESTFN
+    except IOError:
+        print ('WARNING: tests will fail, unable to write to: %s or %s' % 
+                (TESTFN, TMP_TESTFN))
+if fp is not None:
+    fp.close()
+    try:
+        os.unlink(TESTFN)
+    except:
+        pass
+del os, fp
+
+from os import unlink
+
+def findfile(file, here=__file__):
+    import os
+    if os.path.isabs(file):
+        return file
+    path = sys.path
+    path = [os.path.dirname(here)] + path
+    for dn in path:
+        fn = os.path.join(dn, file)
+        if os.path.exists(fn): return fn
+    return file
+
+def verify(condition, reason='test failed'):
+    """Verify that condition is true. If not, raise TestFailed.
+
+       The optional argument reason can be given to provide
+       a better error text.
+    """
+
+    if not condition:
+        raise TestFailed(reason)
+
+def vereq(a, b):
+    if not (a == b):
+        raise TestFailed, "%r == %r" % (a, b)
+
+def sortdict(dict):
+    "Like repr(dict), but in sorted order."
+    items = dict.items()
+    items.sort()
+    reprpairs = ["%r: %r" % pair for pair in items]
+    withcommas = ", ".join(reprpairs)
+    return "{%s}" % withcommas
+
+def check_syntax(statement):
+    try:
+        compile(statement, '<string>', 'exec')
+    except SyntaxError:
+        pass
+    else:
+        print 'Missing SyntaxError: "%s"' % statement
+
+
+
+#=======================================================================
+# Preliminary PyUNIT integration.
+
+import unittest
+
+
+class BasicTestRunner:
+    def run(self, test):
+        result = unittest.TestResult()
+        test(result)
+        return result
+
+
+def run_suite(suite, testclass=None):
+    """Run tests from a unittest.TestSuite-derived class."""
+    if verbose:
+        runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
+    else:
+        runner = BasicTestRunner()
+
+    result = runner.run(suite)
+    if not result.wasSuccessful():
+        if len(result.errors) == 1 and not result.failures:
+            err = result.errors[0][1]
+        elif len(result.failures) == 1 and not result.errors:
+            err = result.failures[0][1]
+        else:
+            if testclass is None:
+                msg = "errors occurred; run in verbose mode for details"
+            else:
+                msg = "errors occurred in %s.%s" \
+                      % (testclass.__module__, testclass.__name__)
+            raise TestFailed(msg)
+        raise TestFailed(err)
+
+
+def run_unittest(testclass):
+    """Run tests from a unittest.TestCase-derived class."""
+    run_suite(unittest.makeSuite(testclass), testclass)
+
+
+#=======================================================================
+# doctest driver.
+
+def run_doctest(module, verbosity=None):
+    """Run doctest on the given module.  Return (#failures, #tests).
+
+    If optional argument verbosity is not specified (or is None), pass
+    test_support's belief about verbosity on to doctest.  Else doctest's
+    usual behavior is used (it searches sys.argv for -v).
+    """
+
+    import doctest
+
+    if verbosity is None:
+        verbosity = verbose
+    else:
+        verbosity = None
+
+    # Direct doctest output (normally just errors) to real stdout; doctest
+    # output shouldn't be compared by regrtest.
+    save_stdout = sys.stdout
+    sys.stdout = get_original_stdout()
+    try:
+        f, t = doctest.testmod(module, verbose=verbosity)
+        if f:
+            raise TestFailed("%d of %d doctests failed" % (f, t))
+        return f, t
+    finally:
+        sys.stdout = save_stdout
diff --git a/lib-python/2.2/test/test_symtable.py b/lib-python/2.2/test/test_symtable.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_symtable.py
@@ -0,0 +1,8 @@
+from test_support import verify
+
+import _symtable
+
+symbols = _symtable.symtable("def f(x): return x", "?", "exec")
+
+verify(symbols[0].name == "global")
+verify(len([ste for ste in symbols.values() if ste.name == "f"]) == 1)
diff --git a/lib-python/2.2/test/test_tempfile.py b/lib-python/2.2/test/test_tempfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_tempfile.py
@@ -0,0 +1,10 @@
+# SF bug #476138: tempfile behavior across platforms
+# Ensure that a temp file can be closed any number of times without error.
+
+import tempfile
+
+f = tempfile.TemporaryFile("w+b")
+f.write('abc\n')
+f.close()
+f.close()
+f.close()
diff --git a/lib-python/2.2/test/test_thread.py b/lib-python/2.2/test/test_thread.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_thread.py
@@ -0,0 +1,117 @@
+# Very rudimentary test of thread module
+
+# Create a bunch of threads, let each do some work, wait until all are done
+
+from test_support import verbose
+import random
+import thread
+import time
+
+mutex = thread.allocate_lock()
+rmutex = thread.allocate_lock() # for calls to random
+running = 0
+done = thread.allocate_lock()
+done.acquire()
+
+numtasks = 10
+
+def task(ident):
+    global running
+    rmutex.acquire()
+    delay = random.random() * numtasks
+    rmutex.release()
+    if verbose:
+        print 'task', ident, 'will run for', round(delay, 1), 'sec'
+    time.sleep(delay)
+    if verbose:
+        print 'task', ident, 'done'
+    mutex.acquire()
+    running = running - 1
+    if running == 0:
+        done.release()
+    mutex.release()
+
+next_ident = 0
+def newtask():
+    global next_ident, running
+    mutex.acquire()
+    next_ident = next_ident + 1
+    if verbose:
+        print 'creating task', next_ident
+    thread.start_new_thread(task, (next_ident,))
+    running = running + 1
+    mutex.release()
+
+for i in range(numtasks):
+    newtask()
+
+print 'waiting for all tasks to complete'
+done.acquire()
+print 'all tasks done'
+
+class barrier:
+    def __init__(self, n):
+        self.n = n
+        self.waiting = 0
+        self.checkin  = thread.allocate_lock()
+        self.checkout = thread.allocate_lock()
+        self.checkout.acquire()
+
+    def enter(self):
+        checkin, checkout = self.checkin, self.checkout
+
+        checkin.acquire()
+        self.waiting = self.waiting + 1
+        if self.waiting == self.n:
+            self.waiting = self.n - 1
+            checkout.release()
+            return
+        checkin.release()
+
+        checkout.acquire()
+        self.waiting = self.waiting - 1
+        if self.waiting == 0:
+            checkin.release()
+            return
+        checkout.release()
+
+numtrips = 3
+def task2(ident):
+    global running
+    for i in range(numtrips):
+        if ident == 0:
+            # give it a good chance to enter the next
+            # barrier before the others are all out
+            # of the current one
+            delay = 0.001
+        else:
+            rmutex.acquire()
+            delay = random.random() * numtasks
+            rmutex.release()
+        if verbose:
+            print 'task', ident, 'will run for', round(delay, 1), 'sec'
+        time.sleep(delay)
+        if verbose:
+            print 'task', ident, 'entering barrier', i
+        bar.enter()
+        if verbose:
+            print 'task', ident, 'leaving barrier', i
+    mutex.acquire()
+    running -= 1
+    # Must release mutex before releasing done, else the main thread can
+    # exit and set mutex to None as part of global teardown; then
+    # mutex.release() raises AttributeError.
+    finished = running == 0
+    mutex.release()
+    if finished:
+        done.release()
+
+print '\n*** Barrier Test ***'
+if done.acquire(0):
+    raise ValueError, "'done' should have remained acquired"
+bar = barrier(numtasks)
+running = numtasks
+for i in range(numtasks):
+    thread.start_new_thread(task2, (i,))
+done.acquire()
+print 'all tasks done'
diff --git a/lib-python/2.2/test/test_threaded_import.py b/lib-python/2.2/test/test_threaded_import.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_threaded_import.py
@@ -0,0 +1,56 @@
+# This is a variant of the very old (early 90's) file
+# Demo/threads/bug.py.  It simply provokes a number of threads into
+# trying to import the same module "at the same time".
+# There are no pleasant failure modes -- most likely is that Python
+# complains several times about module random having no attribute
+# randrange, and then Python hangs.
+
+import thread
+from test_support import verbose, TestSkipped
+
+critical_section = thread.allocate_lock()
+done = thread.allocate_lock()
+
+def task():
+    global N, critical_section, done
+    import random
+    x = random.randrange(1, 3)
+    critical_section.acquire()
+    N -= 1
+    # Must release critical_section before releasing done, else the main
+    # thread can exit and set critical_section to None as part of global
+    # teardown; then critical_section.release() raises AttributeError.
+    finished = N == 0
+    critical_section.release()
+    if finished:
+        done.release()
+
+# Tricky:  When regrtest imports this module, the thread running regrtest
+# grabs the import lock and won't let go of it until this module returns.
+# All other threads attempting an import hang for the duration.  Since
+# this test spawns threads that do little *but* import, we can't do that
+# successfully until after this module finishes importing and regrtest
+# regains control.  To make this work, a special case was added to
+# regrtest to invoke a module's "test_main" function (if any) after
+# importing it.
+
+def test_main():        # magic name!  see above
+    global N, done
+
+    import imp
+    if imp.lock_held():
+        # This triggers on, e.g., from test import autotest.
+        raise TestSkipped("can't run when import lock is held")
+
+    done.acquire()
+    for N in (20, 50) * 3:
+        if verbose:
+            print "Trying", N, "threads ...",
+        for i in range(N):
+            thread.start_new_thread(task, ())
+        done.acquire()
+        if verbose:
+            print "OK."
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_threadedtempfile.py b/lib-python/2.2/test/test_threadedtempfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_threadedtempfile.py
@@ -0,0 +1,86 @@
+"""
+Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
+in each of NUM_THREADS threads, recording the number of successes and
+failures.  A failure is a bug in tempfile, and may be due to:
+
++ Trying to create more than one tempfile with the same name.
++ Trying to delete a tempfile that doesn't still exist.
++ Something we've never seen before.
+
+By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50.  This is enough to
+create about 150 failures per run under Win98SE in 2.0, and runs pretty
+quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
+provoking a 2.0 failure under Linux.  Run the test alone to boost either
+via cmdline switches:
+
+-f  FILES_PER_THREAD (int)
+-t  NUM_THREADS (int)
+"""
+
+NUM_THREADS = 20        # change w/ -t option
+FILES_PER_THREAD = 50   # change w/ -f option
+
+import thread # If this fails, we can't test this module
+import threading
+from test_support import TestFailed
+import StringIO
+from traceback import print_exc
+
+startEvent = threading.Event()
+
+import tempfile
+tempfile.gettempdir() # Do this now, to avoid spurious races later
+
+class TempFileGreedy(threading.Thread):
+    error_count = 0
+    ok_count = 0
+
+    def run(self):
+        self.errors = StringIO.StringIO()
+        startEvent.wait()
+        for i in range(FILES_PER_THREAD):
+            try:
+                f = tempfile.TemporaryFile("w+b")
+                f.close()
+            except:
+                self.error_count += 1
+                print_exc(file=self.errors)
+            else:
+                self.ok_count += 1
+
+def _test():
+    threads = []
+
+    print "Creating"
+    for i in range(NUM_THREADS):
+        t = TempFileGreedy()
+        threads.append(t)
+        t.start()
+
+    print "Starting"
+    startEvent.set()
+
+    print "Reaping"
+    ok = errors = 0
+    for t in threads:
+        t.join()
+        ok += t.ok_count
+        errors += t.error_count
+        if t.error_count:
+            print '%s errors:\n%s' % (t.getName(), t.errors.getvalue())
+
+    msg = "Done: errors %d ok %d" % (errors, ok)
+    print msg
+    if errors:
+        raise TestFailed(msg)
+
+if __name__ == "__main__":
+    import sys, getopt
+    opts, args = getopt.getopt(sys.argv[1:], "t:f:")
+    for o, v in opts:
+        if o == "-f":
+            FILES_PER_THREAD = int(v)
+        elif o == "-t":
+            NUM_THREADS = int(v)
+
+_test()
diff --git a/lib-python/2.2/test/test_threading.py b/lib-python/2.2/test/test_threading.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_threading.py
@@ -0,0 +1,55 @@
+# Very rudimentary test of threading module
+
+# Create a bunch of threads, let each do some work, wait until all are done
+
+from test_support import verbose
+import random
+import threading
+import time
+
+# This takes about n/3 seconds to run (about n/3 clumps of tasks, times
+# about 1 second per clump).
+numtasks = 10
+
+# no more than 3 of the 10 can run at once
+sema = threading.BoundedSemaphore(value=3)
+mutex = threading.RLock()
+running = 0
+
+class TestThread(threading.Thread):
+    def run(self):
+        global running
+        delay = random.random() * 2
+        if verbose:
+            print 'task', self.getName(), 'will run for', delay, 'sec'
+        sema.acquire()
+        mutex.acquire()
+        running = running + 1
+        if verbose:
+            print running, 'tasks are running'
+        mutex.release()
+        time.sleep(delay)
+        if verbose:
+            print 'task', self.getName(), 'done'
+        mutex.acquire()
+        running = running - 1
+        if verbose:
+            print self.getName(), 'is finished.', running, 'tasks are running'
+        mutex.release()
+        sema.release()
+
+threads = []
+def starttasks():
+    for i in range(numtasks):
+        t = TestThread(name="<thread %d>"%i)
+        threads.append(t)
+        t.start()
+
+starttasks()
+
+if verbose:
+    print 'waiting for all tasks to complete'
+for t in threads:
+    t.join()
+if verbose:
+    print 'all tasks done'
diff --git a/lib-python/2.2/test/test_time.py b/lib-python/2.2/test/test_time.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_time.py
@@ -0,0 +1,50 @@
+import test_support
+import time
+import unittest
+
+
+class TimeTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.t = time.time()
+
+    def test_data_attributes(self):
+        time.altzone
+        time.daylight
+        time.timezone
+        time.tzname
+
+    def test_clock(self):
+        time.clock()
+
+    def test_conversions(self):
+        self.assert_(time.ctime(self.t)
+                     == time.asctime(time.localtime(self.t)))
+        self.assert_(long(time.mktime(time.localtime(self.t)))
+                     == long(self.t))
+
+    def test_sleep(self):
+        time.sleep(1.2)
+
+    def test_strftime(self):
+        tt = time.gmtime(self.t)
+        for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
+                          'j', 'm', 'M', 'p', 'S',
+                          'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
+            format = ' %' + directive
+            try:
+                time.strftime(format, tt)
+            except ValueError:
+                self.fail('conversion specifier: %r failed.' % format)
+
+    def test_asctime(self):
+        time.asctime(time.gmtime(self.t))
+        self.assertRaises(TypeError, time.asctime, 0)
+
+
+def test_main():
+    test_support.run_unittest(TimeTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_timing.py b/lib-python/2.2/test/test_timing.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_timing.py
@@ -0,0 +1,21 @@
+from test_support import verbose
+import timing
+
+r = range(100000)
+if verbose:
+    print 'starting...'
+timing.start()
+for i in r:
+    pass
+timing.finish()
+if verbose:
+    print 'finished'
+
+secs = timing.seconds()
+milli = timing.milli()
+micro = timing.micro()
+
+if verbose:
+    print 'seconds:', secs
+    print 'milli  :', milli
+    print 'micro  :', micro
diff --git a/lib-python/2.2/test/test_tokenize.py b/lib-python/2.2/test/test_tokenize.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_tokenize.py
@@ -0,0 +1,9 @@
+from test_support import verbose, findfile
+import tokenize, os, sys
+
+if verbose:
+    print 'starting...'
+file = open(findfile('tokenize_tests'+os.extsep+'py'))
+tokenize.tokenize(file.readline)
+if verbose:
+    print 'finished'
diff --git a/lib-python/2.2/test/test_trace.py b/lib-python/2.2/test/test_trace.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_trace.py
@@ -0,0 +1,219 @@
+# Testing the line trace facility.
+
+import test_support
+import unittest
+import sys
+import difflib
+
+if not __debug__:
+    raise test_support.TestSkipped, "tracing not supported under -O"
+
+# A very basic example.  If this fails, we're in deep trouble.
+def basic():
+    return 1
+
+basic.events = [(0, 'call'),
+                (0, 'line'),
+                (1, 'line'),
+                (1, 'return')]
+
+# Armin Rigo's failing example:
+def arigo_example():
+    x = 1
+    del x
+    while 0:
+        pass
+    x = 1
+
+arigo_example.events = [(0, 'call'),
+                        (0, 'line'),
+                        (1, 'line'),
+                        (2, 'line'),
+                        (3, 'line'),
+                        (3, 'line'),
+                        (5, 'line'),
+                        (5, 'return')]
+
+# check that lines consisting of just one instruction get traced:
+def one_instr_line():
+    x = 1
+    del x
+    x = 1
+
+one_instr_line.events = [(0, 'call'),
+                         (0, 'line'),
+                         (1, 'line'),
+                         (2, 'line'),
+                         (3, 'line'),
+                         (3, 'return')]
+
+def no_pop_tops():      # 0
+    x = 1               # 1
+    for a in range(2):  # 2
+        if a:           # 3
+            x = 1       # 4
+        else:           # 5
+            x = 1       # 6
+
+no_pop_tops.events = [(0, 'call'),
+                      (0, 'line'),
+                      (1, 'line'),
+                      (2, 'line'),
+                      (2, 'line'),
+                      (3, 'line'),
+                      (6, 'line'),
+                      (2, 'line'),
+                      (3, 'line'),
+                      (4, 'line'),
+                      (2, 'line'),
+                      (2, 'return')]
+
+def no_pop_blocks():
+    while 0:
+        bla
+    x = 1
+
+no_pop_blocks.events = [(0, 'call'),
+                        (0, 'line'),
+                        (1, 'line'),
+                        (1, 'line'),
+                        (3, 'line'),
+                        (3, 'return')]
+
+def called(): # line -3
+    x = 1
+
+def call():   # line 0
+    called()
+
+call.events = [(0, 'call'),
+               (0, 'line'),
+               (1, 'line'),
+               (-3, 'call'),
+               (-3, 'line'),
+               (-2, 'line'),
+               (-2, 'return'),
+               (1, 'return')]
+
+def raises():
+    raise Exception
+
+def test_raise():
+    try:
+        raises()
+    except Exception, exc:
+        x = 1
+
+test_raise.events = [(0, 'call'),
+                     (0, 'line'),
+                     (1, 'line'),
+                     (2, 'line'),
+                     (-3, 'call'),
+                     (-3, 'line'),
+                     (-2, 'line'),
+                     (-2, 'exception'),
+                     (2, 'exception'),
+                     (3, 'line'),
+                     (4, 'line'),
+                     (4, 'return')]
+
+def _settrace_and_return(tracefunc):
+    sys.settrace(tracefunc)
+    sys._getframe().f_back.f_trace = tracefunc
+def settrace_and_return(tracefunc):
+    _settrace_and_return(tracefunc)
+
+settrace_and_return.events = [(1, 'return')]
+
+def _settrace_and_raise(tracefunc):
+    sys.settrace(tracefunc)
+    sys._getframe().f_back.f_trace = tracefunc
+    raise RuntimeError
+def settrace_and_raise(tracefunc):
+    try:
+        _settrace_and_raise(tracefunc)
+    except RuntimeError, exc:
+        pass
+
+settrace_and_raise.events = [(2, 'exception'),
+                             (3, 'line'),
+                             (4, 'line'),
+                             (4, 'return')]
+
+class Tracer:
+    def __init__(self):
+        self.events = []
+    def trace(self, frame, event, arg):
+        self.events.append((frame.f_lineno, event))
+        return self.trace
+
+class TraceTestCase(unittest.TestCase):
+    def compare_events(self, line_offset, events, expected_events):
+        events = [(l - line_offset, e) for (l, e) in events]
+        if events != expected_events:
+            self.fail(
+                "events did not match expectation:\n" +
+                "\n".join(difflib.ndiff(map(str, expected_events),
+                                        map(str, events))))
+
+
+    def run_test(self, func):
+        tracer = Tracer()
+        sys.settrace(tracer.trace)
+        func()
+        sys.settrace(None)
+        self.compare_events(func.func_code.co_firstlineno,
+                            tracer.events, func.events)
+
+    def run_test2(self, func):
+        tracer = Tracer()
+        func(tracer.trace)
+        sys.settrace(None)
+        self.compare_events(func.func_code.co_firstlineno,
+                            tracer.events, func.events)
+
+    def test_1_basic(self):
+        self.run_test(basic)
+    def test_2_arigo(self):
+        self.run_test(arigo_example)
+    def test_3_one_instr(self):
+        self.run_test(one_instr_line)
+    def test_4_no_pop_blocks(self):
+        self.run_test(no_pop_blocks)
+    def test_5_no_pop_tops(self):
+        self.run_test(no_pop_tops)
+    def test_6_call(self):
+        self.run_test(call)
+    def test_7_raise(self):
+        self.run_test(test_raise)
+
+    def test_8_settrace_and_return(self):
+        self.run_test2(settrace_and_return)
+    def test_9_settrace_and_raise(self):
+        self.run_test2(settrace_and_raise)
+
+class RaisingTraceFuncTestCase(unittest.TestCase):
+    def test_it(self):
+        def tr(frame, event, arg):
+            raise ValueError # just something that isn't RuntimeError
+        def f():
+            return 1
+        try:
+            for i in xrange(sys.getrecursionlimit() + 1):
+                sys.settrace(tr)
+                try:
+                    f()
+                except ValueError:
+                    pass
+                else:
+                    self.fail("exception not thrown!")
+        except RuntimeError:
+            self.fail("recursion counter not reset")
+
+
+def test_main():
+    test_support.run_unittest(TraceTestCase)
+    test_support.run_unittest(RaisingTraceFuncTestCase)
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_traceback.py b/lib-python/2.2/test/test_traceback.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_traceback.py
@@ -0,0 +1,49 @@
+"""Test cases for traceback module"""
+
+import unittest
+from test_support import run_unittest, is_jython
+
+import traceback
+
+class TracebackCases(unittest.TestCase):
+    # For now, a very minimal set of tests.  I want to be sure that
+    # formatting of SyntaxErrors works based on changes for 2.1.
+
+    def get_exception_format(self, func, exc):
+        try:
+            func()
+        except exc, value:
+            return traceback.format_exception_only(exc, value)
+        else:
+            raise ValueError, "call did not raise exception"
+
+    def syntax_error_with_caret(self):
+        compile("def fact(x):\n\treturn x!\n", "?", "exec")
+
+    def syntax_error_without_caret(self):
+        # XXX why doesn't compile raise the same traceback?
+        import badsyntax_nocaret
+
+    def test_caret(self):
+        err = self.get_exception_format(self.syntax_error_with_caret,
+                                        SyntaxError)
+        self.assert_(len(err) == 4)
+        self.assert_("^" in err[2]) # third line has caret
+        self.assert_(err[1].strip() == "return x!")
+
+    def test_nocaret(self):
+        if is_jython:
+            # jython adds a caret in this case (why shouldn't it?)
+            return
+        err = self.get_exception_format(self.syntax_error_without_caret,
+                                        SyntaxError)
+        self.assert_(len(err) == 3)
+        self.assert_(err[1].strip() == "[x for x in x] = x")
+
+
+def test_main():
+    run_unittest(TracebackCases)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_types.py b/lib-python/2.2/test/test_types.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_types.py
@@ -0,0 +1,428 @@
+# Python test set -- part 6, built-in types
+
+from test_support import *
+
+print '6. Built-in types'
+
+print '6.1 Truth value testing'
+if None: raise TestFailed, 'None is true instead of false'
+if 0: raise TestFailed, '0 is true instead of false'
+if 0L: raise TestFailed, '0L is true instead of false'
+if 0.0: raise TestFailed, '0.0 is true instead of false'
+if '': raise TestFailed, '\'\' is true instead of false'
+if (): raise TestFailed, '() is true instead of false'
+if []: raise TestFailed, '[] is true instead of false'
+if {}: raise TestFailed, '{} is true instead of false'
+if not 1: raise TestFailed, '1 is false instead of true'
+if not 1L: raise TestFailed, '1L is false instead of true'
+if not 1.0: raise TestFailed, '1.0 is false instead of true'
+if not 'x': raise TestFailed, '\'x\' is false instead of true'
+if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
+if not [1]: raise TestFailed, '[1] is false instead of true'
+if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
+def f(): pass
+class C: pass
+import sys
+x = C()
+if not f: raise TestFailed, 'f is false instead of true'
+if not C: raise TestFailed, 'C is false instead of true'
+if not sys: raise TestFailed, 'sys is false instead of true'
+if not x: raise TestFailed, 'x is false instead of true'
+
+print '6.2 Boolean operations'
+if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
+if 1 and 1: pass
+else: raise TestFailed, '1 and 1 is false instead of false'
+if not 1: raise TestFailed, 'not 1 is true instead of false'
+
+print '6.3 Comparisons'
+if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
+else: raise TestFailed, 'int comparisons failed'
+if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
+else: raise TestFailed, 'long int comparisons failed'
+if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
+else: raise TestFailed, 'float comparisons failed'
+if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
+else: raise TestFailed, 'string comparisons failed'
+if 0 in [0] and 0 not in [1]: pass
+else: raise TestFailed, 'membership test failed'
+if None is None and [] is not []: pass
+else: raise TestFailed, 'identity test failed'
+
+print '6.4 Numeric types (mostly conversions)'
+if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
+if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
+if -1 != -1L or -1 != -1.0 or -1L != -1.0:
+    raise TestFailed, 'int/long/float value not equal'
+if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
+else: raise TestFailed, 'int() does not round properly'
+if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
+else: raise TestFailed, 'long() does not round properly'
+if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
+else: raise TestFailed, 'float() does not work properly'
+print '6.4.1 32-bit integers'
+if 12 + 24 != 36: raise TestFailed, 'int op'
+if 12 + (-24) != -12: raise TestFailed, 'int op'
+if (-12) + 24 != 12: raise TestFailed, 'int op'
+if (-12) + (-24) != -36: raise TestFailed, 'int op'
+if not 12 < 24: raise TestFailed, 'int op'
+if not -24 < -12: raise TestFailed, 'int op'
+# Test for a particular bug in integer multiply
+xsize, ysize, zsize = 238, 356, 4
+if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
+    raise TestFailed, 'int mul commutativity'
+# And another.
+m = -sys.maxint - 1
+for divisor in 1, 2, 4, 8, 16, 32:
+    j = m // divisor
+    prod = divisor * j
+    if prod != m:
+        raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
+    if type(prod) is not int:
+        raise TestFailed, ("expected type(prod) to be int, not %r" %
+                           type(prod))
+# Check for expected * overflow to long.
+for divisor in 1, 2, 4, 8, 16, 32:
+    j = m // divisor - 1
+    prod = divisor * j
+    if type(prod) is not long:
+        raise TestFailed, ("expected type(%r) to be long, not %r" %
+                           (prod, type(prod)))
+# Check for expected * overflow to long.
+m = sys.maxint
+for divisor in 1, 2, 4, 8, 16, 32:
+    j = m // divisor + 1
+    prod = divisor * j
+    if type(prod) is not long:
+        raise TestFailed, ("expected type(%r) to be long, not %r" %
+                           (prod, type(prod)))
+
+print '6.4.2 Long integers'
+if 12L + 24L != 36L: raise TestFailed, 'long op'
+if 12L + (-24L) != -12L: raise TestFailed, 'long op'
+if (-12L) + 24L != 12L: raise TestFailed, 'long op'
+if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
+if not 12L < 24L: raise TestFailed, 'long op'
+if not -24L < -12L: raise TestFailed, 'long op'
+x = sys.maxint
+if int(long(x)) != x: raise TestFailed, 'long op'
+try: int(long(x)+1L)
+except OverflowError: pass
+else:raise TestFailed, 'long op'
+x = -x
+if int(long(x)) != x: raise TestFailed, 'long op'
+x = x-1
+if int(long(x)) != x: raise TestFailed, 'long op'
+try: int(long(x)-1L)
+except OverflowError: pass
+else:raise TestFailed, 'long op'
+print '6.4.3 Floating point numbers'
+if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
+if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
+if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
+if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
+if not 12.0 < 24.0: raise TestFailed, 'float op'
+if not -24.0 < -12.0: raise TestFailed, 'float op'
+
+print '6.5 Sequence types'
+
+print '6.5.1 Strings'
+if len('') != 0: raise TestFailed, 'len(\'\')'
+if len('a') != 1: raise TestFailed, 'len(\'a\')'
+if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
+if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
+if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
+if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
+if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
+if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
+else: raise TestFailed, 'in/not in string'
+x = 'x'*103
+if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
+
+print '6.5.2 Tuples'
+if len(()) != 0: raise TestFailed, 'len(())'
+if len((1,)) != 1: raise TestFailed, 'len((1,))'
+if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
+if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
+if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
+if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
+if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
+if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
+else: raise TestFailed, 'in/not in tuple'
+
+print '6.5.3 Lists'
+if len([]) != 0: raise TestFailed, 'len([])'
+if len([1,]) != 1: raise TestFailed, 'len([1,])'
+if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
+if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
+if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
+if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
+if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
+if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
+if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
+if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
+else: raise TestFailed, 'in/not in list'
+a = [1, 2, 3, 4, 5]
+a[:-1] = a
+if a != [1, 2, 3, 4, 5, 5]:
+    raise TestFailed, "list self-slice-assign (head)"
+a = [1, 2, 3, 4, 5]
+a[1:] = a
+if a != [1, 1, 2, 3, 4, 5]:
+    raise TestFailed, "list self-slice-assign (tail)"
+a = [1, 2, 3, 4, 5]
+a[1:-1] = a
+if a != [1, 1, 2, 3, 4, 5, 5]:
+    raise TestFailed, "list self-slice-assign (center)"
+
+
+print '6.5.3a Additional list operations'
+a = [0,1,2,3,4]
+a[0L] = 1
+a[1L] = 2
+a[2L] = 3
+if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
+a[0] = 5
+a[1] = 6
+a[2] = 7
+if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
+a[-2L] = 88
+a[-1L] = 99
+if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
+a[-2] = 8
+a[-1] = 9
+if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
+a[:2] = [0,4]
+a[-3:] = []
+a[1:1] = [1,2,3]
+if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
+a[ 1L : 4L] = [7,8,9]
+if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
+del a[1:4]
+if a != [0,4]: raise TestFailed, 'list slice deletion'
+del a[0]
+if a != [4]: raise TestFailed, 'list item deletion [0]'
+del a[-1]
+if a != []: raise TestFailed, 'list item deletion [-1]'
+a=range(0,5)
+del a[1L:4L]
+if a != [0,4]: raise TestFailed, 'list slice deletion'
+del a[0L]
+if a != [4]: raise TestFailed, 'list item deletion [0]'
+del a[-1L]
+if a != []: raise TestFailed, 'list item deletion [-1]'
+a.append(0)
+a.append(1)
+a.append(2)
+if a != [0,1,2]: raise TestFailed, 'list append'
+a.insert(0, -2)
+a.insert(1, -1)
+a.insert(2,0)
+if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
+if a.count(0) != 2: raise TestFailed, ' list count'
+if a.index(0) != 2: raise TestFailed, 'list index'
+a.remove(0)
+if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
+a.reverse()
+if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
+a.sort()
+if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
+def revcmp(a, b): return cmp(b, a)
+a.sort(revcmp)
+if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
+# The following dumps core in unpatched Python 1.5:
+def myComparison(x,y):
+    return cmp(x%3, y%7)
+z = range(12)
+z.sort(myComparison)
+
+# Test extreme cases with long ints
+a = [0,1,2,3,4]
+if a[ -pow(2,128L): 3 ] != [0,1,2]:
+    raise TestFailed, "list slicing with too-small long integer"
+if a[ 3: pow(2,145L) ] != [3,4]:
+    raise TestFailed, "list slicing with too-large long integer"
+
+print '6.6 Mappings == Dictionaries'
+d = {}
+if d.keys() != []: raise TestFailed, '{}.keys()'
+if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
+if ('a' in d) != 0: raise TestFailed, "'a' in {}"
+if ('a' not in d) != 1: raise TestFailed, "'a' not in {}"
+if len(d) != 0: raise TestFailed, 'len({})'
+d = {'a': 1, 'b': 2}
+if len(d) != 2: raise TestFailed, 'len(dict)'
+k = d.keys()
+k.sort()
+if k != ['a', 'b']: raise TestFailed, 'dict keys()'
+if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
+else: raise TestFailed, 'dict keys()'
+if 'a' in d and 'b' in d and 'c' not in d: pass
+else: raise TestFailed, 'dict keys() # in/not in version'
+if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
+d['c'] = 3
+d['a'] = 4
+if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
+del d['b']
+if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
+# dict.clear()
+d = {1:1, 2:2, 3:3}
+d.clear()
+if d != {}: raise TestFailed, 'dict clear'
+# dict.update()
+d.update({1:100})
+d.update({2:20})
+d.update({1:1, 2:2, 3:3})
+if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
+d.clear()
+try: d.update(None)
+except AttributeError: pass
+else: raise TestFailed, 'dict.update(None), AttributeError expected'
+class SimpleUserDict:
+    def __init__(self):
+        self.d = {1:1, 2:2, 3:3}
+    def keys(self):
+        return self.d.keys()
+    def __getitem__(self, i):
+        return self.d[i]
+d.update(SimpleUserDict())
+if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict.update(instance)'
+d.clear()
+class FailingUserDict:
+    def keys(self):
+        raise ValueError
+try: d.update(FailingUserDict())
+except ValueError: pass
+else: raise TestFailed, 'dict.keys() expected ValueError'
+class FailingUserDict:
+    def keys(self):
+        class BogonIter:
+            def __iter__(self):
+                raise ValueError
+        return BogonIter()
+try: d.update(FailingUserDict())
+except ValueError: pass
+else: raise TestFailed, 'iter(dict.keys()) expected ValueError'
+class FailingUserDict:
+    def keys(self):
+        class BogonIter:
+            def __init__(self):
+                self.i = 1
+            def __iter__(self):
+                return self
+            def next(self):
+                if self.i:
+                    self.i = 0
+                    return 'a'
+                raise ValueError
+        return BogonIter()
+    def __getitem__(self, key):
+        return key
+try: d.update(FailingUserDict())
+except ValueError: pass
+else: raise TestFailed, 'iter(dict.keys()).next() expected ValueError'
+class FailingUserDict:
+    def keys(self):
+        class BogonIter:
+            def __init__(self):
+                self.i = ord('a')
+            def __iter__(self):
+                return self
+            def next(self):
+                if self.i <= ord('z'):
+                    rtn = chr(self.i)
+                    self.i += 1
+                    return rtn
+                raise StopIteration
+        return BogonIter()
+    def __getitem__(self, key):
+        raise ValueError
+try: d.update(FailingUserDict())
+except ValueError: pass
+else: raise TestFailed, 'dict.update(), __getitem__ expected ValueError'
+# dict.copy()
+d = {1:1, 2:2, 3:3}
+if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy'
+if {}.copy() != {}: raise TestFailed, 'empty dict copy'
+# dict.get()
+d = {}
+if d.get('c') is not None: raise TestFailed, 'missing {} get, no 2nd arg'
+if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg'
+d = {'a' : 1, 'b' : 2}
+if d.get('c') is not None: raise TestFailed, 'missing dict get, no 2nd arg'
+if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg'
+if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg'
+if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg'
+# dict.setdefault()
+d = {}
+if d.setdefault('key0') is not None:
+    raise TestFailed, 'missing {} setdefault, no 2nd arg'
+if d.setdefault('key0') is not None:
+    raise TestFailed, 'present {} setdefault, no 2nd arg'
+d.setdefault('key', []).append(3)
+if d['key'][0] != 3:
+    raise TestFailed, 'missing {} setdefault, w/ 2nd arg'
+d.setdefault('key', []).append(4)
+if len(d['key']) != 2:
+    raise TestFailed, 'present {} setdefault, w/ 2nd arg'
+# dict.popitem()
+for copymode in -1, +1:
+    # -1: b has same structure as a
+    # +1: b is a.copy()
+    for log2size in range(12):
+        size = 2**log2size
+        a = {}
+        b = {}
+        for i in range(size):
+            a[`i`] = i
+            if copymode < 0:
+                b[`i`] = i
+        if copymode > 0:
+            b = a.copy()
+        for i in range(size):
+            ka, va = ta = a.popitem()
+            if va != int(ka): raise TestFailed, "a.popitem: %s" % str(ta)
+            kb, vb = tb = b.popitem()
+            if vb != int(kb): raise TestFailed, "b.popitem: %s" % str(tb)
+            if copymode < 0 and ta != tb:
+                raise TestFailed, "a.popitem != b.popitem: %s, %s" % (
+                    str(ta), str(tb))
+        if a: raise TestFailed, 'a not empty after popitems: %s' % str(a)
+        if b: raise TestFailed, 'b not empty after popitems: %s' % str(b)
+
+try: type(1, 2)
+except TypeError: pass
+else: raise TestFailed, 'type(), w/2 args expected TypeError'
+
+try: type(1, 2, 3, 4)
+except TypeError: pass
+else: raise TestFailed, 'type(), w/4 args expected TypeError'
+
+print '6.7 Buffers'
+try: buffer('asdf', -1)
+except ValueError: pass
+else: raise TestFailed, "buffer('asdf', -1) should raise ValueError"
+
+try: buffer(None)
+except TypeError: pass
+else: raise TestFailed, "buffer(None) should raise TypeError"
+
+a = buffer('asdf')
+hash(a)
+b = a * 5
+if a == b:
+    raise TestFailed, 'buffers should not be equal'
+if str(b) != ('asdf' * 5):
+    raise TestFailed, 'repeated buffer has wrong content'
+if str(a * 0) != '':
+    raise TestFailed, 'repeated buffer zero times has wrong content'
+if str(a + buffer('def')) != 'asdfdef':
+    raise TestFailed, 'concatenation of buffers yields wrong content'
+
+try: a[1] = 'g'
+except TypeError: pass
+else: raise TestFailed, "buffer assignment should raise TypeError"
+
+try: a[0:1] = 'g'
+except TypeError: pass
+else: raise TestFailed, "buffer slice assignment should raise TypeError"
diff --git a/lib-python/2.2/test/test_ucn.py b/lib-python/2.2/test/test_ucn.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_ucn.py
@@ -0,0 +1,113 @@
+""" Test script for the Unicode implementation.
+
+Written by Bill Tutt.
+Modified for Python 2.0 by Fredrik Lundh (fredrik@pythonware.com)
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+from test_support import verify, verbose
+
+print 'Testing General Unicode Character Name, and case insensitivity...',
+
+# General and case insensitivity test:
+try:
+    # put all \N escapes inside exec'd raw strings, to make sure this
+    # script runs even if the compiler chokes on \N escapes
+    exec r"""
+s = u"\N{LATIN CAPITAL LETTER T}" \
+    u"\N{LATIN SMALL LETTER H}" \
+    u"\N{LATIN SMALL LETTER E}" \
+    u"\N{SPACE}" \
+    u"\N{LATIN SMALL LETTER R}" \
+    u"\N{LATIN CAPITAL LETTER E}" \
+    u"\N{LATIN SMALL LETTER D}" \
+    u"\N{SPACE}" \
+    u"\N{LATIN SMALL LETTER f}" \
+    u"\N{LATIN CAPITAL LeTtEr o}" \
+    u"\N{LATIN SMaLl LETTER x}" \
+    u"\N{SPACE}" \
+    u"\N{LATIN SMALL LETTER A}" \
+    u"\N{LATIN SMALL LETTER T}" \
+    u"\N{LATIN SMALL LETTER E}" \
+    u"\N{SPACE}" \
+    u"\N{LATIN SMALL LETTER T}" \
+    u"\N{LATIN SMALL LETTER H}" \
+    u"\N{LATIN SMALL LETTER E}" \
+    u"\N{SpAcE}" \
+    u"\N{LATIN SMALL LETTER S}" \
+    u"\N{LATIN SMALL LETTER H}" \
+    u"\N{LATIN SMALL LETTER E}" \
+    u"\N{LATIN SMALL LETTER E}" \
+    u"\N{LATIN SMALL LETTER P}" \
+    u"\N{FULL STOP}"
+verify(s == u"The rEd fOx ate the sheep.", s)
+"""
+except UnicodeError, v:
+    print v
+print "done."
+
+import unicodedata
+
+print "Testing name to code mapping....",
+for char in "SPAM":
+    name = "LATIN SMALL LETTER %s" % char
+    code = unicodedata.lookup(name)
+    verify(unicodedata.name(code) == name)
+print "done."
+
+print "Testing code to name mapping for all characters....",
+count = 0
+for code in range(65536):
+    try:
+        char = unichr(code)
+        name = unicodedata.name(char)
+        verify(unicodedata.lookup(name) == char)
+        count += 1
+    except (KeyError, ValueError):
+        pass
+print "done."
+
+print "Found", count, "characters in the unicode name database"
+
+# misc. symbol testing
+print "Testing misc. symbols for unicode character name expansion....",
+exec r"""
+verify(u"\N{PILCROW SIGN}" == u"\u00b6")
+verify(u"\N{REPLACEMENT CHARACTER}" == u"\uFFFD")
+verify(u"\N{HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK}" == u"\uFF9F")
+verify(u"\N{FULLWIDTH LATIN SMALL LETTER A}" == u"\uFF41")
+"""
+print "done."
+
+# strict error testing:
+print "Testing unicode character name expansion strict error handling....",
+try:
+    unicode("\N{blah}", 'unicode-escape', 'strict')
+except UnicodeError:
+    pass
+else:
+    raise AssertionError, "failed to raise an exception when given a bogus character name"
+
+try:
+    unicode("\N{" + "x" * 100000 + "}", 'unicode-escape', 'strict')
+except UnicodeError:
+    pass
+else:
+    raise AssertionError, "failed to raise an exception when given a very " \
+                          "long bogus character name"
+
+try:
+    unicode("\N{SPACE", 'unicode-escape', 'strict')
+except UnicodeError:
+    pass
+else:
+    raise AssertionError, "failed to raise an exception for a missing closing brace."
+
+try:
+    unicode("\NSPACE", 'unicode-escape', 'strict')
+except UnicodeError:
+    pass
+else:
+    raise AssertionError, "failed to raise an exception for a missing opening brace."
+print "done."
diff --git a/lib-python/2.2/test/test_unary.py b/lib-python/2.2/test/test_unary.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_unary.py
@@ -0,0 +1,58 @@
+"""Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
+
+import unittest
+from test_support import run_unittest
+
+class UnaryOpTestCase(unittest.TestCase):
+
+    def test_negative(self):
+        self.assert_(-2 == 0 - 2)
+        self.assert_(-0 == 0)
+        self.assert_(--2 == 2)
+        self.assert_(-2L == 0 - 2L)
+        self.assert_(-2.0 == 0 - 2.0)
+        self.assert_(-2j == 0 - 2j)
+
+    def test_positive(self):
+        self.assert_(+2 == 2)
+        self.assert_(+0 == 0)
+        self.assert_(++2 == 2)
+        self.assert_(+2L == 2L)
+        self.assert_(+2.0 == 2.0)
+        self.assert_(+2j == 2j)
+
+    def test_invert(self):
+        self.assert_(-2 == 0 - 2)
+        self.assert_(-0 == 0)
+        self.assert_(--2 == 2)
+        self.assert_(-2L == 0 - 2L)
+
+    def test_no_overflow(self):
+        nines = "9" * 32
+        self.assert_(eval("+" + nines) == eval("+" + nines + "L"))
+        self.assert_(eval("-" + nines) == eval("-" + nines + "L"))
+        self.assert_(eval("~" + nines) == eval("~" + nines + "L"))
+
+    def test_negation_of_exponentiation(self):
+        # Make sure '**' does the right thing; these form a
+        # regression test for SourceForge bug #456756.
+        self.assertEqual(-2 ** 3, -8)
+        self.assertEqual((-2) ** 3, -8)
+        self.assertEqual(-2 ** 4, -16)
+        self.assertEqual((-2) ** 4, 16)
+
+    def test_bad_types(self):
+        for op in '+', '-', '~':
+            self.assertRaises(TypeError, eval, op + "'a'")
+            self.assertRaises(TypeError, eval, op + "u'a'")
+
+        self.assertRaises(TypeError, eval, "~2j")
+        self.assertRaises(TypeError, eval, "~2.0")
+
+
+def test_main():
+    run_unittest(UnaryOpTestCase)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/2.2/test/test_unicode.py b/lib-python/2.2/test/test_unicode.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_unicode.py
@@ -0,0 +1,782 @@
+""" Test script for the Unicode implementation.
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+from test_support import verify, verbose, TestFailed
+import sys, string
+
+if not sys.platform.startswith('java'):
+    # Test basic sanity of repr()
+    verify(repr(u'abc') == "u'abc'")
+    verify(repr(u'ab\\c') == "u'ab\\\\c'")
+    verify(repr(u'ab\\') == "u'ab\\\\'")
+    verify(repr(u'\\c') == "u'\\\\c'")
+    verify(repr(u'\\') == "u'\\\\'")
+    verify(repr(u'\n') == "u'\\n'")
+    verify(repr(u'\r') == "u'\\r'")
+    verify(repr(u'\t') == "u'\\t'")
+    verify(repr(u'\b') == "u'\\x08'")
+    verify(repr(u"'\"") == """u'\\'"'""")
+    verify(repr(u"'\"") == """u'\\'"'""")
+    verify(repr(u"'") == '''u"'"''')
+    verify(repr(u'"') == """u'"'""")
+    verify(repr(u''.join(map(unichr, range(256)))) ==
+       "u'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
+       "\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
+       "\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
+       "JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
+       "\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
+       "\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
+       "\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
+       "\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
+       "\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
+       "\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
+       "\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
+       "\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
+       "\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
+       "\\xfe\\xff'")
+
+def test(method, input, output, *args):
+    if verbose:
+        print '%s.%s%s =? %s... ' % (repr(input), method, args, repr(output)),
+    try:
+        f = getattr(input, method)
+        value = apply(f, args)
+    except:
+        value = sys.exc_type
+        exc = sys.exc_info()[:2]
+    else:
+        exc = None
+    if value == output and type(value) is type(output):
+        # if the original is returned make sure that
+        # this doesn't happen with subclasses
+        if value is input:
+            class usub(unicode):
+                def __repr__(self):
+                    return 'usub(%r)' % unicode.__repr__(self)
+            input = usub(input)
+            try:
+                f = getattr(input, method)
+                value = apply(f, args)
+            except:
+                value = sys.exc_type
+                exc = sys.exc_info()[:2]
+            if value is input:
+                if verbose:
+                    print 'no'
+                print '*',f, `input`, `output`, `value`
+                return
+    if value != output or type(value) is not type(output):
+        if verbose:
+            print 'no'
+        print '*',f, `input`, `output`, `value`
+        if exc:
+            print '  value == %s: %s' % (exc)
+    else:
+        if verbose:
+            print 'yes'
+
+test('capitalize', u' hello ', u' hello ')
+test('capitalize', u'hello ', u'Hello ')
+test('capitalize', u'aaaa', u'Aaaa')
+test('capitalize', u'AaAa', u'Aaaa')
+
+test('count', u'aaa', 3, u'a')
+test('count', u'aaa', 0, u'b')
+test('count', 'aaa', 3, u'a')
+test('count', 'aaa', 0, u'b')
+test('count', u'aaa', 3, 'a')
+test('count', u'aaa', 0, 'b')
+
+test('title', u' hello ', u' Hello ')
+test('title', u'hello ', u'Hello ')
+test('title', u"fOrMaT thIs aS titLe String", u'Format This As Title String')
+test('title', u"fOrMaT,thIs-aS*titLe;String", u'Format,This-As*Title;String')
+test('title', u"getInt", u'Getint')
+
+test('find', u'abcdefghiabc', 0, u'abc')
+test('find', u'abcdefghiabc', 9, u'abc', 1)
+test('find', u'abcdefghiabc', -1, u'def', 4)
+
+test('rfind', u'abcdefghiabc', 9, u'abc')
+test('rfind', 'abcdefghiabc', 9, u'abc')
+test('rfind', 'abcdefghiabc', 12, u'')
+test('rfind', u'abcdefghiabc', 12, '')
+test('rfind', u'abcdefghiabc', 12, u'')
+
+test('lower', u'HeLLo', u'hello')
+test('lower', u'hello', u'hello')
+
+test('upper', u'HeLLo', u'HELLO')
+test('upper', u'HELLO', u'HELLO')
+
+if 0:
+    transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
+
+    test('maketrans', u'abc', transtable, u'xyz')
+    test('maketrans', u'abc', ValueError, u'xyzq')
+
+test('split', u'this is the split function',
+     [u'this', u'is', u'the', u'split', u'function'])
+test('split', u'a|b|c|d', [u'a', u'b', u'c', u'd'], u'|')
+test('split', u'a|b|c|d', [u'a', u'b', u'c|d'], u'|', 2)
+test('split', u'a b c d', [u'a', u'b c d'], None, 1)
+test('split', u'a b c d', [u'a', u'b', u'c d'], None, 2)
+test('split', u'a b c d', [u'a', u'b', u'c', u'd'], None, 3)
+test('split', u'a b c d', [u'a', u'b', u'c', u'd'], None, 4)
+test('split', u'a b c d', [u'a b c d'], None, 0)
+test('split', u'a  b  c  d', [u'a', u'b', u'c  d'], None, 2)
+test('split', u'a b c d ', [u'a', u'b', u'c', u'd'])
+test('split', u'a//b//c//d', [u'a', u'b', u'c', u'd'], u'//')
+test('split', u'a//b//c//d', [u'a', u'b', u'c', u'd'], '//')
+test('split', 'a//b//c//d', [u'a', u'b', u'c', u'd'], u'//')
+test('split', u'endcase test', [u'endcase ', u''], u'test')
+test('split', u'endcase test', [u'endcase ', u''], 'test')
+test('split', 'endcase test', [u'endcase ', u''], u'test')
+
+
+# join now works with any sequence type
+class Sequence:
+    def __init__(self, seq): self.seq = seq
+    def __len__(self): return len(self.seq)
+    def __getitem__(self, i): return self.seq[i]
+
+test('join', u' ', u'a b c d', [u'a', u'b', u'c', u'd'])
+test('join', u' ', u'a b c d', ['a', 'b', u'c', u'd'])
+test('join', u'', u'abcd', (u'a', u'b', u'c', u'd'))
+test('join', u' ', u'w x y z', Sequence('wxyz'))
+test('join', u' ', TypeError, 7)
+test('join', u' ', TypeError, Sequence([7, u'hello', 123L]))
+test('join', ' ', u'a b c d', [u'a', u'b', u'c', u'd'])
+test('join', ' ', u'a b c d', ['a', 'b', u'c', u'd'])
+test('join', '', u'abcd', (u'a', u'b', u'c', u'd'))
+test('join', ' ', u'w x y z', Sequence(u'wxyz'))
+test('join', ' ', TypeError, 7)
+
+result = u''
+for i in range(10):
+    if i > 0:
+        result = result + u':'
+    result = result + u'x'*10
+test('join', u':', result, [u'x' * 10] * 10)
+test('join', u':', result, (u'x' * 10,) * 10)
+
+test('strip', u'   hello   ', u'hello')
+test('lstrip', u'   hello   ', u'hello   ')
+test('rstrip', u'   hello   ', u'   hello')
+test('strip', u'hello', u'hello')
+
+# strip/lstrip/rstrip with None arg
+test('strip', u'   hello   ', u'hello', None)
+test('lstrip', u'   hello   ', u'hello   ', None)
+test('rstrip', u'   hello   ', u'   hello', None)
+test('strip', u'hello', u'hello', None)
+
+# strip/lstrip/rstrip with unicode arg
+test('strip', u'xyzzyhelloxyzzy', u'hello', u'xyz')
+test('lstrip', u'xyzzyhelloxyzzy', u'helloxyzzy', u'xyz')
+test('rstrip', u'xyzzyhelloxyzzy', u'xyzzyhello', u'xyz')
+test('strip', u'hello', u'hello', u'xyz')
+
+# strip/lstrip/rstrip with str arg
+test('strip', u'xyzzyhelloxyzzy', u'hello', 'xyz')
+test('lstrip', u'xyzzyhelloxyzzy', u'helloxyzzy', 'xyz')
+test('rstrip', u'xyzzyhelloxyzzy', u'xyzzyhello', 'xyz')
+test('strip', u'hello', u'hello', 'xyz')
+
+test('swapcase', u'HeLLo cOmpUteRs', u'hEllO CoMPuTErS')
+
+if 0:
+    test('translate', u'xyzabcdef', u'xyzxyz', transtable, u'def')
+
+    table = string.maketrans('a', u'A')
+    test('translate', u'abc', u'Abc', table)
+    test('translate', u'xyz', u'xyz', table)
+
+test('replace', u'one!two!three!', u'one@two!three!', u'!', u'@', 1)
+test('replace', u'one!two!three!', u'onetwothree', '!', '')
+test('replace', u'one!two!three!', u'one@two@three!', u'!', u'@', 2)
+test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@', 3)
+test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@', 4)
+test('replace', u'one!two!three!', u'one!two!three!', u'!', u'@', 0)
+test('replace', u'one!two!three!', u'one@two@three@', u'!', u'@')
+test('replace', u'one!two!three!', u'one!two!three!', u'x', u'@')
+test('replace', u'one!two!three!', u'one!two!three!', u'x', u'@', 2)
+test('replace', u'abc', u'abc', u'ab', u'--', 0)
+test('replace', u'abc', u'abc', u'xy', u'--')
+
+test('startswith', u'hello', 1, u'he')
+test('startswith', u'hello', 1, u'hello')
+test('startswith', u'hello', 0, u'hello world')
+test('startswith', u'hello', 1, u'')
+test('startswith', u'hello', 0, u'ello')
+test('startswith', u'hello', 1, u'ello', 1)
+test('startswith', u'hello', 1, u'o', 4)
+test('startswith', u'hello', 0, u'o', 5)
+test('startswith', u'hello', 1, u'', 5)
+test('startswith', u'hello', 0, u'lo', 6)
+test('startswith', u'helloworld', 1, u'lowo', 3)
+test('startswith', u'helloworld', 1, u'lowo', 3, 7)
+test('startswith', u'helloworld', 0, u'lowo', 3, 6)
+
+test('endswith', u'hello', 1, u'lo')
+test('endswith', u'hello', 0, u'he')
+test('endswith', u'hello', 1, u'')
+test('endswith', u'hello', 0, u'hello world')
+test('endswith', u'helloworld', 0, u'worl')
+test('endswith', u'helloworld', 1, u'worl', 3, 9)
+test('endswith', u'helloworld', 1, u'world', 3, 12)
+test('endswith', u'helloworld', 1, u'lowo', 1, 7)
+test('endswith', u'helloworld', 1, u'lowo', 2, 7)
+test('endswith', u'helloworld', 1, u'lowo', 3, 7)
+test('endswith', u'helloworld', 0, u'lowo', 4, 7)
+test('endswith', u'helloworld', 0, u'lowo', 3, 8)
+test('endswith', u'ab', 0, u'ab', 0, 1)
+test('endswith', u'ab', 0, u'ab', 0, 0)
+test('endswith', 'helloworld', 1, u'd')
+test('endswith', 'helloworld', 0, u'l')
+
+test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab      def\ng       hi')
+test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab      def\ng       hi', 8)
+test('expandtabs', u'abc\rab\tdef\ng\thi', u'abc\rab  def\ng   hi', 4)
+test('expandtabs', u'abc\r\nab\tdef\ng\thi', u'abc\r\nab  def\ng   hi', 4)
+
+if 0:
+    test('capwords', u'abc def ghi', u'Abc Def Ghi')
+    test('capwords', u'abc\tdef\nghi', u'Abc Def Ghi')
+    test('capwords', u'abc\t   def  \nghi', u'Abc Def Ghi')
+
+test('zfill', u'123', u'123', 2)
+test('zfill', u'123', u'123', 3)
+test('zfill', u'123', u'0123', 4)
+test('zfill', u'+123', u'+123', 3)
+test('zfill', u'+123', u'+123', 4)
+test('zfill', u'+123', u'+0123', 5)
+test('zfill', u'-123', u'-123', 3)
+test('zfill', u'-123', u'-123', 4)
+test('zfill', u'-123', u'-0123', 5)
+test('zfill', u'', u'000', 3)
+test('zfill', u'34', u'34', 1)
+test('zfill', u'34', u'00034', 5)
+
+# Comparisons:
+print 'Testing Unicode comparisons...',
+verify(u'abc' == 'abc')
+verify('abc' == u'abc')
+verify(u'abc' == u'abc')
+verify(u'abcd' > 'abc')
+verify('abcd' > u'abc')
+verify(u'abcd' > u'abc')
+verify(u'abc' < 'abcd')
+verify('abc' < u'abcd')
+verify(u'abc' < u'abcd')
+print 'done.'
+
+if 0:
+    # Move these tests to a Unicode collation module test...
+
+    print 'Testing UTF-16 code point order comparisons...',
+    #No surrogates, no fixup required.
+    verify(u'\u0061' < u'\u20ac')
+    # Non surrogate below surrogate value, no fixup required
+    verify(u'\u0061' < u'\ud800\udc02')
+
+    # Non surrogate above surrogate value, fixup required
+    def test_lecmp(s, s2):
+        verify(s <  s2 , "comparison failed on %s < %s" % (s, s2))
+
+    def test_fixup(s):
+        s2 = u'\ud800\udc01'
+        test_lecmp(s, s2)
+        s2 = u'\ud900\udc01'
+        test_lecmp(s, s2)
+        s2 = u'\uda00\udc01'
+        test_lecmp(s, s2)
+        s2 = u'\udb00\udc01'
+        test_lecmp(s, s2)
+        s2 = u'\ud800\udd01'
+        test_lecmp(s, s2)
+        s2 = u'\ud900\udd01'
+        test_lecmp(s, s2)
+        s2 = u'\uda00\udd01'
+        test_lecmp(s, s2)
+        s2 = u'\udb00\udd01'
+        test_lecmp(s, s2)
+        s2 = u'\ud800\ude01'
+        test_lecmp(s, s2)
+        s2 = u'\ud900\ude01'
+        test_lecmp(s, s2)
+        s2 = u'\uda00\ude01'
+        test_lecmp(s, s2)
+        s2 = u'\udb00\ude01'
+        test_lecmp(s, s2)
+        s2 = u'\ud800\udfff'
+        test_lecmp(s, s2)
+        s2 = u'\ud900\udfff'
+        test_lecmp(s, s2)
+        s2 = u'\uda00\udfff'
+        test_lecmp(s, s2)
+        s2 = u'\udb00\udfff'
+        test_lecmp(s, s2)
+
+    test_fixup(u'\ue000')
+    test_fixup(u'\uff61')
+
+    # Surrogates on both sides, no fixup required
+    verify(u'\ud800\udc02' < u'\ud84d\udc56')
+    print 'done.'
+
+test('ljust', u'abc',  u'abc       ', 10)
+test('rjust', u'abc',  u'       abc', 10)
+test('center', u'abc', u'   abc    ', 10)
+test('ljust', u'abc',  u'abc   ', 6)
+test('rjust', u'abc',  u'   abc', 6)
+test('center', u'abc', u' abc  ', 6)
+test('ljust', u'abc', u'abc', 2)
+test('rjust', u'abc', u'abc', 2)
+test('center', u'abc', u'abc', 2)
+
+test('islower', u'a', 1)
+test('islower', u'A', 0)
+test('islower', u'\n', 0)
+test('islower', u'\u1FFc', 0)
+test('islower', u'abc', 1)
+test('islower', u'aBc', 0)
+test('islower', u'abc\n', 1)
+
+test('isupper', u'a', 0)
+test('isupper', u'A', 1)
+test('isupper', u'\n', 0)
+if sys.platform[:4] != 'java':
+    test('isupper', u'\u1FFc', 0)
+test('isupper', u'ABC', 1)
+test('isupper', u'AbC', 0)
+test('isupper', u'ABC\n', 1)
+
+test('istitle', u'a', 0)
+test('istitle', u'A', 1)
+test('istitle', u'\n', 0)
+test('istitle', u'\u1FFc', 1)
+test('istitle', u'A Titlecased Line', 1)
+test('istitle', u'A\nTitlecased Line', 1)
+test('istitle', u'A Titlecased, Line', 1)
+test('istitle', u'Greek \u1FFcitlecases ...', 1)
+test('istitle', u'Not a capitalized String', 0)
+test('istitle', u'Not\ta Titlecase String', 0)
+test('istitle', u'Not--a Titlecase String', 0)
+
+test('isalpha', u'a', 1)
+test('isalpha', u'A', 1)
+test('isalpha', u'\n', 0)
+test('isalpha', u'\u1FFc', 1)
+test('isalpha', u'abc', 1)
+test('isalpha', u'aBc123', 0)
+test('isalpha', u'abc\n', 0)
+
+test('isalnum', u'a', 1)
+test('isalnum', u'A', 1)
+test('isalnum', u'\n', 0)
+test('isalnum', u'123abc456', 1)
+test('isalnum', u'a1b3c', 1)
+test('isalnum', u'aBc000 ', 0)
+test('isalnum', u'abc\n', 0)
+
+test('splitlines', u"abc\ndef\n\rghi", [u'abc', u'def', u'', u'ghi'])
+test('splitlines', u"abc\ndef\n\r\nghi", [u'abc', u'def', u'', u'ghi'])
+test('splitlines', u"abc\ndef\r\nghi", [u'abc', u'def', u'ghi'])
+test('splitlines', u"abc\ndef\r\nghi\n", [u'abc', u'def', u'ghi'])
+test('splitlines', u"abc\ndef\r\nghi\n\r", [u'abc', u'def', u'ghi', u''])
+test('splitlines', u"\nabc\ndef\r\nghi\n\r", [u'', u'abc', u'def', u'ghi', u''])
+test('splitlines', u"\nabc\ndef\r\nghi\n\r", [u'\n', u'abc\n', u'def\r\n', u'ghi\n', u'\r'], 1)
+
+test('translate', u"abababc", u'bbbc', {ord('a'):None})
+test('translate', u"abababc", u'iiic', {ord('a'):None, ord('b'):ord('i')})
+test('translate', u"abababc", u'iiix', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
+
+# Contains:
+print 'Testing Unicode contains method...',
+verify(('a' in u'abdb') == 1)
+verify(('a' in u'bdab') == 1)
+verify(('a' in u'bdaba') == 1)
+verify(('a' in u'bdba') == 1)
+verify(('a' in u'bdba') == 1)
+verify((u'a' in u'bdba') == 1)
+verify((u'a' in u'bdb') == 0)
+verify((u'a' in 'bdb') == 0)
+verify((u'a' in 'bdba') == 1)
+verify((u'a' in ('a',1,None)) == 1)
+verify((u'a' in (1,None,'a')) == 1)
+verify((u'a' in (1,None,u'a')) == 1)
+verify(('a' in ('a',1,None)) == 1)
+verify(('a' in (1,None,'a')) == 1)
+verify(('a' in (1,None,u'a')) == 1)
+verify(('a' in ('x',1,u'y')) == 0)
+verify(('a' in ('x',1,None)) == 0)
+try:
+    u'\xe2' in 'g\xe2teau'
+except UnicodeError:
+    pass
+else:
+    print '*** contains operator does not propagate UnicodeErrors'
+print 'done.'
+
+# Formatting:
+print 'Testing Unicode formatting strings...',
+verify(u"%s, %s" % (u"abc", "abc") == u'abc, abc')
+verify(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3) == u'abc, abc, 1, 2.000000,  3.00')
+verify(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3) == u'abc, abc, 1, -2.000000,  3.00')
+verify(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5) == u'abc, abc, -1, -2.000000,  3.50')
+verify(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57) == u'abc, abc, -1, -2.000000,  3.57')
+verify(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57) == u'abc, abc, -1, -2.000000, 1003.57')
+verify(u"%c" % (u"a",) == u'a')
+verify(u"%c" % ("a",) == u'a')
+verify(u"%c" % (34,) == u'"')
+verify(u"%c" % (36,) == u'$')
+if sys.platform[:4] != 'java':
+    value = u"%r, %r" % (u"abc", "abc")
+    if value != u"u'abc', 'abc'":
+        print '*** formatting failed for "%s"' % 'u"%r, %r" % (u"abc", "abc")'
+
+verify(u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"} == u'abc, def')
+try:
+    value = u"%(x)s, %(ä)s" % {'x':u"abc", u'ä':"def"}
+except KeyError:
+    print '*** formatting failed for "%s"' % "u'abc, def'"
+else:
+    verify(value == u'abc, def')
+
+for ordinal in (-100, 0x200000):
+    try:
+        u"%c" % ordinal
+    except ValueError:
+        pass
+    else:
+        print '*** formatting u"%%c" %% %i should give a ValueError' % ordinal
+
+# formatting jobs delegated from the string implementation:
+verify('...%(foo)s...' % {'foo':u"abc"} == u'...abc...')
+verify('...%(foo)s...' % {'foo':"abc"} == '...abc...')
+verify('...%(foo)s...' % {u'foo':"abc"} == '...abc...')
+verify('...%(foo)s...' % {u'foo':u"abc"} == u'...abc...')
+verify('...%(foo)s...' % {u'foo':u"abc",'def':123} ==  u'...abc...')
+verify('...%(foo)s...' % {u'foo':u"abc",u'def':123} == u'...abc...')
+verify('...%s...%s...%s...%s...' % (1,2,3,u"abc") == u'...1...2...3...abc...')
+verify('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc") == u'...%...%s...1...2...3...abc...')
+verify('...%s...' % u"abc" == u'...abc...')
+verify('%*s' % (5,u'abc',) == u'  abc')
+verify('%*s' % (-5,u'abc',) == u'abc  ')
+verify('%*.*s' % (5,2,u'abc',) == u'   ab')
+verify('%*.*s' % (5,3,u'abc',) == u'  abc')
+verify('%i %*.*s' % (10, 5,3,u'abc',) == u'10   abc')
+verify('%i%s %*.*s' % (10, 3, 5,3,u'abc',) == u'103   abc')
+print 'done.'
+
+print 'Testing builtin unicode()...',
+
+# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
+
+verify(unicode(u'unicode remains unicode') == u'unicode remains unicode')
+
+class UnicodeSubclass(unicode):
+    pass
+
+verify(unicode(UnicodeSubclass('unicode subclass becomes unicode'))
+       == u'unicode subclass becomes unicode')
+
+verify(unicode('strings are converted to unicode')
+       == u'strings are converted to unicode')
+
+class UnicodeCompat:
+    def __init__(self, x):
+        self.x = x
+    def __unicode__(self):
+        return self.x
+
+verify(unicode(UnicodeCompat('__unicode__ compatible objects are recognized'))
+       == u'__unicode__ compatible objects are recognized')
+
+class StringCompat:
+    def __init__(self, x):
+        self.x = x
+    def __str__(self):
+        return self.x
+
+verify(unicode(StringCompat('__str__ compatible objects are recognized'))
+       == u'__str__ compatible objects are recognized')
+
+# unicode(obj) is compatible to str():
+
+o = StringCompat('unicode(obj) is compatible to str()')
+verify(unicode(o) == u'unicode(obj) is compatible to str()')
+verify(str(o) == 'unicode(obj) is compatible to str()')
+
+for obj in (123, 123.45, 123L):
+    verify(unicode(obj) == unicode(str(obj)))
+
+# unicode(obj, encoding, error) tests (this maps to
+# PyUnicode_FromEncodedObject() at C level)
+
+if not sys.platform.startswith('java'):
+    try:
+        unicode(u'decoding unicode is not supported', 'utf-8', 'strict')
+    except TypeError:
+        pass
+    else:
+        raise TestFailed, "decoding unicode should NOT be supported"
+
+verify(unicode('strings are decoded to unicode', 'utf-8', 'strict')
+       == u'strings are decoded to unicode')
+
+if not sys.platform.startswith('java'):
+    verify(unicode(buffer('character buffers are decoded to unicode'),
+                   'utf-8', 'strict')
+           == u'character buffers are decoded to unicode')
+
+print 'done.'
+
+# Test builtin codecs
+print 'Testing builtin codecs...',
+
+# UTF-7 specific encoding tests:
+utfTests = [(u'A\u2262\u0391.', 'A+ImIDkQ.'),  # RFC2152 example
+ (u'Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'),     # RFC2152 example
+ (u'\u65E5\u672C\u8A9E', '+ZeVnLIqe-'),        # RFC2152 example
+ (u'Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
+ (u'+', '+-'),
+ (u'+-', '+--'),
+ (u'+?', '+-?'),
+ (u'\?', '+AFw?'),
+ (u'+?', '+-?'),
+ (ur'\\?', '+AFwAXA?'),
+ (ur'\\\?', '+AFwAXABc?'),
+ (ur'++--', '+-+---')]
+
+for x,y in utfTests:
+    verify( x.encode('utf-7') == y )
+
+try:
+    unicode('+3ADYAA-', 'utf-7') # surrogates not supported
+except UnicodeError:
+    pass
+else:
+    raise TestFailed, "unicode('+3ADYAA-', 'utf-7') failed to raise an exception"
+
+verify(unicode('+3ADYAA-', 'utf-7', 'replace') == u'\ufffd')
+
+# UTF-8 specific encoding tests:
+verify(u''.encode('utf-8') == '')
+verify(u'\u20ac'.encode('utf-8') == '\xe2\x82\xac')
+verify(u'\ud800\udc02'.encode('utf-8') == '\xf0\x90\x80\x82')
+verify(u'\ud84d\udc56'.encode('utf-8') == '\xf0\xa3\x91\x96')
+verify(u'\ud800'.encode('utf-8') == '\xed\xa0\x80')
+verify(u'\udc00'.encode('utf-8') == '\xed\xb0\x80')
+verify((u'\ud800\udc02'*1000).encode('utf-8') ==
+       '\xf0\x90\x80\x82'*1000)
+verify(u'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
+       u'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
+       u'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
+       u'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
+       u'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
+       u' Nunstuck git und'.encode('utf-8') ==
+       '\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
+       '\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
+       '\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
+       '\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
+       '\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
+       '\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
+       '\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
+       '\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
+       '\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
+       '\xe3\x80\x8cWenn ist das Nunstuck git und')
+
+# UTF-8 specific decoding tests
+verify(unicode('\xf0\xa3\x91\x96', 'utf-8') == u'\U00023456' )
+verify(unicode('\xf0\x90\x80\x82', 'utf-8') == u'\U00010002' )
+verify(unicode('\xe2\x82\xac', 'utf-8') == u'\u20ac' )
+# test UTF-8 2.2.1 bug work-around
+verify(unicode('\xa0\x80', 'utf-8') == u'\ud800' )
+verify(unicode('\xaf\xbf', 'utf-8') == u'\udbff' )
+verify(unicode('\xed\xb0\x80', 'utf-8') == u'\udc00' )
+verify(unicode('\xed\xbf\xbf', 'utf-8') == u'\udfff' )
+
+# Other possible utf-8 test cases:
+# * strict decoding testing for all of the
+#   UTF8_ERROR cases in PyUnicode_DecodeUTF8
+
+verify(unicode('hello','ascii') == u'hello')
+verify(unicode('hello','utf-8') == u'hello')
+verify(unicode('hello','utf8') == u'hello')
+verify(unicode('hello','latin-1') == u'hello')
+
+# Error handling
+try:
+    u'Andr\202 x'.encode('ascii')
+    u'Andr\202 x'.encode('ascii','strict')
+except ValueError:
+    pass
+else:
+    raise TestFailed, "u'Andr\202'.encode('ascii') failed to raise an exception"
+verify(u'Andr\202 x'.encode('ascii','ignore') == "Andr x")
+verify(u'Andr\202 x'.encode('ascii','replace') == "Andr? x")
+
+try:
+    unicode('Andr\202 x','ascii')
+    unicode('Andr\202 x','ascii','strict')
+except ValueError:
+    pass
+else:
+    raise TestFailed, "unicode('Andr\202') failed to raise an exception"
+verify(unicode('Andr\202 x','ascii','ignore') == u"Andr x")
+verify(unicode('Andr\202 x','ascii','replace') == u'Andr\uFFFD x')
+
+verify("\\N{foo}xx".decode("unicode-escape", "ignore") == u"xx")
+try:
+    "\\".decode("unicode-escape")
+except ValueError:
+    pass
+else:
+    raise TestFailed, '"\\".decode("unicode-escape") should fail'
+
+verify(u'hello'.encode('ascii') == 'hello')
+verify(u'hello'.encode('utf-7') == 'hello')
+verify(u'hello'.encode('utf-8') == 'hello')
+verify(u'hello'.encode('utf8') == 'hello')
+verify(u'hello'.encode('utf-16-le') == 'h\000e\000l\000l\000o\000')
+verify(u'hello'.encode('utf-16-be') == '\000h\000e\000l\000l\000o')
+verify(u'hello'.encode('latin-1') == 'hello')
+
+# Roundtrip safety for BMP (just the first 1024 chars)
+u = u''.join(map(unichr, range(1024)))
+for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
+                 'raw_unicode_escape', 'unicode_escape', 'unicode_internal'):
+    verify(unicode(u.encode(encoding),encoding) == u)
+
+# Roundtrip safety for non-BMP (just a few chars)
+u = u'\U00010001\U00020002\U00030003\U00040004\U00050005'
+for encoding in ('utf-8',
+                 'utf-16', 'utf-16-le', 'utf-16-be',
+                 #'raw_unicode_escape',
+                 'unicode_escape', 'unicode_internal'):
+    verify(unicode(u.encode(encoding),encoding) == u)
+
+u = u''.join(map(unichr, range(256)))
+for encoding in (
+    'latin-1',
+    ):
+    try:
+        verify(unicode(u.encode(encoding),encoding) == u)
+    except TestFailed:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+u = u''.join(map(unichr, range(128)))
+for encoding in (
+    'ascii',
+    ):
+    try:
+        verify(unicode(u.encode(encoding),encoding) == u)
+    except TestFailed:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print 'done.'
+
+print 'Testing standard mapping codecs...',
+
+print '0-127...',
+s = ''.join(map(chr, range(128)))
+for encoding in (
+    'cp037', 'cp1026',
+    'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+    'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+    'cp863', 'cp865', 'cp866',
+    'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
+    'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
+    'mac_cyrillic', 'mac_latin2',
+
+    'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+    'cp1256', 'cp1257', 'cp1258',
+    'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+
+    'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+    'cp1006', 'iso8859_8',
+
+    ### These have undefined mappings:
+    #'cp424',
+
+    ### These fail the round-trip:
+    #'cp875'
+
+    ):
+    try:
+        verify(unicode(s,encoding).encode(encoding) == s)
+    except TestFailed:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print '128-255...',
+s = ''.join(map(chr, range(128,256)))
+for encoding in (
+    'cp037', 'cp1026',
+    'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+    'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+    'cp863', 'cp865', 'cp866',
+    'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+    'iso8859_2', 'iso8859_4', 'iso8859_5',
+    'iso8859_9', 'koi8_r', 'latin_1',
+    'mac_cyrillic', 'mac_latin2',
+
+    ### These have undefined mappings:
+    #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+    #'cp1256', 'cp1257', 'cp1258',
+    #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+    #'iso8859_3', 'iso8859_6', 'iso8859_7',
+    #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+
+    ### These fail the round-trip:
+    #'cp1006', 'cp875', 'iso8859_8',
+
+    ):
+    try:
+        verify(unicode(s,encoding).encode(encoding) == s)
+    except TestFailed:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+# UTF-8 must be roundtrip safe for all UCS-2 code points
+# This excludes surrogates: in the full range, there would be
+# a surrogate pair (\udbff\udc00), which gets converted back
+# to a non-BMP character (\U0010fc00)
+u = u''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
+for encoding in ('utf-8',):
+    verify(unicode(u.encode(encoding),encoding) == u)
+
+print 'done.'
+
+print 'Testing Unicode string concatenation...',
+verify((u"abc" u"def") == u"abcdef")
+verify(("abc" u"def") == u"abcdef")
+verify((u"abc" "def") == u"abcdef")
+verify((u"abc" u"def" "ghi") == u"abcdefghi")
+verify(("abc" "def" u"ghi") == u"abcdefghi")
+print 'done.'
+
+print 'Testing Unicode printing...',
+print u'abc'
+print u'abc', u'def'
+print u'abc', 'def'
+print 'abc', u'def'
+print u'abc\n'
+print u'abc\n',
+print u'abc\n',
+print u'def\n'
+print u'def\n'
+print 'done.'
diff --git a/lib-python/2.2/test/test_unicode_file.py b/lib-python/2.2/test/test_unicode_file.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_unicode_file.py
@@ -0,0 +1,95 @@
+# Test some Unicode file name semantics
+# We dont test many operations on files other than
+# that their names can be used with Unicode characters.
+import os
+
+from test_support import verify, TestSkipped, TESTFN_UNICODE
+try:
+    from test_support import TESTFN_ENCODING
+    oldlocale = None
+except ImportError:
+    import locale
+    # try to run the test in an UTF-8 locale. If this locale is not
+    # available, avoid running the test since the locale's encoding
+    # might not support TESTFN_UNICODE. Likewise, if the system does
+    # not support locale.CODESET, Unicode file semantics is not
+    # available, either.
+    oldlocale = locale.setlocale(locale.LC_CTYPE)
+    try:
+        locale.setlocale(locale.LC_CTYPE,"en_US.UTF-8")
+        TESTFN_ENCODING = locale.nl_langinfo(locale.CODESET)
+    except (locale.Error, AttributeError):
+        raise TestSkipped("No Unicode filesystem semantics on this platform.")
+
+TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
+
+# Check with creation as Unicode string.
+f = open(TESTFN_UNICODE, 'wb')
+if not os.path.isfile(TESTFN_UNICODE):
+    print "File doesn't exist after creating it"
+
+if not os.path.isfile(TESTFN_ENCODED):
+    print "File doesn't exist (encoded string) after creating it"
+
+f.close()
+
+# Test stat and chmod
+if os.stat(TESTFN_ENCODED) != os.stat(TESTFN_UNICODE):
+    print "os.stat() did not agree on the 2 filenames"
+os.chmod(TESTFN_ENCODED, 0777)
+os.chmod(TESTFN_UNICODE, 0777)
+
+# Test rename
+os.rename(TESTFN_ENCODED, TESTFN_ENCODED + ".new")
+os.rename(TESTFN_UNICODE+".new", TESTFN_ENCODED)
+
+os.unlink(TESTFN_ENCODED)
+if os.path.isfile(TESTFN_ENCODED) or \
+   os.path.isfile(TESTFN_UNICODE):
+    print "File exists after deleting it"
+
+# Check with creation as encoded string.
+f = open(TESTFN_ENCODED, 'wb')
+if not os.path.isfile(TESTFN_UNICODE) or \
+   not os.path.isfile(TESTFN_ENCODED):
+    print "File doesn't exist after creating it"
+
+path, base = os.path.split(os.path.abspath(TESTFN_ENCODED))
+if base not in os.listdir(path):
+    print "Filename did not appear in os.listdir()"
+
+f.close()
+os.unlink(TESTFN_UNICODE)
+if os.path.isfile(TESTFN_ENCODED) or \
+   os.path.isfile(TESTFN_UNICODE):
+    print "File exists after deleting it"
+
+# test os.open
+f = os.open(TESTFN_ENCODED, os.O_CREAT)
+if not os.path.isfile(TESTFN_UNICODE) or \
+   not os.path.isfile(TESTFN_ENCODED):
+    print "File doesn't exist after creating it"
+os.close(f)
+os.unlink(TESTFN_UNICODE)
+
+# Test directories etc
+cwd = os.getcwd()
+abs_encoded = os.path.abspath(TESTFN_ENCODED) + ".dir"
+abs_unicode = os.path.abspath(TESTFN_UNICODE) + ".dir"
+os.mkdir(abs_encoded)
+try:
+    os.chdir(abs_encoded)
+    os.chdir(abs_unicode)
+finally:
+    os.chdir(cwd)
+    os.rmdir(abs_unicode)
+os.mkdir(abs_unicode)
+try:
+    os.chdir(abs_encoded)
+    os.chdir(abs_unicode)
+finally:
+    os.chdir(cwd)
+    os.rmdir(abs_encoded)
+print "All the Unicode tests appeared to work"
+if oldlocale:
+    locale.setlocale(locale.LC_CTYPE, oldlocale)
diff --git a/lib-python/2.2/test/test_unicodedata.py b/lib-python/2.2/test/test_unicodedata.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_unicodedata.py
@@ -0,0 +1,125 @@
+""" Test script for the unicodedata module.
+
+    Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+    (c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+from test_support import verify, verbose
+import sha
+
+encoding = 'utf-8'
+
+def test_methods():
+
+    h = sha.sha()
+    for i in range(65536):
+        char = unichr(i)
+        data = [
+
+            # Predicates (single char)
+            char.isalnum() and u'1' or u'0',
+            char.isalpha() and u'1' or u'0',
+            char.isdecimal() and u'1' or u'0',
+            char.isdigit() and u'1' or u'0',
+            char.islower() and u'1' or u'0',
+            char.isnumeric() and u'1' or u'0',
+            char.isspace() and u'1' or u'0',
+            char.istitle() and u'1' or u'0',
+            char.isupper() and u'1' or u'0',
+
+            # Predicates (multiple chars)
+            (char + u'abc').isalnum() and u'1' or u'0',
+            (char + u'abc').isalpha() and u'1' or u'0',
+            (char + u'123').isdecimal() and u'1' or u'0',
+            (char + u'123').isdigit() and u'1' or u'0',
+            (char + u'abc').islower() and u'1' or u'0',
+            (char + u'123').isnumeric() and u'1' or u'0',
+            (char + u' \t').isspace() and u'1' or u'0',
+            (char + u'abc').istitle() and u'1' or u'0',
+            (char + u'ABC').isupper() and u'1' or u'0',
+
+            # Mappings (single char)
+            char.lower(),
+            char.upper(),
+            char.title(),
+
+            # Mappings (multiple chars)
+            (char + u'abc').lower(),
+            (char + u'ABC').upper(),
+            (char + u'abc').title(),
+            (char + u'ABC').title(),
+
+            ]
+        h.update(u''.join(data).encode(encoding))
+    return h.hexdigest()
+
+def test_unicodedata():
+
+    h = sha.sha()
+    for i in range(65536):
+        char = unichr(i)
+        data = [
+            # Properties
+            str(unicodedata.digit(char, -1)),
+            str(unicodedata.numeric(char, -1)),
+            str(unicodedata.decimal(char, -1)),
+            unicodedata.category(char),
+            unicodedata.bidirectional(char),
+            unicodedata.decomposition(char),
+            str(unicodedata.mirrored(char)),
+            str(unicodedata.combining(char)),
+            ]
+        h.update(''.join(data))
+    return h.hexdigest()
+
+### Run tests
+
+print 'Testing Unicode Database...'
+print 'Methods:',
+print test_methods()
+
+# In case unicodedata is not available, this will raise an ImportError,
+# but still test the above cases...
+import unicodedata
+print 'Functions:',
+print test_unicodedata()
+
+# Some additional checks of the API:
+print 'API:',
+
+verify(unicodedata.digit(u'A',None) is None)
+verify(unicodedata.digit(u'9') == 9)
+verify(unicodedata.digit(u'\u215b',None) is None)
+verify(unicodedata.digit(u'\u2468') == 9)
+
+verify(unicodedata.numeric(u'A',None) is None)
+verify(unicodedata.numeric(u'9') == 9)
+verify(unicodedata.numeric(u'\u215b') == 0.125)
+verify(unicodedata.numeric(u'\u2468') == 9.0)
+
+verify(unicodedata.decimal(u'A',None) is None)
+verify(unicodedata.decimal(u'9') == 9)
+verify(unicodedata.decimal(u'\u215b',None) is None)
+verify(unicodedata.decimal(u'\u2468',None) is None)
+
+verify(unicodedata.category(u'\uFFFE') == 'Cn')
+verify(unicodedata.category(u'a') == 'Ll')
+verify(unicodedata.category(u'A') == 'Lu')
+
+verify(unicodedata.bidirectional(u'\uFFFE') == '')
+verify(unicodedata.bidirectional(u' ') == 'WS')
+verify(unicodedata.bidirectional(u'A') == 'L')
+
+verify(unicodedata.decomposition(u'\uFFFE') == '')
+verify(unicodedata.decomposition(u'\u00bc') == '<fraction> 0031 2044 0034')
+
+verify(unicodedata.mirrored(u'\uFFFE') == 0)
+verify(unicodedata.mirrored(u'a') == 0)
+verify(unicodedata.mirrored(u'\u2201') == 1)
+
+verify(unicodedata.combining(u'\uFFFE') == 0)
+verify(unicodedata.combining(u'a') == 0)
+verify(unicodedata.combining(u'\u20e1') == 230)
+
+print 'ok'
diff --git a/lib-python/2.2/test/test_unpack.py b/lib-python/2.2/test/test_unpack.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_unpack.py
@@ -0,0 +1,144 @@
+from test_support import *
+
+t = (1, 2, 3)
+l = [4, 5, 6]
+
+class Seq:
+    def __getitem__(self, i):
+        if i >= 0 and i < 3: return i
+        raise IndexError
+
+a = -1
+b = -1
+c = -1
+
+# unpack tuple
+if verbose:
+    print 'unpack tuple'
+a, b, c = t
+if a != 1 or b != 2 or c != 3:
+    raise TestFailed
+
+# unpack list
+if verbose:
+    print 'unpack list'
+a, b, c = l
+if a != 4 or b != 5 or c != 6:
+    raise TestFailed
+
+# unpack implied tuple
+if verbose:
+    print 'unpack implied tuple'
+a, b, c = 7, 8, 9
+if a != 7 or b != 8 or c != 9:
+    raise TestFailed
+
+# unpack string... fun!
+if verbose:
+    print 'unpack string'
+a, b, c = 'one'
+if a != 'o' or b != 'n' or c != 'e':
+    raise TestFailed
+
+# unpack generic sequence
+if verbose:
+    print 'unpack sequence'
+a, b, c = Seq()
+if a != 0 or b != 1 or c != 2:
+    raise TestFailed
+
+# single element unpacking, with extra syntax
+if verbose:
+    print 'unpack single tuple/list'
+st = (99,)
+sl = [100]
+a, = st
+if a != 99:
+    raise TestFailed
+b, = sl
+if b != 100:
+    raise TestFailed
+
+# now for some failures
+
+# unpacking non-sequence
+if verbose:
+    print 'unpack non-sequence'
+try:
+    a, b, c = 7
+    raise TestFailed
+except TypeError:
+    pass
+
+
+# unpacking tuple of wrong size
+if verbose:
+    print 'unpack tuple wrong size'
+try:
+    a, b = t
+    raise TestFailed
+except ValueError:
+    pass
+
+# unpacking list of wrong size
+if verbose:
+    print 'unpack list wrong size'
+try:
+    a, b = l
+    raise TestFailed
+except ValueError:
+    pass
+
+
+# unpacking sequence too short
+if verbose:
+    print 'unpack sequence too short'
+try:
+    a, b, c, d = Seq()
+    raise TestFailed
+except ValueError:
+    pass
+
+
+# unpacking sequence too long
+if verbose:
+    print 'unpack sequence too long'
+try:
+    a, b = Seq()
+    raise TestFailed
+except ValueError:
+    pass
+
+
+# unpacking a sequence where the test for too long raises a different
+# kind of error
+class BozoError(Exception):
+    pass
+
+class BadSeq:
+    def __getitem__(self, i):
+        if i >= 0 and i < 3:
+            return i
+        elif i == 3:
+            raise BozoError
+        else:
+            raise IndexError
+
+
+# trigger code while not expecting an IndexError
+if verbose:
+    print 'unpack sequence too long, wrong error'
+try:
+    a, b, c, d, e = BadSeq()
+    raise TestFailed
+except BozoError:
+    pass
+
+# trigger code while expecting an IndexError
+if verbose:
+    print 'unpack sequence too short, wrong error'
+try:
+    a, b, c = BadSeq()
+    raise TestFailed
+except BozoError:
+    pass
diff --git a/lib-python/2.2/test/test_urllib.py b/lib-python/2.2/test/test_urllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_urllib.py
@@ -0,0 +1,109 @@
+# Minimal test of the quote function
+from test_support import verify, verbose
+import urllib
+
+chars = 'abcdefghijklmnopqrstuvwxyz'\
+        '\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356' \
+        '\357\360\361\362\363\364\365\366\370\371\372\373\374\375\376\377' \
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
+        '\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317' \
+        '\320\321\322\323\324\325\326\330\331\332\333\334\335\336'
+
+expected = 'abcdefghijklmnopqrstuvwxyz' \
+           '%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE' \
+           '%EF%F0%F1%F2%F3%F4%F5%F6%F8%F9%FA%FB%FC%FD%FE%FF' \
+           'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
+           '%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF' \
+           '%D0%D1%D2%D3%D4%D5%D6%D8%D9%DA%DB%DC%DD%DE'
+
+test = urllib.quote(chars)
+verify(test == expected, "urllib.quote problem 1")
+test2 = urllib.unquote(expected)
+verify(test2 == chars)
+
+in1 = "abc/def"
+out1_1 = "abc/def"
+out1_2 = "abc%2Fdef"
+
+verify(urllib.quote(in1) == out1_1, "urllib.quote problem 2")
+verify(urllib.quote(in1, '') == out1_2, "urllib.quote problem 3")
+
+in2 = "abc?def"
+out2_1 = "abc%3Fdef"
+out2_2 = "abc?def"
+
+verify(urllib.quote(in2) == out2_1, "urllib.quote problem 4")
+verify(urllib.quote(in2, '?') == out2_2, "urllib.quote problem 5")
+
+
+
+in3 = {"p1":"v1","p2":"v2"}
+in3list = [("p1", "v1"), ("p2","v2")]
+exp3_1 = "p2=v2&p1=v1"
+exp3_2 = "p1=v1&p2=v2"
+# dict input, only string values
+act3 = urllib.urlencode(in3)
+verify(act3==exp3_1 or act3==exp3_2, "urllib.urlencode problem 1 dict")
+# list input, only string values
+act3list = urllib.urlencode(in3list)
+verify(act3list==exp3_2, "urllib.urlencode problem 1 list")
+
+
+in4 = {"p1":["v1","v2"]}
+in4list = [("p1", ["v1","v2"])]
+exp4 = "p1=v1&p1=v2"
+# dict input, list values, doseq==1
+act4 = urllib.urlencode(in4,doseq=1)
+verify(act4==exp4, "urllib.urlencode problem 2 dict")
+# list input, list values, doseq==1
+act4list = urllib.urlencode(in4,doseq=1)
+verify(act4list==exp4, "urllib.urlencode problem 2 list")
+
+
+in5 = in4
+in5list = in4list
+exp5 = "p1=%5B%27v1%27%2C+%27v2%27%5D"
+exp5list = "p1=%5B%27v1%27%2C+%27v2%27%5D"
+# dict input, list variables, doseq=0
+act5 = urllib.urlencode(in5)
+verify(act5==exp5, "urllib.urlencode problem 3 dict")
+# list input, list variables, doseq=0
+act5list = urllib.urlencode(in5list)
+verify(act5list==exp5list, "urllib.urlencode problem 3 list")
+
+
+in6 = {"p1":"v1","p2":"v2"}
+in6list = [("p1", "v1"), ("p2","v2")]
+exp6_1 = "p2=v2&p1=v1"
+exp6_2 = "p1=v1&p2=v2"
+# dict input, only string values, doseq==1
+act6 = urllib.urlencode(in6,doseq=1)
+verify(act6==exp6_1 or act6==exp6_2, "urllib.urlencode problem 4 dict")
+# list input, only string values
+act6list = urllib.urlencode(in6list,doseq=1)
+verify(act6list==exp6_2, "urllib.urlencode problem 4 list")
+
+
+in7 = "p1=v1&p2=v2"
+try:
+    act7 = urllib.urlencode(in7)
+    print "urllib.urlencode problem 5 string"
+except TypeError:
+    pass
+
+
+import UserDict
+in8 = UserDict.UserDict()
+in8["p1"] = "v1"
+in8["p2"] = ["v1", "v2"]
+exp8_1 = "p1=v1&p2=v1&p2=v2"
+exp8_2 = "p2=v1&p2=v2&p1=v1"
+act8 = urllib.urlencode(in8,doseq=1)
+verify(act8==exp8_1 or act8==exp8_2, "urllib.urlencode problem 6 UserDict")
+
+
+import UserString
+in9 = UserString.UserString("")
+exp9 = ""
+act9 = urllib.urlencode(in9,doseq=1)
+verify(act9==exp9, "urllib.urlencode problem 7 UserString")
diff --git a/lib-python/2.2/test/test_urllib2.py b/lib-python/2.2/test/test_urllib2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_urllib2.py
@@ -0,0 +1,31 @@
+from test_support import verify
+import urllib2
+import os
+
+# A couple trivial tests
+
+try:
+    urllib2.urlopen('bogus url')
+except ValueError:
+    pass
+else:
+    verify(0)
+
+# XXX Name hacking to get this to work on Windows.
+fname = os.path.abspath(urllib2.__file__).replace('\\', '/')
+if fname[1:2] == ":":
+    fname = fname[2:]
+# And more hacking to get it to work on MacOS. This assumes
+# urllib.pathname2url works, unfortunately...
+if os.name == 'mac':
+    fname = '/' + fname.replace(':', '/')
+elif os.name == 'riscos':
+    import string
+    fname = os.expand(fname)
+    fname = fname.translate(string.maketrans("/.", "./"))
+
+file_url = "file://%s" % fname
+f = urllib2.urlopen(file_url)
+
+buf = f.read()
+f.close()
diff --git a/lib-python/2.2/test/test_urlparse.py b/lib-python/2.2/test/test_urlparse.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_urlparse.py
@@ -0,0 +1,94 @@
+import urlparse
+
+errors = 0
+
+RFC1808_BASE = "http://a/b/c/d;p?q#f"
+
+for url, expected in [('http://www.python.org',
+                       ('http', 'www.python.org', '', '', '', '')),
+                      ('http://www.python.org#abc',
+                       ('http', 'www.python.org', '', '', '', 'abc')),
+                      ('http://www.python.org/#abc',
+                       ('http', 'www.python.org', '/', '', '', 'abc')),
+                      (RFC1808_BASE,
+                       ('http', 'a', '/b/c/d', 'p', 'q', 'f')),
+                      ('file:///tmp/junk.txt',
+                       ('file', '', '/tmp/junk.txt', '', '', '')),
+                      ]:
+    result = urlparse.urlparse(url)
+    print "%-13s = %r" % (url, result)
+    if result != expected:
+        errors += 1
+        print "urlparse(%r)" % url
+        print ("expected %r,\n"
+               "     got %r") % (expected, result)
+    # put it back together and it should be the same
+    result2 = urlparse.urlunparse(result)
+    assert(result2 == url)
+print
+
+def checkJoin(relurl, expected):
+    global errors
+    result = urlparse.urljoin(RFC1808_BASE, relurl)
+    print "%-13s = %r" % (relurl, result)
+    if result != expected:
+        errors += 1
+        print "urljoin(%r, %r)" % (RFC1808_BASE, relurl)
+        print ("expected %r,\n"
+               "     got %r") % (expected, result)
+
+print "urlparse.urljoin() tests"
+print
+
+# "normal" cases from RFC 1808:
+checkJoin('g:h', 'g:h')
+checkJoin('g', 'http://a/b/c/g')
+checkJoin('./g', 'http://a/b/c/g')
+checkJoin('g/', 'http://a/b/c/g/')
+checkJoin('/g', 'http://a/g')
+checkJoin('//g', 'http://g')
+checkJoin('?y', 'http://a/b/c/d;p?y')
+checkJoin('g?y', 'http://a/b/c/g?y')
+checkJoin('g?y/./x', 'http://a/b/c/g?y/./x')
+checkJoin('#s', 'http://a/b/c/d;p?q#s')
+checkJoin('g#s', 'http://a/b/c/g#s')
+checkJoin('g#s/./x', 'http://a/b/c/g#s/./x')
+checkJoin('g?y#s', 'http://a/b/c/g?y#s')
+checkJoin(';x', 'http://a/b/c/d;x')
+checkJoin('g;x', 'http://a/b/c/g;x')
+checkJoin('g;x?y#s', 'http://a/b/c/g;x?y#s')
+checkJoin('.', 'http://a/b/c/')
+checkJoin('./', 'http://a/b/c/')
+checkJoin('..', 'http://a/b/')
+checkJoin('../', 'http://a/b/')
+checkJoin('../g', 'http://a/b/g')
+checkJoin('../..', 'http://a/')
+checkJoin('../../', 'http://a/')
+checkJoin('../../g', 'http://a/g')
+
+# "abnormal" cases from RFC 1808:
+checkJoin('', 'http://a/b/c/d;p?q#f')
+checkJoin('../../../g', 'http://a/../g')
+checkJoin('../../../../g', 'http://a/../../g')
+checkJoin('/./g', 'http://a/./g')
+checkJoin('/../g', 'http://a/../g')
+checkJoin('g.', 'http://a/b/c/g.')
+checkJoin('.g', 'http://a/b/c/.g')
+checkJoin('g..', 'http://a/b/c/g..')
+checkJoin('..g', 'http://a/b/c/..g')
+checkJoin('./../g', 'http://a/b/g')
+checkJoin('./g/.', 'http://a/b/c/g/')
+checkJoin('g/./h', 'http://a/b/c/g/h')
+checkJoin('g/../h', 'http://a/b/c/h')
+
+# RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
+# so we'll not actually run these tests (which expect 1808 behavior).
+#checkJoin('http:g', 'http:g')
+#checkJoin('http:', 'http:')
+
+print errors, "errors"
+
+# One more test backported from 2.3
+for u in ['Python', './Python']:
+    if urlparse.urlunparse(urlparse.urlparse(u)) != u:
+        print "*** urlparse/urlunparse failure for", `u`
diff --git a/lib-python/2.2/test/test_userdict.py b/lib-python/2.2/test/test_userdict.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_userdict.py
@@ -0,0 +1,120 @@
+# Check every path through every method of UserDict
+
+from test_support import verify, verbose
+from UserDict import UserDict, IterableUserDict
+
+d0 = {}
+d1 = {"one": 1}
+d2 = {"one": 1, "two": 2}
+
+# Test constructors
+
+u = UserDict()
+u0 = UserDict(d0)
+u1 = UserDict(d1)
+u2 = IterableUserDict(d2)
+
+uu = UserDict(u)
+uu0 = UserDict(u0)
+uu1 = UserDict(u1)
+uu2 = UserDict(u2)
+
+# Test __repr__
+
+verify(str(u0) == str(d0))
+verify(repr(u1) == repr(d1))
+verify(`u2` == `d2`)
+
+# Test __cmp__ and __len__
+
+all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
+for a in all:
+    for b in all:
+        verify(cmp(a, b) == cmp(len(a), len(b)))
+
+# Test __getitem__
+
+verify(u2["one"] == 1)
+try:
+    u1["two"]
+except KeyError:
+    pass
+else:
+    verify(0, "u1['two'] shouldn't exist")
+
+# Test __setitem__
+
+u3 = UserDict(u2)
+u3["two"] = 2
+u3["three"] = 3
+
+# Test __delitem__
+
+del u3["three"]
+try:
+    del u3["three"]
+except KeyError:
+    pass
+else:
+    verify(0, "u3['three'] shouldn't exist")
+
+# Test clear
+
+u3.clear()
+verify(u3 == {})
+
+# Test copy()
+
+u2a = u2.copy()
+verify(u2a == u2)
+
+class MyUserDict(UserDict):
+    def display(self): print self
+
+m2 = MyUserDict(u2)
+m2a = m2.copy()
+verify(m2a == m2)
+
+# SF bug #476616 -- copy() of UserDict subclass shared data
+m2['foo'] = 'bar'
+verify(m2a != m2)
+
+# Test keys, items, values
+
+verify(u2.keys() == d2.keys())
+verify(u2.items() == d2.items())
+verify(u2.values() == d2.values())
+
+# Test has_key and "in".
+
+for i in u2.keys():
+    verify(u2.has_key(i) == 1)
+    verify((i in u2) == 1)
+    verify(u1.has_key(i) == d1.has_key(i))
+    verify((i in u1) == (i in d1))
+    verify(u0.has_key(i) == d0.has_key(i))
+    verify((i in u0) == (i in d0))
+
+# Test update
+
+t = UserDict()
+t.update(u2)
+verify(t == u2)
+
+# Test get
+
+for i in u2.keys():
+    verify(u2.get(i) == u2[i])
+    verify(u1.get(i) == d1.get(i))
+    verify(u0.get(i) == d0.get(i))
+
+# Test "in" iteration.
+for i in xrange(20):
+    u2[i] = str(i)
+ikeys = []
+for k in u2:
+    ikeys.append(k)
+ikeys.sort()
+keys = u2.keys()
+keys.sort()
+verify(ikeys == keys)
diff --git a/lib-python/2.2/test/test_userlist.py b/lib-python/2.2/test/test_userlist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_userlist.py
@@ -0,0 +1,201 @@
+# Check every path through every method of UserList
+
+from UserList import UserList
+from test_support import TestFailed
+
+# Use check instead of assert so -O doesn't render the
+# test useless.
+# XXX: could use the verify function in test_support instead
+def check(predicate, msg):
+    if not predicate:
+        raise TestFailed(msg + " failed")
+
+l0 = []
+l1 = [0]
+l2 = [0, 1]
+
+# Test constructors
+
+u = UserList()
+u0 = UserList(l0)
+u1 = UserList(l1)
+u2 = UserList(l2)
+
+uu = UserList(u)
+uu0 = UserList(u0)
+uu1 = UserList(u1)
+uu2 = UserList(u2)
+
+v = UserList(tuple(u))
+class OtherList:
+    def __init__(self, initlist):
+        self.__data = initlist
+    def __len__(self):
+        return len(self.__data)
+    def __getitem__(self, i):
+        return self.__data[i]
+v0 = UserList(OtherList(u0))
+vv = UserList("this is also a sequence")
+
+# Test __repr__
+
+check(str(u0) == str(l0), "str(u0) == str(l0)")
+check(repr(u1) == repr(l1), "repr(u1) == repr(l1)")
+check(`u2` == `l2`, "`u2` == `l2`")
+
+# Test __cmp__ and __len__
+
+def mycmp(a, b):
+    r = cmp(a, b)
+    if r < 0: return -1
+    if r > 0: return 1
+    return r
+
+all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
+for a in all:
+    for b in all:
+        check(mycmp(a, b) == mycmp(len(a), len(b)),
+              "mycmp(a, b) == mycmp(len(a), len(b))")
+
+# Test __getitem__
+
+for i in range(len(u2)):
+    check(u2[i] == i, "u2[i] == i")
+
+# Test __setitem__
+
+uu2[0] = 0
+uu2[1] = 100
+try:
+    uu2[2] = 200
+except IndexError:
+    pass
+else:
+    raise TestFailed("uu2[2] shouldn't be assignable")
+
+# Test __delitem__
+
+del uu2[1]
+del uu2[0]
+try:
+    del uu2[0]
+except IndexError:
+    pass
+else:
+    raise TestFailed("uu2[0] shouldn't be deletable")
+
+# Test __getslice__
+
+for i in range(-3, 4):
+    check(u2[:i] == l2[:i], "u2[:i] == l2[:i]")
+    check(u2[i:] == l2[i:], "u2[i:] == l2[i:]")
+    for j in range(-3, 4):
+        check(u2[i:j] == l2[i:j], "u2[i:j] == l2[i:j]")
+
+# Test __setslice__
+
+for i in range(-3, 4):
+    u2[:i] = l2[:i]
+    check(u2 == l2, "u2 == l2")
+    u2[i:] = l2[i:]
+    check(u2 == l2, "u2 == l2")
+    for j in range(-3, 4):
+        u2[i:j] = l2[i:j]
+        check(u2 == l2, "u2 == l2")
+
+uu2 = u2[:]
+uu2[:0] = [-2, -1]
+check(uu2 == [-2, -1, 0, 1], "uu2 == [-2, -1, 0, 1]")
+uu2[0:] = []
+check(uu2 == [], "uu2 == []")
+
+# Test __contains__
+for i in u2:
+    check(i in u2, "i in u2")
+for i in min(u2)-1, max(u2)+1:
+    check(i not in u2, "i not in u2")
+
+# Test __delslice__
+
+uu2 = u2[:]
+del uu2[1:2]
+del uu2[0:1]
+check(uu2 == [], "uu2 == []")
+
+uu2 = u2[:]
+del uu2[1:]
+del uu2[:1]
+check(uu2 == [], "uu2 == []")
+
+# Test __add__, __radd__, __mul__ and __rmul__
+
+check(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
+check(u1 + [1] == u2, "u1 + [1] == u2")
+check([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
+check(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
+check(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
+check(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
+
+# Test append
+
+u = u1[:]
+u.append(1)
+check(u == u2, "u == u2")
+
+# Test insert
+
+u = u2[:]
+u.insert(0, -1)
+check(u == [-1, 0, 1], "u == [-1, 0, 1]")
+
+# Test pop
+
+u = [-1] + u2
+u.pop()
+check(u == [-1, 0], "u == [-1, 0]")
+u.pop(0)
+check(u == [0], "u == [0]")
+
+# Test remove
+
+u = u2[:]
+u.remove(1)
+check(u == u1, "u == u1")
+
+# Test count
+u = u2*3
+check(u.count(0) == 3, "u.count(0) == 3")
+check(u.count(1) == 3, "u.count(1) == 3")
+check(u.count(2) == 0, "u.count(2) == 0")
+
+
+# Test index
+
+check(u2.index(0) == 0, "u2.index(0) == 0")
+check(u2.index(1) == 1, "u2.index(1) == 1")
+try:
+    u2.index(2)
+except ValueError:
+    pass
+else:
+    raise TestFailed("expected ValueError")
+
+# Test reverse
+
+u = u2[:]
+u.reverse()
+check(u == [1, 0], "u == [1, 0]")
+u.reverse()
+check(u == u2, "u == u2")
+
+# Test sort
+
+u = UserList([1, 0])
+u.sort()
+check(u == u2, "u == u2")
+
+# Test extend
+
+u = u1[:]
+u.extend(u2)
+check(u == u1 + u2, "u == u1 + u2")
diff --git a/lib-python/2.2/test/test_userstring.py b/lib-python/2.2/test/test_userstring.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/test/test_userstring.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+import sys
+from test_support import verbose
+import string_tests
+# UserString is a wrapper around the native builtin string type.
+# UserString instances should behave similar to builtin string objects.
+# The test cases were in part derived from 'test_string.py'.
+from UserString import UserString
+
+if __name__ == "__main__":
+    verbose = '-v' in sys.argv
+
+tested_methods = {}
+
+def test(methodname, input, output, *args):
+    global tested_methods
+    tested_methods[methodname] = 1
+    if verbose:
+        print '%r.%s(%s)' % (input, methodname, ", ".join(map(repr, args))),
+    u = UserString(input)
+    objects = [input, u, UserString(u)]
+    res = [""] * 3
+    for i in range(3):
+        object = objects[i]
+        try:
+            f = getattr(object, methodname)
+        except AttributeError:
+            f = None
+            res[i] = AttributeError
+        else:
+            try:
+                res[i] = apply(f, args)
+            except:
+                res[i] = sys.exc_type
+    if res[0] == res[1] == res[2] == output:
+        if verbose:
+            print 'yes'
+    else:
+        if verbose:
+            print 'no'
+        print (methodname, input, output, args, res[0], res[1], res[2])
+
+string_tests.run_method_tests(test)
diff --git a/lib-python/2.2/test/test_uu.py b/lib-python/2.2/test/test_uu.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_uu.py
@@ -0,0 +1,158 @@
+"""
+Tests for uu module.
+Nick Mathewson
+"""
+
+from test_support import verify, TestFailed, verbose, TESTFN
+import sys, os
+import uu
+from StringIO import StringIO
+
+teststr = "The smooth-scaled python crept over the sleeping dog\n"
+expected = """\
+M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P
+(:6YG(&1O9PH """
+encoded1 = "begin 666 t1\n"+expected+"\n \nend\n"
+if verbose:
+    print '1. encode file->file'
+inp = StringIO(teststr)
+out = StringIO()
+uu.encode(inp, out, "t1")
+verify(out.getvalue() == encoded1)
+inp = StringIO(teststr)
+out = StringIO()
+uu.encode(inp, out, "t1", 0644)
+verify(out.getvalue() == "begin 644 t1\n"+expected+"\n \nend\n")
+
+if verbose:
+    print '2. decode file->file'
+inp = StringIO(encoded1)
+out = StringIO()
+uu.decode(inp, out)
+verify(out.getvalue() == teststr)
+inp = StringIO("""UUencoded files may contain many lines,
+                  even some that have 'begin' in them.\n"""+encoded1)
+out = StringIO()
+uu.decode(inp, out)
+verify(out.getvalue() == teststr)
+
+stdinsave = sys.stdin
+stdoutsave = sys.stdout
+try:
+    if verbose:
+        print '3. encode stdin->stdout'
+    sys.stdin = StringIO(teststr)
+    sys.stdout = StringIO()
+    uu.encode("-", "-", "t1", 0666)
+    verify(sys.stdout.getvalue() == encoded1)
+    if verbose:
+        print >>stdoutsave, '4. decode stdin->stdout'
+    sys.stdin = StringIO(encoded1)
+    sys.stdout = StringIO()
+    uu.decode("-", "-")
+    verify(sys.stdout.getvalue() == teststr)
+finally:
+    sys.stdin = stdinsave
+    sys.stdout = stdoutsave
+
+if verbose:
+    print '5. encode file->file'
+tmpIn  = TESTFN + "i"
+tmpOut = TESTFN + "o"
+try:
+    fin = open(tmpIn, 'wb')
+    fin.write(teststr)
+    fin.close()
+
+    fin = open(tmpIn, 'rb')
+    fout = open(tmpOut, 'w')
+    uu.encode(fin, fout, tmpIn, mode=0644)
+    fin.close()
+    fout.close()
+
+    fout = open(tmpOut, 'r')
+    s = fout.read()
+    fout.close()
+    verify(s == 'begin 644 ' + tmpIn + '\n' + expected + '\n \nend\n')
+
+    os.unlink(tmpIn)
+    if verbose:
+        print '6. decode file-> file'
+    uu.decode(tmpOut)
+    fin = open(tmpIn, 'rb')
+    s = fin.read()
+    fin.close()
+    verify(s == teststr)
+    # XXX is there an xp way to verify the mode?
+
+finally:
+    try:
+        fin.close()
+    except:
+        pass
+    try:
+        fout.close()
+    except:
+        pass
+    try:
+        os.unlink(tmpIn)
+    except:
+        pass
+    try:
+        os.unlink(tmpOut)
+    except:
+        pass
+
+if verbose:
+    print '7. error: truncated input'
+inp = StringIO("begin 644 t1\n"+expected)
+out = StringIO()
+try:
+    uu.decode(inp, out)
+    raise TestFailed("No exception thrown")
+except uu.Error, e:
+    verify(str(e) == 'Truncated input file')
+
+if verbose:
+    print '8. error: missing begin'
+inp = StringIO("")
+out = StringIO()
+try:
+    uu.decode(inp, out)
+    raise TestFailed("No exception thrown")
+except uu.Error, e:
+    verify(str(e) == 'No valid begin line found in input file')
+
+# Test to verify that decode() will refuse to overwrite an existing file
+import tempfile
+outfile = tempfile.mktemp()
+inp = StringIO('Here is a message to be uuencoded')
+out = StringIO()
+uu.encode(inp, out, outfile)
+out.seek(0)
+try:
+    if verbose:
+        print '9. decode w/file not exists is okay'
+    uu.decode(out)
+    if not os.path.exists(outfile):
+        raise TestFailed('uudecode w/ out_file=None failed')
+    fp = open(outfile)
+    data = fp.read()
+    fp.close()
+    if data <> inp.getvalue():
+        raise TestFailed('uudecode stored something weird')
+    # Try to write it again, which should cause a failure
+    if verbose:
+        print '10. uudecode w/file exists fails'
+    out.seek(0)
+    try:
+        uu.decode(out)
+    except uu.Error:
+        pass
+    else:
+        raise TestFailed('expected to get a "file exists" error')
+finally:
+    try:
+        os.unlink(outfile)
+    except OSError:
+        pass
diff --git a/lib-python/2.2/test/test_wave.py b/lib-python/2.2/test/test_wave.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_wave.py
@@ -0,0 +1,34 @@
+from test_support import TestFailed
+import os, tempfile
+import wave
+
+def check(t, msg=None):
+    if not t:
+        raise TestFailed, msg
+
+nchannels = 2
+sampwidth = 2
+framerate = 8000
+nframes = 100
+
+testfile = tempfile.mktemp()
+
+f = wave.open(testfile, 'wb')
+f.setnchannels(nchannels)
+f.setsampwidth(sampwidth)
+f.setframerate(framerate)
+f.setnframes(nframes)
+output = '\0' * nframes * nchannels * sampwidth
+f.writeframes(output)
+f.close()
+
+f = wave.open(testfile, 'rb')
+check(nchannels == f.getnchannels(), "nchannels")
+check(sampwidth == f.getsampwidth(), "sampwidth")
+check(framerate == f.getframerate(), "framerate")
+check(nframes == f.getnframes(), "nframes")
+input = f.readframes(nframes)
+check(input == output, "data")
+f.close()
+
+os.remove(testfile)
diff --git a/lib-python/2.2/test/test_weakref.py b/lib-python/2.2/test/test_weakref.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_weakref.py
@@ -0,0 +1,573 @@
+import sys
+import unittest
+import UserList
+import weakref
+
+import test_support
+
+
class C:
    """Trivial class whose instances and methods serve as weakref targets."""

    def method(self):
        """Do nothing; exists only so weak refs to methods can be made."""
        return None
+
+
class Callable:
    """Callable instance used to exercise weakref proxies of callables.

    The most recent call argument is remembered in the ``bar`` attribute
    so tests can verify a call went through a proxy to the original.
    """

    bar = None  # class-level default; shadowed per-instance on first call

    def __call__(self, x):
        # Record the argument for later inspection by the test.
        self.bar = x
+
+
def create_function():
    """Return a fresh function object (a valid weak-reference target)."""
    def _inner():
        pass
    return _inner
+
def create_bound_method():
    """Return a method bound to a fresh, otherwise unreferenced C instance."""
    instance = C()
    return instance.method
+
def create_unbound_method():
    """Return C's method without binding it to any instance."""
    return getattr(C, 'method')
+
+
class TestBase(unittest.TestCase):
    """Common fixture: counts weakref callback invocations in ``cbcalled``."""

    def setUp(self):
        # Reset the callback counter before every test method runs.
        self.cbcalled = 0

    def callback(self, ref):
        # Invoked by dying weak references; simply tally the call.
        self.cbcalled = self.cbcalled + 1
+
+
class ReferencesTestCase(TestBase):
    """Behaviour of weakref.ref and weakref.proxy: creation, invalidation,
    callbacks, object re-use and attribute forwarding.

    NOTE(review): these tests rely on CPython-style immediate reclamation
    when the last strong reference is deleted; statement order matters.
    """

    def test_basic_ref(self):
        # Plain refs must work for instances, functions and (un)bound methods.
        self.check_basic_ref(C)
        self.check_basic_ref(create_function)
        self.check_basic_ref(create_bound_method)
        self.check_basic_ref(create_unbound_method)

    def test_basic_callback(self):
        self.check_basic_callback(C)
        self.check_basic_callback(create_function)
        self.check_basic_callback(create_bound_method)
        self.check_basic_callback(create_unbound_method)

    def test_multiple_callbacks(self):
        # Two refs with callbacks on one object: both fire when it dies.
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del o
        self.assert_(ref1() is None,
                     "expected reference to be invalidated")
        self.assert_(ref2() is None,
                     "expected reference to be invalidated")
        self.assert_(self.cbcalled == 2,
                     "callback not called the right number of times")

    def test_multiple_selfref_callbacks(self):
        """Make sure all references are invalidated before callbacks
        are called."""
        #
        # What's important here is that we're using the first
        # reference in the callback invoked on the second reference
        # (the most recently created ref is cleaned up first).  This
        # tests that all references to the object are invalidated
        # before any of the callbacks are invoked, so that we only
        # have one invocation of _weakref.c:cleanup_helper() active
        # for a particular object at a time.
        #
        def callback(object, self=self):
            self.ref()
        c = C()
        self.ref = weakref.ref(c, callback)
        ref1 = weakref.ref(c, callback)
        del c

    def test_proxy_ref(self):
        # A dead proxy must raise ReferenceError on any attribute access.
        o = C()
        o.bar = 1
        ref1 = weakref.proxy(o, self.callback)
        ref2 = weakref.proxy(o, self.callback)
        del o

        def check(proxy):
            proxy.bar

        self.assertRaises(weakref.ReferenceError, check, ref1)
        self.assertRaises(weakref.ReferenceError, check, ref2)
        self.assert_(self.cbcalled == 2)

    def check_basic_ref(self, factory):
        # Helper: a live object's ref must dereference to that same object.
        o = factory()
        ref = weakref.ref(o)
        self.assert_(ref() is not None,
                     "weak reference to live object should be live")
        o2 = ref()
        self.assert_(o is o2,
                     "<ref>() should return original object if live")

    def check_basic_callback(self, factory):
        # Helper: deleting the last strong ref fires the callback exactly once.
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assert_(self.cbcalled == 1,
                     "callback did not properly set 'cbcalled'")
        self.assert_(ref() is None,
                     "ref2 should be dead after deleting object reference")

    def test_ref_reuse(self):
        # Refs without callbacks on the same object are shared, not duplicated.
        o = C()
        ref1 = weakref.ref(o)
        # create a proxy to make sure that there's an intervening creation
        # between these two; it should make no difference
        proxy = weakref.proxy(o)
        ref2 = weakref.ref(o)
        self.assert_(ref1 is ref2,
                     "reference object w/out callback should be re-used")

        o = C()
        proxy = weakref.proxy(o)
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o)
        self.assert_(ref1 is ref2,
                     "reference object w/out callback should be re-used")
        self.assert_(weakref.getweakrefcount(o) == 2,
                     "wrong weak ref count for object")
        del proxy
        self.assert_(weakref.getweakrefcount(o) == 1,
                     "wrong weak ref count for object after deleting proxy")

    def test_proxy_reuse(self):
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assert_(proxy1 is proxy2,
                     "proxy object w/out callback should have been re-used")

    def test_basic_proxy(self):
        o = C()
        self.check_proxy(o, weakref.proxy(o))

        # Proxies must forward sequence protocol and truth value as well.
        L = UserList.UserList()
        p = weakref.proxy(L)
        self.failIf(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.failUnless(p, "proxy for non-empty UserList should be true")
        p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.failUnless(3 in p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = UserList.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)

    def test_callable_proxy(self):
        o = Callable()
        ref1 = weakref.proxy(o)

        self.check_proxy(o, ref1)

        self.assert_(type(ref1) is weakref.CallableProxyType,
                     "proxy is not of callable type")
        ref1('twinkies!')
        self.assert_(o.bar == 'twinkies!',
                     "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assert_(o.bar == 'Splat.',
                     "call through proxy not passed through to original")

        # expect due to too few args
        self.assertRaises(TypeError, ref1)

        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)

    def check_proxy(self, o, proxy):
        # Helper: attribute reads/writes/deletes pass through in both
        # directions between the object and its proxy.
        o.foo = 1
        self.assert_(proxy.foo == 1,
                     "proxy does not reflect attribute addition")
        o.foo = 2
        self.assert_(proxy.foo == 2,
                     "proxy does not reflect attribute modification")
        del o.foo
        self.assert_(not hasattr(proxy, 'foo'),
                     "proxy does not reflect attribute removal")

        proxy.foo = 1
        self.assert_(o.foo == 1,
                     "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assert_(
            o.foo == 2,
            "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assert_(not hasattr(o, 'foo'),
                     "object does not reflect attribute removal via proxy")

    def test_getweakrefcount(self):
        # Refs and proxies are distinct objects but both are counted.
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assert_(weakref.getweakrefcount(o) == 2,
                     "got wrong number of weak reference objects")

        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assert_(weakref.getweakrefcount(o) == 4,
                     "got wrong number of weak reference objects")

    def test_getweakrefs(self):
        # Deleting a ref removes it from the object's list of weakrefs.
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assert_(weakref.getweakrefs(o) == [ref2],
                     "list of refs does not match")

        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assert_(weakref.getweakrefs(o) == [ref1],
                     "list of refs does not match")

    def test_newstyle_number_ops(self):
        # Numeric operators must work with the proxy on either side.
        class F(float):
            pass
        f = F(2.0)
        p = weakref.proxy(f)
        self.assert_(p + 1.0 == 3.0)
        self.assert_(1.0 + p == 3.0)  # this used to SEGV

    def test_callbacks_protected(self):
        """Callbacks protected from already-set exceptions?"""
        # Regression test for SF bug #478534.
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
+
+
class Object:
    """Weak-referenceable wrapper carrying a single identifying value.

    The constructor argument is kept in ``arg`` and shown by repr() so
    failing dictionary tests can name the entry involved.
    """

    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return "<Object %r>" % self.arg
+
+
class MappingTestCase(TestBase):
    """Behaviour of WeakValueDictionary and WeakKeyDictionary.

    NOTE(review): like ReferencesTestCase, these tests assume entries
    vanish as soon as the last strong reference to a key/value dies.
    """

    # Number of entries created by the make_weak_*_dict() helpers.
    COUNT = 10

    def test_weak_values(self):
        #
        #  This exercises d.copy(), d.items(), d[], del d[], len(d).
        #
        dict, objects = self.make_weak_valued_dict()
        for o in objects:
            self.assert_(weakref.getweakrefcount(o) == 1,
                         "wrong number of weak references to %r!" % o)
            self.assert_(o is dict[o.arg],
                         "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        items1.sort()
        items2.sort()
        self.assert_(items1 == items2,
                     "cloning of weak-valued dictionary did not work!")
        del items1, items2
        self.assert_(len(dict) == self.COUNT)
        del objects[0]
        self.assert_(len(dict) == (self.COUNT - 1),
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assert_(len(dict) == 0,
                     "deleting the values did not clear the dictionary")
        # regression on SF bug #447152:
        dict = weakref.WeakValueDictionary()
        self.assertRaises(KeyError, dict.__getitem__, 1)
        dict[2] = C()
        self.assertRaises(KeyError, dict.__getitem__, 2)

    def test_weak_keys(self):
        #
        #  This exercises d.copy(), d.items(), d[] = v, d[], del d[],
        #  len(d), d.has_key().
        #
        dict, objects = self.make_weak_keyed_dict()
        for o in objects:
            self.assert_(weakref.getweakrefcount(o) == 1,
                         "wrong number of weak references to %r!" % o)
            self.assert_(o.arg is dict[o],
                         "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        items1.sort()
        items2.sort()
        self.assert_(items1 == items2,
                     "cloning of weak-keyed dictionary did not work!")
        del items1, items2
        self.assert_(len(dict) == self.COUNT)
        del objects[0]
        self.assert_(len(dict) == (self.COUNT - 1),
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assert_(len(dict) == 0,
                     "deleting the keys did not clear the dictionary")
        o = Object(42)
        dict[o] = "What is the meaning of the universe?"
        self.assert_(dict.has_key(o))
        self.assert_(not dict.has_key(34))

    def test_weak_keyed_iters(self):
        dict, objects = self.make_weak_keyed_dict()
        self.check_iters(dict)

    def test_weak_valued_iters(self):
        dict, objects = self.make_weak_valued_dict()
        self.check_iters(dict)

    def check_iters(self, dict):
        # Helper: each iterator variant must visit every entry exactly once.
        # item iterator:
        items = dict.items()
        for item in dict.iteritems():
            items.remove(item)
        self.assert_(len(items) == 0, "iteritems() did not touch all items")

        # key iterator, via __iter__():
        keys = dict.keys()
        for k in dict:
            keys.remove(k)
        self.assert_(len(keys) == 0, "__iter__() did not touch all keys")

        # key iterator, via iterkeys():
        keys = dict.keys()
        for k in dict.iterkeys():
            keys.remove(k)
        self.assert_(len(keys) == 0, "iterkeys() did not touch all keys")

        # value iterator:
        values = dict.values()
        for v in dict.itervalues():
            values.remove(v)
        self.assert_(len(values) == 0, "itervalues() did not touch all values")

    def test_make_weak_keyed_dict_from_dict(self):
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        self.assert_(dict[o] == 364)

    def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        dict2 = weakref.WeakKeyDictionary(dict)
        self.assert_(dict[o] == 364)

    def make_weak_keyed_dict(self):
        # Returns the dict plus the list whose entries keep the keys alive.
        dict = weakref.WeakKeyDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o] = o.arg
        return dict, objects

    def make_weak_valued_dict(self):
        # Returns the dict plus the list whose entries keep the values alive.
        dict = weakref.WeakValueDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o.arg] = o
        return dict, objects

    def check_popitem(self, klass, key1, value1, key2, value2):
        # Helper: popitem() must drain a two-entry dict pair by pair,
        # returning matching key/value combinations in either order.
        weakdict = klass()
        weakdict[key1] = value1
        weakdict[key2] = value2
        self.assert_(len(weakdict) == 2)
        k, v = weakdict.popitem()
        self.assert_(len(weakdict) == 1)
        if k is key1:
            self.assert_(v is value1)
        else:
            self.assert_(v is value2)
        k, v = weakdict.popitem()
        self.assert_(len(weakdict) == 0)
        if k is key1:
            self.assert_(v is value1)
        else:
            self.assert_(v is value2)

    def test_weak_valued_dict_popitem(self):
        self.check_popitem(weakref.WeakValueDictionary,
                           "key1", C(), "key2", C())

    def test_weak_keyed_dict_popitem(self):
        self.check_popitem(weakref.WeakKeyDictionary,
                           C(), "value 1", C(), "value 2")

    def check_setdefault(self, klass, key, value1, value2):
        # Helper: a second setdefault() must not replace the first value.
        self.assert_(value1 is not value2,
                     "invalid test"
                     " -- value parameters must be distinct objects")
        weakdict = klass()
        o = weakdict.setdefault(key, value1)
        self.assert_(o is value1)
        self.assert_(weakdict.has_key(key))
        self.assert_(weakdict.get(key) is value1)
        self.assert_(weakdict[key] is value1)

        o = weakdict.setdefault(key, value2)
        self.assert_(o is value1)
        self.assert_(weakdict.has_key(key))
        self.assert_(weakdict.get(key) is value1)
        self.assert_(weakdict[key] is value1)

    def test_weak_valued_dict_setdefault(self):
        self.check_setdefault(weakref.WeakValueDictionary,
                              "key", C(), C())

    def test_weak_keyed_dict_setdefault(self):
        self.check_setdefault(weakref.WeakKeyDictionary,
                              C(), "value 1", "value 2")

    def check_update(self, klass, dict):
        #
        #  This exercises d.update(), len(d), d.keys(), d.has_key(),
        #  d.get(), d[].
        #
        weakdict = klass()
        weakdict.update(dict)
        self.assert_(len(weakdict) == len(dict))
        for k in weakdict.keys():
            self.assert_(dict.has_key(k),
                         "mysterious new key appeared in weak dict")
            v = dict.get(k)
            self.assert_(v is weakdict[k])
            self.assert_(v is weakdict.get(k))
        for k in dict.keys():
            self.assert_(weakdict.has_key(k),
                         "original key disappeared in weak dict")
            v = dict[k]
            self.assert_(v is weakdict[k])
            self.assert_(v is weakdict.get(k))

    def test_weak_valued_dict_update(self):
        self.check_update(weakref.WeakValueDictionary,
                          {1: C(), 'a': C(), C(): C()})

    def test_weak_keyed_dict_update(self):
        self.check_update(weakref.WeakKeyDictionary,
                          {C(): 1, C(): 2, C(): 3})

    def test_weak_keyed_delitem(self):
        d = weakref.WeakKeyDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d[o1] = 'something'
        d[o2] = 'something'
        self.assert_(len(d) == 2)
        del d[o1]
        self.assert_(len(d) == 1)
        self.assert_(d.keys() == [o2])

    def test_weak_valued_delitem(self):
        d = weakref.WeakValueDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d['something'] = o1
        d['something else'] = o2
        self.assert_(len(d) == 2)
        del d['something']
        self.assert_(len(d) == 1)
        self.assert_(d.items() == [('something else', o2)])

    def test_weak_keyed_bad_delitem(self):
        d = weakref.WeakKeyDictionary()
        o = Object('1')
        # An attempt to delete an object that isn't there should raise
        # KeyError.  It didn't before 2.3.
        self.assertRaises(KeyError, d.__delitem__, o)
        self.assertRaises(KeyError, d.__getitem__, o)

        # If a key isn't of a weakly referencable type, __getitem__ and
        # __setitem__ raise TypeError.  __delitem__ should too.
        self.assertRaises(TypeError, d.__delitem__,  13)
        self.assertRaises(TypeError, d.__getitem__,  13)
        self.assertRaises(TypeError, d.__setitem__,  13, 13)

    def test_weak_keyed_cascading_deletes(self):
        # SF bug 742860.  For some reason, before 2.3 __delitem__ iterated
        # over the keys via self.data.iterkeys().  If things vanished from
        # the dict during this (or got added), that caused a RuntimeError.

        d = weakref.WeakKeyDictionary()
        mutate = False

        class C(object):
            def __init__(self, i):
                self.value = i
            def __hash__(self):
                return hash(self.value)
            def __eq__(self, other):
                if mutate:
                    # Side effect that mutates the dict, by removing the
                    # last strong reference to a key.
                    del objs[-1]
                return self.value == other.value

        objs = [C(i) for i in range(4)]
        for o in objs:
            d[o] = o.value
        del o   # now the only strong references to keys are in objs
        # Find the order in which iterkeys sees the keys.
        objs = d.keys()
        # Reverse it, so that the iteration implementation of __delitem__
        # has to keep looping to find the first object we delete.
        objs.reverse()

        # Turn on mutation in C.__eq__.  The first time thru the loop,
        # under the iterkeys() business the first comparison will delete
        # the last item iterkeys() would see, and that causes a
        #     RuntimeError: dictionary changed size during iteration
        # when the iterkeys() loop goes around to try comparing the next
        # key.  After this was fixed, it just deletes the last object *our*
        # "for o in obj" loop would have gotten to.
        mutate = True
        count = 0
        for o in objs:
            count += 1
            del d[o]
        self.assertEqual(len(d), 0)
        self.assertEqual(count, 2)
+
def test_main():
    """Collect both test cases into one suite and run it via test_support."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # Keep the original ordering: references first, then mappings.
    for case in (ReferencesTestCase, MappingTestCase):
        suite.addTest(loader.loadTestsFromTestCase(case))
    test_support.run_suite(suite)


if __name__ == "__main__":
    test_main()
diff --git a/lib-python/2.2/test/test_winreg.py b/lib-python/2.2/test/test_winreg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_winreg.py
@@ -0,0 +1,151 @@
# Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey

from _winreg import *
import os, sys

from test_support import verify, have_unicode

# All registry work happens under this throwaway key, created (and
# finally deleted) beneath whichever root key TestAll() is given.
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"

# (value name, data, registry type) triples covering each supported type.
test_data = [
    ("Int Value",     45,                                      REG_DWORD),
    ("String Val",    "A string value",                        REG_SZ,),
    ("StringExpand",  "The path is %path%",                    REG_EXPAND_SZ),
    ("Multi-string",  ["Lots", "of", "string", "values"],      REG_MULTI_SZ),
    ("Raw Data",      ("binary"+chr(0)+"data"),                REG_BINARY),
]
if have_unicode:
    # Repeat with unicode names/values on builds that support unicode.
    test_data+=[
    (unicode("Unicode Val"),  unicode("A Unicode value"),                      REG_SZ,),
    ("UnicodeExpand", unicode("The path is %path%"),                   REG_EXPAND_SZ),
    ("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"), unicode("values")], REG_MULTI_SZ),
    ("Multi-mixed",   [unicode("Unicode"), unicode("and"), "string", "values"],REG_MULTI_SZ),
    ]
+
def WriteTestData(root_key):
    """Create test_key_name under root_key, populate a sub-key with every
    entry in test_data, then verify both ways of closing a key really
    close the underlying handle."""
    # Set the default value for this key.
    SetValue(root_key, test_key_name, REG_SZ, "Default value")
    key = CreateKey(root_key, test_key_name)
    # Create a sub-key
    sub_key = CreateKey(key, "sub_key")
    # Give the sub-key some named values

    for value_name, value_data, value_type in test_data:
        SetValueEx(sub_key, value_name, 0, value_type, value_data)

    # Check we wrote as many items as we thought.
    nkeys, nvalues, since_mod = QueryInfoKey(key)
    verify(nkeys==1, "Not the correct number of sub keys")
    verify(nvalues==1, "Not the correct number of values")
    nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
    verify(nkeys==0, "Not the correct number of sub keys")
    verify(nvalues==len(test_data), "Not the correct number of values")
    # Close this key this way...
    # (but before we do, copy the key as an integer - this allows
    # us to test that the key really gets closed).
    int_sub_key = int(sub_key)
    CloseKey(sub_key)
    try:
        # Using the raw handle after closing must fail.
        QueryInfoKey(int_sub_key)
        raise RuntimeError, "It appears the CloseKey() function does not close the actual key!"
    except EnvironmentError:
        pass
    # ... and close that key that way :-)
    int_key = int(key)
    key.Close()
    try:
        QueryInfoKey(int_key)
        raise RuntimeError, "It appears the key.Close() function does not close the actual key!"
    except EnvironmentError:
        pass
+
def ReadTestData(root_key):
    """Open the key created by WriteTestData and verify its default value
    and every entry of the sub-key, by enumeration and by direct query."""
    # Check we can get default value for this key.
    val = QueryValue(root_key, test_key_name)
    verify(val=="Default value", "Registry didn't give back the correct value")

    key = OpenKey(root_key, test_key_name)
    # Read the sub-keys
    sub_key = OpenKey(key, "sub_key")
    # Check I can enumerate over the values.
    index = 0
    while 1:
        try:
            data = EnumValue(sub_key, index)
        except EnvironmentError:
            # Enumeration past the last value raises; that ends the scan.
            break
        verify(data in test_data, "Didn't read back the correct test data")
        index = index + 1
    verify(index==len(test_data), "Didn't read the correct number of items")
    # Check I can directly access each item
    for value_name, value_data, value_type in test_data:
        read_val, read_typ = QueryValueEx(sub_key, value_name)
        verify(read_val==value_data and read_typ == value_type, \
               "Could not directly read the value" )
    sub_key.Close()
    # Enumerate our main key.
    read_val = EnumKey(key, 0)
    verify(read_val == "sub_key", "Read subkey value wrong")
    try:
        EnumKey(key, 1)
        verify(0, "Was able to get a second key when I only have one!")
    except EnvironmentError:
        pass

    key.Close()
+
def DeleteTestData(root_key):
    """Delete every value, the sub-key and finally the test key itself,
    checking each deletion takes effect and cannot be repeated."""
    key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
    sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
    # It is not necessary to delete the values before deleting
    # the key (although subkeys must not exist).  We delete them
    # manually just to prove we can :-)
    for value_name, value_data, value_type in test_data:
        DeleteValue(sub_key, value_name)

    nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
    verify(nkeys==0 and nvalues==0, "subkey not empty before delete")
    sub_key.Close()
    DeleteKey(key, "sub_key")

    try:
        # Shouldnt be able to delete it twice!
        DeleteKey(key, "sub_key")
        verify(0, "Deleting the key twice succeeded")
    except EnvironmentError:
        pass
    key.Close()
    DeleteKey(root_key, test_key_name)
    # Opening should now fail!
    try:
        key = OpenKey(root_key, test_key_name)
        verify(0, "Could open the non-existent key")
    except WindowsError: # Use this error name this time
        pass
+
def TestAll(root_key):
    """Run the full write/read/delete registry cycle against root_key."""
    for step in (WriteTestData, ReadTestData, DeleteTestData):
        step(root_key)
+
# Test on my local machine.
TestAll(HKEY_CURRENT_USER)
print "Local registry tests worked"
# An optional "--remote <machine>" argument re-runs everything against
# another machine's registry via ConnectRegistry.
try:
    remote_name = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
    # No --remote flag, or it was the last argument with no machine name.
    remote_name = None

if remote_name is not None:
    try:
        remote_key = ConnectRegistry(remote_name, HKEY_CURRENT_USER)
    except EnvironmentError, exc:
        print "Could not connect to the remote machine -", exc.strerror
        remote_key = None
    if remote_key is not None:
        TestAll(remote_key)
        print "Remote registry tests worked"
else:
    print "Remote registry calls can be tested using",
    print "'test_winreg.py --remote \\\\machine_name'"
diff --git a/lib-python/2.2/test/test_winsound.py b/lib-python/2.2/test/test_winsound.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_winsound.py
@@ -0,0 +1,6 @@
# Ridiculously simple test of the winsound module for Windows.

import winsound
# Sweep from 100 Hz up to 1900 Hz in 100 Hz steps, 75 ms per beep.
for i in range(100, 2000, 100):
    winsound.Beep(i, 75)
print "Hopefully you heard some sounds increasing in frequency!"
diff --git a/lib-python/2.2/test/test_xmllib.py b/lib-python/2.2/test/test_xmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_xmllib.py
@@ -0,0 +1,35 @@
'''Test module to test the xmllib module.
   Sjoerd Mullender
'''

# A small but tricky document: XML declaration, comment, processing
# instruction and an internal DTD subset all ahead of the root element.
testdoc = """\
<?xml version="1.0" encoding="UTF-8" standalone='yes' ?>
<!-- comments aren't allowed before the <?xml?> tag,
     but they are allowed before the <!DOCTYPE> tag -->
<?processing instructions are allowed in the same places as comments ?>
<!DOCTYPE greeting [
  <!ELEMENT greeting (#PCDATA)>
]>
<greeting>Hello, world!</greeting>
"""

import test_support
import unittest
import xmllib
+
+
class XMLParserTestCase(unittest.TestCase):
    """Smoke test: XMLParser must accept testdoc fed one character at a time."""

    def test_simple(self):
        parser = xmllib.XMLParser()
        # Character-at-a-time feeding exercises the parser's buffering.
        for ch in testdoc:
            parser.feed(ch)
        parser.close()
+
+
def test_main():
    """Entry point used by regrtest: run the single test case."""
    test_support.run_unittest(XMLParserTestCase)


if __name__ == "__main__":
    test_main()
diff --git a/lib-python/2.2/test/test_xmlrpc.py b/lib-python/2.2/test/test_xmlrpc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_xmlrpc.py
@@ -0,0 +1,37 @@
+import sys
+import test_support
+import unittest
+import xmlrpclib
+
+alist = [{'astring': 'foo at bar.baz.spam',
+          'afloat': 7283.43,
+          'anint': 2**20,
+          'ashortlong': 2L,
+          'anotherlist': ['.zyx.41'],
+          'abase64': xmlrpclib.Binary("my dog has fleas"),
+          'boolean': xmlrpclib.False,
+          }]
+
class XMLRPCTestCase(unittest.TestCase):
    """Round-trip and error-path checks for xmlrpclib.dumps()/loads()."""

    def test_dump_load(self):
        # dumps wraps the value in a params tuple; loads returns
        # (params, methodname), hence the double indexing.
        marshalled = xmlrpclib.dumps((alist,))
        self.assertEquals(alist, xmlrpclib.loads(marshalled)[0][0])

    def test_dump_big_long(self):
        # Longs beyond 32 bits cannot be marshalled as an XML-RPC <int>.
        self.assertRaises(OverflowError, xmlrpclib.dumps, (2 ** 99,))

    def test_dump_bad_dict(self):
        # Only string keys can be marshalled into a <struct>.
        self.assertRaises(TypeError, xmlrpclib.dumps, ({(1, 2, 3): 1},))

    def test_dump_big_int(self):
        # Only meaningful on builds where a plain int exceeds 32 bits.
        if sys.maxint > 2 ** 31 - 1:
            self.assertRaises(OverflowError, xmlrpclib.dumps,
                              (int(2 ** 34),))
+
def test_main():
    """Entry point used by regrtest: run the single test case."""
    test_support.run_unittest(XMLRPCTestCase)


if __name__ == "__main__":
    test_main()
diff --git a/lib-python/2.2/test/test_xreadline.py b/lib-python/2.2/test/test_xreadline.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_xreadline.py
@@ -0,0 +1,43 @@
+from test_support import verbose
+
class XReader:
    """Fake file object whose readlines() returns one fewer line per call.

    Starting from a count of 5, each call first decrements the count and
    then returns that many numbered lines, so successive batches shrink
    until an empty list signals exhaustion.
    """

    def __init__(self):
        self.count = 5

    def readlines(self, sizehint=None):
        self.count -= 1
        return ["%d\n" % n for n in range(self.count)]
+
class Null:
    """Featureless object: deliberately has no readlines() attribute."""
+
import xreadlines


# Index into xresult while iterating; doubles as a count of lines seen.
lineno = 0

# An object with no readlines() method at all must be rejected.
try:
    xreadlines.xreadlines(Null())[0]
except AttributeError, detail:
    print "AttributeError (expected)"
else:
    print "Did not throw attribute error"

# Passing the class itself (readlines not bound) must be rejected.
try:
    xreadlines.xreadlines(XReader)[0]
except TypeError, detail:
    print "TypeError (expected)"
else:
    print "Did not throw type error"

# Index 1 without consuming index 0 first: xreadlines objects only
# support strictly sequential access.
try:
    xreadlines.xreadlines(XReader())[1]
except RuntimeError, detail:
    print "RuntimeError (expected):", detail
else:
    print "Did not throw runtime error"

# Expected concatenation of XReader's successive, shrinking batches.
xresult = ['0\n', '1\n', '2\n', '3\n', '0\n', '1\n', '2\n', '0\n', '1\n', '0\n']
for line in xreadlines.xreadlines(XReader()):
    if line != xresult[lineno]:
        print "line %d differs" % lineno
    lineno += 1
diff --git a/lib-python/2.2/test/test_zipfile.py b/lib-python/2.2/test/test_zipfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_zipfile.py
@@ -0,0 +1,78 @@
+import zlib # implied prerequisite
+import zipfile, os, StringIO, tempfile
+from test_support import TestFailed
+
# Scratch file names in the current directory ("junk9630.tmp" holds the
# source data, "junk9708.tmp" the on-disk archive); removed in the
# finally clause below.
srcname = "junk9630"+os.extsep+"tmp"
zipname = "junk9708"+os.extsep+"tmp"
+
+
def zipTest(f, compression, srccontents):
    """Write srcname into a ZIP on f under two names, read both back and
    verify each payload equals srccontents."""
    archive = zipfile.ZipFile(f, "w", compression)   # Create the ZIP archive
    archive.write(srcname, "another" + os.extsep + "name")
    archive.write(srcname, srcname)
    archive.close()

    archive = zipfile.ZipFile(f, "r", compression)   # Read the ZIP archive
    readData2 = archive.read(srcname)
    readData1 = archive.read("another" + os.extsep + "name")
    archive.close()

    if readData1 != srccontents or readData2 != srccontents:
        raise TestFailed("Written data doesn't equal read data.")
+
+
try:
    fp = open(srcname, "wb")               # Make a source file with some lines
    for i in range(0, 1000):
        fp.write("Test of zipfile line %d.\n" % i)
    fp.close()

    fp = open(srcname, "rb")
    writtenData = fp.read()
    fp.close()

    # Try each archive target kind - a file name, a real file object and
    # an in-memory StringIO - first stored, then deflated.
    for file in (zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
        zipTest(file, zipfile.ZIP_STORED, writtenData)

    for file in (zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
        zipTest(file, zipfile.ZIP_DEFLATED, writtenData)

finally:
    if os.path.isfile(srcname):           # Remove temporary files
        os.unlink(srcname)
    if os.path.isfile(zipname):
        os.unlink(zipname)
+
+
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file.  If it doesn't, the traceback
# holds a reference to the ZipFile object and, indirectly, the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open.  This is SF bug #412214.
#
fp = open(srcname, "w")
fp.write("this is not a legal zip file\n")
fp.close()
try:
    zf = zipfile.ZipFile(srcname)
except zipfile.BadZipfile:
    # unlink() succeeding here proves the constructor closed its file.
    os.unlink(srcname)


# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
try:
    zipfile.ZipFile(srcname)
except IOError:
    # The bug we're testing for caused an AttributeError to be raised
    # when a ZipFile instance was created for a file that did not
    # exist; the .fp member was not initialized but was needed by the
    # __del__() method.  Since the AttributeError is in the __del__(),
    # it is ignored, but the user should be sufficiently annoyed by
    # the message on the output that regression will be noticed
    # quickly.
    pass
else:
    raise TestFailed("expected creation of readable ZipFile without\n"
                     "  a file to raise an IOError.")
diff --git a/lib-python/2.2/test/test_zlib.py b/lib-python/2.2/test/test_zlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/test_zlib.py
@@ -0,0 +1,226 @@
+import zlib
+from test_support import TestFailed
+import sys
+import imp
+
+try:
+    t = imp.find_module('test_zlib')
+    file = t[0]
+except ImportError:
+    file = open(__file__)
+buf = file.read() * 8
+file.close()
+
+# test the checksums (hex so the test doesn't break on 64-bit machines)
+print hex(zlib.crc32('penguin')), hex(zlib.crc32('penguin', 1))
+print hex(zlib.adler32('penguin')), hex(zlib.adler32('penguin', 1))
+
+# make sure we generate some expected errors
+try:
+    zlib.compress('ERROR', zlib.MAX_WBITS + 1)
+except zlib.error, msg:
+    print "expecting", msg
+try:
+    zlib.compressobj(1, 8, 0)
+except ValueError, msg:
+    print "expecting", msg
+try:
+    zlib.decompressobj(0)
+except ValueError, msg:
+    print "expecting", msg
+
+x = zlib.compress(buf)
+y = zlib.decompress(x)
+if buf != y:
+    print "normal compression/decompression failed"
+else:
+    print "normal compression/decompression succeeded"
+
+buf = buf * 16
+
+co = zlib.compressobj(8, 8, -15)
+x1 = co.compress(buf)
+x2 = co.flush()
+try:
+    co.flush()
+    print "Oops - second flush worked when it should not have!"
+except zlib.error:
+    pass
+
+x = x1 + x2
+
+dc = zlib.decompressobj(-15)
+y1 = dc.decompress(x)
+y2 = dc.flush()
+y = y1 + y2
+if buf != y:
+    print "compress/decompression obj failed"
+else:
+    print "compress/decompression obj succeeded"
+
+co = zlib.compressobj(2, 8, -12, 9, 1)
+bufs = []
+for i in range(0, len(buf), 256):
+    bufs.append(co.compress(buf[i:i+256]))
+bufs.append(co.flush())
+combuf = ''.join(bufs)
+
+decomp1 = zlib.decompress(combuf, -12, -5)
+if decomp1 != buf:
+    print "decompress with init options failed"
+else:
+    print "decompress with init options succeeded"
+
+deco = zlib.decompressobj(-12)
+bufs = []
+for i in range(0, len(combuf), 128):
+    bufs.append(deco.decompress(combuf[i:i+128]))
+bufs.append(deco.flush())
+decomp2 = ''.join(bufs)
+if decomp2 != buf:
+    print "decompressobj with init options failed"
+else:
+    print "decompressobj with init options succeeded"
+
+print "should be '':", `deco.unconsumed_tail`
+
+# Check a decompression object with max_length specified
+deco = zlib.decompressobj(-12)
+cb = combuf
+bufs = []
+while cb:
+    max_length = 1 + len(cb)/10
+    chunk = deco.decompress(cb, max_length)
+    if len(chunk) > max_length:
+        print 'chunk too big (%d>%d)' % (len(chunk),max_length)
+    bufs.append(chunk)
+    cb = deco.unconsumed_tail
+bufs.append(deco.flush())
+decomp2 = ''.join(buf)
+if decomp2 != buf:
+    print "max_length decompressobj failed"
+else:
+    print "max_length decompressobj succeeded"
+
+# Misc tests of max_length
+deco = zlib.decompressobj(-12)
+try:
+    deco.decompress("", -1)
+except ValueError:
+    pass
+else:
+    print "failed to raise value error on bad max_length"
+print "unconsumed_tail should be '':", `deco.unconsumed_tail`
+
+# Test flush() with the various options, using all the different levels
+# in order to provide more variations.
+sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
+sync_opt = [getattr(zlib, opt) for opt in sync_opt if hasattr(zlib, opt)]
+
+for sync in sync_opt:
+    for level in range(10):
+        obj = zlib.compressobj( level )
+        d = obj.compress( buf[:3000] )
+        d = d + obj.flush( sync )
+        d = d + obj.compress( buf[3000:] )
+        d = d + obj.flush()
+        if zlib.decompress(d) != buf:
+            print "Decompress failed: flush mode=%i, level=%i" % (sync,level)
+        del obj
+
+# Test for the odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
+
+import random
+random.seed(1)
+
+print 'Testing on 17K of random data'
+
+if hasattr(zlib, 'Z_SYNC_FLUSH'):
+
+    # Create compressor and decompressor objects
+    c=zlib.compressobj(9)
+    d=zlib.decompressobj()
+
+    # Try 17K of data
+    # generate random data stream
+    a=""
+    for i in range(17*1024):
+        a=a+chr(random.randint(0,255))
+
+    # compress, sync-flush, and decompress
+    t = d.decompress( c.compress(a)+c.flush(zlib.Z_SYNC_FLUSH) )
+
+    # if decompressed data is different from the input data, choke.
+    if len(t) != len(a):
+        print len(a),len(t),len(d.unused_data)
+        raise TestFailed, "output of 17K doesn't match"
+
+def ignore():
+    """An empty function with a big string.
+
+    Make the compression algorithm work a little harder.
+    """
+
+    """
+LAERTES
+
+       O, fear me not.
+       I stay too long: but here my father comes.
+
+       Enter POLONIUS
+
+       A double blessing is a double grace,
+       Occasion smiles upon a second leave.
+
+LORD POLONIUS
+
+       Yet here, Laertes! aboard, aboard, for shame!
+       The wind sits in the shoulder of your sail,
+       And you are stay'd for. There; my blessing with thee!
+       And these few precepts in thy memory
+       See thou character. Give thy thoughts no tongue,
+       Nor any unproportioned thought his act.
+       Be thou familiar, but by no means vulgar.
+       Those friends thou hast, and their adoption tried,
+       Grapple them to thy soul with hoops of steel;
+       But do not dull thy palm with entertainment
+       Of each new-hatch'd, unfledged comrade. Beware
+       Of entrance to a quarrel, but being in,
+       Bear't that the opposed may beware of thee.
+       Give every man thy ear, but few thy voice;
+       Take each man's censure, but reserve thy judgment.
+       Costly thy habit as thy purse can buy,
+       But not express'd in fancy; rich, not gaudy;
+       For the apparel oft proclaims the man,
+       And they in France of the best rank and station
+       Are of a most select and generous chief in that.
+       Neither a borrower nor a lender be;
+       For loan oft loses both itself and friend,
+       And borrowing dulls the edge of husbandry.
+       This above all: to thine ownself be true,
+       And it must follow, as the night the day,
+       Thou canst not then be false to any man.
+       Farewell: my blessing season this in thee!
+
+LAERTES
+
+       Most humbly do I take my leave, my lord.
+
+LORD POLONIUS
+
+       The time invites you; go; your servants tend.
+
+LAERTES
+
+       Farewell, Ophelia; and remember well
+       What I have said to you.
+
+OPHELIA
+
+       'Tis in my memory lock'd,
+       And you yourself shall keep the key of it.
+
+LAERTES
+
+       Farewell.
+"""
diff --git a/lib-python/2.2/test/testall.py b/lib-python/2.2/test/testall.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/testall.py
@@ -0,0 +1,4 @@
+# Backward compatibility -- you should use regrtest instead of this module.
+import sys, regrtest
+sys.argv[1:] = ["-vv"]
+regrtest.main()
diff --git a/lib-python/2.2/test/testcodec.py b/lib-python/2.2/test/testcodec.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/testcodec.py
@@ -0,0 +1,48 @@
+""" Test Codecs (used by test_charmapcodec)
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright 2000 Guido van Rossum.
+
+"""#"
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self,input,errors='strict'):
+
+        return codecs.charmap_encode(input,errors,encoding_map)
+
+    def decode(self,input,errors='strict'):
+
+        return codecs.charmap_decode(input,errors,decoding_map)
+
+class StreamWriter(Codec,codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec,codecs.StreamReader):
+    pass
+
+### encodings module API
+
+def getregentry():
+
+    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+        0x78: u"abc", # 1-n decoding mapping
+        "abc": 0x0078,# 1-n encoding mapping
+        0x01: None,   # decoding mapping to <undefined>
+        0x79: u"",    # decoding mapping to <remove character>
+})
+
+### Encoding Map
+
+encoding_map = {}
+for k,v in decoding_map.items():
+    encoding_map[v] = k
diff --git a/lib-python/2.2/test/testimg.uue b/lib-python/2.2/test/testimg.uue
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/testimg.uue
@@ -0,0 +1,1170 @@
+begin 755 test.rawimg
+M___Y^/\A$Q3_*1LC_QL4'/\;%AS_%Q(6_QP7&?\B&AW_'A<7_QT5&/\7$A3_
+M&108_Q84%O\7%1?_$Q 6_Q<3'/\:$Q__'1<;_R$:&O\5$Q7_'!D8_SHW-O\\
+M.CW_141*_TE)3/]/3U+_'1DA_R,<)O\B'"3_249,_T=$2O]$0D7_.3<Z_T1"
+M1?].3$__44]2_QD2'/\;%![_*24M_U166_]55US_55=<_UQ>8_]<7F/_%1$9
+M_QH6'O\B(2C_65A>_U587?]87%[_65M>_Q at 5&_\@'"3_%Q8<_U%34?]<6E7_
+M5U9/_U-14_\:%A__'AHB_TA&2/\]/CC_24I$_RDG*O\B'B;_)",I_UI;7/];
+M65;_7EA6_X%\?/^!?GW_7E]@_U!45O]45US_4U5:_U)46_].4%?_2TU4_T9(
+M3_]#14S_1$E/_TA)4/](1T[_1D=._TI/5?])2E'_2$=._T-%3/])2U+_2$E0
+M_T9'3O]#1$O_0D1,_T!"2O]#14S_1DE._T5(3?]$1TS_0$-(_T1'3/]!0TC_
+M0T)(_S\^1/]#0DC_04-(_SD\0?\[0$7_0#]%_SY 1?\Z/$'_.CQ!_SH\0?\[
+M.S[_/SU _Q43%O\E(R;_%Q48_S0R-?\7%1C_)R4H_Q02%?\2$!/_%!(5_Q,1
+M%/\5$Q;_^>[U_SXT/O\F'"O_'QDA_QD4&/\6$17_%1 4_Q<2%O\:%1G_&!,7
+M_Q40%/\8$Q?_%Q03_Q<3%?\:%1O_'1<?_QL4'O\L)RW_(1T?_QP7&_\5$Q;_
+M'AP?_QT;'O\<&AW_03]"_TE'2O\:%A[_'AHC_QT8'/\8%!;_'!@:_Q\;'?\@
+M&Q__(ATA_R0?(_\H(R?_&1,;_Q at 4'/\='R3_2T]1_TU/4O]04%/_45%4_U14
+M5_\7$QO_&14=_R\M,/]I:6;_5597_U%34?]14$__'!<=_R,<*/\8%1O_6%A5
+M_UA85?];6%/_5%!2_QL7(/\;&B'_6%98_UM:4_];653_)2(A_R$=)?\<&1__
+M7UU at _T=%1_\M*RW_(B B_R at G)O]24TW_>7YX_Y&1CO]O:FS_5U=:_U)45_]1
+M4U;_4%)7_TY/5O])4%;_2$]5_T1(4/]%1E'_1DA0_T=)4/]&2$__1$9-_T%#
+M2O]$1DW_04-*_T!"2?] 0DG_04-*_T1&3?]!0TK_/D%&_T)%2O]!1$G_1$=,
+M_T%#2O]!0TK_/3]&_T!"2?] 0D?_/D!%_S]!1O\^0$7_.3M _ST_1/\\/D/_
+M.CQ!_ST[/O\W-3?_,C R_S N,/\M*2O_)B(D_Q82%/\7$Q7_%1 4_Q(-$?\7
+M$A;_%1 4_Q81%?__]?K_+R4N_T<^2_\A'"+_&A49_Q81%?\5$!3_&!,7_Q81
+M%?\6$17_%1 4_Q at 3%_\7$1G_%Q(8_PT(#/]V<G3_241*_QD5%_\:&!/_&!44
+M_Q(0$_\5$Q;_%1,6_Q,1%/\3$13_#0L._QD5'?\=&2+_'1@<_Q at 4%O\3#Q'_
+M%! 2_Q<2%O\6$17_&108_QP7&_\4$!C_%Q,;_QX;(?\=&Q[_(1\A_S$O,?\_
+M/3__3$I,_Q<3&_\9%1W_)2,F_U%13O]14U'_3U),_U)23_\=&!S_(QPF_QH7
+M'?]965;_5552_U933O]85%;_&Q<@_QD8'_]85U;_65I4_UA94O\E(R7_'!LA
+M_QL:(/]<6V'_75QB_V%@9O\7%AS_)"0G_U=86?]04$W_5U%/_UY65/]64E#_
+M>GUW_V!C7?]765?_5E=8_TQ15O]+3U?_2E!:_T=*5O].4%C_2DQ3_T=)4/]'
+M25#_14=._T9(3_]'25#_1$9-_SY 1_] 0DG_.SU$_T-%3/] 0TC_041)_TI-
+M4O\^04;_/T%(_S]!2/\^0$?_0D1+_SY 1?\^0$7_/T%&_SY 1?\Z.CW_,C(U
+M_RTM,/\G)RK_*RDK_R at F*/\J*"K_(R$C_R4A(_\I)2?_(AX at _RXJ+/\9%!C_
+M% \3_Q at 3%_\3#A+_&!,7__KQ\?\@%Q[_&Q0<_QL6&O\8$Q?_%1 4_Q40%/\5
+M$!3_%1 4_Q40%/\4#Q/_%Q(6_Q<3%?\7$A;_$PX2_Q at 3&?\<%1__&!,7_Q42
+M$?\4$!+_$0\2_Q(0$_\3$13_$Q$4_Q,1%/\4$A7_%A(:_QP8(?\;%AK_'!@:
+M_Q\;'?\;%QG_%Q(6_Q40%/\5$!3_% \3_Q,/%_\7$QO_'AD?_Q\9'?\=&1O_
+M(R ?_R<D(_\A'AW_%A(:_Q at 4'/\@'B'_2TM(_TU/3/].4DK_3D](_QP8&O\>
+M%R'_&Q@>_U964_]:6E?_7%E4_UE55_\8%!W_&AD at _V!?9?]=7E__86)C_ST\
+M0_\6%Q[_&QTB_V%B:?]B8FO_86%L_Q<7(/\B(RK_3E!5_U-56O]74EC_)Q\C
+M_R4D(_];6UC_75Y8_VMJ:?^5DY7_A(""_UE65?]86%O_4U5:_U!26?].4%?_
+M2TU4_TE+4O]+353_1TE0_T)$2_]$1DW_1$9-_T-%3/]!0TK_04-*_SY!1O] 
+M0TC_0$-(_T%$2?\\/D7_/3]&_ST_1O\^0$?_/3]$_SP^0_\]/T3_.#H__S(P
+M,_\J*"K_*RDK_R at F*/\H)BC_*2<I_R<E)_\D(B3_)R,E_R at D)O\I)2?_*24G
+M_R4@)/\8$Q?_%Q(6_Q at 3%_\4#Q/_^_+R_R8;(/\;$AG_%Q,5_Q at 3%_\5$!3_
+M%1 4_Q40%/\5$!3_%1 4_Q0/$_\7$A;_%Q,5_Q<3%?\5$!3_&!$;_S0J.?\:
+M$QO_%1 4_Q0/%?\1#Q+_$A 3_Q$/$O\/#1#_#0T0_Q 0$_\?'"+_&Q<?_Q81
+M%?\5$1/_$P\1_Q00$O\2#1'_$PX2_Q(-$?\3#A+_$1 7_Q<3&_\8$AK_&1$5
+M_QD4%O\M*"C_14$__STZ.?\6$AK_&!0=_RDF+/]245#_35%/_U%34/]24TW_
+M)R0C_R <)?\='"+_6UU;_V-B8?]=6UC_9V-E_QD5'O\:&"'_6EIC_V)C:O]H
+M:7#_1450_Q46'?\:'"'_7U]H_V1D;_]B8FW_$A(=_QL;)/]A8FG_7F!H_V%?
+M:/\?&R/_'!XC_SY /O\Z.#K_'AP?_R at E*_]:55#_8%M6_W9R</]D86#_5EA;
+M_U=97/]:7&'_45-8_TQ15_]*3E;_14E1_TA*4?]%1T[_149-_T-%3/]%1T[_
+M0D5*_SP_1/\]0$7_041)_SU 1?\^0$?_/#Y%_SP^1?\X.C__.CD__RPK,?\I
+M*"[_+BPN_RHH*O\J*"K_)R4G_RDG*?\E(R7_*"8H_R4C)?\F(B3_)R,E_R(>
+M(/\E(2/_)B$E_R,>(O\W,C;_)!\C_Q81%?_\\//_'A07_S$I+/\L)RG_'!<;
+M_Q0/$_\5$!3_% \3_Q40%/\5$!3_% \3_Q<2%O\3#QC_%Q(8_Q at 3%_\8$1G_
+M$PP3_Q<1%?\5$!3_$1$4_Q(0$_\2$!/_$0\2_Q$/$O\0$!/_#0\2_Q43%O\@
+M&Q__%1,6_Q(0$_\1#Q+_$0\2_Q$/$O\0#A'_$0\2_Q .$?\1$A/_&!<>_Q at 4
+M'/\A'1__$0\1_TI&1/]54$K_(R(A_Q,1&O\5$QS_+2PR_U-45?]-4%7_5EE>
+M_UY=8_\P+3/_'!@@_Q<8'_]>8VG_8F%H_V)A9_]C8FG_&!8?_Q<4(/]>7V;_
+M8&%H_V!A:/]45EO_&QHA_Q at 7'O]E9&K_961J_U]>9/\9%R#_'!HC_TU-4/\Q
+M,S#_)R<J_QL:(?\9&![_04 __RTK+O\@'"3_)R0J_T ]./]"/D#_(1XD_Q\?
+M(O]D95__ at H-]_Y:5E/]G96C_55E;_U587?].4%?_2DI3_TA'3O]&1T[_14=.
+M_T%&3/\^0TC_0$5*_SY#2/\]0D?_/D!#_SX^0?\T-#?_+R\R_RPJ+?\K*2S_
+M*B at K_RHH*_\K*2O_*"8H_RDG*?\H)BC_*2<J_R8D)_\E(R;_)2,F_R8D)O\D
+M(B3_)"(D_R0B)/\D(B3_)"(D_R(@(O\F)";_$@X0___V^?\@&!O_'AD;_QD5
+M%_\9%!C_%Q(6_QP7&_\9%!C_%A$5_Q81%?\5$!3_&!,7_Q<1%?\@&QO_%Q03
+M_Q<4&O\>("7_'B C_R at L+O\;'R'_$1$4_Q$1%/\0$!/_$! 3_Q(0$_\3#A+_
+M&A08_W!H;/\7$A;_$A 3_Q$/$O\1#Q+_$0\2_Q .$?\1#Q+_$ X1_QD9'/]/
+M2U/_-S(X_R,@&_\].S/_5U!$_UI00?]>5TO_&A4;_Q at 4'/\X-CG_/#LZ_SY!
+M0O]14UC_45)9_RPM-/\?&R/_%Q@?_V%F;/]H9V[_;6QR_V]N=?\6%!W_&Q at D
+M_UU;9/]F9FG_9VAI_UM:8/\8%A__&AD at _V-C9O]H:FC_969G_Q85'/\4$QK_
+M5E=8_U]A7O]65EG_%A4<_QD8'O] /S[_)R4H_QH6'O\B'R7_9V-A_V%<8/\<
+M&"#_(2 F_T=%1_\>'![_*"8H_UY<7O]Z=7#_J:&=_W=R=/];6%[_5%5<_TY.
+M5_]$1D[_/4-+_S@[0/\Y/$'_+S(W_RDL,?\K*2S_+RHN_RTH+/\M*"S_*RDL
+M_R at H*_\F)BG_)B8I_R8F*?\E)2C_*"@K_R<G*O\G)RK_)"0G_R0D)_\E)2C_
+M*"8H_R<E)_\E(R7_)2,E_R4C)?\E(R7_*"8H_R(@(O\M*2O_^/#S_RDD)O\=
+M&1O_&QD;_WQW>_\6$17_&A49_QD4&/\8$Q?_&!,7_Q\:'O\F(27_2T5#_Y"+
+MA?^+B83_'Q\B_V)G;/]26%K_2DY0_S<W.O\K+3#_*"HM_R0F*?\:'!__%148
+M_Q04%_\9%!C_$PT1_Q(0$_\3$13_$0\2_Q$/$O\0#A'_$0\2_Q$/$O\0#A'_
+M$0X4_SXX0/\=%QO_)R,;_TI%.?]>4D/_;%E'_W5 at 5/\I(27_,BTS_T1 0O]"
+M0#W_55!*_U%,1O]'0D+_.C4Y_Q\;(_\7&!__4UA>_U-26?]/3E3_5U9=_Q at 6
+M'_\[.$3_,R\X_UM96_];7%;_6%99_QH7(_\9&!__6UQ=_U]C6_]@8F#_&!<=
+M_QH9'_]?85__8&)?_UY>8?\8%Q[_%Q8<_T5$0_\V-#?_&!0<_QT:(/]K9F;_
+M;6AN_QD5'O\<&R+_;VQR_QT:(/\C(23_5U57_R8@)/\L)"C_4DU-_UM65O^ 
+M>WO_A8""_T1 0O\T,C3_)"0G_Q\>)/\;&B#_$A$7_R(B)?\H)BG_*RDL_S(P
+M,_\R-#?_-SD\_SD[/O\Z/#__.3M _S at Z/_\V.#W_-3<\_S(T-_\L+C'_*"HM
+M_R<I+/\J*"K_*"8H_R at F*/\H)BC_)R4G_R<E)_\F)";_*"8H_QD5%__\]/?_
+M)R(D_QD7&?\6%QC_DI"3_Q\:'O\8$Q?_&!,7_Q<2%O\4$A7_%1,6_QD4&/]$
+M/#C_EY&'_YF8D?_!Q,7_M+F^_Z^SM?^VM+?_P;F]_V5C9?]G:FO_76!A_S,V
+M-_\D*2G_(28F_R C)/\C)"7_(B B_QT;'O\8%AG_%A07_Q .$?\3#A+_$PX2
+M_Q(-$?\B'2'_/SD]_RPF)/]>5TO_:V%6_UU/0/]\9%7_?F19_RT>(O\H(RG_
+M-S,U_SX[-O]E6D__85=)_U=,1?\\-C3_&A8>_Q<8'_\\/T3_/3Q"_QX;(?\<
+M&R'_#PT6_R8B*_\M(B?_)2(=_UI<4_]3453_&Q at D_QD9(O]97%W_5UQ6_UI<
+M6?\9&1S_&1@>_UYB8/]O<6__9&1G_Q at 7'O\<&R'_/T$__RDI+/\9%1W_'1DA
+M_S\]/_\=&B#_&14>_Q<6'?\Q,#;_&Q@>_QP:'?]J:6C_(A\K_R =*?]02T__
+M3$9$_RHB'O\H(!S_3TI%_VAE8/^)A8?_6%99_TE'2O\5%!K_24Q1_T9)3O\\
+M/T3_0$-(_SY!1O]"14K_14A-_T!#2/]!1$G_/T!'_SH[0O\\/43_.3M _SL]
+M0/\]/T+_+S$T_RLL+?\L*BS_*2<I_RDG*?\H)BC_*"8H_R at F*/\=&QW_%A(4
+M__;R\/\_.CK_+2LM_R<G*O^:F)O_&!,7_Q at 3%_\9%!C_'!@:_Q43%?\:'1[_
+M&!89_Q\;&?^:E8__F92._];9VO_/T]7_T-#3_\W(RO_+O\#_8E97_T="0O]_
+M at 7__8E]>_U-34/]"0C__,S,P_RXN*_\X.S7_-#8S_RLM*_\C(R;_'!D8_Q83
+M$O\1#!#_%1 6_R 9&?]%0SS_5E-(_VUC5?^ <F/_<V56_VY<3O]O6TW_74Y/
+M_UM77_\H*S#_14(]_V=;4O]F64S_6U!%_TI&1/\C'R?_&A@;_U554O\>'1S_
+M&A45_QD6'/\8%Q[_'QD=_T at N(?]*."K_4E-,_T9%2_\@&R?_%QDA_UYG;?]B
+M9VS_:&=F_QT:(/\;&R;_0D1,_S at Z/?\M+#/_&!8?_Q85&_\\03W_#0\2_Q01
+M'?\8%A__$1D=_T5'3/\8%1O_%!8;_TQ.3/\:&!O_(R F_V9D9O\L*#'_'QLC
+M_VAF:/]=6F#_(R F_T9$1_\A'R+_)"(E_U!*2/]%/SW_1D1'_Q45'O]B9&G_
+M4$]5_TQ+4?])2$[_24M0_T9(3?\_04;_0$)'_TU,4O\U-#K_,"\U_RTL,O\K
+M*C#_*RHP_RHI+_\I*"[_)R<J_RDJ*_\I*BO_*2HK_RLI+/\I)RK_*B at K_QL9
+M'/\9%1?_\_7R_SP^//\A'R'_CXJ._T]-4/\6%!?_%Q48_Q43%O\C&Q__&108
+M_QT@(?\6&!;_&Q\=_Y"1B_^BFY7_U<_-_];+RO_5R<7_T\K$_\W'O_^>DI/_
+M% \1_QD:&_\;%QG_&!,3_QD4%/\>&1G_%A$1_R4F(/\R-2__0$,]_T)%/_\R
+M,B__+BXK_R$@'_\1#Q'_(!L;_T1"._]$03;_7%)$_WIK7O]31#?_0C,F_U-"
+M-?]:2TO_'AD?_R,E*/]234?_;6)7_V582_]K7E'_95Q5_U)-4?].2TK_:FAC
+M_Q41$_\6$A3_&A<=_Q83&?\J'A__6#TL_TTX)O]H7UC_;FIL_QX7(?\@'R;_
+M4UM?_V%E9_^)B(?_%!$7_Q<3'/^"@(/_?X%^_V-C9O\8%A__%!,9_TY,1_\6
+M$Q+_&14>_Q at 7'O\8("3_5%9;_Q,0%O\@(B?_65I;_Q,2&/\H)R[_9&1G_S0P
+M.?\?&R/_9V5G_V%>9/\3$!;_5U58_QX<'_\C(23_96-F_QP;(?\6%A__1TI5
+M_R <'O]D7U__ at GU]_UU86/].3$__2DI-_T]/4O]&1DG_0$)'_T)$2?\^0$7_
+M/3]$_SY 1?\Y.T#_/D!%_ST_1/\]/4#_/S]"_SX^0?\_/T+_/CY!_SP\/_\S
+M,S;_(" C_QH6&/_\^O?_*R at G_Q@6&/\;&1S_+2TP_Q at 8&_\6%AG_%!07_QX=
+M)/\4%AO_%1 at 9_RDF(?\>'R#_CXZ-_YZ8D/_3SLG_T\O)_]7+Q__3RL3_R\6]
+M_Z.7F/\8$A;_&1D<_Q at 3%_\<&!K_&A88_QP8&O\7$Q7_$1(3_QT>'_\;'1K_
+M)"<A_QP@&/] 0SW_)2<D_Q47%?\>&AC_24= _U932/]I7U'_>&9<_UA(/O]%
+M."W_4D<\_V)34_\:$A;_&!H8_W)L9/]E6U'_;6!3_VI:2O]S9EO_55!+_VEG
+M8/]:6%'_$PX0_Q40%/\5$AC_'QH<_T4V,?];/BS_2C =_Y:&??^&?W__)!TD
+M_RDF+/]B:&K_?'^ _XF*B_\7%!K_&Q8<_WMW=?]O<6C_8F1A_QH9(/\7%AS_
+M14 [_T])1_\;%1W_%1<<_QHB)O\Y.T#_&18<_Q06&_]'24S_#Q 7_QP<)?]0
+M4E?_-C$]_Q\;(_]A7V'_6%5;_Q,0%O]85EG_'AP?_Q\=(/\[.D#_&!<=_Q01
+M%_\6$QG_&108_Q(,$/\J)"C_2D1(_U)04O]T<G3_7%I<_UA66/]+35+_1TI/
+M_T9)3O]"14K_/T)'_SL^0_\X.T#_,S8[_RXP-?\N+3/_+BTS_RXM,_\K+3#_
+M*RTP_RXP,_\K+3#_(!X at ___T]?\O)RK_&1D<_WM^?_^3E)7_%Q<:_QT=(/]I
+M96?_;6QR_Q(5&O\F)RC_'!00_RPH*O^0BXO_F)2,_\[-QO_4T=#_T<S,_];0
+MSO_*P+S_IIJ=_QX7'O\;&![_%1(8_Q46%_\3%!7_%A<8_Q 1$O\.#1/_'!LA
+M_Q at 9&O\>(![_*"TG_SQ!._\K,"S_*S L_R\K*?]!/#;_5U%'_VA>4/]B4$;_
+M4$ V_T4Z+_]=4TC_7U)-_Q40$O\0$!/_;FEC_VQB6/][;F'_>6M<_X1W:O]C
+M7%;_8%Q4_W%L9O\>%AG_%1 6_Q(0$_\@&1G_5$$Y_V%#+?]4-R7_D'UU_X^'
+MA?\K)"O_8%]E_U!45O]04U3_3$U._P<&#/\C(23_14$__U)63?\F*"7_'ATC
+M_Q at 7'?]-1D#_6%!._QL6'/\7%AS_*# T_U987?\;&![_%!8;_T1&2?\4%AO_
+M'1TH_T1&2_\M*S3_'QPB_TY,3O]/3%+_%!$7_UA66?\>'!__%A07_Q$,$/\:
+M%AC_&!,5_R$:&O\;%AS_$P\7_QL7'_\=&2'_'!H=_R >(?\?'2#_4E!3_V]Q
+M=/]565O_35%3_TI.4/]#2$W_/T1)_SY#2/\_1$G_0$%(_T-"2?]"04C_0D%(
+M_ST_1/\]04/_.CY _S8Z//\5$Q7_^O+U_S0O-?\D(RG_;VIJ_Y:2D/\>&1W_
+M+2 at L_Z"5E/^+?WO_'1<;_QH9'_\=&QW_'1X?_Y60D/^>E8[_S\K%_];/S__8
+MS\__U,K&_]C,Q_^=E)3_&A47_QT9&_\5$Q7_%Q47_Q at 6&/\:&!K_$Q$3_Q,1
+M$_\;&1O_'!H<_QP:'/\G+";_.3X\_RHO+_\F+"C_-30M_R\J)/]+13W_6U-)
+M_W]S:O]-03C_13LQ_TU%._]P8U;_(1D5_R\K+?]/1#W_<F58_WUP8_]]<F?_
+MAGMP_W!H7O]E8E?_<G!H_S8R-/\<&1__&QD;_RTH(O\Z*![_8D(L_T at O'O^^
+MKZK_:6%?_S$M+_]C9VG_86QM_UE?8?]46ES_-ST__SM!0_\L,C3_3U-5_Q$3
+M%O\-#1#_(!XA_SPY-/^-B(C_(1LC_R,@)O\Q-CO_7F!E_Q83&?\9&R#_2DM,
+M_R =(_\<&"#_)B0G_R$?(O\<&AW_2$9)_TI(2_\4$1?_44Y4_Q\<(O\0#1/_
+M%A$5_Q at 3%_\4#Q/_&Q8:_QD6'/\5$AC_%A,9_QX;(?\:%QW_%!$7_QL8'O\;
+M&![_)2,E_R4C)?]'14?_=7-U_U]?8O].4%7_2DQ1_T-%2O]!0TC_/D!%_SP^
+M0_\[/4+_,3,V_S$S-O\T-CG_+S$T_QD7&O_X\//_1T)(_RLJ,/\?&AK_;V=E
+M_R0?(_\@(B7_(" =_R$>&?\5$Q;_&AD?_Q\7&O\N+"[_C8B(_YZ5CO_/RL7_
+MTLO+_]?.SO_<TL[_ULK%_Z"7E_\D'R'_'1D;_Q84%O\7%1?_&1<9_QH8&O\3
+M$1/_$A 2_QH8&O\<&AS_&A@:_R<L)O\W/#K_*B\O_R4K)_\O,2[_*"8C_SPY
+M-/]$/SG_1S\]_QL2$O\C'!S_'AD9_S,J)/]$/#C_4DU-_VA?6?]F6E'_<69;
+M_WIM8/^%=VC_>7%G_V9C6/]L:V#_-3,P_R >(?\B("+_*"0B_RT>&/]&*QK_
+M."$5_]3&P_^4C(K_'AH8_UU?7/]A;&?_6F!<_U]E8?]I;VO_76-?_SU#/_]*
+M3T__$1,6_P\/$O\6%!?_-S0O_TQ'1_\@&B+_&!4;_S0Y/O]<7F/_659<_S R
+M-_]045+_$@\5_R <)/\7%1C_%A07_QL9'/\6%!?_(!XA_Q42&/\D(2?_%!$7
+M_QX;(?\:%1G_&A49_Q81%?\4#Q/_%Q0:_Q83&?\8%1O_%1(8_Q83&?\4$1?_
+M%A,9_QD6'/\4$!C_(!PD_Q41&?\C'R?_-38W_U-54_]E9V7_2DQ*_TI,3_]$
+M1DG_04-&_SY 0_]"1$?_/T%$_SL]0/\^0$/_.3D\__OS]O\G(BC_(2 F_XZ)
+MB?]U;VW_*B4I_Q at 6&?^]M[7_O;6Q_QP7&_\8%QW_&147_Q43%?^2C8W_GI6.
+M_\K%P/_6S\__W=34_]S2SO_2QL'_H)>7_Q\:'/\7$Q7_%1,5_Q84%O\9%QG_
+M'!H<_Q,1$_\3$1/_&A@:_QL9&_\;&1O_)"DC_S4Z./\N,S/_)BPH_S0U+_\L
+M*23_2T9 _TE#._]C657_65%-_TI%0/\Y-C'_,"PN_R0B)?\8%AG_&!89_QL7
+M&?\?&QG_+"4?_X!V;/]G6US_8EQ:_VEF8?] /3S_'AH<_QD7&?\T,B__)QP;
+M_THS)_\Y)1W_RKV\_X-Y=?\@&1G_+"HL_S,Z.O](2TS_*2HK_V%B8_]]?G__
+M$1(3_[.UN/\;'2#_%A89_QP:'?\I)B'_O;BX_V!:8O\;&![_,C<\_S at Z/_\?
+M'"+_*"HO_T!!0O\7%!K_)R,K_Q,1%/\,"@W_,C S_Q84%_\2$!/_'QPB_Q at 5
+M&_\3$!;_$0X4_Q at 3%_\9%!C_%A$5_Q40%/\2#Q7_&A<=_Q,0%O\2#Q7_&18<
+M_Q$.%/\:%QW_$Q 6_Q82&O\7$QO_%1$9_Q82&O\?'2#_+"HM_RTK+O]24%/_
+M24I+_T5(2?]$1TC_0T9'_S]!1/\^0$/_04-&_T)$1_] 0D7___C[_R(?)?\5
+M%AW_'QP;_SPV-/\@&Q__(QTA_RX?'_\G%Q3_(AH>_Q,5&O\1%!7_0$%"_XB&
+M at _^;E8W_Q\*]_];/S__=U-3_V]'-_]'%P?^>EY?_'!<9_Q82%/\4$A3_$ X0
+M_Q43%?\=&QW_$Q$3_Q<5%_\9%QG_,"XP_QH8&O\E*B;_,C<U_S,X./\D*B;_
+M/#@P_R at D'/]O;&?_8EQ4_W]U<?]L8US_;6A<_W!J8/]U:F/_<F9=_W-F6_]J
+M7%/_4DY,_U).4/]=6%/_;61=_VI>8?]%.C__'QH:_RXJ+/\;%1G_%Q,5_R0C
+M(O\?&AS_-"4?_R<9%O]74E3_>W-Q_R4=(/\E)"K_)BHL_R @(_\H)BG_&18<
+M_VAG;?\@'R7_G)ZA_Q<9'/\:&AW_'1L>_R,>&?_$P<#_:&=M_R,@)O\7&A__
+M%QD>_Q(/%?\8%QW_*RPM_QT;'O\@'"3_$Q$4_Q84%_\U,S;_#0L._Q84%_\Q
+M+C3_.#0\_QH6'O\;%Q__%Q(6_QH5&?\6$17_%1 4_Q83&?\7%!K_%!$7_Q83
+M&?\1#A3_$0X4_Q83&?\5$AC_%A,9_QL9'/\9%QK_&1<9_Q$-%?\3#Q?_.#0\
+M_RLH+O\G*2S_<G9T_U)44O]*34[_1$A*_T1&2?]#14C_1D9)_T9(2___\O/_
+M*B4I_R B*?]"/D#_0SLY_S4O,_\7%!K_DXN._YJ2D/\J)2G_%QD>_QP:'/\I
+M)B7_CXZ'_Y65C/_$PKW_UL[,_]7*R?_:S\[_S<+!_Z&=F_\G)"/_%A(4_Q<2
+M%O\3$13_%Q47_Q84%O\5$Q7_%1,5_Q at 6&/\C(2/_&QD;_R0H)O\Y/CS_+S0R
+M_R0I)_\[.SC_&R,M_S9&8/]#3VK_-3I3_S Z3O\]3UW_+#Q,_R<L/O\K,D#_
+M.D1/_UYB:O]H9V;_;F=O_X!Y<_^$>F__<V1>_Y^7E?]/34__24=$_Q\;'?\<
+M&!K_(R4H_SY*4_\6+3+_3V)N_XZHM?\<)27_)2 @_SP_0/](34O_5E93_TU,
+M2_],34[_LK2W_Q@;(/^TK[/_'AP>_Q\B(_\>&AS_(1\<_[FUL_]84U?_%A4<
+M_Q(/%?\2$!/_$ X1_Q84%_\?'2#_%1,6_Q43%O\4$A7_$Q$4_Q$/$O\3$13_
+M$A 3_S<S._\R+C?_'1DB_QH6'_\2#Q7_%Q48_Q<5&/\7%1C_#PT0_R >(?\7
+M%1C_$ X1_QH8&_\4$A7_%!(5_Q02%?\3$AG_$Q(8_Q04%_\3%!7_%!4<_Q .
+M%_]13U+_'QP;_VYO</]F9&'_A8!Z_U134O],4%+_14E+_TA(2_]*2$O_1T5(
+M___P[/\[-#3_*2HK_TU'1?\U+2O_-C T_QH7'?]9453_8UM9_R<B)O\;'2+_
+M)B0F_S8S,O^/CH?_F)B/_\3"O?_9T<__U\S+_]K/SO_%NKG_JJ:D_T],2_\5
+M$1/_%A$5_Q,1%/\7%1?_%A06_Q43%?\5$Q7_%A06_QX<'O\:&!K_(B8D_S@]
+M._\P-3/_(R at F_S4Y-_\7'RG_"Q<R_PP3-_\($S+_*SQJ_QXY=O\I2WW_)U"!
+M_SYLGO\L6HS_,EN,_T5HD/].9HW_3%1>_Y>- at O]W:V+_.#(P_S] 0?](2$7_
+M:FYE_VIN9O]U at X;_.UUQ_WVOQ/^@Q^#_>**R_W6$A/^$>WO_0S]!_QLC(/\M
+M,R__0TA&_W5Z>O^XP,+_%1\C_[V[OO\<&AS_&AT>_QX:'/\5&!+_MZFF_YN 
+MAO\4#Q7_$1$4_Q02%?\4$A7_$ X1_Q02%?\3$13_$Q$4_Q$/$O\2$!/_%1,6
+M_Q,1%/\4$A7_'ALA_Q,/%_\1#17_$ P4_Q$/$O\:&!O_&1<:_Q at 6&?\2$!/_
+M'1L>_Q02%?\:&!O_$@\5_Q(/%?\5$AC_%1(8_Q,2&/\1$QC_$Q4:_Q<9'O\1
+M$AG_$Q(8_S(P,O\?'1K_H9R<_X^$@_]H7%?_9EU=_V!<7O]>6ES_2D9(_U%-
+M3_]24%/___3T_RPG*?\>("7_(1X=_RXF)/],1DK_(R F_R8>(?\J(B#_'1@<
+M_Q@:'_\<&AS_=G-R_X.">_^8F(__O+JU_]C0SO_>T]+_V<[-_\6ZN?^JIJ3_
+M1T1#_Q82%/\7$A;_%Q48_QH8&O\7%1?_&!88_Q<5%_\7%1?_&QD;_QT;'?\?
+M(R'_,38T_S,X-O\=(B#_.#8Q_R0B'_\?'"+_%A,?_Q01*/\P-U7_'S%5_R V
+M7/\O5'?_.VJ-_RU:@O\>16__+%5Y_RY(;_]\@XG_GI"!_VQ at 5_]%03__+C$R
+M_RHO*_]?8UK_ at 7]Z_UUF;?]9?)/_;YZV_Y_!V_^#I;G_H::K_S,L+/\T-3;_
+M55I8_WZ ??]L:VK_*2PM_Z"DIO]>8VC_PL##_Q,1$_\0$Q3_'AH<_R,A&O^X
+MKZG_BWQ\_Q<1%?\6%!?_%!(5_Q02%?\3$13_$Q$4_Q,1%/\2$!/_$A 3_Q,1
+M%/\2$!/_$Q$4_QH8&_])1TK_%1,6_Q .$?\2$!/_$A 3_R >(?\B("/_(R$D
+M_Q<5&/]$0D7_%!(5_Q .$?\4$!C_$@X6_QH6'O\G(RO_,2XT_S at S.?].2%#_
+M85ID_Q$.&_\4$1W_'!@@_T(]0?^MJ*K_LJJH_WMO:O]L967_BX*"_W!G9_]L
+M967_7%=7_T,^0O__^?K_)R8M_RHO._\U-#K_*"(@_U=15?\>&R'_EI&3_[2L
+MJO\^.3W_#A 5_QH8&O\>&QK_@'YW_Y63B_^\NK7_U]'/_]O0S__7S,O_PK>V
+M_ZVII_\D(2#_&A88_Q40%/\3$13_&1<9_Q<5%_\8%AC_%Q47_Q<5%_\;&1O_
+M'QT?_Q\C(?\O-S3_.#T[_QPA'_\T.#K_'R$F_QH='O\1%AS_%QHL_Q<B(_]#
+M6%+_>8^7_Y:QO/]\F*#_7GB#_T-79?](97'_,D=D_WE[>?^6A';_<&5:_TM'
+M1?]"14;_,38R_XZ/B/]Z=W;_'!DF_T%:<?^!L,+_F<79_XRWR?^JN;G_.#4T
+M_SD^/O]D:6?_S<G'_X^*BO\<&!K_CHZ1_V%C:/^FI*?_$Q,6_S S-/\E(B'_
+MDH:!_V1?6?\;'1K_&Q<9_QD4&/\=&Q[_%1,6_Q(0$_\2$!/_$A 3_Q$/$O\1
+M#Q+_$A 3_Q(0$_\-"P[_&QD<_U!/5?](1TW_%!(5_Q(0$_\/#1#_$ X1_QH8
+M&_\7%1C_$0\2_P@&"?\2$!/_&A49_T9"2O\_/4;_.CI%_Q$/&/\R*3#_0#4Z
+M_XE^@_]81U'_:5UL_V1::_]W;GO_;VAO_Z.>H/^=EY7_2D="_U535?_6RLO_
+M6U-1_TI#0_\:%17_'!8:___X\_]!/4;_4UAJ_Q02%?\D)"'_;V1I_Q<4&O\3
+M%!7_*!T<_Q80%/\7%AS_&A88_X^*BO]_>'+_G9>/_ZVKIO_2T,W_V-#,_]?+
+MQ_^MJ*/_JZFF_QD6%?\;%QG_&A49_Q,1%/\7%1C_$0\2_Q02%?\5$AC_%A07
+M_Q84%O\Y.#?_)",B_RDX-/\U.3?_&1T;_R\T,/\E(BC_'1\<_Q0:(O\7&SO_
+M'!XA_UM95O^+F)W_B:.F_X69G?]E;6__6&)?_U=O;/\E0G+_>'U[_XMU<_]O
+M95K_3TE'_R0E)O\C)2+_G92-_UUL<?\?+D7_$"M#_WVENO^;R^?_H<SD_[S 
+MPO]01T?_*2<I_V!D8O_.R,;_F924_QL9&_^[N[[_2DQ1_[BWOO\9&Q[_8&-D
+M_Q at 4$O\A'1O_)B$A_QD4%O\4#Q7_&!89_R(@(_\=&Q[_$A 3_Q$/$O\2$!/_
+M%A07_Q,1%/\1#Q'_$0\1_PT+#O\<&1__,SM+_Q(2'_\5#Q/_$A,4_Q 0$_\8
+M%AG_)"(E_Q<5&/\8%AC_%Q47_Q at 4%O\:%1?_'!<=_PT/%O\'#!+_" <._Q,.
+M$O\<&A?_'QL3_WYI9_]]8G+_2"Y)_U,]5_]"-C__C8*!_U914?\Y.S[_+2\T
+M_\["P_\N,S'_3$U._QT8'/\7%!K___3Q_SDT.O\7%R3_)B$E_RDK*/]]<G?_
+M&A<=_XJ+C/^RIZ;_<&IN_Q,2&/\:%AC_%Q(2_W=P:O^;E8W_N+:Q_[V]NO_!
+MOKG_J:&=_YR:E?^<FI?_)2(A_R <'O\<%QO_$Q$4_QH8&_\3$13_%Q48_Q<4
+M&O\7%1C_(!X at _QD8%_\?'AW_*CDU_S<[.?\8'!K_+C8R_R<B)O\G)A__)"DU
+M_QPF2_\4&R'_/SLY_X2)CO^7K*[_ at 9:8_WB&A?]E;6G_771O_R1!<?]Y?GS_
+ME7]]_W=M8O](0D#_*2HK_R,E(O^?D(O_&2,G_S9?<O];C:C_99.O_UV1LO]0
+M?9O_JK"Z_U]56/\L*BS_%!@6_]#*R/^@FYO_'AP>_["PL_]"1$G_LK"S_QH:
+M'?]S=7C_%1(8_QXB)/\E*2O_%!07_Q,.$O\6%!?_&A@;_Q<5&/\2$!/_%1,6
+M_Q,1%/\0#A'_$0\2_Q$/$?\1#Q'_#PT0_QL8'O\?)CS_%!8E_QT8&O\E("3_
+M$A 3_QD7&O\L*BW_'1L>_Q<5%_\5$Q7_34E+_RDD)O\:$A;_%A(4_Q(1$/\A
+M'R+_$18<_Q@>(/\L*2C_'Q,6_V1:;?\;$BK_C("5_X%V??]%/#S_(1X=_SQ 
+M/O\P,C#_S\3#_S U,_]'2$G_(!L?_QH8&___\_+_*" C_R0A)_\A'!S_/T$^
+M_W1I;O\0#1/_)28G_S4J*?\G(27_%!,9_QL7&?];5E;_9V!:_Z":DO^EHY[_
+M@(6!_YR=E_^2D(O_=GES_WAX=?\7%!/_&!06_Q<2%O\1#Q+_%A07_Q02%?\7
+M%1C_%1(8_Q84%_\C(2/_&QH9_QP;&O\G-C+_.#PZ_Q@<&O\O.SC_)2(A_S8P
+M)O\F+#S_'B=0_Q4;(_\B'AS_5%M;_U1G9_]TBHG_>XZ(_V!J8?]9<&O_+DM[
+M_W=\>O^9 at X'_=6M at _W9P;O\J*RS_,#(O_X^. at _]J8F#_#"<R_W&HP/^#L<W_
+M8I:W_WFBP/^SL[S_;F)C_R0B)/\N,C#_S\G'_Y^:FO\L*BS_IJ:I_RHL,?^Q
+MK:O_&Q at 7_V)C:O\G)#'_/#E%_R$B*?\4$QG_$1$4_Q,1%/\6%!?_$A 3_Q,1
+M%/\:&!O_%Q48_Q$/$O\1#Q+_$A 2_Q(0$O\0#A'_'!D?_QPC0?\*#1__$ L+
+M_R at C)?\8%1O_&1<:_R0B)?\@'B'_'1L=_R(@(O]J9FC_)B$C_Q,/'_\8&"/_
+M%1@=_QHE,/\>)CK_.3U%_R<B'?\U+R?_;&MJ_PP-#O^DHI__-3 K_R$<'/\6
+M&!;_/D,__TQ*1?_%N;7_1TQ*_RXO,/\9%!C_&!89__GM[O\G'R+_%A$5_QX;
+M&O\='1K_FHZ1_R at E*_\H*2K_,2DG_RHE)_\5$AC_(B B_WYY>?];5E#_HYJ3
+M_Y22C?]N=6[_D9:0_WI]=_]N=6[_<G-M_Q01$/\9%!C_&108_Q84%_\8%AG_
+M&!89_QD7&O\9%QK_%A07_R >(/\7%A7_&QH9_R<S,/\T.3?_%AH8_S@^0/\A
+M'QS_'A<1_T9.7O\D+5S_&ALF_R >&_\F-#/_AIN=_W.)B_]A=W7_6V1=_UYU
+M</\N3WK_>'Q^_YR'@O]G6DW_BH)^_Q\@(?\O,2__AXZ"_ZF5C?]06&+_9)2I
+M_WFHP/^;S>C_6WR3_[RWO?^"=G?_'R A_SL^/__/R<?_J*&A_S$O,?\O+S+_
+M*BDO_UQ85O\=&AG_0T)(_PD(%?\=%B+_(!HB_QD6'/\0$Q3_%148_Q02%?\1
+M#Q+_%A07_Q\=(/\6%!?_$0\2_Q$/$O\2$!/_$A 2_Q(0$_\5$AC_'2-#_Q$6
+M,/\1#Q+_%Q$5_RDE+O\G)"K_,"TS_RXL+_\Q+S'_)B0F_QX:'/]"/3__(B4^
+M_TI19_\4'BS_(S--_TI6=?\+#!W_(1D5_V%<2O]#/CG_&!,5_Z*=G_\J)"S_
+M&A4;_Q47%?])3$;_4$](_ZRBGO\9'1O_&!D:_R at F*?\P+C#___KY_RLC)_\2
+M$1?_'!T>_WEW=/^FEYC_%1,5_RDM+_^[M;/_G)>9_Q at 3&?\=&Q[_%Q(2_T5)
+M0?^HG9;_A8-^_V5O9O^%C8/_;'1J_V=O9?]I;67_%Q47_QH7'?\:&!O_&!88
+M_QL9&_\>'![_'!H<_Q at 6&/\:&!K_&A@:_Q at 6&/\7&!G_*"\O_S([._\4&1G_
+M-3 \_R(F'?\@%A__'2DP_R$V;/\6%"3_(A\>_R$M-/^*I*?_ at IVD_UUT>?]&
+M4D__46IO_RU5?/]T>8O_D8!R_W- at 5O^>DHW_(2(C_Q\;'?^2BH;_IYV2_YFJ
+MM_]VHKS_F,38_ZG-XO\J.TC_B(.#_YV8D_\>'B'_2TI0_\7 PO^NIJG_.C at Z
+M_S@[//]+3D__/D-#_QH='O^"?H#_&!D:_QD9'/\<'!__$Q,6_Q04%_\6%AG_
+M%A89_QH:'?\]/4#_'Q\B_QD9'/\?'R+_#@X1_Q0.%O\4#Q/_$0\2_Q(0&?\8
+M(CW_(BI6_Q8<-O\8&"'_5$QG_T ^1_\K)R__&QHA_X.!?O]23T[_&A88_R<B
+M)O\-$B3_'",Q_S(U0/]G;8/_=H&@_Q\D-O\=%A;_."TF_R,?'?\=%A;_K:>E
+M_RLH+O\?&R/_%!(4_V)C7?\;&QC_,S$L_QL8%_\5$Q;_*2TK_Q at 6&/__^/?_
+M0SL__Q\>)/\5%A?_)24B_[&EIO\;&1O_&1L>_QD3$?\6$1/_'!<=_QL9'/^2
+MC8W_14E!_Z*7D/]_?7C_7&5>_V9M9?]@9U__86A at _V1H8/\8%AC_&18<_Q84
+M%_\6%!;_&A@:_QT;'?\6%!;_%1,5_Q(0$O\9%QG_%Q47_Q<8&?\F+2W_-#T]
+M_Q09&?\S,#S_(2@>_QH0&?\)$AG_&REA_QL7)_\G(1__&!H=_S1+2O^%H:3_
+M9'A\_TU65O]2:6C_,UAW_W%R??^7@&W_A7=N_ZB at GO\A'R+_'QT?_XV*A?^H
+MH97_D:*O_R5/:?^SS^3_M<K<_Q,=)_^*A7__ at X![_QXB)/]96V#_O[N]_[VY
+MN_\L,3'_-$ __S0]/?],6%?_)"<H_Y2,C_\9$13_%A07_QH:'?\0$!/_&!@;
+M_Q45&/\>'B'_'AXA_T-#1O\J*BW_(" C_RHJ+?\2$A7_% X6_Q0/$_\.# __
+M$Q$:_Q4A//\:)53_'"=*_S(U3O]B5'K_@'Z'_PT0%?\5$A[_$ T,_U!-3/\9
+M%1?_(!L?_T-"2/]65%;_8UQ<_Z:BJO]N;W;_%A07_QD2$O\Q)2'_'QH:_QL4
+M%/^IHZ'_%!$7_Q,/%_\/#0__8F-=_YN;F/\]/3K_'1L=_QL9'/\M+BC_&A88
+M___X]_],1$C_)20J_QX?(/\O-##_O[:V_QD7&?\D)"?_LZNI_[JUM_\;%AS_
+M$0\2_QX9&?\?(QO_H9:/_X!^>?\T/#C_.3\[_RTS+_\U.S?_76!:_Q at 6&/\4
+M$1?_%!(5_Q,1$_\5$Q7_%1,5_Q43%?\3$1/_%!(4_QD7&?\9%QG_%A<8_R$H
+M*/\Q.CK_$A<7_RTM./\C*R'_(QDB_Q\E+?\?+67_-C5&_S<X,O\_03[_,S at R
+M_YFEI/^9I:G_;G5U_T]E8_\S67G_='R0_XB ?O^&@8'_K*:J_QL6'/\>'![_
+MC8Z(_YV8C/](66;_1VV(_Z_/Y?]TCJ/_1%1>_X2$@?^9FI3_)RTO_S U.O^P
+MKK#_P[_!_V1I:?]<:&?_='M[_X2)B?^1CY'_J*"C_Q at 3%?\5$Q;_&AH=_Q 0
+M$_\7%QK_#@X1_Q86&?\='2#_'AXA_Q\?(O\9&1S_&1D<_P\/$O\3#17_$PX2
+M_Q .$?\4$AO_%1XV_QDC4/\;)4K_%!DW_V5/?/]^>87_$!@:_QH:)_\=&1O_
+M55)1_R at D)O\P*R__)R,E_R(8&_\R)2K_>7!W_T$V-?]@5EG_$0X4_Q00$O\6
+M$A3_&1(2_Y6/C?\5$AC_%! 8_Q<5%_]=7EC_GIZ;_S$R,_\<'!__'1L=_TM*
+M0_^'A(/___7R_T Z/O\;'"/_&!D:_WAZ=_^\M;7_&A@:_Q43%O\F'Q__&A88
+M_Q83&?\='"+_BX:&_RHK)/^=E(W_ at X%\_R\W-/\H+2O_&1\;_RTS+_]365'_
+M&!88_Q01%_\4$A7_%1,5_Q02%/\3$1/_%!(4_Q43%?\4$A3_&1<9_QD7&?\7
+M&!G_(B<G_RPU-?\;("#_-3<__Q8>&O\=%1G_&1\G_R0V;O\9'R__)BHB_RTP
+M*O\V+2;_.S8V_VUP<?]&3T__,%%C_QM0AO\;.W/_.$]Z_UQH=_^ZLK;_(ATC
+M_R <'O^-CX;_HI^4_U-@:_]+;(/_1VN _R%"6/\A-D+_*2LI_Y^AGO_#QL?_
+MA(F._\/$Q?^_O+O_6EU>_[[#P_\^04+_S<G+_V%97/^7CY+_&1<9_R4E*/\9
+M&1S_$Q,6_Q<7&O\0$!/_'AXA_R,C)O\H*"O_&1D<_QL;'O\5%1C_$! 3_Q,.
+M%/\3#A+_$A 3_QL:(?\5'##_&B%+_Q8=0?\;'#O_9D]Y_W-P??\J-3;_'R K
+M_WUZ>?]'1$/_&!06_RTH+/]'0DC_13M$_T,T/_^$?(;_85-4_Z&9G?\4$AO_
+M$Q48_Q,1$_\<%Q?_EY&/_PT+#O\5$AC_&!<6_V5F8/]K:6;_-38W_QH<(?\4
+M$A3_5%)*_[VZN?__]O#_54]3_S4Z1O\H)2O_)" >_[&NK?\9%1?_&A49_V%6
+M5?]C657_&Q<5_QT<&_\J'!W_*"DB_Y64B?^'B(+_)2PL_Q\F)O\>)B/_*C(N
+M_TI63?\8&1K_%A$7_Q,1%/\6%!?_%A07_Q84%_\3$13_%!(4_Q02%/\6%!;_
+M&1<9_Q<5%_\?(B/_)S,R_PL2$O\M-3+_#Q$@_QL8$_\7(2S_&3!E_QXA,_\A
+M'QC_+"HE_SPZ*_\G(AW_2D<\_X*(A/\C0F3_+%>%_QQ B_\M3I;_26=]_\"N
+MLO\:'R3_(QD<_Y"0@?^>G)7_C(Z3_YFDJ_^7H[+_1UAE_SI&2O^.AH+_J*6D
+M_]W5V/^3F)C_J:JK_\&_O/^ZN;C_V-/7_[JVM/_)PL+_<VYN_X6"@?\O+BW_
+M&!H=_QH<'_\2$A7_&!89_PT.#_]86US_4515_R(E)O\@'B'_)"(E_QT;'O\3
+M$13_%A 4_Q0/$_\1#Q+_%!07_QTF/O\<(U'_%!U"_QP@//\N'CK_869R_UE>
+M7O]K;6K_E9.._U%-2_\7$A;_+"@P_QX=(_\R*S+_.C$X_WYY?_]42TO_J:"@
+M_QP7&?\8%AG_$Q(8_QD4%O^EGYW_&!,7_Q43%O\:&1C_ at 8%X_YF6D?\\.#;_
+M)B(D_QD8%_]334/_P+V\___T\/]]=G;_&1HA_Q .$?^(A(+_JJ>F_QX:'/\=
+M&!S_=&EH_VYF9/\:%17_'1L=_R\G*O\7(!G_-CTS_SI"/O\F+2W_(R8G_QD=
+M&_\I+BK_1E!'_QL<'?\9%!K_%1,6_Q at 6&?\8%AG_&!89_Q02%?\6%!;_%1,5
+M_Q84%O\9%QG_%1,5_QXA(O\C+R[_$!<7_RLS,/\4%B7_&Q at 3_Q0>*?\9,6C_
+M+S-(_TA&0_\D)"'_+2LC_SHP,_\Y,RO_/STZ_R1 8O\L5X7_'D:1_RI+D_]$
+M7W?_MZ6I_Q,:&O\B%QS_C9!^_Z. at E/]X<&[_P;6X_]++U?\<'B;_.#L\_[6H
+MH_^II*3_V<_2_WV"@O^;GI__RL7 _[JTLO_<TM7_R+ZZ_\[&Q/]^>7G_B8:%
+M_Q\>'?\7&1S_%1<:_Q$1%/\7%1C_%!46_PX/$/\_0$'_'A\@_R<E*/\E(R;_
+M(!XA_Q02%?\6$!3_% \3_P\-$/\/#Q+_'"4]_QLA4?\3'D#_%ALT_R\@.?]I
+M;GK_9&EI_VAJ9_^)AX+_4DY,_R0?(_\\.$#_1D5+_Q$*$?\Z,3C_C(>-_V-:
+M6O^BF9G_'QH<_Q84%_\3$AC_'!<9_ZJDHO\T+S/_&A@;_R<F)?]_?W;_?7IU
+M_S\V,/\K)B'_'QH4_U9,0?_+P<3___3Q_Z&6E?\:%Q;_'AH<_S<T,_^VL[+_
+M%A(4_Q\:'O\>&1__%1(8_Q<9(/\4&B+_=W!W_QPD(/^*CXG_>7Y\_S5!0/\Y
+M0D+_.4- _SM&0?]"3D7_$A,4_QD4&O\5$Q;_%1,6_Q84%_\7%1C_%!(5_Q84
+M%O\7%1?_%A06_QD7&?\8%AC_'2 A_R(N+?\@)R?_+#0Q_Q,5)/\<&13_"Q4@
+M_QHR:_\9'S7_0#\^_T!"0/\K,"K_'QPB_S8T+?\W.3;_'S99_RE7A/\:1I#_
+M*TJ2_SE2;_^QGZ'_%AX:_R8;(/^*BW[_I9^7_VA:6__JV=G_Z-C<_SDT-O]Y
+M>7;_U<G$_Z6>GO_8S,__C9*2_WR#@__(QK__O;BS_^#7U__*P;O_O[>U_XJ%
+MA?]Q;FW_+BTL_QH<'_\6&!O_%!07_Q at 6&?\/#0__$Q$3_Q,1$_\D(B3_'AP?
+M_QP:'?\9%QK_%!(5_Q4/$_\4#Q/_$ X1_Q,3%O\8'RO_'R5%_Q$7,?\:%RW_
+M6$EB_V1I=?]D:6G_5EA5_X6#?O]134O_&A49_U).5O]_?H3_?WA__X!W?O^V
+ML;?_A7Q\_Z:=G?\<%QG_'!H=_QP;(?\<%QG_K:>E_X:!A?\[.3S_+RXM_X:&
+M??^=FI7_244]_QH8$_\:&!7_-"TG_[ZXO/__^OK_O+&P_R<B(O\D("+_)!\?
+M_XV*B?\/"PW_%1 4_[*HI/^WJZ?_'Q<5_QP9&/\?%1C_("0B_Y:6D_]?8F/_
+M-$)!_SA&1?\Z247_/DU(_T-/1O\1%!7_&Q8<_Q,1%/\8%AG_&1<:_Q<5&/\4
+M$A7_&1<9_Q84%O\<&AS_%1,5_Q at 6&/\<(2'_)C(Q_R(I*?\E+"S_&1TL_Q\;
+M&?\.%2'_(3MT_QH?./\G)B7_*RTJ_S0[-/\U.#G_+S,K_S$V,/\<,5/_*U:$
+M_Q9$CO\N397_.%%T_ZN8F/\:(A__'A07_WQ[=/^BFI;_8%17_^77V/_KW-W_
+M+BHH_\#!N_^PJ:/_J*&A_]S-T?^&BXO_86IJ_]74S?^]NK7_W]G7_\O$OO^\
+MM[+_C8B*_V!=7/\S,C'_&AP?_Q88'?\4%!?_&!89_Q(0$_\2$!/_$Q$4_R8D
+M)O\>'B'_&AH=_Q04%_\1$13_% \3_Q0/$_\3$13_$A(5_Q<:&_\:'2[_#1$@
+M_Q at 1(O\E&BW_)RTU_S8[.?]45E/_55)-_U103O\9%!C_44Y4_RXK,?\B'2/_
+M+B4L_VYI;_]-1$3_KJ6E_Q at 3%?\5$Q;_#@T4_Q0/$_^LIZ?_?'AZ_S at V./\P
+M,"W_EI:-_Z&<EO],2D/_'AX;_R,A(_\H(R/_M[6X___Q[?^JGJ'_(1XD_QH:
+M'?^%@(#_MK&Q_Q(.$/\8%AG_'186_RLD)/\9%1?_%1<:_VAB9O\5'!S_FIV>
+M_SH]/O\N/S__,4)"_S1&1/\Z3$C_2E-,_Q05%O\6$QG_'!8:_QD7&O\>'!__
+M%1,6_Q,1%/\6%!?_%!(5_QP:'?\:&!O_%148_QH>(/\B+S#_&B(D_R(J+O\3
+M'2?_(AX at _QH=*?\A.G7_)"9!_RHE(/]144;_.SLN_UA93/\Q-"C_'B =_Q at O
+M3/\?2G3_"39]_RI+E/\B/V__Q*^K_Q08(/\F(1O_='1Q_ZBCG?])/SO_ZMO;
+M__+<W?\B(!W_GY>5_];'Q_^YN;;_V\_0_YB9FO]67%[_S\C(_[JRL/_:TM#_
+MPKJX_[NSK_^EFZ3_4DU-_R(@'?\>'23_%!8;_Q<5&/\<&!K_$ T3_Q,0%O\3
+M$13_*RDK_RLK+O\9&R#_%!8;_Q$3&/\D)2;_-38W_SX_0/]+3$W_*"8I_Q88
+M&_\L,3'_9FAF_U!44O\\/CO_5U)-_VQ at 6_]_<VS_<FIH_QX9'?]02D[_(1PB
+M_Q *$O\L)B[_;&9N_UQ34_^MI*3_&A47_Q<5&/\/#QK_$A$>_R(?*_\6#QG_
+M%10:_Q<8&?^BG9W_IIZ<_TI(0?]&1#W_3DE#_U),1/]_=W/___#L_[&EJ/\>
+M&R'_&QL>_R4@(/]U<'#_&147_Q$/$O^JHZ/_LJNK_Q<3%?\3%1C_'AH<_R$I
+M)O^QKJW_P;R\_W=\@?])457_0TM-_T)+2_]*3TG_%Q47_Q,0%O\2$!/_&A@;
+M_Q02%?\5$Q;_$Q$4_Q04%_\2$A7_&1D<_QH:'?\8&!O_&Q\A_R M+O\B*BS_
+M(RDK_QHE+/\C)2/_)28Q_R9">_\?)#[_)R,A_S4V,/]'13W_5E9+_TU/1O\A
+M(R'_%2Q)_R!+=?\)-GW_)D>0_QPY:?^QG)C_%1DA_R8A&_]34U#_G)>1_T V
+M,O_NW]__[]G:_S N*__9T<__W]#0_WMY=O_;S<[_GYV?_T5)2__3T,__K:NH
+M_]73T/^\NK?_HYZ9_ZF?J/\T+R__'!H7_S4X/?\>(R/_%AH8_S at Z-_\S+S'_
+M&A88_W!M;/]&1$'_*"<F_S0V-/^BI*+_(R4C_U]A7O]*3$G_04- _S]!/O]9
+M6UC_3DY+_V%<5_^9C8C_MZ6A_\BTL/_1N+/_T;6N_\VTI_]40SW_(QHA_U5/
+M5_]X<GK_>'%[_WYW@?^NI['_J)^?_Z^FIO\:%1?_%Q48_Q<9(/\4%2#_(R(S
+M_QH7+O\:%RW_$Q B_QD5)?\4#!K_$ P4_S0Q,/\_/C/_2T@\_S at V+___\^__
+MMZNN_Q42&/\7%QK_=7!P_STX./\6$A3_&!89_R8?'_\;%!3_&Q<9_Q47&O]*
+M24C_-#PX_\.[N?_JV]O_X-'2_^+0U/_9R,C_T\/ _Y^:E/\?&QW_%1(8_Q,3
+M%O\8%AG_$Q$4_QD7&O\5$Q;_$! 3_Q 2%?\9&Q[_%QD<_Q45&/\9'1__("TN
+M_R,K+?\H*RS_'2HO_R4K)_\L+C;_)$%W_R4K1?\C'R'_)"8D_S\]-O]144C_
+M75Y7_Q at 9&O\.)$3_&41N_P8S>O\H29+_'CMK_[NFHO\@)"S_)2 :_TM+2/^A
+MG);_.C L_^76UO_PVMO_-C0Q_]C0SO_>S\__FI:4_][/T/^EH:/_2DQ/_]#)
+MR?^LIJ3_U,[,_\"ZN/_$O+C_O;.\_S(M+?\R,"W_2$9(_YN;F/^8EI'_/3LT
+M_ZFEH_\S+RW_KZRG_T ^-_^<FI7_.3HT_W5V</\G*"+_14E!_S8Z,O]$2$#_
+M0D8^_T9,1/]44$C_RK>O_\JRJ?_)L:C_PZF at _[RAEO_5N*S_OZRB_T,W,_\>
+M&1W_1T)(_Q<3%?\?&QW_)2$C_T(^0/\_-S7_HYJ:_QL6&/\7%1C_%!$7_Q<3
+M'/\-#!G_%!(E_QT9,_\<&#+_&Q at O_Q42*/\?&C3_)B,P_S0R+?\T,BO_4%53
+M___V\O_!M+/_'QXD_Q(4%_\=&1?_)!\?_R$='_\5$Q;_-"DH_T,X-_\?&AS_
+M&AH=_T1&1/\P.#3_P[FU__'<VO_GV]?_X=71_]W0R__9S<;_GI>1_R,@'_\6
+M&!W_%QL=_Q\B(_\='2#_(" C_R(B)?\E*2O_)2LM_R8L+O\J,#+_+# R_S(X
+M.O\E,C/_)C$R_RHO+?\?*C'_)3 I_R4G+O\D/W7_*3%+_R0B)/\G*"G_1T=$
+M_U153_]254__)B at K_P\D0/\80V__!C)\_R9'C_\C/FW_N*>G_S O-?\K)B'_
+M,S$N_Z";E?\T+"C_Z]S=_^_9VO\L*2C_ULW-_]O,S/][<V__U\;&_["KJ_]*
+M2DW_V]/1_[JOKO_*O[[_Q;BW_\:\N/_)O\C_+"@J_\"\NO]Z='+_L:RG_STX
+M,O^*@7K_3DQ%_V5>6/^,AG[_1$$V_Z&@F?\N+RC_JJFB_RPK)/]>8%?_0$0[
+M_T-'/O]!13S_6%]3_Z6>DO_)KZ;_Q:J?_\JRJ?_*LJG_Q["D_\JSI_\[+RO_
+M/CH\_Q\=(/\M*BG_2$E#_TI+1?]24TW_4E--_TQ(1O]+1D;_&147_Q45&/\7
+M$A3_%@X8_Q80'O\7%1[_%A,J_Q41*_\7%2__&ALT_R$://\>&"S_'1P;_R4D
+M(_^(@X?___+R_\V\MO\@'R;_%ALA_T-!/O\E)"/_%A06_QH8&_^,?W[_FHR-
+M_R$7&O\@&R'_.C\__S4W-?^_NK7_\.'<_^W?W/_CU=+_VLK'_^'1SO^RJZ7_
+M/T$__R\Z._\N.CG_+3DX_RTY./\N.CG_+SLZ_RTX.?\N.3K_-#] _S(]/O\N
+M.SS_+SP]_R8S-/\@+2[_'2XD_R$E-/\;*BG_,#@Z_R4]=/\H,TS_'R A_RDF
+M)?\I*2;_2DU'_SH^-O\V-SC_%R$P_Q\[9_\0/(#_'D!__RH]9_^?FZ/_0S at W
+M_R<E*/\K)B#_GYV6_S,N*?_KW-W_[]C9_RLF*O_/Q\O_Y,[/_[ROJO_)O[O_
+MM[&O_TY+2O_;TLS_M*RH_\G P/_6Q\+_T,"]_\W%R?\H*"O_R<&__]/%PO_!
+MO;O_.3TU_Z&8D?\K,B;_M*RB_T,],_^.C8+_H)Z7_UU;5/^LIZ'_0CLU_U]?
+M5O])3$#_8&-7_U583/])3$#_P[>N_]2YLO^LEH[_OZ:?_^'(P__:P<#_T[V_
+M_QL4%/\?'!O_7UU:_R8D'_\[/SW_+"\P_SL^/_\W.CO_.3T[_UY at 7O]?8&'_
+M2TM._Q00&/\4#QO_#Q00_QD7*O\6$2W_%Q,M_Q44,?\3$C?_.35+_S,R.?]$
+M1D3_0T5#_]'(R/__\_?_SKRY_Q\<*?\<'RO_3$E(_QP;&O\9%QG_&1<:_QP6
+M'O\@&B+_'!<=_Q at 6&?]'3$S_/3\]_[^ZM?_LW=C_\-W7_]"]M__HU]'_X-'+
+M_ZNJH_\Y0$#_+SL__R at P,O\L-37_*34T_R at T,_\G,S+_)"\P_R,N+_\C+B__
+M(BTN_R8Q,O\K-C?_*C4V_R<R,_\L-BW_-459_Q >)_\?(23_+4A^_RHT3_\C
+M(R;_+"@J_R\M)O]85TS_8V)7_UQ85O\-%B?_#"91_Q$Z?/\>1(+_)SE?_[*M
+ML_\K)BC_'ATC_R0?&?^<G)/_,2\H_^[BW?_QV]S_.3<Z_\K$R/_?S,S_N:RG
+M_\&WL__ NKC_4D]._]K1R_^\M+#_NK.S_\S#O?_6R,7_SL;*_R4E*/_,Q,+_
+MVLS)_UM34?\Y.#'_P;VU_R(G&_^ ?7+_.3PP_[>\L/]=6%+_I9Z8_YB/B?]Y
+M;6C_1$0[_U)52?]*34'_14@\_V%A5O_&N*__[]/,_^#'P/_ERK__\-?0_]+#
+MP_\@%A__%1 6_Q at 4%O\E(B'_+RTJ_Q83$O\U,3/_0S]!_QH6&/\F(2'_6%-3
+M_W5P<O^+A8G_$AT8_Q at 4._\L'UK_'!I!_Q at 9,O\3$"W_&18Z_R<E1O\R,$#_
+M)B4K_R<G*O\H*2K_V,K+__[O\/_&M+#_/CQ%_R0H,/\8%A/_&!<6_Q43%?\5
+M$Q;_HIV?_Z^GJO\9%!C_%A89_R4J*O\U-S7_M;"K_]S-R/_ at T='_CX:&_VIE
+M9?]=7%O_24]+_RLV-_\N.C__+C8Z_RHS,_\J-C7_)C(Q_R(N+?\F,3+_(BTN
+M_R(M+O\C+B__%AX at _QLC)?\=)2?_("@J_R\Q*/\D0%O_#!<O_R$='_\@.W'_
+M)"U+_QH9'_\C'B+_'QL9_SDW,/]*2$#_3$='_Q49+O\<+5'_'3=C_RE :O\?
+M*43_6E18_R at E*_\?("?_'AH<_R0B)/\H)B/_YMS8_^_<W/]#0T;_:V9J_]C)
+MR?^_LJW_MZVI_\2^O/]13DW_T\K$_[VSK_^HI:3_RL6__[&EH?_$O,#_+2TP
+M_[>OK?_>T,W_,R at G_Z&<EO]45$O_:FI?_\"VK/\V.R__G:*6_T$_./^[NK/_
+M03\X_ZZIH_]45$O_.SXR_U993?]254G_3DU"_]?'OO_TU<__ZL_(_]:_NO^I
+MEY/_2#\__Q,0%O\5$AC_&108_Q<3%?\P+2S_%A,2_QT9&_\G(R7_&!06_QP9
+M&/\M*"C_3DE+_YN5F?\/"S__IZ+,_[:SO_^NH]C_%!$\_R >/_\I)D/_8F-N
+M_SH[//\F)BG_)",J_RPL+__2RLC___+M_X1R:/\6%!?_%!8;_UY=5O])1T3_
+M'!@:_QP7&_\B%13_'1$,_QL6%O\4%!?_$Q at 8_RDM*_^KIJ;_W]#+_[*KJ_\P
+M-#;_*C(T_RLX.?\J-C/_+SP]_RHV._\K,S?_*3(R_RDU-/\D,"__)# O_RHR
+M-/\J,C3_("@J_Q at C)/\:(B3_("8H_R at N,/\F+"[_+"PC_QP[7?\'$R[_'148
+M_QTW</\G,U+_)R4H_QX9'_\D'R/_'QP;_R0C(O\H)BG_(QPC_QT;)/\C(RS_
+M(R ?_QP=*/\H(R?_)B4K_RPH,/\S+#/_+BDM_S<U./_5S,S_Z-;3_XZ/D/^9
+ME)C_U,7%_\>XL_^NI*#_QKZ\_UY96?_,PK[_D(J(_T=(2?_'Q<#_,BHH_\S$
+MR/\D)"?_N+"L_]C*Q__.P<#_A'UW_R(D&__*QK[_2#PU_ZRLH_^LK*/_,"\H
+M_ZNLI?\W.S/_D)*)_S\_-/]$13C_<7%D_V9G6O^1BX/_V,6__]["O_^ID8[_
+M)Q at 8_RD=(/\2#0__$0\,_RLK'/\R-1__,30=_R\S'?\N,"W_,C,T_S$R,_\Q
+M,C/_*BDH_Q43%?\8%AC_3DQ/_Q48._^AG+;_9F%<_Z>BNO\<&S__(R(__UM;
+M;O\S.#+_.3TU_R ?)?\K*#3_961J_\C"P/_TZO3_5T9&_R4A*?\;'27_'1P;
+M_QD7&O\;&![_&Q<@_QP8(?\;%Q__&18<_QD7&O\@'B'_&QH9_QH8%?\R*"O_
+MVL[*_TE24O\U/4'_-3T__S$\/?\O-SG_*C(T_R<O,?\?)RG_&R,E_QDA(_\9
+M(2/_("0F_QP>(?\7'1__&2$C_QHE)O\>*2K_(2PM_R$L+?\N-2G_-TEE_Q$9
+M*?\:&1C_(BQ._RLT1?\@(B#_'AHC_Q\A)O\B)2;_(R8G_R at M+?\P-3/_,C4V
+M_S0S.?\\.$'_/C8Y_UA33O^&?GK_M*BK_\&\OO^FFY3_1T1*_^3=W?_RV];_
+M>71T_]K2U?_9R,C_T+NY_X)Z=O_$N+G_)B$C_\+!P/]03E'_-#8[_[^]O_]?
+M75__O+&X_RLG*?^[LJS_WL[+_V%95_\V-2[_R<6]_V=F7_\_.C3_P[JT_SDW
+M,/^NHIW_;&9>_SHZ+_]T=&?_:6=8_X![:?]W;UO_='!A_RXI)/\@&QO_$PX2
+M_Q$0%O\3%!7_&!4;_Q at 2%O\7$@S_-S$A_SLW(?\T-"'_-C0L_Q<6%?\D(B3_
+M)"(D_R$?(?\7%1C_$A 3_Q,3%O\?(23_!@HP_S<N8_^TH]G_13AK_U%,9/\<
+M&27_/D%&_QL@'O\M,2__G9VF_WMYB?\J*S+_Q+^Z__#F[_]"+B;_-RLD_S0K
+M)?\\+2?_138P_S at K(/]$."G_3CTP_U5"-O]92#C_:UM(_W9<3O^#9TS_ at VY1
+M_X5\=?_AS\S_RL;(_S<Y/O\C*2O_(RLM_RPR-/\G+2__)2LM_R(H*O\:("+_
+M(RDK_P\5%_\C)2C_#Q$4_QLA(_\9(2/_&R,E_Q\G*?\B*BS_)2TO_RD]+O]H
+M;WO_)R,S_QTF+?\@*4'_)3-*_Q8A,O\].#[_0S\]_W!L:O^FGIS_R[Z]_]7*
+MR?_FU]'_[=?/_^[3TO_MV-;_Z=;,_]G)O_\G)"/_*"LP_Z"7D/]84U7_Y-O;
+M_^S4T?]Z=77_T<G,_]C'Q__9QL;_(R ?_[^ZO/].3E'_ at X%\_YJ6E/^;F9O_
+MR\?%_W9W<?_!N+C_*B8H_[JQJ__ at T,W_L*BF_\' N?]H9%S_/#<Q_ZJHH?]3
+M3DC_-C0M_\"[M?\R+B;_P;ZS_WIW:_][>6K_?WEI_WQV9O\\.C/_-#DU_P\5
+M$?\@)"+_&QP=_R(@(O\H)BC_)R8E_R at F(?\W-3#_'1H5_R0@'O]*1T;_$A 3
+M_Q(0$_\7%1C_$A 3_Q84%_\5$Q;_$1$4_Q@:'?\]0C[_(R<V_UQ><_^=D8S_
+M;&-=_TI%1?^1E)7_,SP\_T9+2?^1D9K_'APL_R at I,/^0CXC__^_B_[R2:O^Y
+MCV'_K8E<_ZZ#5?^WBU__M(E9_[2+5_^WBUG_O(Y;_[>*5/^WBU/_O8E7_ZF 
+M5O^1>5[_V,J[_^31R_\L+2[_$Q4:_Q(6&/\6&AS_$Q48_R at J+?\0$A7_)RDL
+M_QD;'O\F*"O_%!89_R4G*O\1$Q;_&R$C_QHB)/\?)2?_(2<I_R,I*_\I+S'_
+M+#LZ_R B*?\@+D?_.DEL_RDY9?]37X3_SLC6_]_-R?_IT,O_\-31__79UO_U
+MV=;_\M;9__36U/_KU,__X]O?_XV/GO_/Q\O_3CPX_S(Q-_\7(BG_GIB._W5K
+M9__EV=S_\-C5_WUX>/_*PL7_V<C(_][*QO^*A7__S\?#_W=U<O_*QK[_?G=Q
+M_XZ*B/_(O[C_;V]F_[^WL_\C(!__N[&M_][.R_]_=W7_3TY'_SHV+O_!NK3_
+M?7MT_S$P*?^SKJC_34Q%_WAV;O^!>6__=W)F_WYZ:_][=&;_;&9<_Q\='_]0
+M3$K_4TM'_R$;&?\@&QO_+BDM_SP\.?]04TW_-#<X_Q\C(?\;'2+_$@X6_QP8
+M&O\;&1S_&18<_QH7'?\6$QG_&!89_Q at 6&?\6%AG_&!H=_U!$6?]%/DC_8%Y9
+M_[FGL?\N(RC_GIN:_Y^@H?\C)B?_*"PJ_XF)DO\G)37_)R at O_S0X-O__\./_
+MS)UM_\J:9__+G&7_PY1:_\:46__"D%?_P8]8_\"35__ CUS_P8Q6_[Z04_^I
+M?4O_1B\8_V%C8?_,Q<7_W='-_QXG)_\0%1K_'1\D_QP@(O\8&AW_)RDL_Q08
+M&O\G*RW_&1T?_RDK+O\5%QK_*"HO_Q(4&?\=("7_'B(D_R,H*/\L,3'_,C at Z
+M_RLP-?\]0TO_H:*M_R@]6O]=9H__V<S?__#9VO_UU<O_\-G:__#:V/_QV=;_
+M[MS8_^_ at V__MX=C_X]C7_UUEA?\^49?_+4Q\_[W#T_]01$?_3DQ5_QTH+_^?
+MF8__?G5O_^79W/_LT]+_4D]._T(]/__8R<G_YM',_YJ/A/_+P;?_*B at A_[^Y
+MK_^.A7[_=W)L_\:^M/]>7E'_@'IR_R(@&_^QJJ3_X,[+_X1Y>/^HHYW_M;&I
+M_S(P*/\V-2[_O;RU_Z2?FO\C)!W_F96-_W%E7/]U;F+_:V9:_WET:/\@'1C_
+M'1\B_QD7%/\^/3;_,#,M_TU33_]G96?_8F)?_T9+1?\I+B[_(BDI_RHM,O\8
+M%Q[_&QD;_YJ2E?^@F)O_HYN>_ZZFJ?^SJJK_N[*R_[RUM?_$O[__Q,Z]_UM@
+M6O]866#_J[.O_RLM,/^6DIK_,2PP_S8R-/]W=7?_B8F4_R0B,O\='B7_+3,U
+M___T[O_4FV'_S)YQ_[Z%0__"DUW_OY!6_\256__ D5?_OY%7_[^,5__!D5+_
+MDFQ%_RT;#?]<4U/_8&!7_]+'P/_=SL__*S$S_RTR./\^0$7_)2DK_R8H*_\M
+M,S7_(BHL_R0L+O\6'![_+# R_Q06&?\I*S#_%!,9_S0S.?\A'R+_&!X at _R,K
+M+?\F+C+_*S V_S at T-O_4R<[_X,_7_^G7U/_MV='_\=K5__'9UO_SVMG_^-S9
+M__/7T__AR<;_^./?_Z.9E?\V,T#_)52._QXS@?\K4XK_P<+-_U=05_\G&"/_
+M='AZ_Y^7C?^'@GW_W=/6_][)SO\9'1__F9>9_\[ P?_8Q\?_9EE4_\_)P?\H
+M)R;_14(]_X)[=?\^/#7_6UI3_T$\-_\3$0S_# H'_ZJBH/_.P\+_Q+RX_U!.
+M1_] 1#O_6E1,_[:JI?]*0SW_+"TF_[>PI/^">&K_E8N _Y2)?O]A64__+RTJ
+M_S@[0/]A8V;_8F5?_U-43?\R,2K_3$I#_RXL*?\R-#+_(RHJ_QD='_\;'2#_
+M%QD<_Q\A)/\M+S+_EXV)_Z&4C_^CEI'_FXZ)_Y6,AO^1B(+_C(-]_XR#??^$
+M?7?_*B8H_V%A:O^VLKO_,BXV_XZ+D?\6%!?_$ X0_X5_ at _]Z=8'_*BDZ_RDI
+M,O],45;___'K_\6,4O_ DF7_SY94_\*37?_)F6+_R)AA_\657O_*EU;_R)EI
+M_V!%(/\F&A7_(AP at _R$?*/]/3TS_S\.\_^?6UO\O,S7_%QD at _QT<(O\C)RG_
+M&QT at _R,I*_\P.#K_*"\O_Q@='?\K+B__&!D:_SL\-O]&24/_*3 P_QDC)_\<
+M(R__(R8Q_S8S.?_1R,C_Z-;2_^W:U/_VW]K_^-_:__+<U/_SW-?_]]O8__;7
+MU__LU-'_QK&O_R49'/\O*3'_9FAO_Q 4*?\A5Y'_)#N(_R-,B/^]O<S_7UA?
+M_VA79_\?(B?_HIJ0_Y"+AO_=T];_V<G-_SI!0?_%PL'_WLO+_\F]OO\Z-S;_
+M'!T>_QX;(?\G(B;_)" B_QP:'/\9&AO_(!TC_Q at 8&_\7%1C_<6=J_XN#AO^>
+MF9G_='!N_ZNHH__.O+C_+B@@_R\R)?_'O:[_GX]\_YN.??^1B('_75Q;_TY1
+M5O\Y04O_-3]*_SD^1/\I,2[_+S<S_SY#/_\\/CO_(R(A_RXS,_\G+S'_*2XS
+M_QTA(_\A)RG_'R<I_RLX.?]B9&?_<G!S_VUK;O]H9FG_8&1F_UA>8/]46ES_
+M4%98_U%24_]/3E7_9F9U_Z^IM_]X='W_B(6+_X:$A_^IIZG_BH2(_W]ZAO\E
+M)#7_.#A!_VQN<?__].[_UYYD_\Z@<__6G5O_S9UJ_\J99/_,FV;_S9QG_[:/
+M7O]@/S__'1 C_Q\A'_]64$C_:F1H_S] .?_3Q\#_Y]/5_S at Z/?\='"/_&18<
+M_RTQ,_\V.#O_)RTO_RDQ,_\M,C+_%!@6_S4W-?]%1$/_+B\P_Q04%_\;'2#_
+M*BPO_U-.5/_/Q,G_Z]C8__3=V/_WW-7_]]W2__;<T__YW=K_\-?2_^_8T__J
+MU='_II21_WEQ=/\S+C3_+2\W_VYT?O]M;W+_'B O_QA,A?\A/(G_)$R*_[Z_
+MRO]\=W?_(Q,A_Q 3&/^:DHC_AX)]_]#&R?_?R<O_FYV;_\G%P__4Q,'_V<K%
+M_SLS-O\5%Q[_&!@;_R4C)O\>'B'_(2,F_RDM+_\H*C'_3E%6_Q46%_^1B8?_
+MT\# _]"]O?^DF)3_3$,]_T-$/?]35T[_BH1\_YR+A?]I8V'_6EI=_SQ$3O\W
+M1%7_-T=;_S%"6?\Z25S_,SQ#_RHP,O\A)RG_-CH\_Q88&_\G)RK_,C at Z_S,_
+M0_\K,CC_+CH^_RPW./\9(2/_*"PN_]K2U?_;TM+_X=C8_][5U?_HV=K_Y]77
+M_^C6V/_GU=?_]./C_]3)SO]X<W__L*VY_WIV?O^ ?8/_L[&T_ZVKK?^"?(#_
+MA'^+_R<F-_\E)2[_;&IL___RZ?_?J&[_Q)EM_]VF9O_'FVG_SIIH_\.06_^_
+MDFG_0!\W_S$B0?\-%!3_'1L=_YF)@/^- at 7S_3DM _];)Q/_QV]S_-#<X_R ?
+M)?\:%QW_+# R_Q,5&O\A)2?_'",C_S U,?]$24/_,C(O_QH8%?\H)R[_&Q<?
+M_VYE;/_=SM+_]-[<__G?W/_XW-C__^#<__C<V/_XWM7_]]_6__'8V?_QU]3_
+M;EI6_S$H(O\M)Q__+"@J_RTD'O\J(1K_-3 K_R at B&O\<'2C_'4R"_RA&D/\F
+M38C_O<+(_Y:4C?\D&2#_&!H?_X^)?_^'A'__R\+"_]W$Q?^\M;7_R,+ _]'$
+MO__9Q+__64Q+_R at L+O\B)R?_'R(C_Q<:&_\?)"3_+S8V_Q,8'?\3&1O_%1D7
+M_S<R+/_.P;S_P;FU_SD[,O][A'?_CXV(_RX@'?\<%!+_#1P<_R] 3O\U157_
+M-TMA_S)'8_\N0U__,49>_S%"6?\T/$;_%A@;_Q06'?\Z-SW_&18<_Q<9'O\O
+M-SO_'2DM_R$J,?\C,33_(S Q_S R-?\K)2G_Y]//__+;UO_OVM7_]^+=__?B
+MWO_ZY>'_]>#;__?BW?_]XMO_S[FZ_VAB:O^IJ[#_D8V5_WY[@?^XM;O_IZ6H
+M_VUG:_]Z=X/_(R0U_R at I,/\I)RG___7K_]FE;/_-IGS_VJ1A_\JA8/_%E5[_
+MO91C_Q\- /\@&#K_,"I&_RHI,/\\/#G_3TI%_SDT+O\]-3'_U,3!_^W9U?\V
+M.SG_(R at M_R0D)_\M,C+_%AH8_S$S,?\_/CW_+3(R_R(F*/\G*2S_'1@>_VQF
+M9/_$M[+_[M;3__'7U/_VW=C_\][9__'>V/_XW]K_[MO3__G<V/_BT]/_4DI.
+M_QT;)/\L-TC_'"(L_VMB6_\@+$G_*20D_R<I)O\S/5+_)R<T_Q at M1O\]:)S_
+M,UB9_RU+ at __$QM7_GIZ;_QT5&/\9%QK_ at H5Y_X>%@O_)O[O_ULC%_[ROM/]J
+M8&/_T<7!_\R]N/]M9V7_)B at K_R(G+?\V.3[_)BHL_TI.4/\[/T'_1TQ,_SL^
+M/_\@(2+_'QT?_];*S?_&O+__&A88_QX:'/\Q-CO_*B0H_Q8>(O\N/TW_,3]8
+M_SE#3?\]4&+_,$1 at _R8S1O\1%Q__.D=8_RXY1/\=&AG_*S \_RPL+_]A863_
+M+#0^_S=!1?\@+"O_+#4[_R0Q-O\K-SO_)BPN_RTK+?_GV-/_[]K5__+=V?_U
+MW]W_^.'B__;@WO_VX=W_^.'<___GX/_FU=7_<G%X_ZVOM/^9E)K_<6QR_["K
+ML?^HHZG_ at GU__UI<9/\?)C3_,#$X_XN,C?_]\.O_X:MQ_\ZC<__=HU[_VJ=K
+M_ZV%:_\H&Q;_&!,7_Q )%?\8$QG_-C,N_SDT+_\Q*2?_&Q<5_S K)O_+O;K_
+M[=O8_SD^/O\<(2?_(!\E_S$W+_]'2T+_,S0N_QP9&/\I)2?_)R @_XU_?/_8
+MP[[_[MC/__?=U/_[W]?_\]W4__C>U?_TWM7_\N+8__7BV/_AS\O_V,7+_SQ$
+M5/\M057_.T5:_R=!;/\7("[_GY.$_R Q5?];763_%QXD_QLS6O\C,$G_$BQ-
+M_R=6C?\L69G_,4V&_[Z^S?^NL*W_0#@[_QD4&/^!A'C_@'Y[_\:\N/_5QL'_
+MNJZQ_X%Y??_&O;W_R;V^_T1 0O\/%1?_&!HA_R,G*?\3%QG_%!@:_Q@<'O\1
+M%!7_'A\@_S$O,?\Y-3?_-C$W_S at Y0/\W.4#_'!LB_R8O+_\B(2?_'2HW_RT^
+M5O\Q/E'_)2HO_QXH-_]%3V3_EI6;_ZZII/]W?HK_,#I(_[*MK?\Y/TG_04)#
+M_]'-S_\P-#S_-3U!_QLD)/\L,SG_("TR_S$]0?\8'B#_%187_]_0R__RW=C_
+M]=O8__G<V__WV]W_]MW>__#9VO_OVMC_[]G7_YJ.D?^$AHW_J*NP_Z";G_]F
+M867_K*>K_Z:AI?]S;G#_$1(=_R F-O\N+C?_H9V?__SPZ__=IVW_RJ!R_\:7
+M7?^ND'K_.2(S_QX7-?\K)$+_*"$S_QP7'?\?&AK_(!D9_QH3&O\=&!S_*24C
+M_\["O?_SW-?_/STZ_SDW.?]A6EK_-C at U_QP<&?\G)2+_+RHJ_YB,A?^FEHW_
+MT[VT_^_3R__ES\;_\-C/_^W1R?_LULW_\M;3_^+)R/^RH)W_N*:C_T8Z-O_6
+MSMC_-$=9_S1.7/\[/DG_.%:"_Q\?(O^QFGW_)3MB_T-%3/\Z/S__'#%4_Q8@
+M-/\@,D[_%SAM_R=,C?\L0'K_N[G)_X**AO\D("+_&!,7_W=Z;O]V=''_M:NG
+M_]?'OO^^L;#_8%M?_U)-3_^PJJ[_B(J/_R$F*_\<'R3_&1T?_RTQ,_\R-CC_
+M)RLM_R4F)_\>'![_'QL=_YF4EO\X-SW_1DM1_R\T.O\7$QO_("0B_R$>)/\A
+M+CO_,$!0_SD[0_^IHJ+_H**Q_TU79O][>W[_YN#>_Y*9I_\V/TW_P[BW_S@]
+M0_\Y/CS_S,C&_S<Y0?\Z/T3_&B$A_S U._\3&B#_'B,H_RPJ+?\I(23_Y]//
+M_^W8T__KV=7_\N3A__KDW/_VW=;_\=S7_][,R/_IU]G_24-+_WR%C/^8FZ#_
+MI:.E_TI&2/^DH*+_HIZ at _WES=_\G)S3_(R at Z_RPL-_^MI:C___+I_]FC:O_ 
+MEVW_?5PX_QT;%O\1#AO_%!$>_QD1)?\T+D+_$ T9_Q84%_\8$QG_% T5_Q(/
+M%?\K*"?_Q;NW__#6S?^KHIO_:V5C_R,9%?\E("3_&10:_Q,.$/\<%Q?_$@T-
+M_R(='_\?&!__(QPD_RH?'O\T)"'_.",A_RX<&?\G&1K_(AT?_QTB(/]U>7?_
+M0T5#_];1T_]::WO_&B8J_Q89)/\F3H;_&A\K_W)B4O\=-%[_<W-\_U953O\B
+M,$?_7&)L_RDP1O\+'TS_(CQ[_R at Z=/^XM<?_76-?_S<X.?\4$A7_='9M_U)0
+M3?]G7UO_T<*\_[JOKO]*2$K_B(6$_XB#A_]*3%'_+#$V_RTP-?\E*2O_*2LN
+M_R$C)O\3%A?_&QT;_Q\='_\?&QW_BX2$_S,S-O\Q-#G_*R\Q_Q at 3%_\4$@__
+M(!L?_QPF,/\@+#/_O;>[_^C;VO]\?8[_/$Q6_W%X?O_?VM[_-SU-_S8]2__'
+MN[?_049,_SD_.__8TM#_,C,Z_S(U.O\K+S'_*RTT_RHJ,_\;'2+_O+2X_X1X
+M>__CSLG_D7UU_SXU+_]&1#__\N'4_Y^'>/^LFHS_ at W=N_]_0U/]!0$?_769M
+M_T!#2/^;F9O_&147_YJ6F/^GHZ7_<&MO_R(B,?\E*#K_+R\X_ZZFJO__[M;_
+MQYMS_SX>!/\<%17_&108_Q40%/\7$A;_%A$5_Q,1%/\6%!?_%1,6_Q43%O\4
+M$A3_%!,9_QD4&/^^LJ[_Z-7-_VI=6/\E'!S_2D \_TI%1?\9%1?_%A(4_QH6
+M&/]12T__-2\S_R8@)/\Y,S?_%A$5_Q<2%O\9%!C_&A49_QL9'/\5&1O_.D1!
+M_U%64/\R-T/_[-O._S5#6/\G(R'_$!8F_S1:C?\;&RK_4T X_QTI1O\9'"[_
+M&AH=_Q4=,?\;)3#_'2 Y_Q <.?\C-V3_(#UW_ZVQQO^2C8__+2\T_Q89&O]P
+M<F__-#4O_TA&/__+N[C_KZ6H_RDK*?] .S7_N[.O_XF$A/]]?G__*2TO_QL?
+M(?\>&R'_)R(D_TA#/?\9&A3_$1$4_RDG*O^&?GS_+RTP_Q,6&_\0$Q3_*BXL
+M_V=F9?\P,3+_'2 E_SD^1/\U,S;_+2PK_T!#5?\Q04K_ at I"/_]W9V_\P-$G_
+M-D!/_[^WL_\U.D#_/$$__\W)Q_\O,3C_-C<^_SX^1_\G*C7_)24R_QDB*/^'
+M at H3_M:ZU_][3R/^&<67_4U)'_RTL)?_?T,O_CGEM_ZF8B/]H9%S_TL+&_S]!
+M2/\V.D+_0T)(_YB6F/\B'B#_E9&3_Y>3E?]J:&O_)24P_R$C,O\T-#?_K:BN
+M___MX/\L'P[_'QPB_QT:(/\A'AW_'AD=_QD4&/\9%!C_%A07_R >(?\O+3#_
+M-S4X_SD[0/\_0$?_5E%7_\:YM/_NV]/_BGQY_T4V.O\I%QG_%@\/_Q,/$?\8
+M%!;_7%A:_VME:?]=5UO_2$)&_UM56?]54%3_%1 4_QX9'?\2#1'_C8>%_ZVE
+MH_^]N+/_N+"L_Z6DJ__"L:3_4F)\_R,@'_\3&2/_1EN+_QD;(_]>4TC_$Q at Q
+M_QH<*_\2$A7_%ATK_RHS.O\5%RS_#A4I_Q ;/?\C07G_HZF__XJ"AO\I(2O_
+M%1,6_V9E9/\B(A__,S0N_XZ%?_^BG)K_%Q47_RHG(O^(@'S_C(6%_Q\:'/\9
+M%QK_%A06_Q04%_\8%13_,28?_V5=6?]84U?_-#(U_XB ?O]544__@()__X:(
+MA?\W/#C_&!L at _V-C9O]N:6G_L*RN_[VZN?]W=V[_.SY#_SA#1/]\B(7_W-C:
+M_SD]4O\Q.TK_OK:R_S,X/O\^0T'_S\O)_S(T._]"0TK_-S= _RTP._\E*#/_
+M(RTQ_[RSL_^VJ[#_X-/&_XAN8/]223O_8EA-__'<U_^/>F[_G(]^_VEC6__4
+MQ,C_.#I!_S@\1/\^/4/_D(Z0_R <'O]>6ES_-# R_VUK;O\F)C'_(2,R_S,S
+M-O^OJK#_^-?H_Q@:&/\6$AO_& X7_Q02%/\5$!3_$PX2_Q at 3%_\Y-SK_/SU 
+M_TI(2_]%0T;_0$))_T-%3?]'0DC_T+^Y_^W7S_^$=7;_(!@<_QP7&?\7$Q7_
+M&!06_R\K+?],2$K_6U59_TQ&2O\4#A+_(QTA_S$L,/] .S__&A49_Q0/$_^Z
+MKZC_Q;&I_Z")A/^ ;&C_:V-F_[BFF/\S1F3_&!D:_Q(7'/\C)E#_%1@=_UQ8
+M2?\:&"O_'1TH_QH8&O\6&"#_(RPR_Q86)?\4%A[_&!PQ_R(Z:_^4EZC_I)N;
+M_T,^0O\<%AK_03Q _QH8&O\;'1K_2DA _Z&>F?\:%AC_+"LJ_T! /?\R+R[_
+M$P\1_QH5&?\>&QK_*"PJ_VEI9O]B4DG_MZJE_\*ZOO\E("3_9U]=_Y^7D__B
+MWMS_<7-Q_TM/4?\V.#W_7UI5_V)64?]/35#_6%)6_TU-0O]+2DG_&1P=_W5^
+M?O_=V=O_/$!5_RHT0_^_M[/_1$E/_S@].__/R\G_*2LR_ST^1?] 0$G_*RXY
+M_R8L-/\9)"7_P;:U_\*RMO_CU,[_=5U4_U!&._];44/_Z]3/_W]L8/^+ at G'_
+M9F!8_]3"QO]!0TK_.CY&_T5$2O]^?'[_(Q\A_Q\;'?\B'B#_:6=J_Q at 8(_\5
+M%R;_3$Q/_[*ML?_YV/?_%A$5_R$1%?\=#QC_%!(5_Q40%/\8$Q?_%Q(8_PX)
+M#?\U,#3_1D1'_U!-4_]&1U+_5EED_TQ'3?_&M:__Y]+._XQ_?O\;&1O_%1 at 9
+M_Q43%?\8%!;_&A88_Q82%/\1"P__#@@,_R :'O\;%1G_% \3_R ;'_\4#Q/_
+M'AD=_S\^,_^]M*;_P+*I_[RVKO_'O[O_JIN._TA;>?]34%;_&A\?_RLF2O\<
+M'B'_6%9'_QX8)O\?'2;_&1<:_Q04'?\E+#+_$Q8B_PP-%/\:&B?_#!I!_T9&
+M4?_ N;/_)B at F_QX9'?\6$17_%A89_Q(5%O\E)A__5E-._R,?(?\A(R'_)"DE
+M_S]$0O\?(R'_4U%3_VYL:?^*B8C_'1\=_XMX;O_(M+#_UL?+_R,=(?]:5%+_
+MP[V[_^+:W?^!?H3_*"HQ_R ;(?]Q96'_?G-L_SH\0?\7$AC_6%9._S0Q,/\Z
+M.#O_;7%S_]G6U?\Z/U'_,3M)_\*ZMO] 14O_.3X\_\C#P_\G*RW_/D!#_T9'
+M3O\H*S;_,C8^_Q<B(__#N;7_QK2V_]?.R/]20SW_0CXV_U!(/O_LU-'_<&%4
+M_W9R7?]A6U/_U</'_T5(3?\Y/47_-C,Y_V]M</\E(2/_'AH<_R <'O]U<W;_
+M@'^,_RPM/O\Q,SC_L*NO_^W;Z?\B&"+_#Q 7_Q 2%_\:%1?_&106_Q40$O\9
+M%!;_(!H8_S(N+/]*14G_.S<__TM(5/])25;_.34^_[FLJ_]K75[_2T,__V]J
+M9/]?6UG_%Q(6_T5 1/^CGJ+_&108_R4?(_])0T?_A'Z"_V]I;?]G867_9V%E
+M_X!Z?O\>&!S_+RHD_ZF?E?^/@7C_:V-9_W%@6O^2C8C_0%-E_R$9)_\9&1S_
+M(R$T_R A*/]F85O_%A(:_Q\?*O\6%B'_'AXI_R<J-O\M,#S_%!0A_Q03(/\7
+M&2[_'R$I_X>$@_\6&!W_&1@>_R<E*/\P+C'_'1L>_QX='/\M+"O_-#,R_T)!
+M0/\N+2S_*RTJ_WA]=_^@GI?_CHB&_^#5VO\;'!W_;5Q/_]W(P__MU=K_,2DM
+M_T$\//^UM[7_XMC;_X:$A_\R.C[_'1LD_VID:/]H96#_4590_VEG8O]$03S_
+M8U]=_R4B(?^)B8S_X]W;_SQ 3_\\1E3_M[*M_T%&3/\]0$'_H)ZA_S Z-_] 
+M1D+_24A._S8Y1/\I*C7_(BHL_\2YN/_$M;G_V<W(_TU$/?]!.S/_4D= _^33
+MT_]<4DC_9V13_U=*1?_5P,;_1$=,_X&$B?]V<7?_6%):_U505/\=&AG_'1@>
+M_V]J;O]Q;7W_*"E"_RPN-O]I;&W_XMCA_R at C)_\>(![_,S$L_S<S,?]!/3O_
+M3TI%_U502O]&03O_03XY_T9$0?\\.CS_*BDP_S(S/O\X-C__,BLK_]7(Q_^H
+MGYG_=G!H_VED7_\=&!K_(!L?_Q(-$?]P:V__'1<;_T$[/_\;%1G_65-7_Q80
+M%/\E'R/_:6-G_QT7&_\='AC_96-<_Y")@_^;FI/_C7MX_Z:<F/\_4%[_%1,C
+M_QD;'O\@'C'_&1DB_R4C(/\M*S3_+2TX_RLK-O\E)3#_("$L_R8G,O\4%!__
+M$Q <_Q at 7*/\<'2C_6UU at _R,B*/\D(A__-C,N_T9#/O]"/SK_/CPY_QX>&_\_
+M/SS_0T- _R0B)/\?(1__/T1 _V=D7__-R,/_ZMO at _R<G*O\]-2O_U\; __/;
+MX/\G'R/_-"\O_[BUM/_AU=C_EY>:_TM56?\6&!__)R at I_VUL9?]+3T?_8F-<
+M_RHK)?]?9&#_45E6_XJ.D/_AV]G_/T-2_SQ&5/^ZM;#_049,_T!#1/_+R<S_
+M.$%!_T%&1O]*25#_,30__R at J,O\D+"[_P[BW_[ZOL__/QL;_0S\]_SLY-O].
+M2$;_V-#3_T='1/]04TW_249%_]/"RO]15%G_MKF^_VED:O^DGJ;_)B$E_VIG
+M9O^AG*+_<6QP_W=S at _\L+4;_+C X_V%@7__GVMG_/C at V_S8U-/\Y-SG_/3]'
+M_QXA+/\;&B#_(Q\A_S(K*_\U,#+_.S<Y_SLZ.?\S,S;_+2TV_S<Y0?\J*"O_
+M:EU<_UI/2/]V;F3_AG]Y_QX9&_\8$Q?_'1@<_QT8'/\;%1G_7%9:_QX8'/\:
+M%!C_'A@<_R<A)?\F("3_(AP at _Q4.#O^>D9#_,R0D_R09&/\G&1K_3D=!_SA#
+M3O\:$R3_%!46_Q<4)_\I+#?_,3,V_R0D+_\='2C_(2$L_QP<)_\4%A[_$! 9
+M_Q,1&O\3#QC_%A, at _Q<:)?\H+3/_/#H]_S\].O\].S;_/3LV_S\]./\Q+S'_
+M'AP?_QH8&_\<&AW_'!H=_Q<7&O\8&QS_BX>%_]#(Q/_IU]O_*28L_S4S+O_4
+MQ\+_[]?<_RXF*O\Q+"S_MK&Q_^G9W?^8F)O_6&9I_YRDKO\3%AO_3DY+_WM]
+M>O]76%G_*RTP_RDS-_\X1TS_ at XB-_]O5T_] 1%/_.4-1_XN&@?\^0TG_1DE*
+M_\O)S/\Q.#[_0D5*_SX]1/\I+#?_-3<__R at P,O_!MK7_OJ^S_]/1SO]#0T#_
+M04=#_UI:5__,RL?_3U%._UA=6?];75K_T+_%_TI-4O^TM[S_:F5K_YR6GO\F
+M(27_IJ.B_YV8GO]J96G_=7&!_RDJ0_\R-#S_B82(_]C0WO\3%B?_.4=<_Q$>
+M._]!3&7_*"]%_QL=+/\<'"?_(1HB_RTF+?\P*S'_,S$T_S0O,_\_04;_-CA 
+M_SHX.__8R<G_S+VW_WEN8_]Y;6C_&A47_V!;7_]13%#_%Q(6_R(<(/\E'R/_
+M3DA,_UI46/^ >G[_,"HN_Q80%/\W,37_&1<9_QP8&O\B'1__'QT?_Q00$O\N
+M*2/_/45/_Q0*&?\3$13_$Q B_R$D+_\@(2C_'!PE_Q<7(/\<'"7_&AHC_R(B
+M*_\;&R3_%A0=_Q<3'/\5$A[_%!0A_Q49(?](24K_24M)_RHM+O\<'R#_&AT>
+M_Q@:'_\8%Q[_&A8>_QX?)O\F)2O_.#H]_QPA(?\Z.SS_O+FT_^?5V?\F("C_
+M+C(P_\*VL?_UW>#_*B(F_R\J*O^SK*S_V,O*_Z*=H?]7:V__FZ*N_Y>9H?\G
+M)BS_'ATC_R(F+O]E9FW_L;;"_T9=:?^"BY'_V]73_T-&4O\Z1%+_LZZI_T-%
+M3/\[/C__OKR__S$V//\[/47_0#Y'_R<K,_\N,#C_)2TO_X%Y=_^[K[+_W,K&
+M_][+Q?_>S<?_W<G!_^+)PO_0N[;_V,3 _]&ZM?_?R<O_4U9;_Z6HK?]74EC_
+MBH:._R4@)/^BGY[_FY::_V5 at 9/]P;G[_)RA!_S,U/?]N<WG_Y]3D_R,=*_\P
+M.DC_&R R_R$F./\8&RS_%14 at _QL6'/\9%1?_'!@:_R<C)?\U,3/_0SP\_S8Q
+M-_\Q,#?_+B at L_V5=6_]P:6/_3$D^_TY+1O\B'1__&1,7_Q<1%?\4#A+_&!(6
+M_R,=(?\9$Q?_%A 4_Q\4&?\7#Q/_%Q$5_QH5&?\6$1/_)!\A_R0?(?\D'R'_
+M$A 2_QP7$O\,$1W_#Q,B_Q(.%_\8$Q__%A$=_QP7(_\<&B/_%Q8=_Q85'/\9
+M&!__'1TF_QL;)O\;&R;_%14 at _Q(0&?\3$1K_%A4;_RPM+O\N,#/_&1L>_R,E
+M*/\F*"O_("@J_RHN,/\M+3#_*R\Q_RDJ*_\O+S+_'B(D_T-(3?]%2DC_T+_%
+M_R\F+?\H*2K_KJ*=__7=X/\I(27_+"<I_[RPL__2T<K_LZ*M_TIG<O]RAHK_
+M25QD_VYWA?]24E__5%QL_SY*4?]XDJ7_3FAV_WJ @O_;T\__:&=T_S [1O^@
+MGI;_0#]&_SM!0_^^NKS_+"\T_SD[0O]+25+_*RXY_R4H,_\C+S/_ at 7Q\_\>\
+MP?^GDXO_O:.6_["6B/_!J)?_R+&?_\VRH?_!I9?_R;*F_]O'P_]:7%__KK"X
+M_UA56_]U<7G_)R(F_YN9EO^/BXW_75A:_VMK>O\I+$7_-#0]_VA=8O_MV>/_
+M(A at A_QT<(_\='"/_*2,K_Q at 2&O\8$Q?_'QT?_R0@(O\>&AS_'!@:_QH6&/\>
+M%Q?_)2 F_ST\0_\F("3_HYF<_[2NK/^)AH'_+"DH_QX8'/\4#A+_KZFM_QL5
+M&?]P:F[_*"(F_UM56?\W,37_F8Z3_V1<8/^"?(#_$PX2_Q40$O\F(2/_)R(D
+M_Q\:'/\.#A'_%A(4_Q(>,_\H-5+_$A$>_Q(/&_\4$1W_%Q0 at _QL8)/\>&R?_
+M'QPH_QH7(_\@'B?_'QTF_QH8(?\2$!G_$1 7_Q(1&/\3$AC_%!,9_R$C)O\>
+M("/_.CP__SH\/_\4&AS_-SD\_U!+3_^4B8[_:V1K_X)]@_]"0$/_8V%C_Z6C
+MH/_IVMO_*R4I_RXN*__-P;S_\MK=_R8>(O\I)";_L*"H_\# O?^ZJ;G_,TQ=
+M_V9U>_]OA)#_;H:9_UEXBO]RD*;_;Y"<_V*-G_\H/$K_ at H2'_]C0S/].35K_
+M.41/_UQ>5?\Z.T+_,S<Y_W)S=/\P,SC_/#Y%_T$_2/\R-4#_*RPW_R0N,O^[
+MLK+_S<#%_\"LI/^WG9#_LYF+_\.JF?_$JI7_S:^:_\.DDO_)L)__X,G$_W1U
+M=O^IJK'_7UI>_V9B:O\E("3_E9.0_XR(BO]E8&+_=76$_RTP2?\P,#G_KZBH
+M_^G<Y_\;$1K_)A\F_R ;'_\B'2/_)!\E_R<D*O\M*C#_&!,7_QT9&_\7$Q7_
+M%1$3_Q<0$/\<%QW_/3Q#_R0>(O]",CK_,2 F_R$5%O\F&R#_'!,:_QT7&_]-
+M1TO_&A08_Q<1%?\<%AK_*B0H_QP6&O\>$QC_% P0_QP6&O\7$A;_%A$3_QT8
+M&O\G(B3_'QH<_QL9'/\>&AC_$AHJ_Q8=,?\7%R+_&QLF_QL;)O\;&R;_'!HC
+M_QT;)/\>'"7_*2<P_RXK,?\P+3/_%Q0:_Q,0%O\3$Q;_$A(5_Q44&_\4$AO_
+M(B0I_SY 0_\J+"__*BPO_RXN,?]45EG_0T5(_Q</$_\7$QO_-#,Z_S$L,/]+
+M1D;_J:*<_^O8V/\J)BC_+2HE_\F]MO_KT];_+24I_S(M+_^SI*G_?()Z_[NQ
+MN_] 5&+_DXB/_[Z]RO]"5VG_1W*$_T=]A_^TO\;_8%AL_RY#3_]N>7K_WM;2
+M_TE(5?\M.$/_+#0J_TU.5?\[/4#_+S(S_R<L,?\Y.T+_2DA1_S(U0/\K+#?_
+M&R,G_YR0D?_,O<+_PZ^G_[ZDE_^]HY7_OZ:5_\:NF__%JYC_Q*:6_\2MH/_?
+MQL'_<7!O_[.SMO]13$[_65-;_R8A)?^0CHO_B(2&_V9A8_]F9G7_*RY'_S(R
+M._^HH*/_Y=OL_S0L-O\M)R__'ALA_RHL,?\C)2S_(1\H_RXI-?\?&B#_'AH<
+M_Q at 3%?\:%AC_)B$C_R4C)O]!0$;_)R,E_Q at 3&?\7%1C_%A<8_Q06&_\0"P__
+M&A08_QD3%_\9$Q?_$@P0_QH4&/\3#1'_&Q49_QL3%_\1"0W_$0L/_Q$,$/\6
+M$1/_'!<9_R,>(/\C'B#_'1@:_R,<%O\<'B/_*",I_QX:(O\='2;_'1TF_QP=
+M*/\L*S+_,C$W_R ?)?\8%QW_&A<=_QH7'?\9%AS_%Q0:_Q<7&O\='"+_)B4L
+M_QT:)O\7%AS_7EYA_S P,_],3$__+BHL_QT@(?\Z0D3_&1L>_Q47'_\<'"7_
+M$Q87_V-?7?_$O;?_ZMK7_RHF*/\G(1__LJ:?_^/+SO\P*"S_,"PN_\.WL_]N
+M=W#_N[6Y_S]27/^0D9+_PL3)_S<]3?\Y057_>H&'_]3,RO]H:'?_/$95_SQ"
+M1/]T;&C_3$M8_RXY1/\1&Q+_(B0I_S at Z/?\5&AK_)BLP_SD\0?])2$__,C8^
+M_S0U0/\@)2K_JZ*B_\:WO/^LFY7_JI2+_ZB3A_^?BWW_EXA[_Y>%>_][:&#_
+MBGQS_^?2S?]C8V#_JZNN_TI%1?](14O_(!L?_X)_?O]I96?_9%]C_UU=;/\C
+M)SW_/#U$_Y60EO_DT>?_*28L_S V0/\V,S__(R,L_Q,4'_\3%!__.SQ'_RLD
+M+/\N+"[_/S0S_UI25O]965S_-#<X_RTR,O\F)23_(AT?_R4@(O\B'1__)2 B
+M_RPD)_\D'!__*2$D_RLC)O\I)";_(QX at _R@C)?\F(2/_)B$A_R$<'/\D'Q__
+M)!\?_QH5%?\B'1W_)R(B_R0?'_\B&A[_(QP<_R$?(O\8(3+_$!<I_QX>)_\L
+M*2__(" I_QX;)_\6$1W_%Q(>_Q<2'O\6%!W_&1<@_QT;)/\?'2;_&AHC_Q<7
+M(/\<'"7_,S,\_U=46O\J*"O_$ X1_QH8&_]@8%W_>'IW_SP_0/\5%QK_'B E
+M_T- 1O\9)B?_65M>_\6RLO_RU='_-R<O_R,C)O^)BX+_R[>Y_S4J+_\Q+S'_
+MSL*Y_V]UA?_,P<#_.TA9_Y".D?_*Q<7_-#Y-_R\[2O]Y?G[_V];1_VUO?O\R
+M0E7_+3LZ_U122_]245[_,CQ*_QD='_\5&1O_,C8X_SH^0/\Z/T7_/3]"_TQ.
+M4?\P-#S_)2LU_R$D*?]K:&?_N["U_]/(Q__-P[__U,K&_]S2SO_EV=7_Z-K7
+M_^77U/_JW-G_]N+>_UQ>7/]L;G'_)"$@_SLY._\?'2#_1D1'_QH8&_];6F'_
+M'A\P_R0G./\T.#K_ at H""_^'3[_\K+37_2%%?_RXP/_\:'2C_)BHY_QTA,/\0
+M%"/_5U!:_VEK;O^^L+'_Q+G _V5E;O].4%C_1DE._RHE)_\E'A[_(1H:_R,<
+M'/\C'!S_*"$A_R$:&O\I(B+_+B<G_S$J*O\F'Q__*B,C_RTF)O\J)27_)R(B
+M_RTH*/\K)B;_-"\O_RDD)/\E("#_(1P<_Q\A'_],/S[_/#4__T-GC/]4;(W_
+M#10H_Q<4(/\7%1[_&!4A_Q,0'/\8%2'_'1HF_QH7)/\?'"G_&18C_R =*O\T
+M,CO_*2 at O_TA'3O\?'B7_'AP?_Q,1%/\/#1#_"PD,_Q<2&/]".#O_'A00_Q@:
+M%_\6%Q#_-C4N_R$X,_]-7V/_/T%(_]?(P_\R)R[_'!\D_SA"./]E65K_-2\S
+M_S at T-O_*OK7_)2L[_\_$P_\V0U3_F9>:_\W(R/] 2EG_.454_VIS<_]S<&O_
+M?7^._S% 4_\T0#__1T4^_U958O\B+#K_/D)$_SD]/_\_0T7_/4%#_SQ!1_\W
+M.3S_/D!#_S T//\G+3?_(20I_R4G)?^BG*#_YM'/__OBW?_XW]K_^N'<__WF
+MX?_ZX][_^.'<__GBW?_TV-K_;6MM_R8F*?\X-3#_DY*1_R4C)?\B("+_(1\A
+M_U-26?\N+T#_)"<X_S8Z//]/34__XM'I_S0S.O]57&C_(!\L_QP;(O\B("G_
+M(1\H_R$?*/\R+C?_&QXC_]#!Q?_9R-+_7%ML_TQ/8/\_/TC_*R,F_RL@'_\L
+M)"+_*2$?_RLC(?\L)B3_+RDG_RDC(?\F(![_)!P:_RTE(_\N)B3_+24C_R\H
+M*/\T+R__(QX>_RTH*/\U,##_'AD9_RTH*/\D'Q__)"(D_XM^>?]%-SC_)C15
+M_Q(>._\8&2K_%A,?_Q85(O\>'2K_&AHE_QX>*?\8&"/_%!,:_S(Q./\?'B7_
+M(!\F_QX<'_\>'!__'!H=_R<E*/\I)RK_$Q$4_R$?(O\F)"?_'!LA_V)64O_#
+MLJS_8VEK_QXY.O\*)"?_'D=,_V2%D?]:?8K_HZZI_S8Q,_\M*"[_MKJR_\RZ
+MO/]'/$'_*2<I_\J^M?]$2EK_U<K)_SA%5O^+B8S_R,/#_T),6_\W0U+_*C8U
+M_TQ*1?^#A93_)"Y"_R8R,?]!/SC_3TY;_QLE,_\P-#;_*BXP_R\S-?\G*RW_
+M*2XT_R\Q-/\K+3#_+3$Y_RHP.O\A)"G_)"@F_S4U./_(NKO_QK2V_\&OL?^]
+MJZW_K9^@_Z&5EO^AE9;_F8V._Y-X?O]*14G_-C0V_U-.2/]N;FO_&AD8_R8E
+M)/]B86#_6%=>_R at I.O\D)SC_-#@Z_TA&2/_AS^/_,2PP_R(C*O\R+C;_)2 B
+M_Q0/$_\>&1W_,2PP_T,_1_\H*B__S+_$_]G(TO]85FG_1DE;_T-#3O\I(27_
+M*A\>_S0I*/\N(R+_*R ?_S H)/\L)R+_-"\J_R at C'O\R*B;_,B<F_R\D(_\N
+M(R+_*2(B_RHE)?\<%Q?_%Q(2_QT8&O\=&!K_'1 at 8_RLF)O\=%AW_A7IS_W9I
+M7/]'.$/_,BDZ_R4>*/\>&B/_&ADF_QD:)?\8&"'_*2DR_S$Q.O\K+"W_(R(A
+M_R$?(?\?'1__'QT at _QP:'?\7%1C_-30Z_Q<4&O\5%1C_'R A_R\I+?\C)"7_
+M4TI$_X5V</]M='K_5Y&G_TB(I_\P<(__.F:(_RHP1O]P6EO_-R(G_R0A)_^V
+MKZG_T+FZ_V%46?\O+2__S<&X_VIK=O_:T='_04M:_U!.4?_ OL#_1E!?_SY*
+M6?\7(R+_1D1!_X2"DO\D+3[_+#4U_R4B'?]+2E?_+#5#_SD\0?\Z/D#_14E+
+M_S,Y._\W/$'_,3,V_Q$3%O\Q-CS_)"<R_R,F*_\;("#_,#,T_[.NKO_5SL[_
+MV<_2_]?0T/_ at T-3_Y-+6_\&PMO^RHZ?_[]78_][3V/]K9FC_=&]I_RPN*_\G
+M)B7_ at 7]Z_VMI9/]&14O_*2LZ_QH=+O\Q-3?_149'_][1X_\H("K_(!L?_R$=
+M)?\B'2/_&Q8:_Q\:'O\H(R?_&Q@>_QD5%_^EGI[_VL_6_T-#3O\\/$O_0#I(
+M_S$J,?\G("#_(QP<_RLD)/\F'Q__+",C_S4L+/\X+R__,"<G_S(K*_\K)"3_
+M)R @_RLD)/\K(R?_%1 4_Q45&/\;%1G_'!4<_QP7&_\@&!O_*R(B_R,A(_]R
+M;6?_1STO_Z&3D/_2Q=#_8EMG_QD:)?\>(2S_,S(X_R4C)O\@'B'_'AP?_R,@
+M'_\A'AW_(1P at _R$<(O\F'R;_'AD=_QP;(O\O,SO_FI&8_YRAH?\V3T[_15]H
+M_SHW1/^3 at XO_N:JN_V%:8?_DW>?_?H&2_T%)7?\N*C+_FY::_\_ NO\W+#'_
+M*R at N_[:II/^]KJ__?7)Y_R at C(__%N;#_FY&-_\C'SO],3E7_%A06_W^!B/]#
+M35O_.D95_QTL*/],3E/_CXJ<_R at R1O\?(R7_2DM,_QX@*/\H*SS_.SU$_S0X
+M.O]"2$K_.T%#_RXQ-O\N,3;_*2PQ_RHM,O\F*S'_*"LP_S0V.?\D)2;_PKBM
+M_[>CF__ at R<3_BWEO_Z"-A?^[JZC_8UM9_U-+0?_FU-#_XLW+_Y:+BO\U.CK_
+M,S@]_R$>'?^0BH+_>WEQ_R8F*?\E*#3_*RX__U-26/^%A8+_V,O=_RXF,/\Q
+M+##_'QLC_QL7'_\B'R7_'1H at _S M,_\Q,3K_%Q0:_S<P,/_CU-G_.SA$_S\_
+M3O\^.$;_,RPS_R8?'_\G("#_+28F_R8?'_\V+C'_,BHM_S8N,?\B&AW_'A<7
+M_RHC(_\I(B+_*2(B_R4@)/\>&R'_'!D?_QT:(/\H(RG_*R8J_RLC)O\M)"3_
+M)",B_V=E8/]F85O_*!T<_[*GKO]S;77_)B<N_QT<(_\B'2'_(!L?_QX9'?\A
+M'"#_(QXD_R(=(_\?&B#_&A0<_R4?(_\A'"#_)B4L_RLO-_^BF9G_IZFL_WR9
+MI?]<AY__0#M5_VE3:/^OGJG_:%YA_]S/SO^-CI7_.TM5_S="0_^.CY#_U<; 
+M_SXS./\G)"K_F).._Y^4D_^6BHW_'AD;_\:ZL_\Z,"S_O[[%_T%#2O]>7%[_
+M-#8]_SQ'4O\\2U'_'RD?_T5(0O^5E)O_*2\Y_UQ?9/\;("7_+C9 _Q at B,/\]
+M1DS_-#H\_T!"1?\9%QK_%QD>_RTP-?\I+#'_+C$V_R$F+/\B)2K_+C S_SDZ
+M._^^M*G_K9F1_^++QO^,>G#_F(A^_\"TK?]_<VS_J)6)_^?7U/_.OL+_13H_
+M_R\R-_]!1D;_0T$\_XV-@/]E9U[_*BDH_RTM./]W=(;_1C]'_[NQM/_AU.;_
+M,BHT_R4@)/\6$AK_&1@?_Q\>)?\='"/_(!\F_QX>*_\G*"__95Y>_]_0T?\W
+M-3[_,# __T0^3/]".T+_+28F_R4>'O\E'A[_(AL;_S J+O\9%!C_%1 4_Q<2
+M%O\:$A7_(AL;_RLD)/\K)"3_-C$Q_R4=(/]^<G7_85E<_R0?(_\A'"#_*R,F
+M_R\F)O\G)B7_76%?_WR ?O^:DY/_+R4H_Q\;'?\9&AO_'QH<_R :'O\B'"#_
+M'QD=_R,=(?\8$Q?_'!<;_QT:&?]44D__CXJ,_T]*3O\?'B7_/4%)_U=<6/_ 
+MPL7_T]'A_[RWR?\C'"W_24-1_]7.V/_JW=S_ZM;8_Z><J?]*0E;_/",T_W=N
+M=?_#M*[_2T!%_R$>)/]L:VK_,"LF_YN.C?\I(R?_N*VF_R@>&O]I:&__1$9-
+M_QL?)_\W0$[_1D]@_S]"3O\0#1K_"0 2_R(7+O\.!QG_*20P_QL;)/\Q-$#_
+M*"\]_R0F+?\9&1S_24E,_Q04%_\;'2+_*2PQ_R\R-_\N,3;_)"DO_R,F*_\F
+M*"O_/3X__[ZTJ?^SGY?_YL_*_Y.!=_^.>W/_P;*M_VE?6_]>4DG_[M_:_^33
+MT__7R,G_14!$_SDZ._\C(1[_85]:_RLP+O\I+2O_1TE1_S0T1_\T+SO_TL;/
+M_]3$V/]%/D;_(R F_R$?*/\W-T+_.3I%_U-47_\F)B__*"@S_Q\?*/](0$/_
+MWL_/_T,_2/]#0U+_/#E&_T [0?\D'!__*2 @_S(I*?\S*BK_&1,7_QH5&_\>
+M&1__'1@<_QL3%O\@&1G_)1X>_R at A(?\P*"3_D8B"_]?(P_]J7EK_)R(D_R$;
+M'_\H("/_,2 at H_R0B'_]>8V/_8F9H_XJ%A?]02$;_(!T<_Q\='_\C'!S_)1TA
+M_R$;'_\F("3_*B(E_T])1_^1CHG_G)>1_[2PJ/\V,3'_,"LO_W]^A?\T.$#_
+M9&9D_[NVNO_<UM[_U-31_[W#O_^KL;/_U-'7_^S;V__RW-3_N;6W_S]05O\5
+M)"3_=WAY_[JMJ/]=4U;_'AP?_QT<&_^0BHC_13DU_R,>(O^EF93_ at GEY_R\Q
+M.?\R-#S_+35%_SI%5O\(#R/_)1TX_VIDGO]13I#_.#AQ_Q@;2O\:$BW_+RD_
+M_Q01)/\7%R+_&!,7_QL8'O\A(23_J*BK_RLN,_\L+C/_-CD^_RPN-?\C*"[_
+M'B$F_R4G*O\S,3/_P+:L_[*?E__>QL/_DX%W_X5T;O^MFY__;&IL_X)[=?_D
+MU];_X\[,_["?G_^[LK+_)R4H_R\J+O\T,3#_("8H_U]F9O\L-#[_,C=)_S P
+M.__3R]7_U<GB_T=+3?\G,SS_%!XM_S P/?\>'RK_&ALF_R0=)_\8$1C_&1,;
+M_RXG+__DU=G_1T!*_ST]3/\V-D/_3TI0_RH>)_\I'"'_+2,?_R\C)/\:%!C_
+M(AP at _R(='_\J(B7_)R @_RDB(O\K)"3_*2(B_S0J)O]E9F#_X-/2_X%O<?\E
+M'2#_)1T at _RPE)?\J)"+_*R<E_U]?8O]97F/_'!<9_\B\N/\E'A[_(R F_R(<
+M(/\5#@[_&108_VAC9_\H'AK_KZ":_\&_M__$N+/_P+*O_S4T,_\C'R'_&Q<?
+M_S<Z1?]-4$K_R</'_^37XO_FU]C_95]5_VA at 7/_CU-C_\M_?__/=V__%O,/_
+M/$%-_R4D*_\^-C3_65-1_VQG9_\A'1__<6=C_RL@'_\E'R/_'ATD_Y.'B/\Q
+M,#?_04Q=_SD]3/\\2$W_!@LG_V9CF/\Y-F;_&! L_QL,'O\>$!G_'Q8=_QL9
+M,_]75X#_4E)Y_P\/'O\E("#_+C0\_T-!0__;TM+_*BDP_RXM-/\V.$#_*RXY
+M_R8J,O\A)"G_*RDL_SHU-_^_L*K_UL.]_^W8UO^>BX/_A8!T_]C S_] 3E?_
+M+ATC_W9VA?_6Q\?_.# S_V]L<O\U-#O_-3 R_[NSK_^0EY#_041%_UI=:/\S
+M.$K_+RLT_Z2=J?_&P=G_,C4Z_QP@*/\3$A__&Q<9_Q,3%O\6%Q[_'!<C_QP7
+M'?\2#A;_*B0L_]#$Q_]33%;_0$!/_T%!3O]02U'_+"(E_RL?(O],04#_-RPK
+M_UA-3/^7CXW_-"TM_R0?(?\F'Q__*R0D_RLD)/\H(2'_,B at D_XF*A/_AU-/_
+MBWE[_R,;'O\E'2#_*R0D_RTG)?\M*2?_8&!C_U-87?\G(B3_FY&-_R0='?\:
+M%QW_(QXB_YB,A_^FFYK_5U!0_RP@'/^^LJ[_ at 8.!_Z^EJ/_:S]3_,C R_R4A
+M(_\?&R/_-CE$_SX_.?_)O[O_XM/4_^/6T?]13T?_/#@V_\S!QO_GV-S_]=_=
+M_\S#RO\X/4G_&QHA_S(J*/\W,2__0CT]_QD5%_\A&AK_)1X>_R <'O\@'2/_
+M>G)P_SP^1?]'5V?_.T53_SE"0O\;%C+_:5:$_Q4,'?\3%!O_$@X6_Q80&/\4
+M$1[_%Q8=_PH&%O\Q*T'_&14>_R4<'/\_0TO_24=)_];1T?\@'R7_+BTS_S$S
+M.O\N,CK_,#4[_R4H+?\P+C'_(1P>_XM[<O_!K:7_Z=30_YB%>_]Q;67_R[;,
+M_Q<I-_\F'BC_'R4U_\>_O?\M*"K_1#]%_RTI,?\X,3'_GY6+_X6$??^-BHG_
+M9F9O_RHM/O]%04G_Q+W'_]K#X/\E'"/_&!$8_R$0&O\A&B'_&QLD_R(H,/\M
+M,#O_)20J_RXM-/]545G_ULS/_T([1?\[.TK_4U- at _U-.5/\P*";_(148_RP@
+M(?].13__O[.L_]S/RO]&/CS_*"(F_R,;'O\I(B+_)1X>_RDB(O\S*27_B8J$
+M_][1T/^@CI#_*" C_R<?(O\H(2'_+"8D_S L*O]?7V+_76)G_R4@(O^PHIG_
+M95A3_Z:>G/^FG)C_P[>P_YZ6E/\H(R7_*B(E_U=(2/^4DH__WM+5_^+3V/\O
+M+BW_)" B_R(>)O\W.D7_/4!!_[RTLO_:R\O_XM[<_U5:5/\U,S7_8%E at _Z.8
+MG?_MU]C_SL7,_SQ!3?\<&R+_8EI8_U5/3?\:%17_(Q\A_R$?(?\<&AS_'QT?
+M_Q\='_]834S_2$E0_T939/]!2%;_)QTP_STK+?^KDH'_=EI7_UH]//^8=6[_
+MA6%7_X1B7_\K&!+_EH%__Z>1D_\4!@/_-RDF_T)$3/])2DO_U=+1_QP>(?\L
+M+2[_/T)'_S U._\L,3?_)"<L_QP:'?\<%QG_B'9L_\&KHO_JU,S_E8!T_W=O
+M9?_5O=+_&"HX_RPE+_\C,#W_I*">_RPH*O\\-SW_+R at R_T X-O^@E(7_ at GES
+M_XE]?O]?6V/_)BDU_SLZ0/^\M[W_S+S8_RDJ,?\4'R;_&QTD_R(B+?\E*C;_
+M/$1._Q,:(/\E)"K_*RHP_SDV//\?&!C_/3E"_SL[2O]*2EG_4U!6_RLC(?\X
+M+"W_*AXA_RLD'O^7BX+_V,S%_UE.3?\J)BC_(!L=_RHC(_\J(R/_*"$A_S F
+M(O^)BH3_WM'0_Z22E/\H("/_)Q\B_RHC(_\K)2/_+RDG_VUN;_]@96K_)B$C
+M_Z21A?_ K:7_Q[NT_\6YL/_(N[;_JJ.C_R at E*_\E(R;_1C at U_[>PJO_?T-#_
+MZMO<_S0Q,/\D("+_'AHB_S<Z1?]&3E+_9EYA_]/'RO][>7O_>GU^_W5[??]H
+M96O_KJ.H__/AWO_8S=3_24Y:_QD:(?\H(B#_)A\?_R8A(?\A'1__'!P?_R >
+M(?\A'R'_'AL:_T8W-_]!.T/_1T]?_U9;9_],3E7_#0,,_RL4'_\R(2?_%0H7
+M_R$4'_\G%Q__&0T6_R$:(O\8"QC_)A at G_R :(O^6BHO_1$)+_T=(2?_1T,__
+M+"XQ_TQ.3/\9'!W_("4J_SQ!1_\I+C/_2$-'_][6V?^)>&O_P:NB_^G3R_^8
+MA';_?W1I_]>[S_\<+CC_+"4L_R8Q//^1D(__*28E_S8P-/\I(RO_1S\[_YN+
+M>_^1AG__D8.$_TE$2O\B)3#_(R,F_ZZLKO_=SN;_)RLS_SM)6/\G*S/_-C8S
+M_T) /?\_.SG_&Q,1_RPM)_\M,R__66%=_]72T?\X.4#_+3 \_SHW1/]I9&C_
+M+"(E_RXD)_\F'Q__)B(@_ST\-?_7R\?_;%M;_R8?'_\A'AW_)R @_R\F)O\M
+M(2+_-2 at G_XZ.B__>T=#_LZ*B_RDD)/\D'Q__*",C_RHE)?\X,"[_96=E_V)I
+M:?\B'2'_F8R!_\.RLO^#?';_:6=B_^76VO^MH:+_*28E_R4@(/\X,RW_O;2N
+M_]3(Q/_DW-K_.#4T_R0@(O\@'"3_,C5 _SY%1?_"L[/_S+^^_^_DX_]Q:G;_
+M1U-<_UY=8_^YJZS_Z=K4_^70U?]%15+_&!HA_R0?(?\F(B3_(Q\A_R <'O\@
+M'![_(1T?_R(>(/\>&AS_-2<H_TM&3/]58&O_45ME_T=-7?\H*#?_8%UO_W)N
+ME/\C(CG_$! ?_PT-&O\*"1;_&QDZ_RPG4?\Y-%;_'R$H_Z>EHO]955[_0D5&
+M_];3TO\K+3#_.#HX_S4W-/\R-3K_,#$\_R at P-/_&NKO_Y]C=_Y5_=O_)L*O_
+MY<S+_YZ&=_][<6?_U\#3_R S/_\A&R/_+S4__V)F:/\E)R7_,2PN_R$@)_].
+M1D3_B'EL_XA]=O^1 at H/_2T1+_R,E+/\U-3C_H9R<_\3#T/\>'!G_(QL?_Q<;
+M&?\?)1?_/D,W_SE"-?\M.BW_(RX;_R)"+?]ICX#_QL7$_TE$2O] /D?_-2\W
+M_T Y.?\N(B/_)1L>_RLD)/\E(1__("0<_\C#OO^#<'#_,B0E_R0='?\G("#_
+M,RHJ_RTA(O\P(R+_AX>$_]K-S/^_KJ[_*20D_R0?'_\M*"C_,"LK_S8N+/]E
+M9V7_7V9F_R,>(O]V;&'_:EU<_XR*@_^,CXG_Y]C<_["DI?\O+"O_)B$A_RLJ
+M(_^MIJ#_R+JW_^O at W_\Q+BW_(Q\A_QL7'_\P,S[_/$-#_[^NKO^_L+#_\N7D
+M_VYH=O])5F'_5E=>_[*FJ?_AU<[_Y]+7_S\_3/\@(BG_)2 B_R,?(?\C'R'_
+M(!P>_R <'O\A'1__(AX at _QX:'/\K(R'_24M._T926_]:96S_1$]:_SE#3O\J
+M,3__,C%._S G8_\T*F__/S9\_UM1FO\8%4G_95^+_T$]7?\Y04O_E923_UA4
+M7?],3U#_V=;5_S R-_\P,3+_24M)_S R.?\Q,CW_)R\S_\.WN/_DU=K_?G%L
+M_\"KJ?_<R<G_AG)N_W=N9__4O=#_%"<S_R4?)_\W0$[_.3]'_R$C)O\P+#3_
+M(20O_TQ(2O]X:F'_=FMD_Y*!@?]&/43_(R0K_SX\/_^VJZK_O\3*_SY(-_\V
+M0C/_+T P_TQC4/\Y5$/_#"D7_SY?3?\S3CW_/%I*_YBMH?_9T<W_2T=/_S0U
+M0/\M*C;_0SY _R8='?\E&Q[_)R @_R<C(?].3TC_T<?#_YR)B?\F'1W_(AT=
+M_R<@(/\L(R/_+B(C_S(E)/][>WC_U\K)_\BWM_\H(R/_)2 @_R<B(O\I)"3_
+M+B8D_ST_/?]=9&3_'QH>_WMP9?]H65G_LJNE_YN9E/_DU=G_MZNL_S,P+_\G
+M(B+_(2(;_ZZIH_^MGYS_Y=C7_S,N+O\E(2/_)" H_SD\1_\Y/C[_P*VM_Z>6
+MEO_NW]__8UUK_SA)5O]#1T__JZ"E_^'4S__FT=;_/3U*_R B*?\H(R7_)B(D
+M_R4A(_\B'B#_(AX at _R(>(/\B'B#_(AX at _R,;%_]14$__/$5,_XB*D?^+B8O_
+M24]1_SM'3/] 2%+_>'I__T9'3O\H+CC_'"4S_TQ55?]Y>GO_Q\')_SD^2O^+
+MB8O_3TM4_T)%1O_)QL7_+S W_RDI+/^8F9K_/#Y&_S<X0_\O-SO_OK*S_^+3
+MV/]W<FS_R;ZW_]_2S?^9CXO_FI6/_]['VO\U2%3_)B H_SI"3/\N,SC_:&EJ
+M_T(]0?\U-#O_4DQ*_[6IHO]=6%C_D(2'_SHU._\A*"[_0T5(_ZFDI/_&R,__
+M+D4R_TQO5_]UCWK_*#\L_R L*_\Q.C/_-T0Y_R at T/?\_0E3_RL+%_^?8TO\[
+M.T3_/$!/_TM)6?]&0DK_+B0G_RTC)O\G'A[_,"HH_V-<5O_>T,W_J9B8_R8C
+M(O\A'AW_)R @_RXE)?\N(B/_,20C_T1%/__7R,C_S+N[_R at C(_\E("#_)!\?
+M_R8A(?\N)R?_*RDK_SH_/_\?&A[_;U]6_W]O;/^1B(+_I)^:_^C9VO^ZKJ__
+M+2HI_R,>'O\D)1[_KZFA_Y.'@__BU=3_/CDY_R,?(?\C(";_1$=2_T1(2O^\
+MK*G_DX*"_^?8V/^#?8O_-TA6_U]B;?^<E)C_VL[)_^;4UO\V-T+_+C W_R at D
+M)O\G(R7_(AX at _R,?(?\B'B#_(AX at _R$='_\C'R'_*!T<_T=#1?]14%;_P+G 
+M_^K9X_]=5F?_/4)4_T))6_]]=H#_M:FR_^?=[/]44F7_86%P_XR"B__ at T]C_
+M/#]$_XZ0CO]03E?_0D5&_\[+RO\H*3#_*RHP_XF(CO\S-#__+C X_RXV.O_$
+MN+G_YM;:_]'!N/_3N:S_U[>O_\RPK/^9C8;_V<37_R,Q0/\E(2G_)BLQ_WAX
+M=?^=F)+_D(N+_X![>_^(@7O_9V!:_UQ=9/^5D)3_*"HM_S=#1_\[1$3_9FMK
+M_][;S_\M*BG_,"XW_S@^.O]_?HO_9D.*_Z-[K_]%+$O_JHJ]_WM8F/_3PLS_
+M[-K0_TI&3_\^/DO_4U)?_ST\0_\K(R'_+R,D_S D)?\^-##_<F9?_]?(P_^L
+MG9W_*R,F_R ;'?\G'R+_,RHJ_R\D(_\Q)2#_B8> _]W+R/_2P<'_)B$A_R0?
+M'_\H(R/_*R8F_R at C)_\I)"C_)B$E_Q\:'O]S8V#_C(![_WIW<O^GH9__X=/0
+M_\J_OO\O+"O_)2 B_RPH)O_$MJ?_=6UI_^?>WO\Z-37_)R,E_R0B)?]256#_
+M1$=3_[JKI?]U9V3_Z=O<_Y6)DO] 2EC_7%]J_Z.;GO^?F)C_Z-O6_T-#3O\N
+M,3S_*B8H_R8B)/\C'R'_)" B_R(>(/\B'B#_(!P>_R(>(/\7$!C_*2XT_\K%
+MR_^\M+C_W-/3_U%59/\A*3/_0$=3_VQN=?^7CH[_W,W2_TY17?]L;G7_=VUP
+M_]W.T_]$1U+_AHN+_V%?:/\^04+_QL'!_R<F+?\K*C'_DY&:_S<X0_\W.C__
+M)BPV_[BKJO_-N;O_MZ*6_\2HFO^[GI+_P:67_YN*>O_4R-?_+BY!_R$C*_\Y
+M-3?_C7QV_]K#M_]!-3;_)R,E_V!>6?^$A7__B(B+_Y2/D_]P<7+_2E-3_U5:
+M6O^'B8S_Z+O?_XA.H/^F9-'_QYS2_UQ(4?\U&3/_1S [_S4N*/^)?7G_FHF4
+M_]3(Q/_MVM3_4T]7_S<W1/]44V#_6UIA_RHB(/\K'R#_+R,D_STS+_^#=W#_
+MU,7 _\2UM?\K(R;_(!L=_RDA)/\T*RO_,28E_SPP*_^*B('_W,K'_]3#P_\N
+M*2G_)B$A_R,>'O\J)27_)R(F_QH5&?\A'"#_'!<;_U1$0?^6BH7_8V!;_ZFC
+MH?_;S<K_U<K)_S$N+?\H(R7_+2DG_\6WJ/]>5E+_Y-O;_SPW-_\G(R7_'1L>
+M_T!#3O\[/DK_L*&;_U]13O_DUM?_K:&J_T)-6/]35U__7E=7_X%Z>O_DU]+_
+M3T]:_RTP._\D("+_)2$C_R,?(?\D("+_(AX at _R,?(?\F(B3_(1T?_RLC(?\S
+M,2S_74Y._ZB>FO_7S\W_3U-B_RLS/?] 1U/_7%YE_X^&AO_FU]S_24Q8_U)4
+M6_]C65S_VLO0_SH]2/]^@X/_85]H_TE,3?_>V=G_+RXU_R\N-?^4DIO_/#U(
+M_SU 1?\N-#[_MJFH_\"LKO^JD8K_HHA]_ZJ4C/^YGY3_CG]R_\K#S_\P,S[_
+M(R8G_XU_@/^3 at 7W_WLO%_TH\/?\<%A[_(1\B_UQ>6_]K;VW_='%P_W]^??]W
+M?'K_9VMI_UU at 8?_FQ=;_;DAI_]RUV/_[XMW_54A-_RDB+/\I)C+_-35 _WIS
+M9_^5C7W_R[^Z_^30S/]'1$K_,S- _S at W1/]655S_*2$?_S(F)_\S)RC_-"HF
+M_Y&%?O_9RL7_Q;:V_RLC)O\B'1__)1T at _RXE)?\K(!__-BHE_X* >?_2P+W_
+MTL'!_RDD)/\E("#_)B$A_R(='?\<%QO_%A$5_Q<2%O\;%AK_134R_Z>;EO].
+M2T;_IJ">_]W/S/_7S,O_,"TL_R8A(_\R+BS_Q+:G_U5-2?_BV=G_.C4U_R8B
+M)/\:&!O_(R8Q_T)%4?^FEY'_4D1!_^'3U/^WK+/_/$A1_U9;8?^9DY'_D(F)
+M_^G<U_]&1E'_.#M&_R<C)?\G(R7_)2$C_R8B)/\M*2O_KJJL_R,?(?\H)";_
+M;6!5_X-[:_^>AWO_<&-8_]K2T/]35V;_+34__T5,6/]:7&/_?71T_^G:W_]%
+M2%3_3U%8_V!66?_>S]3_1$=2_XZ3D_]T<GO_14A)_\G$Q/\T,SK_+"LR_YJ8
+MH?\S-#__.#M _R0J-/^KGIW_KIJ<_XY]=_]P9%W_0CLU_Y*$>_][:&#_R[_(
+M_T$_0?\X,R[_9EM:_SX\.?\J+"K_-C0V_R =(_\V-SC_='AV_VIM<O^9EI7_
+M:&9C_W!U<?]45E/_5%5/_^C;UO];44?_Y-?2_^+.U_]>4E/_-#$E_RPH)O]$
+M.T+_>FYE_XZ%=__)O+O_W<G%_T$_0?\Q,3[_1$11_TA'3O\P*BC_*AX?_S,G
+M*/\W+BC_F(V&_]C)Q/_3PL+_*B(E_R,>(/\F'B'_+20D_RXF)/\V+2?_;FEC
+M_]K(Q?_/OK[_*R8F_R4@(/\D'Q__*",C_Q0/$_\4$!+_%A$5_Q at 3%_\P)B+_
+MLJ6 at _T-!.O^ZL*S_T\?#_]S1T/\T+R__)R(D_S at T,O^_LZ3_23\[_^79VO]"
+M/3__)B(D_R ;'_\C)C'_0D-._ZB<E_\T*"3_WM'0_\:YOO\\2%'_5EUC_Y60
+MB_^;E)3_Z][9_U!06_\]0$O_)B$C_R<C)?\I)2?_5E)4_V=B9/]33U'_(QXB
+M_RLG*?\J)B3_1T=$_V=87/^$@8?_V-#3_UA@:O]-5EW_2$];_U5:8/]T:VO_
+MYMG>_T-&4O]46V'_1CY!_]O/TO] 1E#_<'-T_VEH;_]'2DO_GIJ<_S0V._\M
+M+#+_IJ*K_S8Y1/\O,C?_+C0^_Y^2D?^QGZ'_:U]:_UQ74?]H:6/_;FIB_WUK
+M9__#LKC_ULS(_]&_O/]V85__JJ&A_R0C*?\_-SO_)B0A_XN.B/]G;&K_455=
+M_W%P;_]^?'?_5UE6_U!12_]344G_X]/*_V=84__ATM+_Y=#+_VQ at 8?\Q*C'_
+M.3,[_SPX0?^+ at GO_?G5N_[ZRK?_ at S,3_349-_SL]3/])2UK_.C8__S H*_\L
+M)27_+RHE_R\J)/^/BX/_S<&Z_]+ O?\N)27_)!\?_RDB(O\J(R/_*"$A_R<D
+M'_]D6U7_PK.N_\S N_\M)2/_)A\?_R0='?\J(R/_(QDC_R,<'/\@&2'_*"$K
+M_Q\D(/^]IJ'_>WMP_\"KI__&P;S_V]/1_S0M+?\D'R'_/34S_ZJDE/]5247_
+MY]C9_TI$2/\I(R?_(AP at _R >)_\M+S3_B(6$_T Z,O_BU-'_R;C _TM48O]$
+M257_E8^-_Z.<G/_FU-#_7%5A_SH[1O\M)RO_,"HN_ZJEI_\[-C;_AWM\_YN2
+MF?\A'27_(AP at _R,=&_\K+2K_-RXN_R ;'__0Q<K_5V%E_TM77O])4%S_4UM?
+M_VUH:/_>T]C_0$-/_S]+5/\T,C3_S<;&_SQ!3?]*1T;_8&%B_T-(3?]02T__
+M+3$S_RTM,/^JI*S_045-_SM 1?\I,3O_6E)0_]#$Q?^*C8[_?W^"_Y^=IO]6
+M76/_+C<^_SL]0O\N)R?_,2 at H_[2GHO\J)2?_(" I_TQ'2?^'A'G_I:29_V=L
+M:/]L='C_8W9^_X:(C?^"@G__1T5"_UI54/_8R+[_<6)<_]W/S/_FTLK_=VII
+M_S4M,?\_.#__0SU%_V=@6O]N9V'_K**>_][+Q?]M9F[_0T15_U97:/]'0D[_
+M+24H_RTF)O\K)B'_-C$K_VQH8/_)O;;_U,*__RLB(O\D'Q__)A\?_RTF)O\J
+M(R/_+2<E_TI"/O\J(!S_R[^[_RLB(O\D'1W_(AL;_RLD)/\E%Q__(QL7_R(;
+M(O\N)BG_("$:_\ZRKO]S<F?_PZFF_[BSKO_=U=/_-B\O_R,>(/\_-S7_E8]_
+M_W!D8/_:R\S_95]C_RDC)_\C'2'_(!XG_ST_1/]%0D'_M:^G_]C*Q__1P,C_
+M04I8_U199?^0BHC_H)F9_^/1S?]W<'S_1D=2_R at C)?\J)2?_K*6E_Y2,BO^+
+M?GW_G)&6_R4@)O\J(B7_?W1S_UI<6O_$O;W_MK.R_[RNMO]-55__0DQ:_TE.
+M8/]'3E3_<6QL_^+7W/],3UO_.45._X2"A/_:T]/_.3Y*_ST^/_]765S_/3]&
+M_R at G+?\4&!K_)"0G_[NUO?]"1D[_049+_S$Y0_^1B8?_W]/4_U=I=_\W36/_
+M)SQ5_S!%7?](7'#_%R$O_QT?(O\F)23_4$5*_S G+O\?&A[_I9^7_["EGO^.
+MAX'_;6]M_VYV>O]6:(S_2%%B_U!:2_],3$G_4TY._]?(N_]Z;&/_V<W&_^72
+MR/^ <F__*B$A_T$Y/?])0DG_:F-=_V=B7?]/1T7_T\2__V%<8/\Y/$?_8V9R
+M_T- 1O\N)R?_*R0D_RXI)/\H(QW_<V]G_[VQJO_6Q,'_*R(B_R$<'/\C'!S_
+M*B,C_RLD)/\S*BK_(1P<_R0?'__,P,'_+20D_R4>'O\G("#_*"$A_R@='/\B
+M(1K_%A8C_RDE+?\<'1?_RJVI_W-P9?_,L*W_L*BD_]_7U?\V+R__)!\A_S H
+M)O]X<F+_A'AT_][/T/]L9FK_*"(F_R8@)/\=&R3_/D!%_SPY./^UKZ?_X=/0
+M_]7$S/],56/_1$E5_X1^?/^*@X/_UL3 _XJ#C_]'2%/_+"@J_R\H*/^OI*/_
+MJYV:_X=W=/^BEI?_+RDM_RLC(?^JDI7_44E,_\N_P/^_L+#_N*>M_UAA9_]+
+M5V#_/T94_T=/4_]234W_W-'6_U-68O]#3UC_A(*$_]3-S?\]0D[_'2,E_R\Q
+M-O]/3UC_)B<N_SI 0O\H*"O_OKC _TI.5O\R-SS_,3E#_WUU<__9S<[_-$-0
+M_R<[3_]!5&S_,3]6_V%B<_\C'";_+B4E_R at A(?]@6%S_,BHN_\K"OO^BEY#_
+MBHB#_T]34?\]0D+_<WM]_SI+6?]A8&;_65Q0_U!.1_]C6U'_TL:W_VE=5/_7
+MR,+_XL_%_X5W=/\L(R/_,RLN_T4_0_]H8U[_,2TK_R4>'O_6R<3_AH."_UI@
+M8O^/EYO_8F-D_RLF)O\N)R?_+24A_RLF(?]A757_HY>0_]3"O_\I("#_(AT=
+M_R0='?\K)"3_+R at H_RXE)?\B'AS_3TU(_\O O_\M)"3_(QP<_RLD)/\M)B;_
+M(14>_U588_\@)%'_-S!?_QX<'__'JZ?_<FI at _\>MJO^4CXK_W-'0_S at O+_\D
+M'R'_*B(@_UU62/]V:F7_V\S-_X5]@/\H(B;_(!H>_QD9(O\^/T;_.3 at W_[6O
+MI__3QL'_V,?-_T--6/]%2U7_?G9T_UE45/_(N;3_B8.+_T]27?\E(R7_+"<G
+M_ZF=F?^SI)__FHN&_ZN=GO\D'!__+28F_S$F*_]"24G_GJ*@_ZVKIO]B6EC_
+M3%A7_T515O]!2%3_3%18_T(_/O_EV=S_1DE5_T),5O]K:6O_RL/#_S@]2?\J
+M,3?_$1(9_TA(4?])2U+_04=)_RHJ+?^WL;G_/$!(_RPQ-O\P-C[_>W5S_]K.
+MS_\C)2K_'"$G_U-68O\I)##_(ATA_R(@'?\D(AW_(B<E_UI96/]M96/_W=',
+M_[*HI/],4U/_,CX]_XF0D/]>86;_<7AL_Y*(?O^2AH'_?G-L_ZZBD__,Q[O_
+M7%%*_Z:2CO_AR<#_F8B(_S$I+/\X-3O_1#U%_R8A(?\@'Q[_+BPN_[^VL/^(
+M at X/_/$)$_V9R=O]O<7C_+RHJ_S$F)?\K(!__)" >_UM94O]".3/_T,*__S H
+M)O\B&QO_)1X>_S I*?\H(2'_*B,C_R\I(?^0B7O_R;VX_RLD)/\B'1W_*",C
+M_RDD)/\K(BG_6&)?_R I6/\\/&?_'!X<_[VHH_]B4DG_MZJE_W1Q;/_=S,S_
+M/C4U_R,A(_\H("/_/C at V_SLT+O]G6US_E(N+_RLD)/\>&1__&AHC_SQ 2/\\
+M.CS_IYN6_\>YL/_;R<W_2%!:_TE07O]/1TO_(B$G_U!%/O^!>W__0D)/_QT;
+M'O\G'2#_IIB5_]'%OO^JFI?_MJ>G_S$H*/\G(R7_-CP^_TQ04O]85%;_IIZ<
+M_S4Z-O\B*RO_2%%8_T9/7?]045S_AXF,_^/4V/]24&#_-T%+_U]@8?^^N[K_
+M-CE%_R at K-_\_04G_1DA/_T5(3?](3E#_*2DL_ZVIL?])25+_,3@^_S$U-_]+
+M24;_R\+"_R4@(O\B&AW_N:VP_R at C)?\@'2/_'!LA_Q86&?\E*"G_LZRF_]C+
+MQO_BUM+_N[>U_T)'1_]P=G+_4EA4_SD^0__CU]/_[]S4_^S9T?_SX-C_^^??
+M_\2[M?]%.S?_>FQI_]2^M?^:BH?_+28F_S\]0/\V+S;_'!T>_SP[.O^4D([_
+ML:6 at _W!H;/\G)S3_0DA8_V9D=/\E'2#_+B,B_RTB(?\E(1__)2,>_R 8%/_/
+MPL'_+28F_Q\8&/\B&QO_*2(B_R\H*/\M)B;_,"HB_Y.,?O_,P+O_*R0D_R(=
+M'?\E("#_+2 at H_RLB(O]?9%[_7F5S_UI:9?\;&13_PJVH_T<Y,/^FG9?_85]:
+M_][-S?]$.SO_(B B_QX9'?\N*RK_,BXL_W=O<O^BF9G_+28F_R$<(O\>'B?_
+M04=1_T='2O^;D8W_M:FB_]K(RO]26F3_1$Q at _Q\>*_\6&R#_4TQ&_V5?9_\I
+M)##_*B(F_S at I+?^LEY7_V<C"_Y>%@O]Y:FK_*R(B_R<C)?\O,C?_3E%6_S<W
+M.O^&@8/_'!\@_P\5%_]+45G_0$=3_T=(4_^)BX[_V\S0_T5#4_\W04O_(B,D
+M_S at U-/\L+SO_/4!,_T)$3/],3E7_2TY3_T)(2O\E)2C_L:VU_T%!2O\O,SO_
+M-CD^_TE+2?^0BXW_(1T?_R\H*/\X+R__0CT]_TA&2/]J967_D8F'_ZF=F?_K
+MW-;_U<C#_UI03/]S;6O_:&=F_T]44O\^1D/_3E)0_\>RK?_XW=;_^=[7__[C
+MW/_]Y-W_P+*S_RLC)O\A&QG_K9J0_Z>8D_\P*BC_04 __S8Q,_]%2TW_1$A&
+M_Y&(@O^:C(G_CH.(_RPJ.O]$15S_<6U]_RLC)O\M(B'_,"4D_QX:&/\>'!?_
+M&A02_\B_O_\G(B3_(AT=_R$:&O\G("#_*R0D_RXG)_\M)Q__AX!R_\_#OO\H
+M(2'_(!L;_R8A(?\F(2'_)B0A_UQ at 7O]G;&K_9&-B_R(>'/^]J*/_-RTC_YV8
+MDO\].S;_WLW-_V%86/\C(2/_*",E_S8S+O^ >W7_3$1 _ZJBH/\J(R/_(!LA
+M_QD9(O\]1%#_/3Y%_X^'A?]934G_X,[2_T1,5O] 2%C_+BDO_TI&2/^!>'+_
+M(2$L_S4R/O\H(R7_,2<J_Z&1CO_ L:O_J):3_[6FIO\L(R/_)B(D_S<Y0/].
+M4%?_&!H?_TY.4?\>("7_0$-(_TQ06/\[04O_3E!8_VIL;__4Q<G_.SE)_RDS
+M/?]'2$G_;VQK_R4H-/\S-D+_/D!(_UQ>9?]+3E/_-#H\_QP<'_^>FJ+_1D9/
+M_RPO._\N,#?_,30U_TM*4/^KI*3_V\[-_^G8V/_KWMW_XM31_[^MJ?^5A'[_
+M=V1<_^?2S?_MV]?_T\O'_];+RO_3S\W_/T)#_V]Q;__JV];_^=[7___DW?__
+MY=[__^7>___GX/^BD9?_(QL?_QD4%/^,>V[_I)2+_R4@(/\^.SK_1#\__S=!
+M1?]$247_CH-\_X^!?O^IH*#_/SQ(_SY 5?]84F#_+B<G_RTA(O\M(B'_*B4@
+M_QP:%?]G8EW_R[_ _RHE)_\A'![_'Q at 8_R<@(/\R*RO_+"4E_RPH(/^!?'#_
+MQ[NV_RDB(O\A'!S_(QX>_RXI*?\H*"7_8&)E_V5K9_]D9&?_(AT=_\*NIO\_
+M-"W_ at X)[_QL<%O_:R\S_<F9G_RDE)_\P*"3_4DI _WIQ8_]:3TC_KZ2C_S$L
+M+/\<%QW_+"HS_SE#3?\I*C'_?W9V_R8@'O_6R<[_2DU8_T5(4_^'?'O_7EA6
+M_R@@'O],3UK_.CQ$_QX:'/\K)BC_G9*1_[*GH/^\JJ;_O:ZN_RPC(_\F(B3_
+M-38]_U988/\@(BG_'!XC_XR-E/]*3%/_0$%(_V9G<O]&253_;G!S_Z^CI/]%
+M0U/_*# Z_VAJ;?\Z.#O_)2 at T_R8I-?] 0DK_5EA?_T-%2O\W/3__+2TP_VEE
+M;?\_0$?_,#$\_S4W/O]&2DS_+S$V_WYR;O^@BXG_T;FV_W5B7/_8RM+_6$Y8
+M_R at A*_\@&R'_Z=32_^'/R_]%/CC_3D9"_TU-2O]]?'O_W,_.__C9T___Z>'_
+M_^;?___GX/__YM___>3=_YR$C?\J&B+_03LY_WUL7O][:V'_)!\C_T$Y-_]"
+M.CC_+S@^_S(U-O^+ at GO_>6QG_[ZTL/\V-3S_3U)D_TQ(6/\K(R;_*R$D_R\C
+M)/\L(1K_*B0<_WAP9O_2Q\#_,B at K_R :'O\A&1S_+R8F_RXC(O\N)27_)R,A
+M_VMJ7_^XKZG_*"$A_R$<'O\C'A[_+"@F_RPC(_]B8V3_8FMK_V%D9?\@'AO_
+MNJF;_R4=&_]W>'G_&!<6_\2]O?^4AH?_*" >_R8>&O];44?_95Q._V)>5O^Y
+MK*O_*RHI_R8?)O\9%1[_.$%(_R<F+?\P*B[_)B at F_WMS<?\_.T3_-3M+_U15
+M7/]/453_14M3_TQ27/\Z.4#_'A@<_RLC)O^QIZ/_J)V6_[BDG/^WIZ3_+2(G
+M_R4?(_\P+S7_3D]6_R0F+O]L;WK_75YI_U9?9?]545G_-S5%_T!*6?\W/#S_
+M+2LF_S] 2_\G+S__+S9$_S$V0O\<'BW_&ATH_T1&3O\\/D/_-34X_SP_1/\E
+M)"K_*B<M_S]!1O\T.$#_-C at __S@Z/_\F)BG_H):'_\6MJO_GT,/_HY-__]/'
+MR/\^-S__)1\G_RDB(O_KT];_U</%_T,_0?]H:FW_4UUA_[6QK__XX=S__.7@
+M___GWO__Y=S__^;=___DW?__Z.'_MYV at _S$?(?])/3G_=F%5_W]M8_\B'2'_
+M-BXL_S\W-?\D)BW_)"(D_WUY<?]M85S_K*"<_S(N-O]66VW_24E8_S$L+O\H
+M'B'_,"0E_S,H(?\N*"#_;65;_\F^M_\S*2S_(!H>_R$9'/\P)R?_+R0C_R\F
+M)O\A'1O_7UY3_XZ%?_\G("#_(QX at _Q\:&O\J)B3_+20D_UE:6_]:8V/_:6QM
+M_QX=(_^QI9[_'Q<:_S<W.O]#0#O_O;6S_Z.7F/\J(B7_*2,A_U5,1?].1SO_
+M;6MD_\:YN/\N+2S_(!D at _RDE+O]03%S_+BXW_Q4=&?]$/SG_;&!<_S at S.?])
+M4%[_14Y<_T%&3/]"2%+_/D51_T!$3/\=&QW_)1X>_["DG_^<C(/_M*"8_[.C
+MH/\I'B/_)!XB_S8U._]55U[_*S$Y_UYF</]?8&O_769L_UE57?\O+3W_.$),
+M_SD]._^HJ:/_)"LQ_QXH/?\F+4'_'2(N_QTB)_\P-CC_.3Q!_QD;'O\:&AW_
+M-SH__RHI+_\B'R7_.CQ!_RLO-_\R-#O_+S$V_S<W.O^<DX+_QJVH_^C0P?^D
+MDWS_S+^^_SLT._\A'RC_)B$G_^70S/_/P;[_6EQ?_SD]/_^ >7G_\>#:__OB
+MU?__Y-G__^7A___CW___Y=[__^7<__WDW?^FD8W_+B(>_S\X,O]D447_=F9<
+M_QH5&?] .#;_,"@F_RDE+?\E(R7_:&A?_UM23/^XJJ?_,"HR_TY18_]&2%?_
+M,"LM_RLA)/\T*"G_,"4>_R at B&O^-A7O_QKNT_RXD)_\?&1W_'Q<:_RPC(_\M
+M(B'_+",C_R8B(/\Z.2[_7U90_R8?'_\?&AS_*",C_R <&O\M)"3_4U15_UYG
+M9_]B96;_(!TC_ZJ9D_\X*RK_6E12_W%K8_^TJJ;_IYN<_R8@)/\F(R+_65),
+M_T,],_]V=&__R[Z]_RDH)_\A&B'_(!PE_SD^2O] 0D7_ at H!X_YB.BO\@'2/_
+M6EQD_TA/6_]-5V7_045-_T-&4O],5F3_355?_R A(O\O*2?_J9V6_Z*, at _^L
+MEH[_HY.0_RD>(_\D'B+_*BPQ_TE05O]+5%O_2U5?_U!17/]<96O_M[.[_T$_
+M3_\I+SG_EY.;_R(?)?\K-4#_)BX^_QH>+?\A(";_A8" _TY24/\U.CC_&1L>
+M_SDX/O]!1$G_'ATC_S4R./\A(RC_*"PT_R4G+O\\/D/_/CY!_YF/>__!II__
+MY,NZ_YF&;O_3P[K_EHB)_S,M,?\G(B3_Y]+&_\:]M_]/5UO_86=I_^',T?_V
+MWMO__N?;___HW__[WMW_PZ6G__S at W/__Y][__^;?_[.DGO\F'AK_.30N_U=*
+M/?]Q8UK_&!(6_S(I*?\F'1W_*"$I_QT9&_]+3$7_44A!_\BVL_\H'B?_861U
+M_T5(5/\X,S/_)QT at _S,G*O\S*"'_*R4=_VYE7O^_M*W_,BDI_R$<'O\?%QK_
+M*R(B_S$F)?\J(2'_)R$?_RHH(/\_.#+_(!@;_R ;'?\G(B+_)2$?_RHC(_]/
+M4%'_769F_V%D9?\C'B+_IY2*_VY=5_]<4$?_<&9;_ZJ>F?^SI:;_)R(F_R0@
+M(O])1#__,BTG_W-Q;O_+P+__+"DH_R,=(?\A'27_(R\V_SH\.O]J75C_(QX@
+M_T1)5?]#2E;_04E3_T--6/]/4EW_2U!<_TM59/],5V+_(20E_RTH(_^GEX[_
+MNJ69_Z6/A_]$-C/_)QT at _R8@)/\N+C'_/$-)_T=05_]C9V__HJ*K_U1=9/\N
+M*S'_-31!_S4Z1O\:&"C_.3='_RHR0O\<("C_(" C_XR(AO]G6U;_BHZ&_X:+
+MA_]\?'__-SD^_TY15O\;'2+_*B<M_S8X/?\?(RO_+3(X_S,U.O\_/T+_AWIG
+M_[ZCF/_6NZK_ at W!8_\6MI/^ADHW_,2HJ_RHC(__IS<__Q[K'_S0Y4O\_/U3_
+M4$1=_UE39_]@7&S_:V=W_WMF<O^.=GO__.+?___HW?__ZN'_HY*2_R@<'?\X
+M,"[_2#XT_V9:5?\;$A+_*R,F_R,=(?\K)2G_(AX at _SX[-O]#-S#_RK6S_RXF
+M*O]-3EG_9V1P_S$I)?\O(R3_*APD_RLB'/\E'AC_;&-<_[.HH?\V+2W_'QH<
+M_QP4%_\M)"3_+2(A_RTD)/\E'A[_(1X9_RLJ(_\6$A3_'AD=_RDA)/\F'1W_
+M*"0B_S]!/_]@:6G_8V=I_R >(?^@C8'_8%!&_SDM)O]?54O_D(1__[6HI_\F
+M'B'_)AP?_S4P,O\N+"[_85U?_\>^OO\N)27_(QX at _R$?(O\;'2+_-C<^_TU1
+M6?\^1$[_+C(Z_TE/6?]"257_14Y<_T!*5?]&4%O_/DA3_TE37O\A(RC_*2$=
+M_ZB8CO^XI9O_:EI7_X5X=_\B&QO_)"(D_R at C(_\T.3__0DA2_["GI_^EH*#_
+M24Y:_TU+3?\X.D+_(2LZ_SA#5/\H,4+_'1TH_RDD*/^AG9O_6%-'_Z><D?^)
+MB7[_75M8_]?1V?\Y/$'_2TY3_R$C*/\M+S3_+2\T_RHN-O\K,#;_.#M _S]!
+M1/]K7U#_K9B&_[JBD_]C4D3_LYR=_R\C)O\@(2+_)B$C_^S0T_^[KK/_9W)Y
+M_T=(3__FV.#_Z=GA_^K:XO_FUM[_YMGD_]S,U/_ZY>'__^C;___FV_^CE)7_
+M(QD<_R\H*/] -S#_8%92_QH1$?\F'B'_(!H>_R0>(O\>&AS_+2LH_R,;%__"
+MLZW_.34S_T)'3/]L<'+_-BXL_RXB(_\Q)2C_,RHD_R0=%_\Q*"'_DXB!_SDP
+M,/\A'![_'Q<:_RHA(?\P)23_+R8F_R4>'O\8%!+_'!H7_Q40%/\;%AK_)AXA
+M_R@?'_\J)B3_14=%_V-L;/]@9&;_(!\E_XIX;O]00CG_*Q\;_T4[,?]U:63_
+MNJVL_RDA)/\F&R+_+R at O_QP>(?\/$A/_44M)_RD@(/\A'![_(!XA_QL=(O\I
+M*C'_/$!(_T9,5O\M,3G_6F!J_S]&4O])4F#_35=B_T--6/]"3%?_45MF_R$C
+M*/\J(B#_I):-_TH\,_^)?GW_& \/_R0?(?\A'"#_4$='_T)$2?]%2U/_9U]=
+M_W)M;?\]0D[_+RTO_S]!2?]%4F7_+#9%_R at H,?]12$C_M+"N_V1;5/^]K)__
+MHX^'_U=84?]H;6G_O[W _S(T.?]/45;_(2,H_S R-_\K+3+_(2,K_RTO-O\Z
+M/$'_-C8Y_V1;5/^#<VG_?FYE_WQQ:O\N("'_)1T at _R,A(_\H(R7_\-;3_\6X
+MM_]775__,BTM__G=V?__X=O__^';___BW/__YM___^;B___JX___Z>#__^7>
+M_Z"1E?\B&A[_*20F_S(K)?]734G_'!,3_R4=(/\>&!S_'A8:_QL7&?\D)2;_
+M'AD;_[VPJ_\Q+RS_66%E_X*'C/\X,#/_-"@I_R\D(_\Q*"+_'A82_T(Y,O]3
+M2$'_*R(B_R(='_\>%AG_*B$A_RH?'O\K(R'_)1X>_Q0/$?\7$A;_%1 6_Q0/
+M$_\E'2#_*R(B_RLG)?\J+"K_9W!P_VEM;_\?'B7_?&QC_UI.1_\Q)B7_1#LT
+M_UQ02__"M;3_+"0G_R8=)/\C'2'_*2<D_WQU;_]N9&#_(1 at 8_QT8&O\8%AG_
+M'!XC_Q87'O] 1$S_45=A_R(F+O],4ES_/T92_T=07O\]1U+_14]:_T1.6?])
+M4U[_)R4N_RD='O]83$7_A'MU_QX9&?\G(R7_(QXB_R$<(O]P86'_CHF-_T%&
+M3/]=6%/_HY^=_SQ!3?\O+2__.CQ$_T5.7/\J*S+_8%I8_ZB=G/];5E#_P+.H
+M_Z62AO^/@WK_0DI&_U-;5_]P<G#_-C<^_TY/5O\H*B__.CQ!_RPN,_\>("C_
+M("$H_SLZ0/\D(B7_'AH8_T(Z-O]0143_(1T?_R4A(_\B'B#_(Q\A_R8B)/_I
+MU-#_HYV;_VQT=O]234__]=_=__KAX/_[XN'__N7D___IXO__Z>+__^KB___K
+MYO__Z^;_E8B-_R$8'_\A'"#_*B4 at _TI"0/\5#@[_)!P?_QT7&_\@&!O_'AL:
+M_R at D)O]33$S_M*>F_S(P,O]A:&[_2D]5_R\G*_\S*"?_+B0 at _R\E(?\;%1/_
+M+B@@_S\W+?\;$Q'_*20F_QX9&_\K(B+_,28E_RD@(/\H("/_%A 4_QP6'O\3
+M#A3_% \3_R$<'O\E'A[_*B4E_S at Z./]I<G+_8V=I_QL:(?]N8EG_8550_S G
+M)_\[-R__-RXG_\>YMO\Q*2S_)2 B_SLV,?^+ at GO_:5E/_QT1#?\I("#_*B4G
+M_S0R-?\7&1[_)"4L_TE-5?]67&;_1$I4_T]48/\Z04W_0DM9_U-:9O]#35C_
+M1E%<_TY79?\B'B?_+"(E_W5K9_\;%A'_)B(D_R >(?\A'"#_'18=_YJ-C/_@
+MV-O_/D%&_V!;5O^8E)+_-3M#_S P,_]+3U?_&A\E_XN)AO^QIZ/_8UI:_ZJD
+MG/^:BW[_IYB+_V5E7/\\2DG_*S<T_T]44/] 0D?_4E5:_QL=(O\Z.3__*BPQ
+M_R,D*_\G)S#_0T%$_RLG*?\E(2/_.3(R_\&YO/\J*"O_'AXA_R$@'_\H(R/_
+M*20D_[JKJ_]S<&__765I_R\I+?_VX-C__>/8___HW?__Z^#_^^3?___KY___
+MZN;__^WG__WJY/^-?(;_(!<>_QT;'O\?&AS_.30V_Q<3%?\<&!K_'AH<_R,?
+M'?]S;FG_M*NE_U!#/O]22DC_*2<I_T1,3O]#2$W_,"LM_RHC(_\F'Q__*2(B
+M_Q at 2%O\C(!O_)248_Q82$/\C'B+_'AH<_RLD)/\L)"+_)R(D_R<B)O\6$17_
+M%1 4_Q00$O\<&!K_)" B_R0@(O\J(B7_+2LM_VAQ<?]=9&3_'ALA_U9/0_]R
+M9&'_(ATA_S@^.O\C'AC_Q+>R_R\G*O\C(!__?75S_R,6%?\E'1O_(ATA_R0<
+M'_\H(2'_(1TE_Q at 8(?\A(2K_1TA3_TY17?]-4V/_1DY>_T5-7?\Z0E+_6U]N
+M_T]99_]'5F/_3UAF_R<F+?\D'R/_(!L?_R0?(_\F)";_'AH<_QL7&?\7$A3_
+MNK:T_][1UO]%1$K_6E)._Y>1C_\Y.3S_;W!W_U!06?]E:&G_?W]\_X!X=/^\
+MJZ7_GY2-_Y>,A?]D7%C_1TU)_S="0_\J-3;_-#L[_X.)A?])3U'_)20J_S(O
+M-?\A(RC_)RTO_R,D+_\Z/#G_Y.'@_RLH+O\A'![_IIN:_R$9'/\?'"+_+BLJ
+M_Z.AF?^<F93_ULG(_[FMJ?_AU]/_^.;C__OBV___Y][__^?>___HW___ZN+_
+M_^OC__[JXO__Z^/__NOC_WUL=O\>%1S_&!89_Q at 3%?\M*"K_&!06_QH6&/\<
+M&!K_65!*_S at O*?\T+BS_03P^_T5 0O\B("/_04E-_U9;8?\H(R7_*R0D_R8?
+M'_\L)27_%A 4_QH7%O\<&A7_%1$3_Q82%/\;%QG_)A\?_RXF)/\F'Q__)2 B
+M_Q81$_\;%AC_)2 @_RDD)/\F(2'_)B$A_R<?(O\F)";_9W!P_V-J:O\>&R'_
+M3D<[_V]A7O\;%AK_0$5%_QL:&?]G8ES_)B0?_QX?(/\7$A;_(AH>_R$?(O\A
+M'B3_(1D<_R(;&_\;%Q__("(J_SD[0_\_14__2E%=_T)+6?\Z0U'_1D]=_U9?
+M;?\_3%G_2%5B_TE47_]-5V'_-S8\_R at C)_\@&Q__(!L?_X%Y?/\I(23_'!<9
+M_QL7&?^>F);_KI^D_T-%2O]74DW_D8J*_TA"1O_CU=W_Y=KA_Y&/F/]<5%?_
+MO*RC_[RIG?]]>W3_24Q&_S]$0O\Y1T;_+C<W_SE"0O^.E97_+C4U_S<]/_\Y
+M.#[_&!4;_S$S./\U.SW_*BLV_U!.2__<U=7_)28M_Q87&/\W,##_)R$E_Q at 3
+M%_\>(!W_9VID_WEV<?_ at T,?_SKBO__SDV__XWM7__.+9___FW?__Y][__^C?
+M___JXO__Z^/__NKB__[JXO__Z^/_;EUG_R$8'_\3$13_&106_R ;'?\5$1/_
+M&A88_QH6&/\D'!K_*!\?_T,^/O\]/#O_0SU!_QX;(?])4%;_3E):_S8P-/\I
+M(B+_*R0D_RHC(_\6$A3_%1,5_Q,1$_\4$A3_$@X0_R <'O\C'!S_-"PJ_R8?
+M'_\I(B+_)!T=_R at A(?\I(R'_*2,A_RDC(?\L)B3_*B(E_RXL+O]B:VO_96QL
+M_R =(_\[-"C_AGAU_Q at 3%_\7%Q3_7UU8_UI84?\C'!S_&A@;_R(=(?\C&Q__
+M(!L?_R(=(_\@&!O_*2(B_R4A*?\D)B[_+C0\_SM#3?\\1E'_0DQ7_TI47_]$
+M3EG_1U%<_SY(5O]!2%3_7EYG_Y&3F/]!/T+_(ATA_RDD*/\E("3_3T-&_R at 9
+M'?\>%AG_'AH<_]S4TO_PW^7_0T5*_V5C7O^"@(+_8%I>_^C6VO_IW=[_V-+:
+M_^[@X?^]KJ'_BH)X_TU/3?\X04'_.$='_R\Y/?]%2DK_B9&._S Y.?]!3$W_
+M/T=+_R,B*/^?G*+_,S4Z_RPT-O\Q,CW_75E7_^;8V?]*04C_F)*0_YF4C_^/
+MCHW_C8N(_V1I9_]=9&3_9F5>_^W=T__VW-'__]_5__[CV/__YMW__^;=___E
+MW/__Z-___^OC__SHX/_ZYM[__NKB___JXO]:25/_(1@?_QL9'/\=&!K_&!,5
+M_Q41$_\6$A3_%A(4_QT6'?\E'R/_/#<W_T5"/?\<%!C_&A8>_TU36_](2U;_
+M/SD]_R<@(/\B&QO_*"$A_Q42$?\3$13_&!89_Q(0$O\3#Q'_'1D;_R4>'O\R
+M*BC_*2$?_S H)O\J(B#_+"0B_R@@'/\O)R/_+B8B_RHB'O\J(R/_(R$C_UIC
+M8_]C:FK_)2(H_S I'?^8BH?_-3 T_W5P:O]%0C?_(QL7_QP-$O\B&AW_(1L9
+M_R<<&_\A'1O_'QH>_R,;'O\E'A[_)2$I_S0V/O\T.D+_0TM5_SU'4O\U/TG_
+M0$I4_T]98_\X0DS_,3M)_SX^3?^KH:O_I)ZB_SLV.O\F(27_(1P at _QP7&_]2
+M0T?_,B,G_R(8&_\/#0__J9Z=_^/2V/];76+_7%I5_W%V=O]A7&#_Y-+4_^/9
+MU?_HX.3_NJVH_XA^</],44O_.C\__S]'2?\A+S+_459<_XB,BO\V/#C_0TM-
+M_SI&2O\Z0D;_'QXD_]C5V_\V.#W_+C8X_R\P._]=65?_Z=?9_UU+3?_ at T<O_
+MGI2)_U]@6?]=7%7_6%U=_U9B8?]O;F?_[-_4__??UO_]W=7_^]_7__SBV?__
+>YMW__N3;___GWO__ZN+_^^??__[JXO_]Z>'__^CA
+ 
+end
diff --git a/lib-python/2.2/test/testimgr.uue b/lib-python/2.2/test/testimgr.uue
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/testimgr.uue
@@ -0,0 +1,1170 @@
+begin 755 test.rawimg.rev
+M_UI)4_\A&!__&QD<_QT8&O\8$Q7_%1$3_Q82%/\6$A3_'18=_R4?(_\\-S?_
+M14(]_QP4&/\:%A[_35-;_TA+5O\_.3W_)R @_R(;&_\H(2'_%1(1_Q,1%/\8
+M%AG_$A 2_Q,/$?\=&1O_)1X>_S(J*/\I(1__,"@F_RHB(/\L)"+_*" <_R\G
+M(_\N)B+_*B(>_RHC(_\C(2/_6F-C_V-J:O\E(BC_,"D=_YB*A_\U,#3_=7!J
+M_T5"-_\C&Q?_' T2_R(:'?\A&QG_)QP;_R$=&_\?&A[_(QL>_R4>'O\E(2G_
+M-#8^_S0Z0O]#2U7_/4=2_S4_2?] 2E3_3UEC_SA"3/\Q.TG_/CY-_ZNAJ_^D
+MGJ+_.S8Z_R8A)?\A'"#_'!<;_U)#1_\R(R?_(A@;_P\-#_^IGIW_X]+8_UM=
+M8O]<6E7_<79V_V%<8/_DTM3_X]G5_^C at Y/^ZK:C_B'YP_TQ12_\Z/S__/T=)
+M_R$O,O]15ES_B(R*_S8\./]#2TW_.D9*_SI"1O\?'B3_V-7;_S8X/?\N-CC_
+M+S [_UU95__IU]G_74M-_^#1R_^>E(G_7V!9_UU<5?]875W_5F)A_V]N9__L
+MW]3_]]_6__W=U?_[W]?__.+9___FW?_^Y-O__^?>___JXO_[Y]___NKB__WI
+MX?__Z.'_;EUG_R$8'_\3$13_&106_R ;'?\5$1/_&A88_QH6&/\D'!K_*!\?
+M_T,^/O\]/#O_0SU!_QX;(?])4%;_3E):_S8P-/\I(B+_*R0D_RHC(_\6$A3_
+M%1,5_Q,1$_\4$A3_$@X0_R <'O\C'!S_-"PJ_R8?'_\I(B+_)!T=_R at A(?\I
+M(R'_*2,A_RDC(?\L)B3_*B(E_RXL+O]B:VO_96QL_R =(_\[-"C_AGAU_Q at 3
+M%_\7%Q3_7UU8_UI84?\C'!S_&A@;_R(=(?\C&Q__(!L?_R(=(_\@&!O_*2(B
+M_R4A*?\D)B[_+C0\_SM#3?\\1E'_0DQ7_TI47_]$3EG_1U%<_SY(5O]!2%3_
+M7EYG_Y&3F/]!/T+_(ATA_RDD*/\E("3_3T-&_R at 9'?\>%AG_'AH<_]S4TO_P
+MW^7_0T5*_V5C7O^"@(+_8%I>_^C6VO_IW=[_V-+:_^[@X?^]KJ'_BH)X_TU/
+M3?\X04'_.$='_R\Y/?]%2DK_B9&._S Y.?]!3$W_/T=+_R,B*/^?G*+_,S4Z
+M_RPT-O\Q,CW_75E7_^;8V?]*04C_F)*0_YF4C_^/CHW_C8N(_V1I9_]=9&3_
+M9F5>_^W=T__VW-'__]_5__[CV/__YMW__^;=___EW/__Z-___^OC__SHX/_Z
+MYM[__NKB___JXO]];';_'A4<_Q at 6&?\8$Q7_+2 at J_Q@4%O\:%AC_'!@:_UE0
+M2O\X+RG_-"XL_T$\/O]%0$+_(B C_T%)3?]66V'_*",E_RLD)/\F'Q__+"4E
+M_Q80%/\:%Q;_'!H5_Q41$_\6$A3_&Q<9_R8?'_\N)B3_)A\?_R4@(O\6$1/_
+M&Q88_R4@(/\I)"3_)B$A_R8A(?\G'R+_)B0F_V=P</]C:FK_'ALA_TY'._]O
+M85[_&Q8:_T!%1?\;&AG_9V)<_R8D'_\>'R#_%Q(6_R(:'O\A'R+_(1XD_R$9
+M'/\B&QO_&Q<?_R B*O\Y.T/_/T5/_TI17?]"2UG_.D-1_T9/7?]67VW_/TQ9
+M_TA58O])5%__35=A_S<V//\H(R?_(!L?_R ;'_^!>7S_*2$D_QP7&?\;%QG_
+MGIB6_ZZ?I/]#14K_5U)-_Y&*BO](0D;_X]7=_^7:X?^1CYC_7%17_[RLH_^\
+MJ9W_?7MT_TE,1O\_1$+_.4=&_RXW-_\Y0D+_CI65_RXU-?\W/3__.3@^_Q at 5
+M&_\Q,SC_-3L]_RHK-O]03DO_W-75_R4F+?\6%QC_-S P_R<A)?\8$Q?_'B =
+M_V=J9/]Y=G'_X-#'_\ZXK__\Y-O_^-[5__SBV?__YMW__^?>___HW___ZN+_
+M_^OC__[JXO_^ZN+__^OC_XU\AO\@%Q[_'1L>_Q\:'/\Y-#;_%Q,5_QP8&O\>
+M&AS_(Q\=_W-N:?^TJZ7_4$,^_U)*2/\I)RG_1$Q._T-(3?\P*RW_*B,C_R8?
+M'_\I(B+_&!(6_R,@&_\E)1C_%A(0_R,>(O\>&AS_*R0D_RPD(O\G(B3_)R(F
+M_Q81%?\5$!3_%! 2_QP8&O\D("+_)" B_RHB)?\M*RW_:'%Q_UUD9/\>&R'_
+M5D]#_W)D8?\B'2'_.#XZ_R,>&/_$M[+_+R<J_R,@'_]]=7/_(Q85_R4=&_\B
+M'2'_)!P?_R at A(?\A'27_&!@A_R$A*O]'2%/_3E%=_TU38_]&3E[_14U=_SI"
+M4O];7V[_3UEG_T=68_]/6&;_)R8M_R0?(_\@&Q__)!\C_R8D)O\>&AS_&Q<9
+M_Q<2%/^ZMK3_WM'6_T5$2O]:4D[_EY&/_SDY//]O<'?_4%!9_V5H:?]_?WS_
+M@'AT_[RKI?^?E(W_EXR%_V1<6/]'34G_-T)#_RHU-O\T.SO_ at XF%_TE/4?\E
+M)"K_,B\U_R$C*/\G+2__(R0O_SH\.?_DX>#_*R at N_R$<'O^FFYK_(1D<_Q\<
+M(O\N*RK_HZ&9_YR9E/_6R<C_N:VI_^'7T__XYN/_^^+;___GWO__Y][__^C?
+M___JXO__Z^/__NKB___KX__^Z^/_E8B-_R$8'_\A'"#_*B4 at _TI"0/\5#@[_
+M)!P?_QT7&_\@&!O_'AL:_R at D)O]33$S_M*>F_S(P,O]A:&[_2D]5_R\G*_\S
+M*"?_+B0 at _R\E(?\;%1/_+B@@_S\W+?\;$Q'_*20F_QX9&_\K(B+_,28E_RD@
+M(/\H("/_%A 4_QP6'O\3#A3_% \3_R$<'O\E'A[_*B4E_S at Z./]I<G+_8V=I
+M_QL:(?]N8EG_8550_S G)_\[-R__-RXG_\>YMO\Q*2S_)2 B_SLV,?^+ at GO_
+M:5E/_QT1#?\I("#_*B4G_S0R-?\7&1[_)"4L_TE-5?]67&;_1$I4_T]48/\Z
+M04W_0DM9_U-:9O]#35C_1E%<_TY79?\B'B?_+"(E_W5K9_\;%A'_)B(D_R >
+M(?\A'"#_'18=_YJ-C/_ at V-O_/D%&_V!;5O^8E)+_-3M#_S P,_]+3U?_&A\E
+M_XN)AO^QIZ/_8UI:_ZJDG/^:BW[_IYB+_V5E7/\\2DG_*S<T_T]44/] 0D?_
+M4E5:_QL=(O\Z.3__*BPQ_R,D*_\G)S#_0T%$_RLG*?\E(2/_.3(R_\&YO/\J
+M*"O_'AXA_R$@'_\H(R/_*20D_[JKJ_]S<&__765I_R\I+?_VX-C__>/8___H
+MW?__Z^#_^^3?___KY___ZN;__^WG__WJY/^@D97_(AH>_RDD)O\R*R7_5TU)
+M_QP3$_\E'2#_'A@<_QX6&O\;%QG_)"4F_QX9&_^]L*O_,2\L_UEA9?^"AXS_
+M.# S_S0H*?\O)"/_,2 at B_QX6$O]".3+_4TA!_RLB(O\B'1__'A89_RHA(?\J
+M'Q[_*R,A_R4>'O\4#Q'_%Q(6_Q40%O\4#Q/_)1T at _RLB(O\K)R7_*BPJ_V=P
+M</]I;6__'QXE_WQL8_]:3D?_,28E_T0[-/]<4$O_PK6T_RPD)_\F'23_(QTA
+M_RDG)/]\=6__;F1 at _R$8&/\=&!K_&!89_QP>(_\6%Q[_0$1,_U%78?\B)B[_
+M3%)<_S]&4O]'4%[_/4=2_T5/6O]$3EG_25->_R<E+O\I'1[_6$Q%_X1[=?\>
+M&1G_)R,E_R,>(O\A'"+_<&%A_XZ)C?]!1DS_75A3_Z.?G?\\04W_+RTO_SH\
+M1/]%3ES_*BLR_V!:6/^HG9S_6U90_\"SJ/^EDH;_CX-Z_T)*1O]36U?_<')P
+M_S8W/O].3U;_*"HO_SH\0?\L+C/_'B H_R A*/\[.D#_)"(E_QX:&/]".C;_
+M4$5$_R$='_\E(2/_(AX at _R,?(?\F(B3_Z=30_Z.=F_]L=';_4DU/__7?W?_Z
+MX>#_^^+A__[EY/__Z>+__^GB___JXO__Z^;__^OF_Z.4E?\C&1S_+R at H_T W
+M,/]@5E+_&A$1_R8>(?\@&A[_)!XB_QX:'/\M*RC_(QL7_\*SK?\Y-3/_0D=,
+M_VQP<O\V+BS_+B(C_S$E*/\S*B3_)!T7_S$H(?^3B('_.3 P_R$<'O\?%QK_
+M*B$A_S E)/\O)B;_)1X>_Q at 4$O\<&A?_%1 4_QL6&O\F'B'_*!\?_RHF)/]%
+M1T7_8VQL_V!D9O\@'R7_BGAN_U!".?\K'QO_13LQ_W5I9/^ZK:S_*2$D_R8;
+M(O\O*"__'!XA_P\2$_]12TG_*2 @_R$<'O\@'B'_&QTB_RDJ,?\\0$C_1DQ6
+M_RTQ.?]:8&K_/T92_TE28/]-5V+_0TU8_T),5_]16V;_(2,H_RHB(/^DEHW_
+M2CPS_XE^??\8#P__)!\A_R$<(/]01T?_0D1)_T5+4_]G7UW_<FUM_SU"3O\O
+M+2__/T%)_T529?\L-D7_*"@Q_U%(2/^TL*[_9%M4_[VLG_^CCX?_5UA1_VAM
+M:?^_O<#_,C0Y_T]15O\A(RC_,#(W_RLM,O\A(RO_+2\V_SH\0?\V-CG_9%M4
+M_X-S:?]^;F7_?'%J_RX@(?\E'2#_(R$C_R at C)?_PUM/_Q;BW_U==7_\R+2W_
+M^=W9___AV___X=O__^+<___FW___YN+__^KC___IX/__Y=[_HY*2_R@<'?\X
+M,"[_2#XT_V9:5?\;$A+_*R,F_R,=(?\K)2G_(AX at _SX[-O]#-S#_RK6S_RXF
+M*O]-3EG_9V1P_S$I)?\O(R3_*APD_RLB'/\E'AC_;&-<_[.HH?\V+2W_'QH<
+M_QP4%_\M)"3_+2(A_RTD)/\E'A[_(1X9_RLJ(_\6$A3_'AD=_RDA)/\F'1W_
+M*"0B_S]!/_]@:6G_8V=I_R >(?^@C8'_8%!&_SDM)O]?54O_D(1__[6HI_\F
+M'B'_)AP?_S4P,O\N+"[_85U?_\>^OO\N)27_(QX at _R$?(O\;'2+_-C<^_TU1
+M6?\^1$[_+C(Z_TE/6?]"257_14Y<_T!*5?]&4%O_/DA3_TE37O\A(RC_*2$=
+M_ZB8CO^XI9O_:EI7_X5X=_\B&QO_)"(D_R at C(_\T.3__0DA2_["GI_^EH*#_
+M24Y:_TU+3?\X.D+_(2LZ_SA#5/\H,4+_'1TH_RDD*/^AG9O_6%-'_Z><D?^)
+MB7[_75M8_]?1V?\Y/$'_2TY3_R$C*/\M+S3_+2\T_RHN-O\K,#;_.#M _S]!
+M1/]K7U#_K9B&_[JBD_]C4D3_LYR=_R\C)O\@(2+_)B$C_^S0T_^[KK/_9W)Y
+M_T=(3__FV.#_Z=GA_^K:XO_FUM[_YMGD_]S,U/_ZY>'__^C;___FV_^SI)[_
+M)AX:_SDT+O]72CW_<6-:_Q at 2%O\R*2G_)AT=_R at A*?\=&1O_2TQ%_U%(0?_(
+MMK/_*!XG_V%D=?]%2%3_.#,S_R<=(/\S)RK_,R at A_RLE'?]N95[_O[2M_S(I
+M*?\A'![_'Q<:_RLB(O\Q)B7_*B$A_R<A'_\J*"#_/S at R_R 8&_\@&QW_)R(B
+M_R4A'_\J(R/_3U!1_UUF9O]A9&7_(QXB_Z>4BO]N75?_7%!'_W!F6_^JGIG_
+MLZ6F_R<B)O\D("+_240__S(M)_]S<6[_R\"__RPI*/\C'2'_(1TE_R,O-O\Z
+M/#K_:EU8_R,>(/]$257_0TI6_T%)4_]#35C_3U)=_TM07/]+563_3%=B_R$D
+M)?\M*"/_IY>._[JEF?^ECX?_1#8S_R<=(/\F("3_+BXQ_SQ#2?]'4%?_8V=O
+M_Z*BJ_]4763_+BLQ_S4T0?\U.D;_&A at H_SDW1_\J,D+_'" H_R @(_^,B(;_
+M9UM6_XJ.AO^&BX?_?'Q__S<Y/O].45;_&QTB_RHG+?\V.#W_'R,K_RTR./\S
+M-3K_/S]"_X=Z9_^^HYC_UKNJ_X-P6/_%K:3_H9*-_S$J*O\J(R/_Z<W/_\>Z
+MQ_\T.5+_/S]4_U!$7?]94V?_8%QL_VMG=_][9G+_CG9[__SBW___Z-W__^KA
+M_Z:1C?\N(A[_/S at R_V111?]V9ES_&A49_T X-O\P*";_*24M_R4C)?]H:%__
+M6U),_[BJI_\P*C+_3E%C_T9(5_\P*RW_*R$D_S0H*?\P)1[_*"(:_XV%>__&
+MN[3_+B0G_Q\9'?\?%QK_+",C_RTB(?\L(R/_)B(@_SHY+O]?5E#_)A\?_Q\:
+M'/\H(R/_(!P:_RTD)/]35%7_7F=G_V)E9O\@'2/_JIF3_S at K*O]:5%+_<6MC
+M_[2JIO^GFYS_)B D_R8C(O]94DS_0STS_W9T;__+OKW_*2 at G_R$:(?\@'"7_
+M.3Y*_T!"1?^"@'C_F(Z*_R =(_]:7&3_2$];_TU79?]!14W_0T92_TQ69/]-
+M55__("$B_R\I)_^IG9;_HHR#_ZR6CO^CDY#_*1XC_R0>(O\J+#'_25!6_TM4
+M6_]+55__4%%<_UQE:_^WL[O_03]/_RDO.?^7DYO_(A\E_RLU0/\F+C[_&AXM
+M_R$@)O^%@(#_3E)0_S4Z./\9&Q[_.3@^_T%$2?\>'2/_-3(X_R$C*/\H+#3_
+M)2<N_SP^0_\^/D'_F8][_\&FG__DR[K_F89N_]/#NO^6B(G_,RTQ_R<B)/_G
+MTL;_QKVW_T]76_]A9VG_X<S1__;>V__^Y]O__^C?__O>W?_#I:?__.#<___G
+MWO__YM__MYV at _S$?(?])/3G_=F%5_W]M8_\B'2'_-BXL_S\W-?\D)BW_)"(D
+M_WUY<?]M85S_K*"<_S(N-O]66VW_24E8_S$L+O\H'B'_,"0E_S,H(?\N*"#_
+M;65;_\F^M_\S*2S_(!H>_R$9'/\P)R?_+R0C_R\F)O\A'1O_7UY3_XZ%?_\G
+M("#_(QX at _Q\:&O\J)B3_+20D_UE:6_]:8V/_:6QM_QX=(_^QI9[_'Q<:_S<W
+M.O]#0#O_O;6S_Z.7F/\J(B7_*2,A_U5,1?].1SO_;6MD_\:YN/\N+2S_(!D@
+M_RDE+O]03%S_+BXW_Q4=&?]$/SG_;&!<_S at S.?])4%[_14Y<_T%&3/]"2%+_
+M/D51_T!$3/\=&QW_)1X>_["DG_^<C(/_M*"8_[.CH/\I'B/_)!XB_S8U._]5
+M5U[_*S$Y_UYF</]?8&O_769L_UE57?\O+3W_.$),_SD]._^HJ:/_)"LQ_QXH
+M/?\F+4'_'2(N_QTB)_\P-CC_.3Q!_QD;'O\:&AW_-SH__RHI+_\B'R7_.CQ!
+M_RLO-_\R-#O_+S$V_S<W.O^<DX+_QJVH_^C0P?^DDWS_S+^^_SLT._\A'RC_
+M)B$G_^70S/_/P;[_6EQ?_SD]/_^ >7G_\>#:__OBU?__Y-G__^7A___CW___
+MY=[__^7<__WDW?^<A(W_*AHB_T$[.?]];%[_>VMA_R0?(_]!.3?_0CHX_R\X
+M/O\R-3;_BX)[_WEL9_^^M+#_-C4\_T]29/],2%C_*R,F_RLA)/\O(R3_+"$:
+M_RHD'/]X<&;_TL? _S(H*_\@&A[_(1D<_R\F)O\N(R+_+B4E_R<C(?]K:E__
+MN*^I_R at A(?\A'![_(QX>_RPH)O\L(R/_8F-D_V)K:_]A9&7_(!X;_[JIF_\E
+M'1O_=WAY_Q at 7%O_$O;W_E(:'_R@@'O\F'AK_6U%'_V5<3O]B7E;_N:RK_RLJ
+M*?\F'R;_&14>_SA!2/\G)BW_,"HN_R8H)O][<W'_/SM$_S4[2_]455S_3U%4
+M_T5+4_],4ES_.CE _QX8'/\K(R;_L:>C_ZB=EO^XI)S_MZ>D_RTB)_\E'R/_
+M,"\U_TY/5O\D)B[_;&]Z_UU>:?]67V7_55%9_S<U1?] 2EG_-SP\_RTK)O\_
+M0$O_)R\__R\V1/\Q-D+_'!XM_QH=*/]$1D[_/#Y#_S4U./\\/T3_)20J_RHG
+M+?\_04;_-#A _S8X/_\X.C__)B8I_Z"6A__%K:K_Y]##_Z.3?__3Q\C_/C<_
+M_R4?)_\I(B+_Z]/6_]7#Q?]#/T'_:&IM_U-=8?^UL:__^.'<__SEX/__Y][_
+M_^7<___FW?__Y-W__^CA_Z*1E_\C&Q__&104_XQ[;O^DE(O_)2 @_SX[.O]$
+M/S__-T%%_T1)1?^. at WS_CX%^_ZF at H/\_/$C_/D!5_UA28/\N)R?_+2$B_RTB
+M(?\J)2#_'!H5_V=B7?_+O\#_*B4G_R$<'O\?&!C_)R @_S(K*_\L)27_+"@@
+M_X%\</_'N[;_*2(B_R$<'/\C'A[_+BDI_R at H)?]@8F7_96MG_V1D9_\B'1W_
+MPJZF_S\T+?^#@GO_&QP6_]K+S/]R9F?_*24G_S H)/]22D#_>G%C_UI/2/^O
+MI*/_,2PL_QP7'?\L*C/_.4--_RDJ,?]_=G;_)B >_];)SO]*35C_14A3_X=\
+M>_]>6%;_*" >_TQ/6O\Z/$3_'AH<_RLF*/^=DI'_LJ>@_[RJIO^]KJ[_+",C
+M_R8B)/\U-CW_5EA at _R B*?\<'B/_C(V4_TI,4_] 04C_9F=R_T9)5/]N<'/_
+MKZ.D_T5#4_\H,#K_:&IM_SHX._\E*#3_)BDU_T!"2O]66%__0T5*_S<]/_\M
+M+3#_:65M_S] 1_\P,3S_-3<^_T9*3/\O,3;_?G)N_Z"+B?_1N;;_=6)<_]C*
+MTO]83EC_*"$K_R ;(?_IU-+_X<_+_T4^./].1D+_34U*_WU\>__<S\[_^-G3
+M___IX?__YM___^?@___FW__]Y-W_P+*S_RLC)O\A&QG_K9J0_Z>8D_\P*BC_
+M04 __S8Q,_]%2TW_1$A&_Y&(@O^:C(G_CH.(_RPJ.O]$15S_<6U]_RLC)O\M
+M(B'_,"4D_QX:&/\>'!?_&A02_\B_O_\G(B3_(AT=_R$:&O\G("#_*R0D_RXG
+M)_\M)Q__AX!R_\_#OO\H(2'_(!L;_R8A(?\F(2'_)B0A_UQ at 7O]G;&K_9&-B
+M_R(>'/^]J*/_-RTC_YV8DO\].S;_WLW-_V%86/\C(2/_*",E_S8S+O^ >W7_
+M3$1 _ZJBH/\J(R/_(!LA_QD9(O\]1%#_/3Y%_X^'A?]934G_X,[2_T1,5O] 
+M2%C_+BDO_TI&2/^!>'+_(2$L_S4R/O\H(R7_,2<J_Z&1CO_ L:O_J):3_[6F
+MIO\L(R/_)B(D_S<Y0/].4%?_&!H?_TY.4?\>("7_0$-(_TQ06/\[04O_3E!8
+M_VIL;__4Q<G_.SE)_RDS/?]'2$G_;VQK_R4H-/\S-D+_/D!(_UQ>9?]+3E/_
+M-#H\_QP<'_^>FJ+_1D9/_RPO._\N,#?_,30U_TM*4/^KI*3_V\[-_^G8V/_K
+MWMW_XM31_[^MJ?^5A'[_=V1<_^?2S?_MV]?_T\O'_];+RO_3S\W_/T)#_V]Q
+M;__JV];_^=[7___DW?__Y=[__^7>___GX/_$N[7_13LW_WIL:?_4OK7_FHJ'
+M_RTF)O\_/4#_-B\V_QP='O\\.SK_E)"._[&EH/]P:&S_)R<T_T)(6/]F9'3_
+M)1T at _RXC(O\M(B'_)2$?_R4C'O\@&!3_S\+!_RTF)O\?&!C_(AL;_RDB(O\O
+M*"C_+28F_S J(O^3C'[_S,"[_RLD)/\B'1W_)2 @_RTH*/\K(B+_7V1>_UYE
+M<_]:6F7_&QD4_\*MJ/]'.3#_IIV7_V%?6O_>S<W_1#L[_R(@(O\>&1W_+BLJ
+M_S(N+/]W;W+_HIF9_RTF)O\A'"+_'AXG_T%'4?]'1TK_FY&-_[6IHO_:R,K_
+M4EID_T1,8/\?'BO_%AL at _U-,1O]E7V?_*20P_RHB)O\X*2W_K)>5_]G(PO^7
+MA8+_>6IJ_RLB(O\G(R7_+S(W_TY15O\W-SK_AH&#_QP?(/\/%1?_2U%9_T!'
+M4_]'2%/_B8N._]O,T/]%0U/_-T%+_R(C)/\X-33_+"\[_SU 3/]"1$S_3$Y5
+M_TM.4_]"2$K_)24H_[&MM?]!04K_+S,[_S8Y/O])2TG_D(N-_R$='_\O*"C_
+M."\O_T(]/?](1DC_:F5E_Y&)A_^IG9G_Z]S6_]7(P_]:4$S_<VUK_VAG9O]/
+M5%+_/D9#_TY24/_'LJW_^-W6__G>U__^X]S__>3=_\S'N_]<44K_II*._^')
+MP/^9B(C_,2DL_S at U._]$/47_)B$A_R ?'O\N+"[_O[:P_XB#@_\\0D3_9G)V
+M_V]Q>/\O*BK_,28E_RL@'_\D(![_6UE2_T(Y,__0PK__,"@F_R(;&_\E'A[_
+M,"DI_R at A(?\J(R/_+RDA_Y")>__)O;C_*R0D_R(='?\H(R/_*20D_RLB*?]8
+M8E__("E8_SP\9_\<'AS_O:BC_V)22?^WJJ7_='%L_]W,S/\^-37_(R$C_R@@
+M(_\^.#;_.S0N_V=;7/^4BXO_*R0D_QX9'_\:&B/_/$!(_SPZ//^GFY;_Q[FP
+M_]O)S?](4%K_25!>_T]'2_\B(2?_4$4^_X%[?_]"0D__'1L>_R<=(/^FF)7_
+MT<6^_ZJ:E_^VIZ?_,2 at H_R<C)?\V/#[_3%!2_UA45O^FGIS_-3HV_R(K*_](
+M45C_1D]=_U!17/^'B8S_X]38_U)08/\W04O_7V!A_[Z[NO\V.47_*"LW_S]!
+M2?]&2$__14A-_TA.4/\I*2S_K:FQ_TE)4O\Q.#[_,34W_TM)1O_+PL+_)2 B
+M_R(:'?^YK;#_*",E_R =(_\<&R'_%A89_R4H*?^SK*;_V,O&_^+6TO^[M[7_
+M0D='_W!V<O]26%3_.3Y#_^/7T__OW-3_[-G1__/@V/_[Y]__TL:W_VE=5/_7
+MR,+_XL_%_X5W=/\L(R/_,RLN_T4_0_]H8U[_,2TK_R4>'O_6R<3_AH."_UI@
+M8O^/EYO_8F-D_RLF)O\N)R?_+24A_RLF(?]A757_HY>0_]3"O_\I("#_(AT=
+M_R0='?\K)"3_+R at H_RXE)?\B'AS_3TU(_\O O_\M)"3_(QP<_RLD)/\M)B;_
+M(14>_U588_\@)%'_-S!?_QX<'__'JZ?_<FI at _\>MJO^4CXK_W-'0_S at O+_\D
+M'R'_*B(@_UU62/]V:F7_V\S-_X5]@/\H(B;_(!H>_QD9(O\^/T;_.3 at W_[6O
+MI__3QL'_V,?-_T--6/]%2U7_?G9T_UE45/_(N;3_B8.+_T]27?\E(R7_+"<G
+M_ZF=F?^SI)__FHN&_ZN=GO\D'!__+28F_S$F*_]"24G_GJ*@_ZVKIO]B6EC_
+M3%A7_T515O]!2%3_3%18_T(_/O_EV=S_1DE5_T),5O]K:6O_RL/#_S@]2?\J
+M,3?_$1(9_TA(4?])2U+_04=)_RHJ+?^WL;G_/$!(_RPQ-O\P-C[_>W5S_]K.
+MS_\C)2K_'"$G_U-68O\I)##_(ATA_R(@'?\D(AW_(B<E_UI96/]M96/_W=',
+M_[*HI/],4U/_,CX]_XF0D/]>86;_<7AL_Y*(?O^2AH'_?G-L_ZZBD__7R+O_
+M>FQC_]G-QO_ETLC_@')O_RHA(?]!.3W_24))_VIC7?]G8EW_3T=%_]/$O_]A
+M7&#_.3Q'_V-F<O]#0$;_+B<G_RLD)/\N*23_*",=_W-O9_^]L:K_UL3!_RLB
+M(O\A'!S_(QP<_RHC(_\K)"3_,RHJ_R$<'/\D'Q__S,#!_RTD)/\E'A[_)R @
+M_R at A(?\H'1S_(B$:_Q86(_\I)2W_'!T7_\JMJ?]S<&7_S+"M_["HI/_?U]7_
+M-B\O_R0?(?\P*";_>')B_X1X=/_>S]#_;&9J_R at B)O\F("3_'1LD_SY 1?\\
+M.3C_M:^G_^'3T/_5Q,S_3%5C_T1)5?^$?GS_BH.#_];$P/^*@X__1TA3_RPH
+M*O\O*"C_KZ2C_ZN=FO^'=W3_HI:7_R\I+?\K(R'_JI*5_U%)3/_+O\#_O["P
+M_[BGK?]886?_2U=@_S]&5/]'3U/_4DU-_]S1UO]35F+_0T]8_X2"A/_4S<W_
+M/4)._QTC)?\O,3;_3T]8_R8G+O\Z0$+_*"@K_[ZXP/]*3E;_,C<\_S$Y0_]]
+M=7/_V<W._S1#4/\G.T__051L_S$_5O]A8G/_(QPF_RXE)?\H(2'_8%A<_S(J
+M+O_*PK[_HI>0_XJ(@_]/4U'_/4)"_W-[??\Z2UG_86!F_UE<4/]03D?_8UM1
+M_]C(OO]Q8ES_W<_,_^;2RO]W:FG_-2TQ_S\X/_]#/47_9V!:_VYG8?^LHI[_
+MWLO%_VUF;O]#1%7_5E=H_T="3O\M)2C_+28F_RLF(?\V,2O_;&A at _\F]MO_4
+MPK__*R(B_R0?'_\F'Q__+28F_RHC(_\M)R7_2D(^_RH@'/_+O[O_*R(B_R0=
+M'?\B&QO_*R0D_R47'_\C&Q?_(ALB_RXF*?\@(1K_SK*N_W-R9__#J:;_N+.N
+M_]W5T_\V+R__(QX at _S\W-?^5CW__<&1 at _]K+S/]E7V/_*2,G_R,=(?\@'B?_
+M/3]$_T5"0?^UKZ?_V,K'_]' R/]!2EC_5%EE_Y"*B/^@F9G_X]'-_W=P?/]&
+M1U+_*",E_RHE)_^LI:7_E(R*_XM^??^<D9;_)2 F_RHB)?]_='/_6EQ:_\2]
+MO?^VL[+_O*ZV_TU57_]"3%K_24Y at _T=.5/]Q;&S_XM?<_TQ/6_\Y14[_A(*$
+M_]K3T_\Y/DK_/3X__U=97/\]/T;_*"<M_Q08&O\D)"?_N[6]_T)&3O]!1DO_
+M,3E#_Y&)A__?T]3_5VEW_S=-8_\G/%7_,$5=_TA<</\7(2__'1\B_R8E)/]0
+M14K_,"<N_Q\:'O^EGY?_L*6>_XZ'@?]M;VW_;G9Z_U9HC/](46+_4%I+_TQ,
+M2?]33D[_X]/*_V=84__ATM+_Y=#+_VQ at 8?\Q*C'_.3,[_SPX0?^+ at GO_?G5N
+M_[ZRK?_ at S,3_349-_SL]3/])2UK_.C8__S H*_\L)27_+RHE_R\J)/^/BX/_
+MS<&Z_]+ O?\N)27_)!\?_RDB(O\J(R/_*"$A_R<D'_]D6U7_PK.N_\S N_\M
+M)2/_)A\?_R0='?\J(R/_(QDC_R,<'/\@&2'_*"$K_Q\D(/^]IJ'_>WMP_\"K
+MI__&P;S_V]/1_S0M+?\D'R'_/34S_ZJDE/]5247_Y]C9_TI$2/\I(R?_(AP@
+M_R >)_\M+S3_B(6$_T Z,O_BU-'_R;C _TM48O]$257_E8^-_Z.<G/_FU-#_
+M7%5A_SH[1O\M)RO_,"HN_ZJEI_\[-C;_AWM\_YN2F?\A'27_(AP at _R,=&_\K
+M+2K_-RXN_R ;'__0Q<K_5V%E_TM77O])4%S_4UM?_VUH:/_>T]C_0$-/_S]+
+M5/\T,C3_S<;&_SQ!3?]*1T;_8&%B_T-(3?]02T__+3$S_RTM,/^JI*S_045-
+M_SM 1?\I,3O_6E)0_]#$Q?^*C8[_?W^"_Y^=IO]676/_+C<^_SL]0O\N)R?_
+M,2 at H_[2GHO\J)2?_(" I_TQ'2?^'A'G_I:29_V=L:/]L='C_8W9^_X:(C?^"
+M at G__1T5"_UI54/_HV];_6U%'_^37TO_BSM?_7E)3_S0Q)?\L*";_1#M"_WIN
+M9?^.A7?_R;R[_]W)Q?]!/T'_,3$^_T1$4?](1T[_,"HH_RH>'_\S)RC_-RXH
+M_YB-AO_8R<3_T\+"_RHB)?\C'B#_)AXA_RTD)/\N)B3_-BTG_VYI8__:R,7_
+MS[Z^_RLF)O\E("#_)!\?_R at C(_\4#Q/_%! 2_Q81%?\8$Q?_,"8B_[*EH/]#
+M03K_NK"L_]/'P__<T=#_-"\O_R<B)/\X-#+_O[.D_TD_.__EV=K_0CT__R8B
+M)/\@&Q__(R8Q_T)#3O^HG)?_-"@D_][1T/_&N;[_/$A1_U9=8_^5D(O_FY24
+M_^O>V?]04%O_/4!+_R8A(_\G(R7_*24G_U925/]G8F3_4T]1_R,>(O\K)RG_
+M*B8D_T='1/]G6%S_A(&'_]C0T_]88&K_359=_TA/6_]56F#_=&MK_^;9WO]#
+M1E+_5%MA_T8^0?_;S]+_0$90_W!S=/]I:&__1TI+_YZ:G/\T-CO_+2PR_Z:B
+MJ_\V.43_+S(W_RXT/O^?DI'_L9^A_VM?6O]<5U'_:&EC_VYJ8O]]:V?_P[*X
+M_];,R/_1O[S_=F%?_ZJAH?\D(RG_/S<[_R8D(?^+CHC_9VQJ_U%57?]Q<&__
+M?GQW_U=95O]044O_4U%)_^;%UO]N2&G_W+78__OBW?]52$W_*2(L_RDF,O\U
+M-4#_>G-G_Y6-??_+O[K_Y-#,_T=$2O\S,T#_.#=$_U957/\I(1__,B8G_S,G
+M*/\T*B;_D85^_]G*Q?_%MK;_*R,F_R(='_\E'2#_+B4E_RL@'_\V*B7_ at H!Y
+M_]+ O?_2P<'_*20D_R4@(/\F(2'_(AT=_QP7&_\6$17_%Q(6_QL6&O]%-3+_
+MIYN6_TY+1O^FH)[_W<_,_]?,R_\P+2S_)B$C_S(N+/_$MJ?_54U)_^+9V?\Z
+M-37_)B(D_QH8&_\C)C'_0D51_Z:7D?]21$'_X=/4_[>LL_\\2%'_5EMA_YF3
+MD?^0B8G_Z=S7_T9&4?\X.T;_)R,E_R<C)?\E(2/_)B(D_RTI*_^NJJS_(Q\A
+M_R at D)O]M8%7_ at WMK_YZ'>_]P8UC_VM+0_U-79O\M-3__14Q8_UI<8_]]='3_
+MZ=K?_T5(5/]/45C_8%99_][/U/]$1U+_CI.3_W1R>_]%2$G_R<3$_S0S.O\L
+M*S+_FIBA_S,T/_\X.T#_)"HT_ZN>G?^NFIS_CGUW_W!D7?]".S7_DH1[_WMH
+M8/_+O\C_03]!_S at S+O]F6UK_/CPY_RHL*O\V-#;_(!TC_S8W./]T>';_:FUR
+M_YF6E?]H9F/_<'5Q_U164_]454__Z+O?_XA.H/^F9-'_QYS2_UQ(4?\U&3/_
+M1S [_S4N*/^)?7G_FHF4_]3(Q/_MVM3_4T]7_S<W1/]44V#_6UIA_RHB(/\K
+M'R#_+R,D_STS+_^#=W#_U,7 _\2UM?\K(R;_(!L=_RDA)/\T*RO_,28E_SPP
+M*_^*B('_W,K'_]3#P_\N*2G_)B$A_R,>'O\J)27_)R(F_QH5&?\A'"#_'!<;
+M_U1$0?^6BH7_8V!;_ZFCH?_;S<K_U<K)_S$N+?\H(R7_+2DG_\6WJ/]>5E+_
+MY-O;_SPW-_\G(R7_'1L>_T!#3O\[/DK_L*&;_U]13O_DUM?_K:&J_T)-6/]3
+M5U__7E=7_X%Z>O_DU]+_3T]:_RTP._\D("+_)2$C_R,?(?\D("+_(AX at _R,?
+M(?\F(B3_(1T?_RLC(?\S,2S_74Y._ZB>FO_7S\W_3U-B_RLS/?] 1U/_7%YE
+M_X^&AO_FU]S_24Q8_U)46_]C65S_VLO0_SH]2/]^@X/_85]H_TE,3?_>V=G_
+M+RXU_R\N-?^4DIO_/#U(_SU 1?\N-#[_MJFH_\"LKO^JD8K_HHA]_ZJ4C/^Y
+MGY3_CG]R_\K#S_\P,S[_(R8G_XU_@/^3 at 7W_WLO%_TH\/?\<%A[_(1\B_UQ>
+M6_]K;VW_='%P_W]^??]W?'K_9VMI_UU at 8?_>V\__+2HI_S N-_\X/CK_?WZ+
+M_V9#BO^C>Z__12Q+_ZJ*O?][6)C_T\+,_^S:T/]*1D__/CY+_U-27_\]/$/_
+M*R,A_R\C)/\P)"7_/C0P_W)F7__7R,/_K)V=_RLC)O\@&QW_)Q\B_S,J*O\O
+M)"/_,24 at _XF'@/_=R\C_TL'!_R8A(?\D'Q__*",C_RLF)O\H(R?_*20H_R8A
+M)?\?&A[_<V- at _XR >_]Z=W+_IZ&?_^'3T/_*O[[_+RPK_R4@(O\L*";_Q+:G
+M_W5M:?_GWM[_.C4U_R<C)?\D(B7_4E5 at _T1'4_^ZJZ7_=6=D_^G;W/^5B9+_
+M0$I8_UQ?:O^CFY[_GYB8_^C;UO]#0T[_+C$\_RHF*/\F(B3_(Q\A_R0@(O\B
+M'B#_(AX at _R <'O\B'B#_%Q 8_RDN-/_*Q<O_O+2X_]S3T_]1563_(2DS_T!'
+M4_]L;G7_EXZ._]S-TO].45W_;&YU_W=M</_=SM/_1$=2_X:+B_]A7VC_/D%"
+M_\;!P?\G)BW_*RHQ_Y.1FO\W.$/_-SH__R8L-O^XJZK_S;F[_[>BEO_$J)K_
+MNYZ2_\&EE_^;BGK_U,C7_RXN0?\A(RO_.34W_XU\=O_:P[?_034V_R<C)?]@
+M7EG_A(5__XB(B_^4CY/_<'%R_TI34_]56EK_AXF,_\;(S_\N13+_3&]7_W6/
+M>O\H/RS_("PK_S$Z,_\W1#G_*#0]_S]"5/_*PL7_Y]C2_SL[1/\\0$__2TE9
+M_T9"2O\N)"?_+2,F_R<>'O\P*BC_8UQ6_][0S?^IF)C_)B,B_R$>'?\G("#_
+M+B4E_RXB(_\Q)"/_1$4__]?(R/_,N[O_*",C_R4@(/\D'Q__)B$A_RXG)_\K
+M*2O_.C\__Q\:'O]O7U;_?V]L_Y&(@O^DGYK_Z-G:_[JNK_\M*BG_(QX>_R0E
+M'O^OJ:'_DX>#_^+5U/\^.3G_(Q\A_R,@)O]$1U+_1$A*_[RLJ?^3 at H+_Y]C8
+M_X-]B_\W2%;_7V)M_YR4F/_:SLG_YM36_S8W0O\N,#?_*"0F_R<C)?\B'B#_
+M(Q\A_R(>(/\B'B#_(1T?_R,?(?\H'1S_1T-%_U%05O_ N<#_ZMGC_UU69_\]
+M0E3_0DE;_WUV@/^UJ;+_Y]WL_U129?]A87#_C(*+_^#3V/\\/T3_CI"._U!.
+M5_]"14;_SLO*_R at I,/\K*C#_B8B._S,T/_\N,#C_+C8Z_\2XN?_FUMK_T<&X
+M_].YK/_7MZ__S+"L_YF-AO_9Q-?_(S% _R4A*?\F*S'_>'AU_YV8DO^0BXO_
+M@'M[_XB!>_]G8%K_7%UD_Y60E/\H*BW_-T-'_SM$1/]F:VO_O\3*_SY(-_\V
+M0C/_+T P_TQC4/\Y5$/_#"D7_SY?3?\S3CW_/%I*_YBMH?_9T<W_2T=/_S0U
+M0/\M*C;_0SY _R8='?\E&Q[_)R @_R<C(?].3TC_T<?#_YR)B?\F'1W_(AT=
+M_R<@(/\L(R/_+B(C_S(E)/][>WC_U\K)_\BWM_\H(R/_)2 @_R<B(O\I)"3_
+M+B8D_ST_/?]=9&3_'QH>_WMP9?]H65G_LJNE_YN9E/_DU=G_MZNL_S,P+_\G
+M(B+_(2(;_ZZIH_^MGYS_Y=C7_S,N+O\E(2/_)" H_SD\1_\Y/C[_P*VM_Z>6
+MEO_NW]__8UUK_SA)5O]#1T__JZ"E_^'4S__FT=;_/3U*_R B*?\H(R7_)B(D
+M_R4A(_\B'B#_(AX at _R(>(/\B'B#_(AX at _R,;%_]14$__/$5,_XB*D?^+B8O_
+M24]1_SM'3/] 2%+_>'I__T9'3O\H+CC_'"4S_TQ55?]Y>GO_Q\')_SD^2O^+
+MB8O_3TM4_T)%1O_)QL7_+S W_RDI+/^8F9K_/#Y&_S<X0_\O-SO_OK*S_^+3
+MV/]W<FS_R;ZW_]_2S?^9CXO_FI6/_]['VO\U2%3_)B H_SI"3/\N,SC_:&EJ
+M_T(]0?\U-#O_4DQ*_[6IHO]=6%C_D(2'_SHU._\A*"[_0T5(_ZFDI/_$P]#_
+M'AP9_R,;'_\7&QG_'R47_SY#-_\Y0C7_+3HM_R,N&_\B0BW_:8^ _\;%Q/])
+M1$K_0#Y'_S4O-_] .3G_+B(C_R4;'O\K)"3_)2$?_R D'/_(P[[_ at W!P_S(D
+M)?\D'1W_)R @_S,J*O\M(2+_,",B_X>'A/_:S<S_OZZN_RDD)/\D'Q__+2 at H
+M_S K*_\V+BS_96=E_U]F9O\C'B+_=FQA_VI=7/^,BH/_C(^)_^?8W/^PI*7_
+M+RPK_R8A(?\K*B/_K::@_\BZM__KX-__,2XM_R,?(?\;%Q__,#,^_SQ#0_^_
+MKJ[_O["P__+EY/]N:';_259A_U977O^RIJG_X=7._^?2U_\_/TS_("(I_R4@
+M(O\C'R'_(Q\A_R <'O\@'![_(1T?_R(>(/\>&AS_*R,A_TE+3O]&4EO_6F5L
+M_T1/6O\Y0T[_*C$__S(Q3O\P)V/_-"IO_S\V?/];49K_&!5)_V5?B_]!/5W_
+M.4%+_Y64D_]85%W_3$]0_]G6U?\P,C?_,#$R_TE+2?\P,CG_,3(]_R<O,__#
+MM[C_Y-7:_WYQ;/_ JZG_W,G)_X9R;O]W;F?_U+W0_Q0G,_\E'R?_-T!._SD_
+M1_\A(R;_,"PT_R$D+_],2$K_>&IA_W9K9/^2 at 8'_1CU$_R,D*_\^/#__MJNJ
+M_]W.YO\G*S/_.TE8_R<K,_\V-C/_0D ]_S\[.?\;$Q'_+"TG_RTS+_]985W_
+MU=+1_S at Y0/\M,#S_.C=$_VED:/\L(B7_+B0G_R8?'_\F(B#_/3PU_]?+Q_]L
+M6UO_)A\?_R$>'?\G("#_+R8F_RTA(O\U*"?_CHZ+_][1T/^SHJ+_*20D_R0?
+M'_\H(R/_*B4E_S at P+O]E9V7_8FEI_R(=(?^9C('_P[*R_X-\=O]I9V+_Y=;:
+M_ZVAHO\I)B7_)2 @_S at S+?^]M*[_U,C$_^3<VO\X-33_)" B_R <)/\R-4#_
+M/D5%_\*SL__,O[[_[^3C_W%J=O]'4US_7EUC_[FKK/_IVM3_Y=#5_T5%4O\8
+M&B'_)!\A_R8B)/\C'R'_(!P>_R <'O\A'1__(AX at _QX:'/\U)RC_2T9,_U5@
+M:_]16V7_1TU=_R at H-_]@76__<FZ4_R,B.?\0$!__#0T:_PH)%O\;&3K_+"=1
+M_SDT5O\?(2C_IZ6B_UE57O]"14;_UM/2_RLM,/\X.CC_-3<T_S(U.O\P,3S_
+M*# T_\:ZN__GV-W_E7]V_\FPJ__ES,O_GH9W_WMQ9__7P-/_(#,__R$;(_\O
+M-3__8F9H_R4G)?\Q+"[_(2 G_TY&1/^(>6S_B'UV_Y&"@_]+1$O_(R4L_S4U
+M./^AG)S_S+S8_RDJ,?\4'R;_&QTD_R(B+?\E*C;_/$1._Q,:(/\E)"K_*RHP
+M_SDV//\?&!C_/3E"_SL[2O]*2EG_4U!6_RLC(?\X+"W_*AXA_RLD'O^7BX+_
+MV,S%_UE.3?\J)BC_(!L=_RHC(_\J(R/_*"$A_S F(O^)BH3_WM'0_Z22E/\H
+M("/_)Q\B_RHC(_\K)2/_+RDG_VUN;_]@96K_)B$C_Z21A?_ K:7_Q[NT_\6Y
+ML/_(N[;_JJ.C_R at E*_\E(R;_1C at U_[>PJO_?T-#_ZMO<_S0Q,/\D("+_'AHB
+M_S<Z1?]&3E+_9EYA_]/'RO][>7O_>GU^_W5[??]H96O_KJ.H__/AWO_8S=3_
+M24Y:_QD:(?\H(B#_)A\?_R8A(?\A'1__'!P?_R >(?\A'R'_'AL:_T8W-_]!
+M.T/_1T]?_U9;9_],3E7_#0,,_RL4'_\R(2?_%0H7_R$4'_\G%Q__&0T6_R$:
+M(O\8"QC_)A at G_R :(O^6BHO_1$)+_T=(2?_1T,__+"XQ_TQ.3/\9'!W_("4J
+M_SQ!1_\I+C/_2$-'_][6V?^)>&O_P:NB_^G3R_^8A';_?W1I_]>[S_\<+CC_
+M+"4L_R8Q//^1D(__*28E_S8P-/\I(RO_1S\[_YN+>_^1AG__D8.$_TE$2O\B
+M)3#_(R,F_ZZLKO_:P^#_)1PC_Q at 1&/\A$!K_(1HA_QL;)/\B*##_+3 [_R4D
+M*O\N+33_55%9_];,S_]".T7_.SM*_U-38/]33E3_,"@F_R$5&/\L("'_3D4_
+M_[^SK/_<S\K_1CX\_R at B)O\C&Q[_*2(B_R4>'O\I(B+_,RDE_XF*A/_>T=#_
+MH(Z0_R@@(_\G'R+_*"$A_RPF)/\P+"K_7U]B_UUB9_\E("+_L**9_V584_^F
+MGIS_IIR8_\.WL/^>EI3_*",E_RHB)?]72$C_E)*/_][2U?_BT]C_+RXM_R0@
+M(O\B'B;_-SI%_SU 0?^\M++_VLO+_^+>W/]56E3_-3,U_V!98/^CF)W_[=?8
+M_\[%S/\\04W_'!LB_V):6/]53TW_&A45_R,?(?\A'R'_'!H<_Q\='_\?'1__
+M6$U,_TA)4/]&4V3_04A6_R<=,/\]*RW_JY*!_W9:5_]:/3S_F'5N_X5A5_^$
+M8E__*Q at 2_Y:!?_^GD9/_% 8#_S<I)O]"1$S_24I+_]72T?\<'B'_+"TN_S]"
+M1_\P-3O_+#$W_R0G+/\<&AW_'!<9_XAV;/_!JZ+_ZM3,_Y6 =/]W;V7_U;W2
+M_Q at J./\L)2__(S ]_Z2 at GO\L*"K_/#<]_R\H,O] .#;_H)2%_X)Y<_^)?7[_
+M7UMC_R8I-?\[.D#_O+>]_\;!V?\R-3K_'" H_Q,2'_\;%QG_$Q,6_Q87'O\<
+M%R/_'!<=_Q(.%O\J)"S_T,3'_U-,5O] 0$__04%._U!+4?\L(B7_*Q\B_TQ!
+M0/\W+"O_6$U,_Y>/C?\T+2W_)!\A_R8?'_\K)"3_*R0D_R at A(?\R*"3_B8J$
+M_^'4T_^+>7O_(QL>_R4=(/\K)"3_+2<E_RTI)_]@8&/_4UA=_R<B)/^;D8W_
+M)!T=_QH7'?\C'B+_F(R'_Z:;FO]74%#_+" <_[ZRKO^!@X'_KZ6H_]K/U/\R
+M,#+_)2$C_Q\;(_\V.43_/C\Y_\F_N__BT]3_X];1_U%/1_\\.#;_S,'&_^?8
+MW/_UW]W_S,/*_S@]2?\;&B'_,BHH_S<Q+_]"/3W_&147_R$:&O\E'A[_(!P>
+M_R =(_]Z<G#_/#Y%_T=79_\[15/_.4)"_QL6,O]I5H3_%0P=_Q,4&_\2#A;_
+M%A 8_Q01'O\7%AW_"@86_S$K0?\9%1[_)1P<_S]#2_])1TG_UM'1_R ?)?\N
+M+3/_,3,Z_RXR.O\P-3O_)2 at M_S N,?\A'![_BWMR_\&MI?_IU-#_F(5[_W%M
+M9?_+MLS_%RDW_R8>*/\?)37_Q[^]_RTH*O]$/T7_+2DQ_S at Q,?^?E8O_A81]
+M_XV*B?]F9F__*BT^_T5!2?_$O<?_U<GB_T=+3?\G,SS_%!XM_S P/?\>'RK_
+M&ALF_R0=)_\8$1C_&1,;_RXG+__DU=G_1T!*_ST]3/\V-D/_3TI0_RH>)_\I
+M'"'_+2,?_R\C)/\:%!C_(AP at _R(='_\J(B7_)R @_RDB(O\K)"3_*2(B_S0J
+M)O]E9F#_X-/2_X%O<?\E'2#_)1T at _RPE)?\J)"+_*R<E_U]?8O]97F/_'!<9
+M_\B\N/\E'A[_(R F_R(<(/\5#@[_&108_VAC9_\H'AK_KZ":_\&_M__$N+/_
+MP+*O_S4T,_\C'R'_&Q<?_S<Z1?]-4$K_R</'_^37XO_FU]C_95]5_VA at 7/_C
+MU-C_\M_?__/=V__%O,/_/$%-_R4D*_\^-C3_65-1_VQG9_\A'1__<6=C_RL@
+M'_\E'R/_'ATD_Y.'B/\Q,#?_04Q=_SD]3/\\2$W_!@LG_V9CF/\Y-F;_&! L
+M_QL,'O\>$!G_'Q8=_QL9,_]75X#_4E)Y_P\/'O\E("#_+C0\_T-!0__;TM+_
+M*BDP_RXM-/\V.$#_*RXY_R8J,O\A)"G_*RDL_SHU-_^_L*K_UL.]_^W8UO^>
+MBX/_A8!T_]C S_] 3E?_+ATC_W9VA?_6Q\?_.# S_V]L<O\U-#O_-3 R_[NS
+MK_^0EY#_041%_UI=:/\S.$K_+RLT_Z2=J?_4Q-C_13Y&_R,@)O\A'RC_-S="
+M_SDZ1?]35%__)B8O_R at H,_\?'RC_2$!#_][/S_]#/TC_0T-2_SPY1O] .T'_
+M)!P?_RD@(/\R*2G_,RHJ_QD3%_\:%1O_'AD?_QT8'/\;$Q;_(!D9_R4>'O\H
+M(2'_,"@D_Y&(@O_7R,/_:EY:_R<B)/\A&Q__*" C_S$H*/\D(A__7F-C_V)F
+M:/^*A87_4$A&_R ='/\?'1__(QP<_R4=(?\A&Q__)B D_RHB)?]/24?_D8Z)
+M_YR7D?^TL*C_-C$Q_S K+_]_?H7_-#A _V1F9/^[MKK_W-;>_]34T?^]P[__
+MJ[&S_]31U__LV]O_\MS4_[FUM_\_4%;_%20D_W=X>?^ZK:C_75-6_QX<'_\=
+M'!O_D(J(_T4Y-?\C'B+_I9F4_X)Y>?\O,3G_,C0\_RTU1?\Z15;_" \C_R4=
+M./]J9)[_44Z0_S at X<?\8&TK_&A(M_R\I/_\4$23_%Q<B_Q at 3%_\;&![_(2$D
+M_ZBHJ_\K+C/_+"XS_S8Y/O\L+C7_(R at N_QXA)O\E)RK_,S$S_\"VK/^RGY?_
+MWL;#_Y.!=_^%=&[_K9N?_VQJ;/^">W7_Y-?6_^/.S/^PGY__N[*R_R<E*/\O
+M*B[_-#$P_R F*/]?9F;_+#0^_S(W2?\P,#O_T\O5_^'4YO\R*C3_)2 D_Q82
+M&O\9&!__'QXE_QT<(_\@'R;_'AXK_R<H+_]E7E[_W]#1_S<U/O\P,#__1#Y,
+M_T([0O\M)B;_)1X>_R4>'O\B&QO_,"HN_QD4&/\5$!3_%Q(6_QH2%?\B&QO_
+M*R0D_RLD)/\V,3'_)1T at _WYR=?]A65S_)!\C_R$<(/\K(R;_+R8F_R<F)?]=
+M85__?(!^_YJ3D_\O)2C_'QL=_QD:&_\?&AS_(!H>_R(<(/\?&1W_(QTA_Q at 3
+M%_\<%QO_'1H9_U123_^/BHS_3TI._Q\>)?\]04G_5UQ8_\#"Q?_3T>'_O+?)
+M_R,<+?])0U'_U<[8_^K=W/_JUMC_IYRI_TI"5O\\(S3_=VYU_\.TKO]+0$7_
+M(1XD_VQK:O\P*R;_FXZ-_RDC)_^XK:;_*!X:_VEH;_]$1DW_&Q\G_S= 3O]&
+M3V#_/T)._Q -&O\) !+_(A<N_PX'&?\I)##_&QLD_S$T0/\H+SW_)"8M_QD9
+M'/])24S_%!07_QL=(O\I+#'_+S(W_RXQ-O\D*2__(R8K_R8H*_\]/C__OK2I
+M_[.?E__FS\K_DX%W_XY[<__!LJW_:5];_UY22?_NW]K_Y-/3_]?(R?]%0$3_
+M.3H[_R,A'O]A7UK_*S N_RDM*_]'25'_-#1'_S0O.__2QL__V,O=_RXF,/\Q
+M+##_'QLC_QL7'_\B'R7_'1H at _S M,_\Q,3K_%Q0:_S<P,/_CU-G_.SA$_S\_
+M3O\^.$;_,RPS_R8?'_\G("#_+28F_R8?'_\V+C'_,BHM_S8N,?\B&AW_'A<7
+M_RHC(_\I(B+_*2(B_R4@)/\>&R'_'!D?_QT:(/\H(RG_*R8J_RLC)O\M)"3_
+M)",B_V=E8/]F85O_*!T<_[*GKO]S;77_)B<N_QT<(_\B'2'_(!L?_QX9'?\A
+M'"#_(QXD_R(=(_\?&B#_&A0<_R4?(_\A'"#_)B4L_RLO-_^BF9G_IZFL_WR9
+MI?]<AY__0#M5_VE3:/^OGJG_:%YA_]S/SO^-CI7_.TM5_S="0_^.CY#_U<; 
+M_SXS./\G)"K_F).._Y^4D_^6BHW_'AD;_\:ZL_\Z,"S_O[[%_T%#2O]>7%[_
+M-#8]_SQ'4O\\2U'_'RD?_T5(0O^5E)O_*2\Y_UQ?9/\;("7_+C9 _Q at B,/\]
+M1DS_-#H\_T!"1?\9%QK_%QD>_RTP-?\I+#'_+C$V_R$F+/\B)2K_+C S_SDZ
+M._^^M*G_K9F1_^++QO^,>G#_F(A^_\"TK?]_<VS_J)6)_^?7U/_.OL+_13H_
+M_R\R-_]!1D;_0T$\_XV-@/]E9U[_*BDH_RTM./]W=(;_1C]'_[NQM/_>T>/_
+M*" J_R ;'_\A'27_(ATC_QL6&O\?&A[_*",G_QL8'O\9%1?_I9Z>_]K/UO]#
+M0T[_/#Q+_T Z2/\Q*C'_)R @_R,<'/\K)"3_)A\?_RPC(_\U+"S_."\O_S G
+M)_\R*RO_*R0D_R<@(/\K)"3_*R,G_Q40%/\5%1C_&Q49_QP5'/\<%QO_(!@;
+M_RLB(O\C(2/_<FUG_T<]+_^ADY#_TL70_V);9_\9&B7_'B$L_S,R./\E(R;_
+M(!XA_QX<'_\C(!__(1X=_R$<(/\A'"+_)A\F_QX9'?\<&R+_+S,[_YJ1F/^<
+MH:'_-D]._T5?:/\Z-T3_DX.+_[FJKO]A6F'_Y-WG_WZ!DO]!25W_+BHR_YN6
+MFO_/P+K_-RPQ_RLH+O^VJ:3_O:ZO_WUR>?\H(R/_Q;FP_YN1C?_(Q\[_3$Y5
+M_Q84%O]_ at 8C_0TU;_SI&5?\=+"C_3$Y3_X^*G/\H,D;_'R,E_TI+3/\>("C_
+M*"L\_SL]1/\T.#K_0DA*_SM!0_\N,3;_+C$V_RDL,?\J+3+_)BLQ_R at K,/\T
+M-CG_)"4F_\*XK?^WHYO_X,G$_XMY;_^@C87_NZNH_V-;6?]32T'_YM30_^+-
+MR_^6BXK_-3HZ_S,X/?\A'AW_D(J"_WMY<?\F)BG_)2 at T_RLN/_]34EC_A86"
+M_^'/X_\Q+##_(B,J_S(N-O\E("+_% \3_QX9'?\Q+##_0S]'_R at J+__,O\3_
+MV<C2_UA6:?]&25O_0T-._RDA)?\J'Q[_-"DH_RXC(O\K(!__,"@D_RPG(O\T
+M+RK_*",>_S(J)O\R)R;_+R0C_RXC(O\I(B+_*B4E_QP7%_\7$A+_'1@:_QT8
+M&O\=&!C_*R8F_QT6'?^%>G/_=FE<_T<X0_\R*3K_)1XH_QX:(_\:&2;_&1HE
+M_Q at 8(?\I*3+_,3$Z_RLL+?\C(B'_(1\A_Q\='_\?'2#_'!H=_Q<5&/\U-#K_
+M%Q0:_Q45&/\?("'_+RDM_R,D)?]32D3_A79P_VUT>O]7D:?_2(BG_S!PC_\Z
+M9HC_*C!&_W!:6_\W(B?_)"$G_[:OJ?_0N;K_8519_R\M+__-P;C_:FMV_]K1
+MT?]!2UK_4$Y1_\"^P/]&4%__/DI9_Q<C(O]&1$'_A(*2_R0M/O\L-37_)2(=
+M_TM*5_\L-4/_.3Q!_SH^0/]%24O_,SD[_S<\0?\Q,S;_$1,6_S$V//\D)S+_
+M(R8K_QL@(/\P,S3_LZZN_]7.SO_9S]+_U]#0_^#0U/_DTM;_P;"V_[*CI__O
+MU=C_WM/8_VMF:/]T;VG_+"XK_R<F)?^!?WK_:VED_T9%2_\I*SK_&ATN_S$U
+M-_]%1D?_XM'I_S0S.O]57&C_(!\L_QP;(O\B("G_(1\H_R$?*/\R+C?_&QXC
+M_]#!Q?_9R-+_7%ML_TQ/8/\_/TC_*R,F_RL@'_\L)"+_*2$?_RLC(?\L)B3_
+M+RDG_RDC(?\F(![_)!P:_RTE(_\N)B3_+24C_R\H*/\T+R__(QX>_RTH*/\U
+M,##_'AD9_RTH*/\D'Q__)"(D_XM^>?]%-SC_)C15_Q(>._\8&2K_%A,?_Q85
+M(O\>'2K_&AHE_QX>*?\8&"/_%!,:_S(Q./\?'B7_(!\F_QX<'_\>'!__'!H=
+M_R<E*/\I)RK_$Q$4_R$?(O\F)"?_'!LA_V)64O_#LJS_8VEK_QXY.O\*)"?_
+M'D=,_V2%D?]:?8K_HZZI_S8Q,_\M*"[_MKJR_\RZO/]'/$'_*2<I_\J^M?]$
+M2EK_U<K)_SA%5O^+B8S_R,/#_T),6_\W0U+_*C8U_TQ*1?^#A93_)"Y"_R8R
+M,?]!/SC_3TY;_QLE,_\P-#;_*BXP_R\S-?\G*RW_*2XT_R\Q-/\K+3#_+3$Y
+M_RHP.O\A)"G_)"@F_S4U./_(NKO_QK2V_\&OL?^]JZW_K9^@_Z&5EO^AE9;_
+MF8V._Y-X?O]*14G_-C0V_U-.2/]N;FO_&AD8_R8E)/]B86#_6%=>_R at I.O\D
+M)SC_-#@Z_TA&2/_AT^__*RTU_TA17_\N,#__&ATH_R8J.?\=(3#_$!0C_U=0
+M6O]I:V[_OK"Q_\2YP/]E96[_3E!8_T9)3O\J)2?_)1X>_R$:&O\C'!S_(QP<
+M_R at A(?\A&AK_*2(B_RXG)_\Q*BK_)A\?_RHC(_\M)B;_*B4E_R<B(O\M*"C_
+M*R8F_S0O+_\I)"3_)2 @_R$<'/\?(1__3#\^_SPU/_]#9XS_5&R-_PT4*/\7
+M%"#_%Q4>_Q at 5(?\3$!S_&!4A_QT:)O\:%R3_'QPI_QD6(_\@'2K_-#([_RDH
+M+_](1T[_'QXE_QX<'_\3$13_#PT0_PL)#/\7$AC_0C@[_QX4$/\8&A?_%A<0
+M_S8U+O\A.#/_35]C_S]!2/_7R,/_,B<N_QP?)/\X0CC_95E:_S4O,_\X-#;_
+MRKZU_R4K.__/Q,/_-D-4_YF7FO_-R,C_0$I9_SE%5/]J<W/_<W!K_WU_CO\Q
+M0%/_-$ __T=%/O]656+_(BPZ_SY"1/\Y/3__/T-%_SU!0_\\04?_-SD\_SY 
+M0_\P-#S_)RTW_R$D*?\E)R7_HIR at _^;1S__[XMW_^-_:__KAW/_]YN'_^N/>
+M__CAW/_YXMW_]-C:_VUK;?\F)BG_.#4P_Y.2D?\E(R7_(B B_R$?(?]34EG_
+M+B] _R0G./\V.CS_3TU/_^31Y_\I)BS_,#9 _S8S/_\C(RS_$Q0?_Q,4'_\[
+M/$?_*R0L_RXL+O\_-#/_6E)6_UE97/\T-SC_+3(R_R8E)/\B'1__)2 B_R(=
+M'_\E("+_+"0G_R0<'_\I(23_*R,F_RDD)O\C'B#_*",E_R8A(_\F(2'_(1P<
+M_R0?'_\D'Q__&A45_R(='?\G(B+_)!\?_R(:'O\C'!S_(1\B_Q at A,O\0%RG_
+M'AXG_RPI+_\@("G_'ALG_Q81'?\7$A[_%Q(>_Q84'?\9%R#_'1LD_Q\=)O\:
+M&B/_%Q<@_QP<)?\S,SS_5U1:_RHH*_\0#A'_&A@;_V!@7?]X>G?_/#] _Q47
+M&O\>("7_0T!&_QDF)_]96U[_Q;*R__+5T?\W)R__(R,F_XF+ at O_+M[G_-2HO
+M_S$O,?_.PKG_;W6%_\S!P/\[2%G_D(Z1_\K%Q?\T/DW_+SM*_WE^?O_;UM'_
+M;6]^_S)"5?\M.SK_5%)+_U)17O\R/$K_&1T?_Q49&_\R-CC_.CY _SH_1?\]
+M/T+_3$Y1_S T//\E*S7_(20I_VMH9_^[L+7_T\C'_\W#O__4RL;_W-+._^79
+MU?_HVM?_Y=?4_^K<V?_VXM[_7%Y<_VQN<?\D(2#_.SD[_Q\=(/]&1$?_&A@;
+M_UM:8?\>'S#_)"<X_S0X.O^"@(+_Y=OL_S0L-O\M)R__'ALA_RHL,?\C)2S_
+M(1\H_RXI-?\?&B#_'AH<_Q at 3%?\:%AC_)B$C_R4C)O]!0$;_)R,E_Q at 3&?\7
+M%1C_%A<8_Q06&_\0"P__&A08_QD3%_\9$Q?_$@P0_QH4&/\3#1'_&Q49_QL3
+M%_\1"0W_$0L/_Q$,$/\6$1/_'!<9_R,>(/\C'B#_'1@:_R,<%O\<'B/_*",I
+M_QX:(O\='2;_'1TF_QP=*/\L*S+_,C$W_R ?)?\8%QW_&A<=_QH7'?\9%AS_
+M%Q0:_Q<7&O\='"+_)B4L_QT:)O\7%AS_7EYA_S P,_],3$__+BHL_QT@(?\Z
+M0D3_&1L>_Q47'_\<'"7_$Q87_V-?7?_$O;?_ZMK7_RHF*/\G(1__LJ:?_^/+
+MSO\P*"S_,"PN_\.WL_]N=W#_N[6Y_S]27/^0D9+_PL3)_S<]3?\Y057_>H&'
+M_]3,RO]H:'?_/$95_SQ"1/]T;&C_3$M8_RXY1/\1&Q+_(B0I_S at Z/?\5&AK_
+M)BLP_SD\0?])2$__,C8^_S0U0/\@)2K_JZ*B_\:WO/^LFY7_JI2+_ZB3A_^?
+MBWW_EXA[_Y>%>_][:&#_BGQS_^?2S?]C8V#_JZNN_TI%1?](14O_(!L?_X)_
+M?O]I96?_9%]C_UU=;/\C)SW_/#U$_Y60EO_IW.?_&Q$:_R8?)O\@&Q__(ATC
+M_R0?)?\G)"K_+2HP_Q at 3%_\=&1O_%Q,5_Q41$_\7$!#_'!<=_ST\0_\D'B+_
+M0C(Z_S$@)O\A%1;_)AL at _QP3&O\=%QO_34=+_QH4&/\7$17_'!8:_RHD*/\<
+M%AK_'A,8_Q0,$/\<%AK_%Q(6_Q81$_\=&!K_)R(D_Q\:'/\;&1S_'AH8_Q(:
+M*O\6'3'_%Q<B_QL;)O\;&R;_&QLF_QP:(_\=&R3_'APE_RDG,/\N*S'_,"TS
+M_Q<4&O\3$!;_$Q,6_Q(2%?\5%!O_%!(;_R(D*?\^0$/_*BPO_RHL+_\N+C'_
+M5%99_T-%2/\7#Q/_%Q,;_S0S.O\Q+##_2T9&_ZFBG/_KV-C_*B8H_RTJ)?_)
+MO;;_Z]/6_RTE*?\R+2__LZ2I_WR">O^[L;O_0%1B_Y.(C_^^O<K_0E=I_T=R
+MA/]'?8?_M+_&_V!8;/\N0T__;GEZ_][6TO])2%7_+3A#_RPT*O]-3E7_.SU 
+M_R\R,_\G+#'_.3M"_TI(4?\R-4#_*RPW_QLC)_^<D)'_S+W"_\.OI_^^I)?_
+MO:.5_[^FE?_&KIO_Q:N8_\2FEO_$K:#_W\;!_W%P;_^SL[;_44Q._UE36_\F
+M(27_D(Z+_XB$AO]F86/_9F9U_RLN1_\R,CO_J*"C_^W9X_\B&"'_'1PC_QT<
+M(_\I(RO_&!(:_Q at 3%_\?'1__)" B_QX:'/\<&!K_&A88_QX7%_\E(";_/3Q#
+M_R8@)/^CF9S_M*ZL_XF&@?\L*2C_'A@<_Q0.$O^OJ:W_&Q49_W!J;O\H(B;_
+M6U59_S<Q-?^9CI/_9%Q at _X)\@/\3#A+_%1 2_R8A(_\G(B3_'QH<_PX.$?\6
+M$A3_$AXS_R at U4O\2$1[_$@\;_Q01'?\7%"#_&Q at D_QX;)_\?'"C_&A<C_R >
+M)_\?'2;_&A at A_Q(0&?\1$!?_$A$8_Q,2&/\4$QG_(2,F_QX@(_\Z/#__.CP_
+M_Q0:'/\W.3S_4$M/_Y2)CO]K9&O_ at GV#_T) 0_]C86/_I:. at _^G:V_\K)2G_
+M+BXK_\W!O/_RVMW_)AXB_RDD)O^PH*C_P,"]_[JIN?\S3%W_9G5[_V^$D/]N
+MAIG_67B*_W*0IO]OD)S_8HV?_R@\2O^"A(?_V-#,_TY-6O\Y1$__7%Y5_SH[
+M0O\S-SG_<G-T_S S./\\/D7_03](_S(U0/\K+#?_)"XR_[NRLO_-P,7_P*RD
+M_[>=D/^SF8O_PZJ9_\2JE?_-KYK_PZ22_\FPG__ at R<3_='5V_ZFJL?]?6E[_
+M9F)J_R4@)/^5DY#_C(B*_V5 at 8O]U=83_+3!)_S P.?^OJ*C_Y]3D_R,=*_\P
+M.DC_&R R_R$F./\8&RS_%14 at _QL6'/\9%1?_'!@:_R<C)?\U,3/_0SP\_S8Q
+M-_\Q,#?_+B at L_V5=6_]P:6/_3$D^_TY+1O\B'1__&1,7_Q<1%?\4#A+_&!(6
+M_R,=(?\9$Q?_%A 4_Q\4&?\7#Q/_%Q$5_QH5&?\6$1/_)!\A_R0?(?\D'R'_
+M$A 2_QP7$O\,$1W_#Q,B_Q(.%_\8$Q__%A$=_QP7(_\<&B/_%Q8=_Q85'/\9
+M&!__'1TF_QL;)O\;&R;_%14 at _Q(0&?\3$1K_%A4;_RPM+O\N,#/_&1L>_R,E
+M*/\F*"O_("@J_RHN,/\M+3#_*R\Q_RDJ*_\O+S+_'B(D_T-(3?]%2DC_T+_%
+M_R\F+?\H*2K_KJ*=__7=X/\I(27_+"<I_[RPL__2T<K_LZ*M_TIG<O]RAHK_
+M25QD_VYWA?]24E__5%QL_SY*4?]XDJ7_3FAV_WJ @O_;T\__:&=T_S [1O^@
+MGI;_0#]&_SM!0_^^NKS_+"\T_SD[0O]+25+_*RXY_R4H,_\C+S/_ at 7Q\_\>\
+MP?^GDXO_O:.6_["6B/_!J)?_R+&?_\VRH?_!I9?_R;*F_]O'P_]:7%__KK"X
+M_UA56_]U<7G_)R(F_YN9EO^/BXW_75A:_VMK>O\I+$7_-#0]_VA=8O_8T-[_
+M$Q8G_SE'7/\1'CO_04QE_R at O1?\;'2S_'!PG_R$:(O\M)BW_,"LQ_S,Q-/\T
+M+S/_/T%&_S8X0/\Z.#O_V,G)_\R]M_]Y;F/_>6UH_QH5%_]@6U__44Q0_Q<2
+M%O\B'"#_)1\C_TY(3/]:5%C_@'I^_S J+O\6$!3_-S$U_QD7&?\<&!K_(AT?
+M_Q\='_\4$!+_+BDC_SU%3_\4"AG_$Q$4_Q,0(O\A)"__("$H_QP<)?\7%R#_
+M'!PE_QH:(_\B(BO_&QLD_Q84'?\7$QS_%1(>_Q04(?\5&2'_2$E*_TE+2?\J
+M+2[_'!\@_QH='O\8&A__&!<>_QH6'O\>'R;_)B4K_S at Z/?\<(2'_.CL\_[RY
+MM/_GU=G_)B H_RXR,/_"MK'_]=W at _RHB)O\O*BK_LZRL_]C+RO^BG:'_5VMO
+M_YNBKO^7F:'_)R8L_QX=(_\B)B[_969M_[&VPO]&76G_ at HN1_]O5T_]#1E+_
+M.D12_[.NJ?]#14S_.SX__[Z\O_\Q-CS_.SU%_T ^1_\G*S/_+C X_R4M+_^!
+M>7?_NZ^R_]S*QO_>R\7_WLW'_]W)P?_BR<+_T+NV_]C$P/_1NK7_W\G+_U-6
+M6_^EJ*W_5U)8_XJ&CO\E("3_HI^>_YN6FO]E8&3_<&Y^_R<H0?\S-3W_;G-Y
+M_^?:V?\^.#;_-C4T_SDW.?\]/T?_'B$L_QL:(/\C'R'_,BLK_S4P,O\[-SG_
+M.SHY_S,S-O\M+3;_-SE!_RHH*_]J75S_6D](_W9N9/^&?WG_'AD;_Q at 3%_\=
+M&!S_'1@<_QL5&?]<5EK_'A@<_QH4&/\>&!S_)R$E_R8@)/\B'"#_%0X._YZ1
+MD/\S)"3_)!D8_R<9&O].1T'_.$-._QH3)/\4%1;_%Q0G_RDL-_\Q,S;_)"0O
+M_QT=*/\A(2S_'!PG_Q06'O\0$!G_$Q$:_Q,/&/\6$R#_%QHE_R at M,_\\.CW_
+M/STZ_ST[-O\].S;_/STX_S$O,?\>'!__&A@;_QP:'?\<&AW_%Q<:_Q@;'/^+
+MAX7_T,C$_^G7V_\I)BS_-3,N_]3'PO_OU]S_+B8J_S$L+/^VL;'_Z=G=_YB8
+MF_]89FG_G*2N_Q,6&_].3DO_>WUZ_U=86?\K+3#_*3,W_SA'3/^#B(W_V]73
+M_T!$4_\Y0U'_BX:!_SY#2?]&24K_R\G,_S$X/O]"14K_/CU$_RDL-_\U-S__
+M*# R_\&VM?^^K[/_T]'._T-#0/]!1T/_6EI7_\S*Q_]/44[_6%U9_UM=6O_0
+MO\7_2DU2_[2WO/]J96O_G):>_R8A)?^FHZ+_G9B>_VIE:?]U<8'_*2I#_S(T
+M//^)A(C_XMCA_R at C)_\>(![_,S$L_S<S,?]!/3O_3TI%_U502O]&03O_03XY
+M_T9$0?\\.CS_*BDP_S(S/O\X-C__,BLK_]7(Q_^HGYG_=G!H_VED7_\=&!K_
+M(!L?_Q(-$?]P:V__'1<;_T$[/_\;%1G_65-7_Q80%/\E'R/_:6-G_QT7&_\=
+M'AC_96-<_Y")@_^;FI/_C7MX_Z:<F/\_4%[_%1,C_QD;'O\@'C'_&1DB_R4C
+M(/\M*S3_+2TX_RLK-O\E)3#_("$L_R8G,O\4%!__$Q <_Q at 7*/\<'2C_6UU@
+M_R,B*/\D(A__-C,N_T9#/O]"/SK_/CPY_QX>&_\_/SS_0T- _R0B)/\?(1__
+M/T1 _V=D7__-R,/_ZMO at _R<G*O\]-2O_U\; __/;X/\G'R/_-"\O_[BUM/_A
+MU=C_EY>:_TM56?\6&!__)R at I_VUL9?]+3T?_8F-<_RHK)?]?9&#_45E6_XJ.
+MD/_AV]G_/T-2_SQ&5/^ZM;#_049,_T!#1/_+R<S_.$%!_T%&1O]*25#_,30_
+M_R at J,O\D+"[_P[BW_[ZOL__/QL;_0S\]_SLY-O].2$;_V-#3_T='1/]04TW_
+M249%_]/"RO]15%G_MKF^_VED:O^DGJ;_)B$E_VIG9O^AG*+_<6QP_W=S at _\L
+M+4;_+C X_V%@7__MV^G_(A at B_P\0%_\0$A?_&A47_QD4%O\5$!+_&106_R :
+M&/\R+BS_2D5)_SLW/_]+2%3_24E6_SDU/O^YK*O_:UU>_TM#/_]O:F3_7UM9
+M_Q<2%O]%0$3_HYZB_QD4&/\E'R/_24-'_X1^@O]O:6W_9V%E_V=A9?^ >G[_
+M'A@<_R\J)/^IGY7_CX%X_VMC6?]Q8%K_DHV(_T!39?\A&2?_&1D<_R,A-/\@
+M(2C_9F%;_Q82&O\?'RK_%A8A_QX>*?\G*C;_+3 \_Q04(?\4$R#_%QDN_Q\A
+M*?^'A(/_%A@=_QD8'O\G)2C_,"XQ_QT;'O\>'1S_+2PK_S0S,O]"04#_+BTL
+M_RLM*O]X?7?_H)Z7_XZ(AO_ at U=K_&QP=_VU<3__=R,/_[=7:_S$I+?]!/#S_
+MM;>U_^+8V_^&A(?_,CH^_QT;)/]J9&C_:&5 at _U%64/]I9V+_1$$\_V-?7?\E
+M(B'_B8F,_^/=V_\\0$__/$94_[>RK?]!1DS_/4!!_Z">H?\P.C?_0$9"_TE(
+M3O\V.43_*2HU_R(J+/_$N;C_Q+6Y_]G-R/]-1#W_03LS_U)'0/_DT]/_7%)(
+M_V=D4_]72D7_U<#&_T1'3/^!A(G_=G%W_UA26O]54%3_'1H9_QT8'O]O:F[_
+M<6U]_R at I0O\L+C;_:6QM__G8]_\6$17_(1$5_QT/&/\4$A7_%1 4_Q at 3%_\7
+M$AC_#@D-_S4P-/]&1$?_4$U3_T9'4O]6663_3$=-_\:UK__GTL[_C']^_QL9
+M&_\5&!G_%1,5_Q at 4%O\:%AC_%A(4_Q$+#_\." S_(!H>_QL5&?\4#Q/_(!L?
+M_Q0/$_\>&1W_/SXS_[VTIO_ LJG_O+:N_\>_N_^JFX[_2%MY_U-05O\:'Q__
+M*R9*_QP>(?]85D?_'A at F_Q\=)O\9%QK_%!0=_R4L,O\3%B+_# T4_QH:)_\,
+M&D'_1D91_\"YL_\F*";_'AD=_Q81%?\6%AG_$A46_R4F'_]64T[_(Q\A_R$C
+M(?\D*27_/T1"_Q\C(?]345/_;FQI_XJ)B/\='QW_BWAN_\BTL/_6Q\O_(QTA
+M_UI44O_#O;O_XMK=_X%^A/\H*C'_(!LA_W%E8?]^<VS_.CQ!_Q<2&/]85D[_
+M-#$P_SHX._]M<7/_V=;5_SH_4?\Q.TG_PKJV_T!%2_\Y/CS_R,/#_R<K+?\^
+M0$/_1D=._R at K-O\R-C[_%R(C_\.YM?_&M+;_U\[(_U)#/?]"/C;_4$@^_^S4
+MT?]P853_=G)=_V%;4__5P\?_14A-_SD]1?\V,SG_;VUP_R4A(_\>&AS_(!P>
+M_W5S=O^ ?XS_+"T^_S$S./^PJZ__^-?H_Q@:&/\6$AO_& X7_Q02%/\5$!3_
+M$PX2_Q at 3%_\Y-SK_/SU _TI(2_]%0T;_0$))_T-%3?]'0DC_T+^Y_^W7S_^$
+M=7;_(!@<_QP7&?\7$Q7_&!06_R\K+?],2$K_6U59_TQ&2O\4#A+_(QTA_S$L
+M,/] .S__&A49_Q0/$_^ZKZC_Q;&I_Z")A/^ ;&C_:V-F_[BFF/\S1F3_&!D:
+M_Q(7'/\C)E#_%1@=_UQ82?\:&"O_'1TH_QH8&O\6&"#_(RPR_Q86)?\4%A[_
+M&!PQ_R(Z:_^4EZC_I)N;_T,^0O\<%AK_03Q _QH8&O\;'1K_2DA _Z&>F?\:
+M%AC_+"LJ_T! /?\R+R[_$P\1_QH5&?\>&QK_*"PJ_VEI9O]B4DG_MZJE_\*Z
+MOO\E("3_9U]=_Y^7D__BWMS_<7-Q_TM/4?\V.#W_7UI5_V)64?]/35#_6%)6
+M_TU-0O]+2DG_&1P=_W5^?O_=V=O_/$!5_RHT0_^_M[/_1$E/_S@].__/R\G_
+M*2LR_ST^1?] 0$G_*RXY_R8L-/\9)"7_P;:U_\*RMO_CU,[_=5U4_U!&._];
+M44/_Z]3/_W]L8/^+ at G'_9F!8_]3"QO]!0TK_.CY&_T5$2O]^?'[_(Q\A_Q\;
+M'?\B'B#_:6=J_Q at 8(_\5%R;_3$Q/_[*ML?__[>#_+!\._Q\<(O\=&B#_(1X=
+M_QX9'?\9%!C_&108_Q84%_\@'B'_+RTP_S<U./\Y.T#_/T!'_U915__&N;3_
+M[MO3_XI\>?]%-CK_*1<9_Q8/#_\3#Q'_&!06_UQ86O]K96G_75=;_TA"1O];
+M55G_55!4_Q40%/\>&1W_$@T1_XV'A?^MI:/_O;BS_[BPK/^EI*O_PK&D_U)B
+M?/\C(!__$QDC_T9;B_\9&R/_7E-(_Q,8,?\:'"O_$A(5_Q8=*_\J,SK_%1<L
+M_PX5*?\0&SW_(T%Y_Z.IO_^*@H;_*2$K_Q43%O]F963_(B(?_S,T+O^.A7__
+MHIR:_Q<5%_\J)R+_B(!\_XR%A?\?&AS_&1<:_Q84%O\4%!?_&!44_S$F'_]E
+M75G_6%-7_S0R-?^(@'[_55%/_X""?_^&B(7_-SPX_Q@;(/]C8V;_;FEI_["L
+MKO^]NKG_=W=N_SL^0_\X0T3_?(B%_]S8VO\Y/5+_,3M*_[ZVLO\S.#[_/D-!
+M_\_+R?\R-#O_0D-*_S<W0/\M,#O_)2 at S_R,M,?^\L[/_MJNP_^#3QO^(;F#_
+M4DD[_V)83?_QW-?_CWIN_YR/?O]I8UO_U,3(_S at Z0?\X/$3_/CU#_Y".D/\@
+M'![_7EI<_S0P,O]M:V[_)B8Q_R$C,O\S,S;_KZJP___NUO_'FW/_/AX$_QP5
+M%?\9%!C_%1 4_Q<2%O\6$17_$Q$4_Q84%_\5$Q;_%1,6_Q02%/\4$QG_&108
+M_[ZRKO_HU<W_:EU8_R4<'/]*0#S_2D5%_QD5%_\6$A3_&A88_U%+3_\U+S/_
+M)B D_SDS-_\6$17_%Q(6_QD4&/\:%1G_&QD<_Q49&_\Z1$'_4590_S(W0__L
+MV\[_-4-8_R<C(?\0%B;_-%J-_QL;*O]30#C_'2E&_QD<+O\:&AW_%1TQ_QLE
+M,/\=(#G_$!PY_R,W9/\@/7?_K;'&_Y*-C_\M+S3_%AD:_W!R;_\T-2__2$8_
+M_\N[N/^OI:C_*2LI_T [-?^[LZ__B82$_WU^?_\I+2__&Q\A_QX;(?\G(B3_
+M2$,]_QD:%/\1$13_*2<J_X9^?/\O+3#_$Q8;_Q 3%/\J+BS_9V9E_S Q,O\=
+M("7_.3Y$_S4S-O\M+"O_0$-5_S%!2O^"D(__W=G;_S T2?\V0$__O[>S_S4Z
+M0/\\03__S<G'_R\Q./\V-S[_/CY'_R<J-?\E)3+_&2(H_X>"A/^UKK7_WM/(
+M_X9Q9?]34D?_+2PE_]_0R_^.>6W_J9B(_VAD7/_2PL;_/T%(_S8Z0O]#0DC_
+MF):8_R(>(/^5D9/_EY.5_VIH:_\E)3#_(2,R_S0T-_^MJ*[___+I_]FC:O_ 
+MEVW_?5PX_QT;%O\1#AO_%!$>_QD1)?\T+D+_$ T9_Q84%_\8$QG_% T5_Q(/
+M%?\K*"?_Q;NW__#6S?^KHIO_:V5C_R,9%?\E("3_&10:_Q,.$/\<%Q?_$@T-
+M_R(='_\?&!__(QPD_RH?'O\T)"'_.",A_RX<&?\G&1K_(AT?_QTB(/]U>7?_
+M0T5#_];1T_]::WO_&B8J_Q89)/\F3H;_&A\K_W)B4O\=-%[_<W-\_U953O\B
+M,$?_7&)L_RDP1O\+'TS_(CQ[_R at Z=/^XM<?_76-?_S<X.?\4$A7_='9M_U)0
+M3?]G7UO_T<*\_[JOKO]*2$K_B(6$_XB#A_]*3%'_+#$V_RTP-?\E*2O_*2LN
+M_R$C)O\3%A?_&QT;_Q\='_\?&QW_BX2$_S,S-O\Q-#G_*R\Q_Q at 3%_\4$@__
+M(!L?_QPF,/\@+#/_O;>[_^C;VO]\?8[_/$Q6_W%X?O_?VM[_-SU-_S8]2__'
+MN[?_049,_SD_.__8TM#_,C,Z_S(U.O\K+S'_*RTT_RHJ,_\;'2+_O+2X_X1X
+M>__CSLG_D7UU_SXU+_]&1#__\N'4_Y^'>/^LFHS_ at W=N_]_0U/]!0$?_769M
+M_T!#2/^;F9O_&147_YJ6F/^GHZ7_<&MO_R(B,?\E*#K_+R\X_ZZFJO_\\.O_
+MW:=M_\J@<O_&EUW_KI!Z_SDB,_\>%S7_*R1"_R at A,_\<%QW_'QH:_R 9&?\:
+M$QK_'1@<_RDE(__.PKW_\]S7_S\].O\Y-SG_85I:_S8X-?\<'!G_)R4B_R\J
+M*O^8C(7_II:-_].]M/_OT\O_Y<_&__#8S__MT<G_[-;-__+6T__BR<C_LJ"=
+M_[BFH_]&.C;_UL[8_S1'6?\T3ES_.SY)_SA6 at O\?'R+_L9I]_R4[8O]#14S_
+M.C\__QPQ5/\6(#3_(#)._Q<X;?\G3(W_+$!Z_[NYR?^"BH;_)" B_Q at 3%_]W
+M>F[_=G1Q_[6KI__7Q[[_OK&P_V!;7_]234__L*JN_XB*C_\A)BO_'!\D_QD=
+M'_\M,3/_,C8X_R<K+?\E)B?_'AP>_Q\;'?^9E);_.#<]_T9+4?\O-#K_%Q,;
+M_R D(O\A'B3_(2X[_S! 4/\Y.T/_J:*B_Z"BL?]-5V;_>WM^_^;@WO^2F:?_
+M-C]-_\.XM_\X/4/_.3X\_\S(QO\W.4'_.C]$_QHA(?\P-3O_$QH at _QXC*/\L
+M*BW_*2$D_^?3S__MV-/_Z]G5__+DX?_ZY-S_]MW6__'<U__>S,C_Z=?9_TE#
+M2_]\A8S_F)N at _Z6CI?]*1DC_I*"B_Z*>H/]Y<W?_)R<T_R,H.O\L+#?_K:6H
+M__WPZ__AJW'_SJ-S_]VC7O_:IVO_K85K_R@;%O\8$Q?_$ D5_Q at 3&?\V,R[_
+M.30O_S$I)_\;%Q7_,"LF_\N]NO_MV]C_.3X^_QPA)_\@'R7_,3<O_T=+0O\S
+M-"[_'!D8_RDE)_\G("#_C7]\_]C#OO_NV,__]]W4__O?U__SW=3_^-[5__3>
+MU?_RXMC_]>+8_^'/R__8Q<O_/$14_RU!5?\[15K_)T%L_Q<@+O^?DX3_(#%5
+M_UM=9/\7'B3_&S-:_R,P2?\2+$W_)U:-_RQ9F?\Q38;_OK[-_ZZPK?] .#O_
+M&108_X&$>/^ ?GO_QKRX_]7&P?^ZKK'_ at 7E]_\:]O?_)O;[_1$!"_P\5%_\8
+M&B'_(R<I_Q,7&?\4&!K_&!P>_Q$4%?\>'R#_,2\Q_SDU-_\V,3?_.#E _S<Y
+M0/\<&R+_)B\O_R(A)_\=*C?_+3Y6_S$^4?\E*B__'B at W_T5/9/^6E9O_KJFD
+M_W=^BO\P.DC_LJVM_SD_2?]!0D/_T<W/_S T//\U/4'_&R0D_RPS.?\@+3+_
+M,3U!_Q@>(/\5%A?_W]#+__+=V/_UV]C_^=S;__?;W?_VW=[_\-G:_^_:V/_O
+MV=?_FHZ1_X2&C?^HJ[#_H)N?_V9A9?^LIZO_IJ&E_W-N</\1$AW_("8V_RXN
+M-_^AG9____7K_]FE;/_-IGS_VJ1A_\JA8/_%E5[_O91C_Q\- /\@&#K_,"I&
+M_RHI,/\\/#G_3TI%_SDT+O\]-3'_U,3!_^W9U?\V.SG_(R at M_R0D)_\M,C+_
+M%AH8_S$S,?\_/CW_+3(R_R(F*/\G*2S_'1@>_VQF9/_$M[+_[M;3__'7U/_V
+MW=C_\][9__'>V/_XW]K_[MO3__G<V/_BT]/_4DI._QT;)/\L-TC_'"(L_VMB
+M6_\@+$G_*20D_R<I)O\S/5+_)R<T_Q at M1O\]:)S_,UB9_RU+ at __$QM7_GIZ;
+M_QT5&/\9%QK_ at H5Y_X>%@O_)O[O_ULC%_[ROM/]J8&/_T<7!_\R]N/]M9V7_
+M)B at K_R(G+?\V.3[_)BHL_TI.4/\[/T'_1TQ,_SL^/_\@(2+_'QT?_];*S?_&
+MO+__&A88_QX:'/\Q-CO_*B0H_Q8>(O\N/TW_,3]8_SE#3?\]4&+_,$1 at _R8S
+M1O\1%Q__.D=8_RXY1/\=&AG_*S \_RPL+_]A863_+#0^_S=!1?\@+"O_+#4[
+M_R0Q-O\K-SO_)BPN_RTK+?_GV-/_[]K5__+=V?_UW]W_^.'B__;@WO_VX=W_
+M^.'<___GX/_FU=7_<G%X_ZVOM/^9E)K_<6QR_["KL?^HHZG_ at GU__UI<9/\?
+M)C3_,#$X_XN,C?__\NG_WZAN_\29;?_=IF;_QYMI_\Z::/_#D%O_OY)I_T ?
+M-_\Q(D'_#104_QT;'?^9B8#_C8%\_TY+0/_6R<3_\=O<_S0W./\@'R7_&A<=
+M_RPP,O\3%1K_(24G_QPC(_\P-3'_1$E#_S(R+_\:&!7_*"<N_QL7'_]N96S_
+MW<[2__3>W/_YW]S_^-S8___ at W/_XW-C_^-[5__??UO_QV-G_\=?4_VY:5O\Q
+M*"+_+2<?_RPH*O\M)![_*B$:_S4P*_\H(AK_'!TH_QU, at O\H1I#_)DV(_[W"
+MR/^6E(W_)!D at _Q@:'_^/B7__AX1__\O"PO_=Q,7_O+6U_\C"P/_1Q+__V<2_
+M_UE,2_\H+"[_(B<G_Q\B(_\7&AO_'R0D_R\V-O\3&!W_$QD;_Q49%_\W,BS_
+MSL&\_\&YM?\Y.S+_>X1W_X^-B/\N(!W_'!02_PT<'/\O0$[_-455_S=+8?\R
+M1V/_+D-?_S%&7O\Q0EG_-#Q&_Q88&_\4%AW_.C<]_QD6'/\7&1[_+S<[_QTI
+M+?\A*C'_(S$T_R,P,?\P,C7_*R4I_^?3S__RV];_[]K5__?BW?_WXM[_^N7A
+M__7 at V__WXMW__>+;_\^YNO]H8FK_J:NP_Y&-E?]^>X'_N+6[_Z>EJ/]M9VO_
+M>G>#_R,D-?\H*3#_*2<I___T[O_7GF3_SJ!S_]:=6__-G6K_RIED_\R;9O_-
+MG&?_MH]>_V _/_\=$"/_'R$?_U902/]J9&C_/T Y_]/'P/_GT]7_.#H]_QT<
+M(_\9%AS_+3$S_S8X._\G+2__*3$S_RTR,O\4&!;_-3<U_T5$0_\N+S#_%!07
+M_QL=(/\J+"__4TY4_\_$R?_KV-C_]-W8__?<U?_WW=+_]MS3__G=VO_PU]+_
+M[]C3_^K5T?^FE)'_>7%T_S,N-/\M+S?_;G1^_VUO<O\>("__&$R%_R$\B?\D
+M3(K_OK_*_WQW=_\C$R'_$!,8_YJ2B/^'@GW_T,;)_]_)R_^;G9O_R<7#_]3$
+MP?_9RL7_.S,V_Q47'O\8&!O_)2,F_QX>(?\A(R;_*2TO_R at J,?].45;_%187
+M_Y&)A__3P,#_T+V]_Z28E/],0SW_0T0]_U-73O^*A'S_G(N%_VEC8?]:6EW_
+M/$1._S=$5?\W1UO_,4)9_SI)7/\S/$/_*C R_R$G*?\V.CS_%A@;_R<G*O\R
+M.#K_,S]#_RLR./\N.C[_+#<X_QDA(_\H+"[_VM+5_]O2TO_AV-C_WM75_^C9
+MVO_GU=?_Z-;8_^?5U__TX^/_U,G._WAS?_^PK;G_>G9^_X!]@_^SL;3_K:NM
+M_X)\@/^$?XO_)R8W_R4E+O]L:FS___'K_\6,4O_ DF7_SY94_\*37?_)F6+_
+MR)AA_\657O_*EU;_R)EI_V!%(/\F&A7_(AP at _R$?*/]/3TS_S\.\_^?6UO\O
+M,S7_%QD at _QT<(O\C)RG_&QT at _R,I*_\P.#K_*"\O_Q@='?\K+B__&!D:_SL\
+M-O]&24/_*3 P_QDC)_\<(R__(R8Q_S8S.?_1R,C_Z-;2_^W:U/_VW]K_^-_:
+M__+<U/_SW-?_]]O8__;7U__LU-'_QK&O_R49'/\O*3'_9FAO_Q 4*?\A5Y'_
+M)#N(_R-,B/^]O<S_7UA?_VA79_\?(B?_HIJ0_Y"+AO_=T];_V<G-_SI!0?_%
+MPL'_WLO+_\F]OO\Z-S;_'!T>_QX;(?\G(B;_)" B_QP:'/\9&AO_(!TC_Q at 8
+M&_\7%1C_<6=J_XN#AO^>F9G_='!N_ZNHH__.O+C_+B@@_R\R)?_'O:[_GX]\
+M_YN.??^1B('_75Q;_TY15O\Y04O_-3]*_SD^1/\I,2[_+S<S_SY#/_\\/CO_
+M(R(A_RXS,_\G+S'_*2XS_QTA(_\A)RG_'R<I_RLX.?]B9&?_<G!S_VUK;O]H
+M9FG_8&1F_UA>8/]46ES_4%98_U%24_]/3E7_9F9U_Z^IM_]X='W_B(6+_X:$
+MA_^IIZG_BH2(_W]ZAO\E)#7_.#A!_VQN<?__].[_U)MA_\R><?^^A4/_PI-=
+M_[^05O_$E5O_P)%7_[^15_^_C%?_P9%2_Y)L1?\M&PW_7%-3_V!@5__2Q\#_
+MW<[/_RLQ,_\M,CC_/D!%_R4I*_\F*"O_+3,U_R(J+/\D+"[_%AP>_RPP,O\4
+M%AG_*2LP_Q03&?\T,SG_(1\B_Q@>(/\C*RW_)BXR_RLP-O\X-#;_U,G._^#/
+MU__IU]3_[=G1__':U?_QV=;_\]K9__C<V?_SU]/_X<G&__CCW_^CF97_-C- 
+M_R54CO\>,X'_*U.*_\'"S?]74%?_)Q at C_W1X>O^?EXW_AX)]_]W3UO_>R<[_
+M&1T?_YF7F?_.P,'_V,?'_V995/_/R<'_*"<F_T5"/?^">W7_/CPU_UM:4_]!
+M/#?_$Q$,_PP*!_^JHJ#_SL/"_\2\N/]03D?_0$0[_UI43/^VJJ7_2D,]_RPM
+M)O^WL*3_ at GAJ_Y6+@/^4B7[_85E/_R\M*O\X.T#_86-F_V)E7_]35$W_,C$J
+M_TQ*0_\N+"G_,C0R_R,J*O\9'1__&QT at _Q<9'/\?(23_+2\R_Y>-B?^AE(__
+MHY:1_YN.B?^5C(;_D8B"_XR#??^, at WW_A'UW_RHF*/]A86K_MK*[_S(N-O^.
+MBY'_%A07_Q .$/^%?X/_>G6!_RHI.O\I*3+_3%%6___PX__,G6W_RIIG_\N<
+M9?_#E%K_QI1;_\*05__!CUC_P)-7_\"/7/_!C%;_OI!3_ZE]2_]&+QC_86-A
+M_\S%Q?_=T<W_'B<G_Q 5&O\='R3_'" B_Q@:'?\G*2S_%!@:_R<K+?\9'1__
+M*2LN_Q47&O\H*B__$A09_QT@)?\>(B3_(R at H_RPQ,?\R.#K_*S U_SU#2_^A
+MHJW_*#U:_UUFC__9S-__\-G:__75R__PV=K_\-K8__'9UO_NW-C_[^#;_^WA
+MV/_CV-?_766%_SY1E_\M3'S_O</3_U!$1_].3%7_'2 at O_Y^9C_]^=6__Y=G<
+M_^S3TO]23T[_0CT__]C)R?_FT<S_FH^$_\O!M_\J*"'_O[FO_XZ%?O]W<FS_
+MQKZT_UY>4?^ >G+_(B ;_[&JI/_ at SLO_A'EX_ZBCG?^UL:G_,C H_S8U+O^]
+MO+7_I)^:_R,D'?^9E8W_<65<_W5N8O]K9EK_>71H_R =&/\='R+_&1<4_SX]
+M-O\P,RW_35-/_V=E9_]B8E__1DM%_RDN+O\B*2G_*BTR_Q at 7'O\;&1O_FI*5
+M_Z"8F_^CFY[_KJ:I_[.JJO^[LK+_O+6U_\2_O__$SKW_6V!:_UA98/^KLZ__
+M*RTP_Y:2FO\Q+##_-C(T_W=U=_^)B93_)"(R_QT>)?\M,S7__^_B_[R2:O^Y
+MCV'_K8E<_ZZ#5?^WBU__M(E9_[2+5_^WBUG_O(Y;_[>*5/^WBU/_O8E7_ZF 
+M5O^1>5[_V,J[_^31R_\L+2[_$Q4:_Q(6&/\6&AS_$Q48_R at J+?\0$A7_)RDL
+M_QD;'O\F*"O_%!89_R4G*O\1$Q;_&R$C_QHB)/\?)2?_(2<I_R,I*_\I+S'_
+M+#LZ_R B*?\@+D?_.DEL_RDY9?]37X3_SLC6_]_-R?_IT,O_\-31__79UO_U
+MV=;_\M;9__36U/_KU,__X]O?_XV/GO_/Q\O_3CPX_S(Q-_\7(BG_GIB._W5K
+M9__EV=S_\-C5_WUX>/_*PL7_V<C(_][*QO^*A7__S\?#_W=U<O_*QK[_?G=Q
+M_XZ*B/_(O[C_;V]F_[^WL_\C(!__N[&M_][.R_]_=W7_3TY'_SHV+O_!NK3_
+M?7MT_S$P*?^SKJC_34Q%_WAV;O^!>6__=W)F_WYZ:_][=&;_;&9<_Q\='_]0
+M3$K_4TM'_R$;&?\@&QO_+BDM_SP\.?]04TW_-#<X_Q\C(?\;'2+_$@X6_QP8
+M&O\;&1S_&18<_QH7'?\6$QG_&!89_Q at 6&?\6%AG_&!H=_U!$6?]%/DC_8%Y9
+M_[FGL?\N(RC_GIN:_Y^@H?\C)B?_*"PJ_XF)DO\G)37_)R at O_S0X-O_PYN__
+M0BXF_S<K)/\T*R7_/"TG_T4V,/\X*R#_1#@I_TX],/]50C;_64 at X_VM;2/]V
+M7$[_ at V=,_X-N4?^%?'7_X<_,_\K&R/\W.3[_(RDK_R,K+?\L,C3_)RTO_R4K
+M+?\B*"K_&B B_R,I*_\/%1?_(R4H_P\1%/\;(2/_&2$C_QLC)?\?)RG_(BHL
+M_R4M+_\I/2[_:&][_R<C,_\=)BW_("E!_R4S2O\6(3+_/3@^_T,_/?]P;&K_
+MIIZ<_\N^O?_5RLG_YM?1_^W7S__NT]+_[=C6_^G6S/_9R;__)R0C_R at K,/^@
+MEY#_6%-5_^3;V__LU-'_>G5U_]')S/_8Q\?_V<;&_R,@'_^_NKS_3DY1_X.!
+M?/^:EI3_FYF;_\O'Q?]V=W'_P;BX_RHF*/^ZL:O_X-#-_["HIO_!P+G_:&1<
+M_SPW,?^JJ*'_4TY(_S8T+?_ N[7_,BXF_\&^L_]Z=VO_>WEJ_W]Y:?]\=F;_
+M/#HS_S0Y-?\/%1'_("0B_QL<'?\B("+_*"8H_R<F)?\H)B'_-S4P_QT:%?\D
+M(![_2D=&_Q(0$_\2$!/_%Q48_Q(0$_\6%!?_%1,6_Q$1%/\8&AW_/4(^_R,G
+M-O]<7G/_G9&,_VQC7?]*147_D925_S,\//]&2TG_D9&:_QX<+/\H*3#_D(^(
+M__3J]/]71D;_)2$I_QL=)?\='!O_&1<:_QL8'O\;%R#_'!@A_QL7'_\9%AS_
+M&1<:_R >(?\;&AG_&A at 5_S(H*__:SLK_25)2_S4]0?\U/3__,3P]_R\W.?\J
+M,C3_)R\Q_Q\G*?\;(R7_&2$C_QDA(_\@)";_'!XA_Q<='_\9(2/_&B4F_QXI
+M*O\A+"W_(2PM_RXU*?\W267_$1DI_QH9&/\B+$[_*S1%_R B(/\>&B/_'R$F
+M_R(E)O\C)B?_*"TM_S U,_\R-3;_-#,Y_SPX0?\^-CG_6%-._X9^>O^TJ*O_
+MP;R^_Z:;E/]'1$K_Y-W=__+;UO]Y='3_VM+5_]G(R/_0N[G_ at GIV_\2XN?\F
+M(2/_PL' _U!.4?\T-CO_O[V__U]=7_^\L;C_*R<I_[NRK/_>SLO_85E7_S8U
+M+O_)Q;W_9V9?_S\Z-/_#NK3_.3<P_ZZBG?]L9E[_.CHO_W1T9_]I9UC_@'MI
+M_W=O6_]T<&'_+BDD_R ;&_\3#A+_$1 6_Q,4%?\8%1O_&!(6_Q<2#/\W,2'_
+M.S<A_S0T(?\V-"S_%Q85_R0B)/\D(B3_(1\A_Q<5&/\2$!/_$Q,6_Q\A)/\&
+M"C#_-RYC_[2CV?]%.&O_44QD_QP9)?\^04;_&R >_RTQ+_^=G:;_>WF)_RHK
+M,O_$O[K___+M_X1R:/\6%!?_%!8;_UY=5O])1T3_'!@:_QP7&_\B%13_'1$,
+M_QL6%O\4%!?_$Q at 8_RDM*_^KIJ;_W]#+_[*KJ_\P-#;_*C(T_RLX.?\J-C/_
+M+SP]_RHV._\K,S?_*3(R_RDU-/\D,"__)# O_RHR-/\J,C3_("@J_Q at C)/\:
+M(B3_("8H_R at N,/\F+"[_+"PC_QP[7?\'$R[_'148_QTW</\G,U+_)R4H_QX9
+M'_\D'R/_'QP;_R0C(O\H)BG_(QPC_QT;)/\C(RS_(R ?_QP=*/\H(R?_)B4K
+M_RPH,/\S+#/_+BDM_S<U./_5S,S_Z-;3_XZ/D/^9E)C_U,7%_\>XL_^NI*#_
+MQKZ\_UY96?_,PK[_D(J(_T=(2?_'Q<#_,BHH_\S$R/\D)"?_N+"L_]C*Q__.
+MP<#_A'UW_R(D&__*QK[_2#PU_ZRLH_^LK*/_,"\H_ZNLI?\W.S/_D)*)_S\_
+M-/]$13C_<7%D_V9G6O^1BX/_V,6__]["O_^ID8[_)Q at 8_RD=(/\2#0__$0\,
+M_RLK'/\R-1__,30=_R\S'?\N,"W_,C,T_S$R,_\Q,C/_*BDH_Q43%?\8%AC_
+M3DQ/_Q48._^AG+;_9F%<_Z>BNO\<&S__(R(__UM;;O\S.#+_.3TU_R ?)?\K
+M*#3_961J_\C"P/_^[_#_QK2P_SX\1?\D*##_&!83_Q at 7%O\5$Q7_%1,6_Z*=
+MG_^OIZK_&108_Q86&?\E*BK_-3<U_[6PJ__<S<C_X-'1_X^&AO]J967_75Q;
+M_TE/2_\K-C?_+CH__RXV.O\J,S/_*C8U_R8R,?\B+BW_)C$R_R(M+O\B+2[_
+M(RXO_Q8>(/\;(R7_'24G_R H*O\O,2C_)$!;_PP7+_\A'1__(#MQ_R0M2_\:
+M&1__(QXB_Q\;&?\Y-S#_2DA _TQ'1_\5&2[_'"U1_QTW8_\I0&K_'RE$_UI4
+M6/\H)2O_'R G_QX:'/\D(B3_*"8C_^;<V/_OW-S_0T-&_VMF:O_8R<G_O[*M
+M_[>MJ?_$OKS_44Y-_]/*Q/^]LZ__J*6D_\K%O_^QI:'_Q+S _RTM,/^WKZW_
+MWM#-_S,H)_^AG);_5%1+_VIJ7__ MJS_-CLO_YVBEO]!/SC_N[JS_T$_./^N
+MJ:/_5%1+_SL^,O]664W_4E5)_TY-0O_7Q[[_]-7/_^K/R/_6O[K_J9>3_T at _
+M/_\3$!;_%1(8_QD4&/\7$Q7_,"TL_Q83$O\=&1O_)R,E_Q at 4%O\<&1C_+2 at H
+M_TY)2_^;E9G_#PL__Z>BS/^VL[__KJ/8_Q01//\@'C__*29#_V)C;O\Z.SS_
+M)B8I_R0C*O\L+"__TLK(___S]__.O+G_'QPI_QP?*_],24C_'!L:_QD7&?\9
+M%QK_'!8>_R :(O\<%QW_&!89_T=,3/\]/SW_O[JU_^S=V/_PW=?_T+VW_^C7
+MT?_ at T<O_JZJC_SE 0/\O.S__*# R_RPU-?\I-33_*#0S_R<S,O\D+S#_(RXO
+M_R,N+_\B+2[_)C$R_RLV-_\J-3;_)S(S_RPV+?\U15G_$!XG_Q\A)/\M2'[_
+M*C1/_R,C)O\L*"K_+RTF_UA73/]C8E?_7%A6_PT6)_\,)E'_$3I\_QY$@O\G
+M.5__LJVS_RLF*/\>'2/_)!\9_YR<D_\Q+RC_[N+=__';W/\Y-SK_RL3(_]_,
+MS/^YK*?_P;>S_\"ZN/]23T[_VM'+_[RTL/^ZL[/_S,.]_];(Q?_.QLK_)24H
+M_\S$PO_:S,G_6U-1_SDX,?_!O;7_(B<;_X!]<O\Y/##_M[RP_UU84O^EGIC_
+MF(^)_WEM:/]$1#O_4E5)_TI-0?]%2#S_86%6_\:XK__OT\S_X,? _^7*O__P
+MU]#_TL/#_R 6'_\5$!;_&!06_R4B(?\O+2K_%A,2_S4Q,_]#/T'_&A88_R8A
+M(?]84U/_=7!R_XN%B?\2'1C_&!0[_RP?6O\<&D'_&!DR_Q,0+?\9%CK_)R5&
+M_S(P0/\F)2O_)R<J_R at I*O_8RLO___+R_\V\MO\@'R;_%ALA_T-!/O\E)"/_
+M%A06_QH8&_^,?W[_FHR-_R$7&O\@&R'_.C\__S4W-?^_NK7_\.'<_^W?W/_C
+MU=+_VLK'_^'1SO^RJZ7_/T$__R\Z._\N.CG_+3DX_RTY./\N.CG_+SLZ_RTX
+M.?\N.3K_-#] _S(]/O\N.SS_+SP]_R8S-/\@+2[_'2XD_R$E-/\;*BG_,#@Z
+M_R4]=/\H,TS_'R A_RDF)?\I*2;_2DU'_SH^-O\V-SC_%R$P_Q\[9_\0/(#_
+M'D!__RH]9_^?FZ/_0S at W_R<E*/\K)B#_GYV6_S,N*?_KW-W_[]C9_RLF*O_/
+MQ\O_Y,[/_[ROJO_)O[O_M[&O_TY+2O_;TLS_M*RH_\G P/_6Q\+_T,"]_\W%
+MR?\H*"O_R<&__]/%PO_!O;O_.3TU_Z&8D?\K,B;_M*RB_T,],_^.C8+_H)Z7
+M_UU;5/^LIZ'_0CLU_U]?5O])3$#_8&-7_U583/])3$#_P[>N_]2YLO^LEH[_
+MOZ:?_^'(P__:P<#_T[V__QL4%/\?'!O_7UU:_R8D'_\[/SW_+"\P_SL^/_\W
+M.CO_.3T[_UY at 7O]?8&'_2TM._Q00&/\4#QO_#Q00_QD7*O\6$2W_%Q,M_Q44
+M,?\3$C?_.35+_S,R.?]$1D3_0T5#_]'(R/__]O+_P;2S_Q\>)/\2%!?_'1D7
+M_R0?'_\A'1__%1,6_S0I*/]#.#?_'QH<_QH:'?]$1D3_,#@T_\.YM?_QW-K_
+MY]O7_^'5T?_=T,O_V<W&_YZ7D?\C(!__%A@=_Q<;'?\?(B/_'1T at _R @(_\B
+M(B7_)2DK_R4K+?\F+"[_*C R_RPP,O\R.#K_)3(S_R8Q,O\J+RW_'RHQ_R4P
+M*?\E)R[_)#]U_RDQ2_\D(B3_)R at I_T='1/]454__4E5/_R8H*_\/)$#_&$-O
+M_P8R?/\F1X__(SYM_[BGI_\P+S7_*R8A_S,Q+O^@FY7_-"PH_^O<W?_OV=K_
+M+"DH_];-S?_;S,S_>W-O_]?&QO^PJZO_2DI-_]O3T?^ZKZ[_RK^^_\6XM__&
+MO+C_R;_(_RPH*O_ O+K_>G1R_[&LI_\].#+_BH%Z_TY,1?]E7EC_C(9^_T1!
+M-O^AH)G_+B\H_ZJIHO\L*R3_7F!7_T!$._]#1S[_044\_UA?4_^EGI+_R:^F
+M_\6JG__*LJG_RK*I_\>PI/_*LZ?_.R\K_SXZ//\?'2#_+2HI_TA)0_]*2T7_
+M4E--_U)33?],2$;_2T9&_QD5%_\5%1C_%Q(4_Q8.&/\6$![_%Q4>_Q83*O\5
+M$2O_%Q4O_QH;-/\A&CS_'A at L_QT<&_\E)"/_B(.'___S[_^WJZ[_%1(8_Q<7
+M&O]U<'#_/3 at X_Q82%/\8%AG_)A\?_QL4%/\;%QG_%1<:_TI)2/\T/#C_P[NY
+M_^K;V__ at T=+_XM#4_]G(R/_3P\#_GYJ4_Q\;'?\5$AC_$Q,6_Q at 6&?\3$13_
+M&1<:_Q43%O\0$!/_$!(5_QD;'O\7&1S_%148_QD='_\@+2[_(RLM_R at K+/\=
+M*B__)2LG_RPN-O\D07?_)2M%_R,?(?\D)B3_/STV_U%12/]=7E?_&!D:_PXD
+M1/\91&[_!C-Z_RA)DO\>.VO_NZ:B_R D+/\E(!K_2TM(_Z&<EO\Z,"S_Y=;6
+M__#:V_\V-#'_V-#._][/S_^:EI3_WL_0_Z6AH_]*3$__T,G)_ZRFI/_4SLS_
+MP+JX_\2\N/^]L[S_,BTM_S(P+?](1DC_FYN8_YB6D?\].S3_J:6C_S,O+?^O
+MK*?_0#XW_YR:E?\Y.C3_=79P_R<H(O]%24'_-CHR_T1(0/]"1C[_1DQ$_U10
+M2/_*MZ__RK*I_\FQJ/_#J:#_O*&6_]6XK/^_K*+_0S<S_QX9'?]'0DC_%Q,5
+M_Q\;'?\E(2/_0CY _S\W-?^CFIK_&Q88_Q<5&/\4$1?_%Q,<_PT,&?\4$B7_
+M'1DS_QP8,O\;&"__%1(H_Q\:-/\F(S#_-#(M_S0R*_]055/___#L_[&EJ/\>
+M&R'_&QL>_R4@(/]U<'#_&147_Q$/$O^JHZ/_LJNK_Q<3%?\3%1C_'AH<_R$I
+M)O^QKJW_P;R\_W=\@?])457_0TM-_T)+2_]*3TG_%Q47_Q,0%O\2$!/_&A@;
+M_Q02%?\5$Q;_$Q$4_Q04%_\2$A7_&1D<_QH:'?\8&!O_&Q\A_R M+O\B*BS_
+M(RDK_QHE+/\C)2/_)28Q_R9">_\?)#[_)R,A_S4V,/]'13W_5E9+_TU/1O\A
+M(R'_%2Q)_R!+=?\)-GW_)D>0_QPY:?^QG)C_%1DA_R8A&_]34U#_G)>1_T V
+M,O_NW]__[]G:_S N*__9T<__W]#0_WMY=O_;S<[_GYV?_T5)2__3T,__K:NH
+M_]73T/^\NK?_HYZ9_ZF?J/\T+R__'!H7_S4X/?\>(R/_%AH8_S at Z-_\S+S'_
+M&A88_W!M;/]&1$'_*"<F_S0V-/^BI*+_(R4C_U]A7O]*3$G_04- _S]!/O]9
+M6UC_3DY+_V%<5_^9C8C_MZ6A_\BTL/_1N+/_T;6N_\VTI_]40SW_(QHA_U5/
+M5_]X<GK_>'%[_WYW@?^NI['_J)^?_Z^FIO\:%1?_%Q48_Q<9(/\4%2#_(R(S
+M_QH7+O\:%RW_$Q B_QD5)?\4#!K_$ P4_S0Q,/\_/C/_2T@\_S at V+___\>W_
+MJIZA_R$>)/\:&AW_A8" _[:QL?\2#A#_&!89_QT6%O\K)"3_&147_Q47&O]H
+M8F;_%1P<_YJ=GO\Z/3[_+C\__S%"0O\T1D3_.DQ(_TI33/\4%1;_%A,9_QP6
+M&O\9%QK_'AP?_Q43%O\3$13_%A07_Q02%?\<&AW_&A@;_Q45&/\:'B#_(B\P
+M_QHB)/\B*B[_$QTG_R(>(/\:'2G_(3IU_R0F0?\J)2#_45%&_SL[+O]864S_
+M,30H_QX@'?\8+TS_'TIT_PDV??\J2Y3_(C]O_\2OJ_\4&"#_)B$;_W1T<?^H
+MHYW_23\[_^K;V__RW-W_(B =_Y^7E?_6Q\?_N;FV_]O/T/^8F9K_5EQ>_\_(
+MR/^ZLK#_VM+0_\*ZN/^[LZ__I9ND_U)-3?\B(!W_'ATD_Q06&_\7%1C_'!@:
+M_Q -$_\3$!;_$Q$4_RLI*_\K*R[_&1L at _Q06&_\1$QC_)"4F_S4V-_\^/T#_
+M2TQ-_R at F*?\6&!O_+#$Q_V9H9O]05%+_/#X[_U=23?]L8%O_?W-L_W)J:/\>
+M&1W_4$I._R$<(O\0"A+_+"8N_VQF;O]<4U/_K:2D_QH5%_\7%1C_#P\:_Q(1
+M'O\B'RO_%@\9_Q44&O\7&!G_HIV=_Z:>G/]*2$'_1D0]_TY)0_]23$3_?W=S
+M___Z^O^\L;#_)R(B_R0@(O\D'Q__C8J)_P\+#?\5$!3_LJBD_[>KI_\?%Q7_
+M'!D8_Q\5&/\@)"+_EI:3_U]B8_\T0D'_.$9%_SI)1?\^34C_0T]&_Q$4%?\;
+M%AS_$Q$4_Q at 6&?\9%QK_%Q48_Q02%?\9%QG_%A06_QP:'/\5$Q7_&!88_QPA
+M(?\F,C'_(BDI_R4L+/\9'2S_'QL9_PX5(?\A.W3_&A\X_R<F)?\K+2K_-#LT
+M_S4X.?\O,RO_,38P_QPQ4_\K5H3_%D2._RY-E?\X473_JYB8_QHB'_\>%!?_
+M?'MT_Z*:EO]@5%?_Y=?8_^O<W?\N*BC_P,&[_["IH_^HH:'_W,W1_X:+B_]A
+M:FK_U=3-_[VZM?_?V=?_R\2^_[RWLO^-B(K_8%U<_S,R,?\:'!__%A@=_Q04
+M%_\8%AG_$A 3_Q(0$_\3$13_)B0F_QX>(?\:&AW_%!07_Q$1%/\4#Q/_% \3
+M_Q,1%/\2$A7_%QH;_QH=+O\-$2#_&!$B_R4:+?\G+37_-CLY_U164_]54DW_
+M5%!._QD4&/]13E3_+BLQ_R(=(_\N)2S_;FEO_TU$1/^NI:7_&!,5_Q43%O\.
+M#13_% \3_ZRGI_]\>'K_.#8X_S P+?^6EHW_H9R6_TQ*0_\>'AO_(R$C_R at C
+M(_^WM;C___3Q_Z&6E?\:%Q;_'AH<_S<T,_^VL[+_%A(4_Q\:'O\>&1__%1(8
+M_Q<9(/\4&B+_=W!W_QPD(/^*CXG_>7Y\_S5!0/\Y0D+_.4- _SM&0?]"3D7_
+M$A,4_QD4&O\5$Q;_%1,6_Q84%_\7%1C_%!(5_Q84%O\7%1?_%A06_QD7&?\8
+M%AC_'2 A_R(N+?\@)R?_+#0Q_Q,5)/\<&13_"Q4 at _QHR:_\9'S7_0#\^_T!"
+M0/\K,"K_'QPB_S8T+?\W.3;_'S99_RE7A/\:1I#_*TJ2_SE2;_^QGZ'_%AX:
+M_R8;(/^*BW[_I9^7_VA:6__JV=G_Z-C<_SDT-O]Y>7;_U<G$_Z6>GO_8S,__
+MC9*2_WR#@__(QK__O;BS_^#7U__*P;O_O[>U_XJ%A?]Q;FW_+BTL_QH<'_\6
+M&!O_%!07_Q at 6&?\/#0__$Q$3_Q,1$_\D(B3_'AP?_QP:'?\9%QK_%!(5_Q4/
+M$_\4#Q/_$ X1_Q,3%O\8'RO_'R5%_Q$7,?\:%RW_6$EB_V1I=?]D:6G_5EA5
+M_X6#?O]134O_&A49_U).5O]_?H3_?WA__X!W?O^VL;?_A7Q\_Z:=G?\<%QG_
+M'!H=_QP;(?\<%QG_K:>E_X:!A?\[.3S_+RXM_X:&??^=FI7_244]_QH8$_\:
+M&!7_-"TG_[ZXO/__]/#_?79V_QD:(?\0#A'_B(2"_ZJGIO\>&AS_'1@<_W1I
+M:/]N9F3_&A45_QT;'?\O)RK_%R 9_S8],_\Z0C[_)BTM_R,F)_\9'1O_*2XJ
+M_T901_\;'!W_&10:_Q43%O\8%AG_&!89_Q at 6&?\4$A7_%A06_Q43%?\6%!;_
+M&1<9_Q43%?\>(2+_(R\N_Q 7%_\K,S#_%!8E_QL8$_\4'BG_&3%H_R\S2/](
+M1D/_)"0A_RTK(_\Z,#/_.3,K_S\].O\D0&+_+%>%_QY&D?\J2Y/_1%]W_[>E
+MJ?\3&AK_(A<<_XV0?O^CH)3_>'!N_\&UN/_2R]7_'!XF_S@[//^UJ*/_J:2D
+M_]G/TO]]@H+_FYZ?_\K%P/^ZM++_W-+5_\B^NO_.QL3_?GEY_XF&A?\?'AW_
+M%QD<_Q47&O\1$13_%Q48_Q05%O\.#Q#_/T!!_QX?(/\G)2C_)2,F_R >(?\4
+M$A7_%A 4_Q0/$_\/#1#_#P\2_QPE/?\;(5'_$QY _Q8;-/\O(#G_:6YZ_V1I
+M:?]H:F?_B8>"_U).3/\D'R/_/#A _T9%2_\1"A'_.C$X_XR'C?]C6EK_HIF9
+M_Q\:'/\6%!?_$Q(8_QP7&?^JI*+_-"\S_QH8&_\G)B7_?W]V_WUZ=?\_-C#_
+M*R8A_Q\:%/]63$'_R\'$___V\/]53U/_-3I&_R at E*_\D(![_L:ZM_QD5%_\:
+M%1G_8595_V-95?\;%Q7_'1P;_RH<'?\H*2+_E92)_X>(@O\E+"S_'R8F_QXF
+M(_\J,B[_2E9-_Q at 9&O\6$1?_$Q$4_Q84%_\6%!?_%A07_Q,1%/\4$A3_%!(4
+M_Q84%O\9%QG_%Q47_Q\B(_\G,S+_"Q(2_RTU,O\/$2#_&Q at 3_Q<A+/\9,&7_
+M'B$S_R$?&/\L*B7_/#HK_R<B'?]*1SS_ at HB$_R-"9/\L5X7_'$"+_RU.EO])
+M9WW_P*ZR_QH?)/\C&1S_D)"!_YZ<E?^,CI/_F:2K_Y>CLO]'6&7_.D9*_XZ&
+M at O^HI:3_W=78_Y.8F/^IJJO_P;^\_[JYN/_8T]?_NK:T_\G"PO]S;F[_A8*!
+M_R\N+?\8&AW_&AP?_Q(2%?\8%AG_#0X/_UA;7/]15%7_(B4F_R >(?\D(B7_
+M'1L>_Q,1%/\6$!3_% \3_Q$/$O\4%!?_'28^_QPC4?\4'4+_'" \_RX>.O]A
+M9G+_65Y>_VMM:O^5DX[_44U+_Q<2%O\L*##_'ATC_S(K,O\Z,3C_?GE__U1+
+M2_^IH*#_'!<9_Q at 6&?\3$AC_&106_Z6?G?\8$Q?_%1,6_QH9&/^!@7C_F9:1
+M_SPX-O\F(B3_&1 at 7_U--0__ O;S___7R_T Z/O\;'"/_&!D:_WAZ=_^\M;7_
+M&A@:_Q43%O\F'Q__&A88_Q83&?\='"+_BX:&_RHK)/^=E(W_ at X%\_R\W-/\H
+M+2O_&1\;_RTS+_]365'_&!88_Q01%_\4$A7_%1,5_Q02%/\3$1/_%!(4_Q43
+M%?\4$A3_&1<9_QD7&?\7&!G_(B<G_RPU-?\;("#_-3<__Q8>&O\=%1G_&1\G
+M_R0V;O\9'R__)BHB_RTP*O\V+2;_.S8V_VUP<?]&3T__,%%C_QM0AO\;.W/_
+M.$]Z_UQH=_^ZLK;_(ATC_R <'O^-CX;_HI^4_U-@:_]+;(/_1VN _R%"6/\A
+M-D+_*2LI_Y^AGO_#QL?_A(F._\/$Q?^_O+O_6EU>_[[#P_\^04+_S<G+_V%9
+M7/^7CY+_&1<9_R4E*/\9&1S_$Q,6_Q<7&O\0$!/_'AXA_R,C)O\H*"O_&1D<
+M_QL;'O\5%1C_$! 3_Q,.%/\3#A+_$A 3_QL:(?\5'##_&B%+_Q8=0?\;'#O_
+M9D]Y_W-P??\J-3;_'R K_WUZ>?]'1$/_&!06_RTH+/]'0DC_13M$_T,T/_^$
+M?(;_85-4_Z&9G?\4$AO_$Q48_Q,1$_\<%Q?_EY&/_PT+#O\5$AC_&!<6_V5F
+M8/]K:6;_-38W_QH<(?\4$A3_5%)*_[VZN?__^/?_3$1(_R4D*O\>'R#_+S0P
+M_[^VMO\9%QG_)"0G_[.KJ?^ZM;?_&Q8<_Q$/$O\>&1G_'R,;_Z&6C_^ ?GG_
+M-#PX_SD_._\M,R__-3LW_UU at 6O\8%AC_%!$7_Q02%?\3$1/_%1,5_Q43%?\5
+M$Q7_$Q$3_Q02%/\9%QG_&1<9_Q87&/\A*"C_,3HZ_Q(7%_\M+3C_(RLA_R,9
+M(O\?)2W_'RUE_S8U1O\W.#+_/T$^_S,X,O^9I:3_F:6I_VYU=?]/96/_,UEY
+M_W1\D/^(@'[_AH&!_ZRFJO\;%AS_'AP>_XV.B/^=F(S_2%EF_T=MB/^OS^7_
+M=(ZC_T147O^$A('_F9J4_R<M+_\P-3K_L*ZP_\._P?]D:6G_7&AG_W1[>_^$
+MB8G_D8^1_ZB at H_\8$Q7_%1,6_QH:'?\0$!/_%Q<:_PX.$?\6%AG_'1T at _QX>
+M(?\?'R+_&1D<_QD9'/\/#Q+_$PT5_Q,.$O\0#A'_%!(;_Q4>-O\9(U#_&R5*
+M_Q09-_]E3WS_?GF%_Q 8&O\:&B?_'1D;_U524?\H)";_,"LO_R<C)?\B&!O_
+M,B4J_WEP=_]!-C7_8%99_Q$.%/\4$!+_%A(4_QD2$O^5CXW_%1(8_Q00&/\7
+M%1?_75Y8_YZ>F_\Q,C/_'!P?_QT;'?]+2D/_AX2#___X]_]#.S__'QXD_Q46
+M%_\E)2+_L:6F_QL9&_\9&Q[_&1,1_Q81$_\<%QW_&QD<_Y*-C?]%24'_HI>0
+M_W]]>/]<95[_9FUE_V!G7_]A:&#_9&A at _Q@6&/\9%AS_%A07_Q84%O\:&!K_
+M'1L=_Q84%O\5$Q7_$A 2_QD7&?\7%1?_%Q at 9_R8M+?\T/3W_%!D9_S,P//\A
+M*![_&A 9_PD2&?\;*6'_&Q<G_R<A'_\8&AW_-$M*_X6AI/]D>'S_3596_U)I
+M:/\S6'?_<7)]_Y> ;?^%=V[_J*">_R$?(O\?'1__C8J%_ZBAE?^1HJ__)4]I
+M_[//Y/^URMS_$QTG_XJ%?_^#@'O_'B(D_UE;8/^_N[W_O;F[_RPQ,?\T0#__
+M-#T]_TQ85_\D)RC_E(R/_QD1%/\6%!?_&AH=_Q 0$_\8&!O_%148_QX>(?\>
+M'B'_0T-&_RHJ+?\@("/_*BHM_Q(2%?\4#A;_% \3_PX,#_\3$1K_%2$\_QHE
+M5/\<)TK_,C5._V)4>O^ ?H?_#1 5_Q42'O\0#0S_4$U,_QD5%_\@&Q__0T)(
+M_U945O]C7%S_IJ*J_VYO=O\6%!?_&1(2_S$E(?\?&AK_&Q04_ZFCH?\4$1?_
+M$P\7_P\-#_]B8UW_FYN8_ST].O\=&QW_&QD<_RTN*/\:%AC___KY_RLC)_\2
+M$1?_'!T>_WEW=/^FEYC_%1,5_RDM+_^[M;/_G)>9_Q at 3&?\=&Q[_%Q(2_T5)
+M0?^HG9;_A8-^_V5O9O^%C8/_;'1J_V=O9?]I;67_%Q47_QH7'?\:&!O_&!88
+M_QL9&_\>'![_'!H<_Q at 6&/\:&!K_&A@:_Q at 6&/\7&!G_*"\O_S([._\4&1G_
+M-3 \_R(F'?\@%A__'2DP_R$V;/\6%"3_(A\>_R$M-/^*I*?_ at IVD_UUT>?]&
+M4D__46IO_RU5?/]T>8O_D8!R_W- at 5O^>DHW_(2(C_Q\;'?^2BH;_IYV2_YFJ
+MM_]VHKS_F,38_ZG-XO\J.TC_B(.#_YV8D_\>'B'_2TI0_\7 PO^NIJG_.C at Z
+M_S@[//]+3D__/D-#_QH='O^"?H#_&!D:_QD9'/\<'!__$Q,6_Q04%_\6%AG_
+M%A89_QH:'?\]/4#_'Q\B_QD9'/\?'R+_#@X1_Q0.%O\4#Q/_$0\2_Q(0&?\8
+M(CW_(BI6_Q8<-O\8&"'_5$QG_T ^1_\K)R__&QHA_X.!?O]23T[_&A88_R<B
+M)O\-$B3_'",Q_S(U0/]G;8/_=H&@_Q\D-O\=%A;_."TF_R,?'?\=%A;_K:>E
+M_RLH+O\?&R/_%!(4_V)C7?\;&QC_,S$L_QL8%_\5$Q;_*2TK_Q at 6&/_Y[>[_
+M)Q\B_Q81%?\>&QK_'1T:_YJ.D?\H)2O_*"DJ_S$I)_\J)2?_%1(8_R(@(O]^
+M>7G_6U90_Z.:D_^4DHW_;G5N_Y&6D/]Z?7?_;G5N_W)S;?\4$1#_&108_QD4
+M&/\6%!?_&!89_Q at 6&?\9%QK_&1<:_Q84%_\@'B#_%Q85_QL:&?\G,S#_-#DW
+M_Q8:&/\X/D#_(1\<_QX7$?]&3E[_)"U<_QH;)O\@'AO_)C0S_X:;G?]SB8O_
+M87=U_UMD7?]>=7#_+D]Z_WA\?O^<AX+_9UI-_XJ"?O\?("'_+S$O_X>. at O^I
+ME8W_4%AB_V24J?]YJ,#_F\WH_UM\D_^\M[W_ at G9W_Q\@(?\[/C__S\G'_ZBA
+MH?\Q+S'_+R\R_RHI+_]<6%;_'1H9_T-"2/\)"!7_'18B_R :(O\9%AS_$!,4
+M_Q45&/\4$A7_$0\2_Q84%_\?'2#_%A07_Q$/$O\1#Q+_$A 3_Q(0$O\2$!/_
+M%1(8_QTC0_\1%C#_$0\2_Q<1%?\I)2[_)R0J_S M,_\N+"__,2\Q_R8D)O\>
+M&AS_0CT__R(E/O]*46?_%!XL_R,S3?]*5G7_"PP=_R$9%?]A7$K_0SXY_Q at 3
+M%?^BG9__*B0L_QH5&_\5%Q7_24Q&_U!/2/^LHI[_&1T;_Q at 9&O\H)BG_,"XP
+M___S\O\H("/_)"$G_R$<'/\_03[_=&EN_Q -$_\E)B?_-2HI_R<A)?\4$QG_
+M&Q<9_UM65O]G8%K_H)J2_Z6CGO^ A8'_G)V7_Y*0B_]V>7/_>'AU_Q<4$_\8
+M%!;_%Q(6_Q$/$O\6%!?_%!(5_Q<5&/\5$AC_%A07_R,A(_\;&AG_'!L:_R<V
+M,O\X/#K_&!P:_R\[./\E(B'_-C F_R8L//\>)U#_%1LC_R(>'/]46UO_5&=G
+M_W2*B?][CHC_8&IA_UEP:_\N2WO_=WQZ_YF#@?]U:V#_=G!N_RHK+/\P,B__
+MCXZ#_VIB8/\,)S+_<:C _X.QS?]BEK?_>:+ _[.SO/]N8F/_)"(D_RXR,/_/
+MR<?_GYJ:_RPJ+/^FIJG_*BPQ_[&MJ_\;&!?_8F-J_R<D,?\\.47_(2(I_Q03
+M&?\1$13_$Q$4_Q84%_\2$!/_$Q$4_QH8&_\7%1C_$0\2_Q$/$O\2$!+_$A 2
+M_Q .$?\<&1__'"-!_PH-'_\0"PO_*",E_Q at 5&_\9%QK_)"(E_R >(?\=&QW_
+M(B B_VIF:/\F(2/_$P\?_Q at 8(_\5&!W_&B4P_QXF.O\Y/47_)R(=_S4O)_]L
+M:VK_# T._Z2BG_\U,"O_(1P<_Q88%O\^0S__3$I%_\6YM?]'3$K_+B\P_QD4
+M&/\8%AG___3Q_SDT.O\7%R3_)B$E_RDK*/]]<G?_&A<=_XJ+C/^RIZ;_<&IN
+M_Q,2&/\:%AC_%Q(2_W=P:O^;E8W_N+:Q_[V]NO_!OKG_J:&=_YR:E?^<FI?_
+M)2(A_R <'O\<%QO_$Q$4_QH8&_\3$13_%Q48_Q<4&O\7%1C_(!X at _QD8%_\?
+M'AW_*CDU_S<[.?\8'!K_+C8R_R<B)O\G)A__)"DU_QPF2_\4&R'_/SLY_X2)
+MCO^7K*[_ at 9:8_WB&A?]E;6G_771O_R1!<?]Y?GS_E7]]_W=M8O](0D#_*2HK
+M_R,E(O^?D(O_&2,G_S9?<O];C:C_99.O_UV1LO]0?9O_JK"Z_U]56/\L*BS_
+M%!@6_]#*R/^@FYO_'AP>_["PL_]"1$G_LK"S_QH:'?]S=7C_%1(8_QXB)/\E
+M*2O_%!07_Q,.$O\6%!?_&A@;_Q<5&/\2$!/_%1,6_Q,1%/\0#A'_$0\2_Q$/
+M$?\1#Q'_#PT0_QL8'O\?)CS_%!8E_QT8&O\E("3_$A 3_QD7&O\L*BW_'1L>
+M_Q<5%_\5$Q7_34E+_RDD)O\:$A;_%A(4_Q(1$/\A'R+_$18<_Q@>(/\L*2C_
+M'Q,6_V1:;?\;$BK_C("5_X%V??]%/#S_(1X=_SQ /O\P,C#_S\3#_S U,_]'
+M2$G_(!L?_QH8&___^//_03U&_U-8:O\4$A7_)"0A_V]D:?\7%!K_$Q05_R@=
+M'/\6$!3_%Q8<_QH6&/^/BHK_?WAR_YV7C_^MJZ;_TM#-_]C0S/_7R\?_K:BC
+M_ZNIIO\9%A7_&Q<9_QH5&?\3$13_%Q48_Q$/$O\4$A7_%1(8_Q84%_\6%!;_
+M.3 at W_R0C(O\I.#3_-3DW_QD=&_\O-##_)2(H_QT?'/\4&B+_%QL[_QP>(?];
+M65;_BYB=_XFCIO^%F9W_96UO_UAB7_]7;VS_)4)R_WA]>_^+=7/_;V5:_T])
+M1_\D)2;_(R4B_YV4C?]=;''_'RY%_Q K0_]]I;K_F\OG_Z',Y/^\P,+_4$='
+M_RDG*?]@9&+_SLC&_YF4E/\;&1O_N[N^_TI,4?^XM[[_&1L>_V!C9/\8%!+_
+M(1T;_R8A(?\9%!;_% \5_Q at 6&?\B("/_'1L>_Q(0$_\1#Q+_$A 3_Q84%_\3
+M$13_$0\1_Q$/$?\-"P[_'!D?_S,[2_\2$A__%0\3_Q(3%/\0$!/_&!89_R0B
+M)?\7%1C_&!88_Q<5%_\8%!;_&A47_QP7'?\-#Q;_!PP2_P@'#O\3#A+_'!H7
+M_Q\;$_]^:6?_?6)R_T at N2?]3/5?_0C8__XV"@?]645'_.3L^_RTO-/_.PL/_
+M+C,Q_TQ-3O\=&!S_%Q0:___Y^O\G)BW_*B\[_S4T.O\H(B#_5U%5_QX;(?^6
+MD9/_M*RJ_SXY/?\.$!7_&A@:_QX;&O^ ?G?_E9.+_[RZM?_7T<__V]#/_]?,
+MR__"M[;_K:FG_R0A(/\:%AC_%1 4_Q,1%/\9%QG_%Q47_Q at 6&/\7%1?_%Q47
+M_QL9&_\?'1__'R,A_R\W-/\X/3O_'"$?_S0X.O\?(2;_&AT>_Q$6'/\7&BS_
+M%R(C_T-84O]YCY?_EK&\_WR8H/]>>(/_0U=E_TAE<?\R1V3_>7MY_Y:$=O]P
+M95K_2T=%_T)%1O\Q-C+_CH^(_WIW=O\<&2;_05IQ_X&PPO^9Q=G_C+?)_ZJY
+MN?\X-33_.3X^_V1I9__-R<?_CXJ*_QP8&O^.CI'_86-H_Z:DI_\3$Q;_,#,T
+M_R4B(?^2AH'_9%]9_QL=&O\;%QG_&108_QT;'O\5$Q;_$A 3_Q(0$_\2$!/_
+M$0\2_Q$/$O\2$!/_$A 3_PT+#O\;&1S_4$]5_TA'3?\4$A7_$A 3_P\-$/\0
+M#A'_&A@;_Q<5&/\1#Q+_" 8)_Q(0$_\:%1G_1D)*_S\]1O\Z.D7_$0\8_S(I
+M,/] -3K_B7Z#_UA'4?]I76S_9%IK_W=N>_]O:&__HYZ at _YV7E?]*1T+_55-5
+M_];*R_];4U'_2D-#_QH5%?\<%AK___3T_RPG*?\>("7_(1X=_RXF)/],1DK_
+M(R F_R8>(?\J(B#_'1@<_Q@:'_\<&AS_=G-R_X.">_^8F(__O+JU_]C0SO_>
+MT]+_V<[-_\6ZN?^JIJ3_1T1#_Q82%/\7$A;_%Q48_QH8&O\7%1?_&!88_Q<5
+M%_\7%1?_&QD;_QT;'?\?(R'_,38T_S,X-O\=(B#_.#8Q_R0B'_\?'"+_%A,?
+M_Q01*/\P-U7_'S%5_R V7/\O5'?_.VJ-_RU:@O\>16__+%5Y_RY(;_]\@XG_
+MGI"!_VQ at 5_]%03__+C$R_RHO*_]?8UK_ at 7]Z_UUF;?]9?)/_;YZV_Y_!V_^#
+MI;G_H::K_S,L+/\T-3;_55I8_WZ ??]L:VK_*2PM_Z"DIO]>8VC_PL##_Q,1
+M$_\0$Q3_'AH<_R,A&O^XKZG_BWQ\_Q<1%?\6%!?_%!(5_Q02%?\3$13_$Q$4
+M_Q,1%/\2$!/_$A 3_Q,1%/\2$!/_$Q$4_QH8&_])1TK_%1,6_Q .$?\2$!/_
+M$A 3_R >(?\B("/_(R$D_Q<5&/]$0D7_%!(5_Q .$?\4$!C_$@X6_QH6'O\G
+M(RO_,2XT_S at S.?].2%#_85ID_Q$.&_\4$1W_'!@@_T(]0?^MJ*K_LJJH_WMO
+M:O]L967_BX*"_W!G9_]L967_7%=7_T,^0O__\.S_.S0T_RDJ*_]-1T7_-2TK
+M_S8P-/\:%QW_65%4_V-;6?\G(B;_&QTB_R8D)O\V,S+_CXZ'_YB8C__$PKW_
+MV='/_]?,R__:S\[_Q;JY_ZJFI/]/3$O_%1$3_Q81%?\3$13_%Q47_Q84%O\5
+M$Q7_%1,5_Q84%O\>'![_&A@:_R(F)/\X/3O_,#4S_R,H)O\U.3?_%Q\I_PL7
+M,O\,$S?_"!,R_RL\:O\>.7;_*4M]_R=0@?\^;)[_+%J,_S);C/]%:)#_3F:-
+M_TQ47O^7C8+_=VMB_S at R,/\_0$'_2$A%_VIN9?]J;F;_=8.&_SM=<?]]K\3_
+MH,?@_WBBLO]UA(3_A'M[_T,_0?\;(R#_+3,O_T-(1O]U>GK_N,#"_Q4?(_^]
+MN[[_'!H<_QH='O\>&AS_%1 at 2_[>IIO^;@(;_% \5_Q$1%/\4$A7_%!(5_Q .
+M$?\4$A7_$Q$4_Q,1%/\1#Q+_$A 3_Q43%O\3$13_%!(5_QX;(?\3#Q?_$0T5
+M_Q ,%/\1#Q+_&A@;_QD7&O\8%AG_$A 3_QT;'O\4$A7_&A@;_Q(/%?\2#Q7_
+M%1(8_Q42&/\3$AC_$1,8_Q,5&O\7&1[_$1(9_Q,2&/\R,#+_'QT:_Z&<G/^/
+MA(/_:%Q7_V9=7?]@7%[_7EI<_TI&2/]134__4E!3___R\_\J)2G_("(I_T(^
+M0/]#.SG_-2\S_Q<4&O^3BX[_FI*0_RHE*?\7&1[_'!H<_RDF)?^/CH?_E96,
+M_\3"O?_6SLS_U<K)_]K/SO_-PL'_H9V;_R<D(_\6$A3_%Q(6_Q,1%/\7%1?_
+M%A06_Q43%?\5$Q7_&!88_R,A(_\;&1O_)"@F_SD^//\O-#+_)"DG_SL[./\;
+M(RW_-D9 at _T-/:O\U.E/_,#I._SU/7?\L/$S_)RP^_RLR0/\Z1$__7F)J_VAG
+M9O]N9V__@'ES_X1Z;_]S9%[_GY>5_T]-3_])1T3_'QL=_QP8&O\C)2C_/DI3
+M_Q8M,O]/8F[_CJBU_QPE)?\E("#_/#] _TA-2_]65E/_34Q+_TQ-3O^RM+?_
+M&!L at _[2OL_\>'![_'R(C_QX:'/\A'QS_N;6S_UA35_\6%1S_$@\5_Q(0$_\0
+M#A'_%A07_Q\=(/\5$Q;_%1,6_Q02%?\3$13_$0\2_Q,1%/\2$!/_-S,[_S(N
+M-_\=&2+_&A8?_Q(/%?\7%1C_%Q48_Q<5&/\/#1#_(!XA_Q<5&/\0#A'_&A@;
+M_Q02%?\4$A7_%!(5_Q,2&?\3$AC_%!07_Q,4%?\4%1S_$ X7_U%/4O\?'!O_
+M;F]P_V9D8?^%@'K_5%-2_TQ04O]%24O_2$A+_TI(2_]'14C___C[_R(?)?\5
+M%AW_'QP;_SPV-/\@&Q__(QTA_RX?'_\G%Q3_(AH>_Q,5&O\1%!7_0$%"_XB&
+M at _^;E8W_Q\*]_];/S__=U-3_V]'-_]'%P?^>EY?_'!<9_Q82%/\4$A3_$ X0
+M_Q43%?\=&QW_$Q$3_Q<5%_\9%QG_,"XP_QH8&O\E*B;_,C<U_S,X./\D*B;_
+M/#@P_R at D'/]O;&?_8EQ4_W]U<?]L8US_;6A<_W!J8/]U:F/_<F9=_W-F6_]J
+M7%/_4DY,_U).4/]=6%/_;61=_VI>8?]%.C__'QH:_RXJ+/\;%1G_%Q,5_R0C
+M(O\?&AS_-"4?_R<9%O]74E3_>W-Q_R4=(/\E)"K_)BHL_R @(_\H)BG_&18<
+M_VAG;?\@'R7_G)ZA_Q<9'/\:&AW_'1L>_R,>&?_$P<#_:&=M_R,@)O\7&A__
+M%QD>_Q(/%?\8%QW_*RPM_QT;'O\@'"3_$Q$4_Q84%_\U,S;_#0L._Q84%_\Q
+M+C3_.#0\_QH6'O\;%Q__%Q(6_QH5&?\6$17_%1 4_Q83&?\7%!K_%!$7_Q83
+M&?\1#A3_$0X4_Q83&?\5$AC_%A,9_QL9'/\9%QK_&1<9_Q$-%?\3#Q?_.#0\
+M_RLH+O\G*2S_<G9T_U)44O]*34[_1$A*_T1&2?]#14C_1D9)_T9(2__[\_;_
+M)R(H_R$@)O^.B8G_=6]M_RHE*?\8%AG_O;>U_[VUL?\<%QO_&!<=_QD5%_\5
+M$Q7_DHV-_YZ5CO_*Q<#_UL_/_]W4U/_<TL[_TL;!_Z"7E_\?&AS_%Q,5_Q43
+M%?\6%!;_&1<9_QP:'/\3$1/_$Q$3_QH8&O\;&1O_&QD;_R0I(_\U.CC_+C,S
+M_R8L*/\T-2__+"DD_TM&0/])0SO_8UE5_UE13?]*14#_.38Q_S L+O\D(B7_
+M&!89_Q at 6&?\;%QG_'QL9_RPE'_^ =FS_9UM<_V)<6O]I9F'_0#T\_QX:'/\9
+M%QG_-#(O_R<<&_]*,R?_.24=_\J]O/^#>77_(!D9_RPJ+/\S.CK_2$M,_RDJ
+M*_]A8F/_?7Y__Q$2$_^SM;C_&QT at _Q86&?\<&AW_*28A_[VXN/]@6F+_&Q@>
+M_S(W//\X.C__'QPB_R at J+_] 04+_%Q0:_R<C*_\3$13_# H-_S(P,_\6%!?_
+M$A 3_Q\<(O\8%1O_$Q 6_Q$.%/\8$Q?_&108_Q81%?\5$!3_$@\5_QH7'?\3
+M$!;_$@\5_QD6'/\1#A3_&A<=_Q,0%O\6$AK_%Q,;_Q41&?\6$AK_'QT at _RPJ
+M+?\M*R[_4E!3_TE*2_]%2$G_1$=(_T-&1_\_043_/D!#_T%#1O]"1$?_0$)%
+M__CP\_]'0DC_*RHP_Q\:&O]O9V7_)!\C_R B)?\@(!W_(1X9_Q43%O\:&1__
+M'Q<:_RXL+O^-B(C_GI6._\_*Q?_2R\O_U\[._]S2SO_6RL7_H)>7_R0?(?\=
+M&1O_%A06_Q<5%_\9%QG_&A@:_Q,1$_\2$!+_&A@:_QP:'/\:&!K_)RPF_S<\
+M.O\J+R__)2LG_R\Q+O\H)B/_/#DT_T0_.?]'/SW_&Q(2_R,<'/\>&1G_,RHD
+M_T0\./]234W_:%]9_V9:4?]Q9EO_>FU at _X5W:/]Y<6?_9F-8_VQK8/\U,S#_
+M(!XA_R(@(O\H)"+_+1X8_T8K&O\X(17_U,;#_Y2,BO\>&AC_75]<_V%L9_]:
+M8%S_7V5A_VEO:_]=8U__/4,__TI/3_\1$Q;_#P\2_Q84%_\W-"__3$='_R :
+M(O\8%1O_-#D^_UQ>8_]95ES_,#(W_U!14O\2#Q7_(!PD_Q<5&/\6%!?_&QD<
+M_Q84%_\@'B'_%1(8_R0A)_\4$1?_'ALA_QH5&?\:%1G_%A$5_Q0/$_\7%!K_
+M%A,9_Q at 5&_\5$AC_%A,9_Q01%_\6$QG_&18<_Q00&/\@'"3_%1$9_R,?)_\U
+M-C?_4U53_V5G9?]*3$K_2DQ/_T1&2?]!0T;_/D!#_T)$1_\_043_.SU _SY 
+M0_\Y.3S_^O+U_S0O-?\D(RG_;VIJ_Y:2D/\>&1W_+2 at L_Z"5E/^+?WO_'1<;
+M_QH9'_\=&QW_'1X?_Y60D/^>E8[_S\K%_];/S__8S\__U,K&_]C,Q_^=E)3_
+M&A47_QT9&_\5$Q7_%Q47_Q at 6&/\:&!K_$Q$3_Q,1$_\;&1O_'!H<_QP:'/\G
+M+";_.3X\_RHO+_\F+"C_-30M_R\J)/]+13W_6U-)_W]S:O]-03C_13LQ_TU%
+M._]P8U;_(1D5_R\K+?]/1#W_<F58_WUP8_]]<F?_AGMP_W!H7O]E8E?_<G!H
+M_S8R-/\<&1__&QD;_RTH(O\Z*![_8D(L_T at O'O^^KZK_:6%?_S$M+_]C9VG_
+M86QM_UE?8?]46ES_-ST__SM!0_\L,C3_3U-5_Q$3%O\-#1#_(!XA_SPY-/^-
+MB(C_(1LC_R,@)O\Q-CO_7F!E_Q83&?\9&R#_2DM,_R =(_\<&"#_)B0G_R$?
+M(O\<&AW_2$9)_TI(2_\4$1?_44Y4_Q\<(O\0#1/_%A$5_Q at 3%_\4#Q/_&Q8:
+M_QD6'/\5$AC_%A,9_QX;(?\:%QW_%!$7_QL8'O\;&![_)2,E_R4C)?]'14?_
+M=7-U_U]?8O].4%7_2DQ1_T-%2O]!0TC_/D!%_SP^0_\[/4+_,3,V_S$S-O\T
+M-CG_+S$T_QD7&O__]/7_+R<J_QD9'/][?G__DY25_Q<7&O\='2#_:65G_VUL
+M<O\2%1K_)B<H_QP4$/\L*"K_D(N+_YB4C/_.S<;_U-'0_]',S/_6T,[_RL"\
+M_Z::G?\>%Q[_&Q@>_Q42&/\5%A?_$Q05_Q87&/\0$1+_#@T3_QP;(?\8&1K_
+M'B >_R at M)_\\03O_*S L_RLP+/\O*RG_03PV_U=11_]H7E#_8E!&_U! -O]%
+M.B__75-(_U]23?\5$!+_$! 3_VYI8_]L8EC_>VYA_WEK7/^$=VK_8UQ6_V!<
+M5/]Q;&;_'A89_Q40%O\2$!/_(!D9_U1!.?]A0RW_5#<E_Y!]=?^/AX7_*R0K
+M_V!?9?]05%;_4%-4_TQ-3O\'!@S_(R$D_T5!/_]25DW_)B at E_QX=(_\8%QW_
+M349 _UA03O\;%AS_%Q8<_R at P-/]66%W_&Q@>_Q06&_]$1DG_%!8;_QT=*/]$
+M1DO_+2LT_Q\<(O].3$[_3TQ2_Q01%_]85EG_'AP?_Q84%_\1#!#_&A88_Q at 3
+M%?\A&AK_&Q8<_Q,/%_\;%Q__'1DA_QP:'?\@'B'_'QT at _U)04_]O<73_55E;
+M_TU14_]*3E#_0TA-_S]$2?\^0TC_/T1)_T!!2/]#0DG_0D%(_T)!2/\]/T3_
+M/4%#_SH^0/\V.CS_%1,5__SZ]_\K*"?_&!88_QL9'/\M+3#_&!@;_Q86&?\4
+M%!?_'ATD_Q06&_\5&!G_*28A_QX?(/^/CHW_GIB0_]/.R?_3R\G_U<O'_]/*
+MQ/_+Q;W_HY>8_Q at 2%O\9&1S_&!,7_QP8&O\:%AC_'!@:_Q<3%?\1$A/_'1X?
+M_QL=&O\D)R'_'" 8_T!#/?\E)R3_%1<5_QX:&/])1T#_5E-(_VE?4?]X9ES_
+M6$@^_T4X+?]21SS_8E-3_QH2%O\8&AC_<FQD_V5;4?]M8%/_:EI*_W-F6_]5
+M4$O_:6=@_UI84?\3#A#_%1 4_Q42&/\?&AS_138Q_UL^+/]*,!W_EH9]_X9_
+M?_\D'23_*28L_V)H:O]\?X#_B8J+_Q<4&O\;%AS_>W=U_V]Q:/]B9&'_&AD@
+M_Q<6'/]%0#O_3TE'_QL5'?\5%QS_&B(F_SD[0/\9%AS_%!8;_T=)3/\/$!?_
+M'!PE_U!25_\V,3W_'QLC_V%?8?]855O_$Q 6_UA66?\>'!__'QT at _SLZ0/\8
+M%QW_%!$7_Q83&?\9%!C_$@P0_RHD*/]*1$C_4E!2_W1R=/]<6ES_6%98_TM-
+M4O]'2D__1DE._T)%2O\_0D?_.SY#_S@[0/\S-CO_+C U_RXM,_\N+3/_+BTS
+M_RLM,/\K+3#_+C S_RLM,/\@'B#_\_7R_SP^//\A'R'_CXJ._T]-4/\6%!?_
+M%Q48_Q43%O\C&Q__&108_QT@(?\6&!;_&Q\=_Y"1B_^BFY7_U<_-_];+RO_5
+MR<7_T\K$_\W'O_^>DI/_% \1_QD:&_\;%QG_&!,3_QD4%/\>&1G_%A$1_R4F
+M(/\R-2__0$,]_T)%/_\R,B__+BXK_R$@'_\1#Q'_(!L;_T1"._]$03;_7%)$
+M_WIK7O]31#?_0C,F_U-"-?]:2TO_'AD?_R,E*/]234?_;6)7_V582_]K7E'_
+M95Q5_U)-4?].2TK_:FAC_Q41$_\6$A3_&A<=_Q83&?\J'A__6#TL_TTX)O]H
+M7UC_;FIL_QX7(?\@'R;_4UM?_V%E9_^)B(?_%!$7_Q<3'/^"@(/_?X%^_V-C
+M9O\8%A__%!,9_TY,1_\6$Q+_&14>_Q at 7'O\8("3_5%9;_Q,0%O\@(B?_65I;
+M_Q,2&/\H)R[_9&1G_S0P.?\?&R/_9V5G_V%>9/\3$!;_5U58_QX<'_\C(23_
+M96-F_QP;(?\6%A__1TI5_R <'O]D7U__ at GU]_UU86/].3$__2DI-_T]/4O]&
+M1DG_0$)'_T)$2?\^0$7_/3]$_SY 1?\Y.T#_/D!%_ST_1/\]/4#_/S]"_SX^
+M0?\_/T+_/CY!_SP\/_\S,S;_(" C_QH6&/_V\O#_/SHZ_RTK+?\G)RK_FIB;
+M_Q at 3%_\8$Q?_&108_QP8&O\5$Q7_&AT>_Q at 6&?\?&QG_FI6/_YF4CO_6V=K_
+MS]/5_]#0T__-R,K_R[_ _V)65_]'0D+_?X%__V)?7O]34U#_0D(__S,S,/\N
+M+BO_.#LU_S0V,_\K+2O_(R,F_QP9&/\6$Q+_$0P0_Q40%O\@&1G_14,\_U93
+M2/]M8U7_@')C_W-E5O]N7$[_;UM-_UU.3_];5U__*"LP_T5"/?]G6U+_9EE,
+M_UM01?]*1D3_(Q\G_QH8&_]555+_'AT<_QH5%?\9%AS_&!<>_Q\9'?](+B'_
+M2C at J_U)33/]&14O_(!LG_Q<9(?]>9VW_8F=L_VAG9O\=&B#_&QLF_T)$3/\X
+M.CW_+2PS_Q at 6'_\6%1O_/$$]_PT/$O\4$1W_&!8?_Q$9'?]%1TS_&!4;_Q06
+M&_],3DS_&A@;_R,@)O]F9&;_+"@Q_Q\;(_]H9FC_75I at _R,@)O]&1$?_(1\B
+M_R0B)?]02DC_13\]_T9$1_\5%1[_8F1I_U!/5?],2U'_24A._TE+4/]&2$W_
+M/T%&_T!"1_]-3%+_-30Z_S O-?\M+#+_*RHP_RLJ,/\J*2__*2 at N_R<G*O\I
+M*BO_*2HK_RDJ*_\K*2S_*2<J_RHH*_\;&1S_&147__ST]_\G(B3_&1<9_Q87
+M&/^2D)/_'QH>_Q at 3%_\8$Q?_%Q(6_Q02%?\5$Q;_&108_T0\./^7D8?_F9B1
+M_\'$Q?^TN;[_K[.U_[:TM__!N;W_96-E_V=J:_]=8&'_,S8W_R0I*?\A)B;_
+M(",D_R,D)?\B("+_'1L>_Q at 6&?\6%!?_$ X1_Q,.$O\3#A+_$@T1_R(=(?\_
+M.3W_+"8D_UY72_]K85;_74] _WQD5?]^9%G_+1XB_R at C*?\W,S7_/CLV_V5:
+M3_]A5TG_5TQ%_SPV-/\:%A[_%Q@?_SP_1/\]/$+_'ALA_QP;(?\/#1;_)B(K
+M_RTB)_\E(AW_6EQ3_U-15/\;&"3_&1DB_UE<7?]77%;_6EQ9_QD9'/\9&![_
+M7F)@_V]Q;_]D9&?_&!<>_QP;(?\_03__*2DL_QD5'?\=&2'_/ST__QT:(/\9
+M%1[_%Q8=_S$P-O\;&![_'!H=_VII:/\B'RO_(!TI_U!+3_],1D3_*B(>_R@@
+M'/]/2D7_:&5 at _XF%A_]85EG_24=*_Q44&O])3%'_1DE._SP_1/] 0TC_/D%&
+M_T)%2O]%2$W_0$-(_T%$2?\_0$?_.CM"_SP]1/\Y.T#_.SU _ST_0O\O,33_
+M*RPM_RPJ+/\I)RG_*2<I_R at F*/\H)BC_*"8H_QT;'?\6$A3_^/#S_RDD)O\=
+M&1O_&QD;_WQW>_\6$17_&A49_QD4&/\8$Q?_&!,7_Q\:'O\F(27_2T5#_Y"+
+MA?^+B83_'Q\B_V)G;/]26%K_2DY0_S<W.O\K+3#_*"HM_R0F*?\:'!__%148
+M_Q04%_\9%!C_$PT1_Q(0$_\3$13_$0\2_Q$/$O\0#A'_$0\2_Q$/$O\0#A'_
+M$0X4_SXX0/\=%QO_)R,;_TI%.?]>4D/_;%E'_W5 at 5/\I(27_,BTS_T1 0O]"
+M0#W_55!*_U%,1O]'0D+_.C4Y_Q\;(_\7&!__4UA>_U-26?]/3E3_5U9=_Q at 6
+M'_\[.$3_,R\X_UM96_];7%;_6%99_QH7(_\9&!__6UQ=_U]C6_]@8F#_&!<=
+M_QH9'_]?85__8&)?_UY>8?\8%Q[_%Q8<_T5$0_\V-#?_&!0<_QT:(/]K9F;_
+M;6AN_QD5'O\<&R+_;VQR_QT:(/\C(23_5U57_R8@)/\L)"C_4DU-_UM65O^ 
+M>WO_A8""_T1 0O\T,C3_)"0G_Q\>)/\;&B#_$A$7_R(B)?\H)BG_*RDL_S(P
+M,_\R-#?_-SD\_SD[/O\Z/#__.3M _S at Z/_\V.#W_-3<\_S(T-_\L+C'_*"HM
+M_R<I+/\J*"K_*"8H_R at F*/\H)BC_)R4G_R<E)_\F)";_*"8H_QD5%___]OG_
+M(!@;_QX9&_\9%1?_&108_Q<2%O\<%QO_&108_Q81%?\6$17_%1 4_Q at 3%_\7
+M$17_(!L;_Q<4$_\7%!K_'B E_QX@(_\H+"[_&Q\A_Q$1%/\1$13_$! 3_Q 0
+M$_\2$!/_$PX2_QH4&/]P:&S_%Q(6_Q(0$_\1#Q+_$0\2_Q$/$O\0#A'_$0\2
+M_Q .$?\9&1S_3TM3_S<R./\C(!O_/3LS_U=01/]:4$'_7E=+_QH5&_\8%!S_
+M.#8Y_SP[.O\^04+_45-8_U%26?\L+33_'QLC_Q<8'_]A9FS_:&=N_VUL<O]O
+M;G7_%A0=_QL8)/]=6V3_9F9I_V=H:?];6F#_&!8?_QH9(/]C8V;_:&IH_V5F
+M9_\6%1S_%!,:_U976/]?85[_5E99_Q85'/\9&![_0#\^_R<E*/\:%A[_(A\E
+M_V=C8?]A7&#_'!@@_R$@)O]'14?_'AP>_R at F*/]>7%[_>G5P_ZFAG?]W<G3_
+M6UA>_U157/].3E?_1$9._SU#2_\X.T#_.3Q!_R\R-_\I+#'_*RDL_R\J+O\M
+M*"S_+2 at L_RLI+/\H*"O_)B8I_R8F*?\F)BG_)24H_R at H*_\G)RK_)R<J_R0D
+M)_\D)"?_)24H_R at F*/\G)2?_)2,E_R4C)?\E(R7_)2,E_R at F*/\B("+_+2DK
+M__SP\_\>%!?_,2DL_RPG*?\<%QO_% \3_Q40%/\4#Q/_%1 4_Q40%/\4#Q/_
+M%Q(6_Q,/&/\7$AC_&!,7_Q at 1&?\3#!/_%Q$5_Q40%/\1$13_$A 3_Q(0$_\1
+M#Q+_$0\2_Q 0$_\-#Q+_%1,6_R ;'_\5$Q;_$A 3_Q$/$O\1#Q+_$0\2_Q .
+M$?\1#Q+_$ X1_Q$2$_\8%Q[_&!0<_R$='_\1#Q'_2D9$_U502O\C(B'_$Q$:
+M_Q43'/\M+#+_4U15_TU05?]665[_7EUC_S M,_\<&"#_%Q@?_UYC:?]B86C_
+M8F%G_V-B:?\8%A__%Q0 at _UY?9O]@86C_8&%H_U166_\;&B'_&!<>_V5D:O]E
+M9&K_7UYD_QD7(/\<&B/_34U0_S$S,/\G)RK_&QHA_QD8'O]!0#__+2LN_R <
+M)/\G)"K_0#TX_T(^0/\A'B3_'Q\B_V1E7_^"@WW_EI64_V=E:/]565O_55A=
+M_TY05_]*2E/_2$=._T9'3O]%1T[_049,_SY#2/] 14K_/D-(_SU"1_\^0$/_
+M/CY!_S0T-_\O+S+_+"HM_RLI+/\J*"O_*B at K_RLI*_\H)BC_*2<I_R at F*/\I
+M)RK_)B0G_R4C)O\E(R;_)B0F_R0B)/\D(B3_)"(D_R0B)/\D(B3_(B B_R8D
+M)O\2#A#_^_+R_R8;(/\;$AG_%Q,5_Q at 3%_\5$!3_%1 4_Q40%/\5$!3_%1 4
+M_Q0/$_\7$A;_%Q,5_Q<3%?\5$!3_&!$;_S0J.?\:$QO_%1 4_Q0/%?\1#Q+_
+M$A 3_Q$/$O\/#1#_#0T0_Q 0$_\?'"+_&Q<?_Q81%?\5$1/_$P\1_Q00$O\2
+M#1'_$PX2_Q(-$?\3#A+_$1 7_Q<3&_\8$AK_&1$5_QD4%O\M*"C_14$__STZ
+M.?\6$AK_&!0=_RDF+/]245#_35%/_U%34/]24TW_)R0C_R <)?\='"+_6UU;
+M_V-B8?]=6UC_9V-E_QD5'O\:&"'_6EIC_V)C:O]H:7#_1450_Q46'?\:'"'_
+M7U]H_V1D;_]B8FW_$A(=_QL;)/]A8FG_7F!H_V%?:/\?&R/_'!XC_SY /O\Z
+M.#K_'AP?_R at E*_]:55#_8%M6_W9R</]D86#_5EA;_U=97/]:7&'_45-8_TQ1
+M5_]*3E;_14E1_TA*4?]%1T[_149-_T-%3/]%1T[_0D5*_SP_1/\]0$7_041)
+M_SU 1?\^0$?_/#Y%_SP^1?\X.C__.CD__RPK,?\I*"[_+BPN_RHH*O\J*"K_
+M)R4G_RDG*?\E(R7_*"8H_R4C)?\F(B3_)R,E_R(>(/\E(2/_)B$E_R,>(O\W
+M,C;_)!\C_Q81%?_Z\?'_(!<>_QL4'/\;%AK_&!,7_Q40%/\5$!3_%1 4_Q40
+M%/\5$!3_% \3_Q<2%O\7$Q7_%Q(6_Q,.$O\8$QG_'!4?_Q at 3%_\5$A'_%! 2
+M_Q$/$O\2$!/_$Q$4_Q,1%/\3$13_%!(5_Q82&O\<&"'_&Q8:_QP8&O\?&QW_
+M&Q<9_Q<2%O\5$!3_%1 4_Q0/$_\3#Q?_%Q,;_QX9'_\?&1W_'1D;_R,@'_\G
+M)"/_(1X=_Q82&O\8%!S_(!XA_TM+2/]-3TS_3E)*_TY/2/\<&!K_'A<A_QL8
+M'O]65E/_6EI7_UQ95/]955?_&!0=_QH9(/]@7V7_75Y?_V%B8_\]/$/_%A<>
+M_QL=(O]A8FG_8F)K_V%A;/\7%R#_(B,J_TY05?]355K_5U)8_R<?(_\E)"/_
+M6UM8_UU>6/]K:FG_E9.5_X2 @O]95E7_6%A;_U-56O]04EG_3E!7_TM-5/])
+M2U+_2TU4_T=)4/]"1$O_1$9-_T1&3?]#14S_04-*_T%#2O\^04;_0$-(_T!#
+M2/]!1$G_/#Y%_ST_1O\]/T;_/D!'_ST_1/\\/D/_/3]$_S at Z/_\R,#/_*B at J
+M_RLI*_\H)BC_*"8H_RDG*?\G)2?_)"(D_R<C)?\H)";_*24G_RDE)_\E("3_
+M&!,7_Q<2%O\8$Q?_% \3___U^O\O)2[_1SY+_R$<(O\:%1G_%A$5_Q40%/\8
+M$Q?_%A$5_Q81%?\5$!3_&!,7_Q<1&?\7$AC_#0@,_W9R=/])1$K_&147_QH8
+M$_\8%13_$A 3_Q43%O\5$Q;_$Q$4_Q,1%/\-"P[_&14=_QT9(O\=&!S_&!06
+M_Q,/$?\4$!+_%Q(6_Q81%?\9%!C_'!<;_Q00&/\7$QO_'ALA_QT;'O\A'R'_
+M,2\Q_S\]/_],2DS_%Q,;_QD5'?\E(R;_45%._U%34?]/4DS_4E)/_QT8'/\C
+M'";_&A<=_UE95O]555+_5E-._UA45O\;%R#_&1@?_UA75O]96E3_6%E2_R4C
+M)?\<&R'_&QH at _UQ;8?]=7&+_86!F_Q<6'/\D)"?_5UA9_U!03?]744__7E94
+M_U924/]Z?7?_8&-=_U=95_]65UC_3%%6_TM/5_]*4%K_1TI6_TY06/]*3%/_
+M1TE0_T=)4/]%1T[_1DA/_T=)4/]$1DW_/D!'_T!"2?\[/43_0T5,_T!#2/]!
+M1$G_2DU2_SY!1O\_04C_/T%(_SY 1_]"1$O_/D!%_SY 1?\_04;_/D!%_SHZ
+M/?\R,C7_+2TP_R<G*O\K*2O_*"8H_RHH*O\C(2/_)2$C_RDE)_\B'B#_+BHL
+M_QD4&/\4#Q/_&!,7_Q,.$O\8$Q?_^>[U_SXT/O\F'"O_'QDA_QD4&/\6$17_
+M%1 4_Q<2%O\:%1G_&!,7_Q40%/\8$Q?_%Q03_Q<3%?\:%1O_'1<?_QL4'O\L
+M)RW_(1T?_QP7&_\5$Q;_'AP?_QT;'O\<&AW_03]"_TE'2O\:%A[_'AHC_QT8
+M'/\8%!;_'!@:_Q\;'?\@&Q__(ATA_R0?(_\H(R?_&1,;_Q at 4'/\='R3_2T]1
+M_TU/4O]04%/_45%4_U145_\7$QO_&14=_R\M,/]I:6;_5597_U%34?]14$__
+M'!<=_R,<*/\8%1O_6%A5_UA85?];6%/_5%!2_QL7(/\;&B'_6%98_UM:4_];
+M653_)2(A_R$=)?\<&1__7UU at _T=%1_\M*RW_(B B_R at G)O]24TW_>7YX_Y&1
+MCO]O:FS_5U=:_U)45_]14U;_4%)7_TY/5O])4%;_2$]5_T1(4/]%1E'_1DA0
+M_T=)4/]&2$__1$9-_T%#2O]$1DW_04-*_T!"2?] 0DG_04-*_T1&3?]!0TK_
+M/D%&_T)%2O]!1$G_1$=,_T%#2O]!0TK_/3]&_T!"2?] 0D?_/D!%_S]!1O\^
+M0$7_.3M _ST_1/\\/D/_.CQ!_ST[/O\W-3?_,C R_S N,/\M*2O_)B(D_Q82
+M%/\7$Q7_%1 4_Q(-$?\7$A;_%1 4_Q81%?__^?C_(1,4_RD;(_\;%!S_&Q8<
+M_Q<2%O\<%QG_(AH=_QX7%_\=%1C_%Q(4_QD4&/\6%!;_%Q47_Q,0%O\7$QS_
+M&A,?_QT7&_\A&AK_%1,5_QP9&/\Z-S;_/#H]_T5$2O])24S_3T]2_QT9(?\C
+M'";_(APD_TE&3/]'1$K_1$)%_SDW.O]$0D7_3DQ/_U%/4O\9$AS_&Q0>_RDE
+M+?]45EO_55=<_U577/]<7F/_7%YC_Q41&?\:%A[_(B$H_UE87O]56%W_6%Q>
+M_UE;7O\8%1O_(!PD_Q<6'/]14U'_7%I5_U=63_]345/_&A8?_QX:(O](1DC_
+M/3XX_TE*1/\I)RK_(AXF_R0C*?]:6US_6UE6_UY85O^!?'S_ at 7Y]_UY?8/]0
+M5%;_5%=<_U-56O]25%O_3E!7_TM-5/]&2$__0T5,_T1)3_](25#_2$=._T9'
+M3O]*3U7_24I1_TA'3O]#14S_24M2_TA)4/]&1T[_0T1+_T)$3/] 0DK_0T5,
+M_T9)3O]%2$W_1$=,_T!#2/]$1TS_04-(_T-"2/\_/D3_0T)(_T%#2/\Y/$'_
+M.T!%_T _1?\^0$7_.CQ!_SH\0?\Z/$'_.SL^_S\]0/\5$Q;_)2,F_Q<5&/\T
+>,C7_%Q48_R<E*/\4$A7_$A 3_Q02%?\3$13_%1,6
+ 
+end
diff --git a/lib-python/2.2/test/testrgb.uue b/lib-python/2.2/test/testrgb.uue
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/testrgb.uue
@@ -0,0 +1,971 @@
+begin 644 test.rgb
+M =H! 0 # 'T :0 #         /\     3FEE=7=E(%-P:65G96QS=')A870 
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                                                            
+M                          O8   -6@  #MT  !!@   1W0  $UH  !36
+M   65@  %]D  !E9   :W   '%\  !W?   ?8@  (.4  ")H   CZ@  )6L 
+M ";J   H;   *>\  "MR   L]0  +G@  "_Z   Q?0  ,P   #2#   V!@  
+M-XD  #D+   ZC@  /!   #V3   _%@  0)D  $(<  !#GP  12(  $:E  !(
+M*   2:L  $LN  !,L0  3C,  $^V  !1.0  4KP  %0_  !5P@  5T4  %C(
+M  !:2P  6\X  %U1  !>U   8%<  &':  !C70  9.   &9C  !GY@  :6D 
+M &KL  !L;P  ;?(  &]U  !P^   <GL  '/^  !U at 0  =P0  'B'  !Z"@  
+M>XT  'T0  !^DP  @!8  (&9  "#'   A)D  (8<  "'GP  B2(  (JC  ",
+M)   C:<  (\J  "0K0  DC   ).S  "5-@  EK4  )@U  "9LP  FS(  )RS
+M  ">-   G[,  *$N  "BIP  I"@  *6E  "G)@  J*(   Q8   -VP  #UX 
+M !#?   27   $]@  !56   6UP  &%D  !G:   ;70  '.   !Y@   ?XP  
+M(68  "+H   D:P  )>H  "=J   H[0  *G   "OS   M=@  +O@  #![   Q
+M_@  ,X$  #4$   VAP  . D  #F,   [#P  /)$  #X4   _EP  01H  $*=
+M  !$(   1:,  $<F  !(J0  2BP  $NO  !-,0  3K0  % W  !1N@  4ST 
+M %3   !60P  5\8  %E)  !:S   7$\  %W2  !?50  8-@  &);  !CW@  
+M96$  &;D  !H9P  :>H  &MM  !L\   ;G,  &_V  !Q>0  <OP  '1_  !V
+M @  =X4  'D(  !ZBP  ? X  'V1  !_%   @)<  ((:  "#FP  A1H  (:=
+M  "((   B:,  (LD  ",I0  CB@  (^K  "1+@  DK$  )0T  "5MP  ES8 
+M )BV  ":-   F[,  )TT  ">LP  H#0  *&L  "C*   I*<  *8F  "GI   
+MJ2$   S9   .7   #]\  !%>   2VP  %%<  !76   76   &-D  !I;   ;
+MW@  '5\  ![A   @9   (><  "-I   D[   )FD  "?K   I;@  *O$  "QT
+M   M]P  +WD  ##\   R?P  - (  #6%   W"   .(H  #H-   [D   /1( 
+M #Z5  ! &   09L  $,>  !$H0  1B0  $>G  !)*@  2JT  $PP  !-L@  
+M3S4  %"X  !2.P  4[X  %5!  !6Q   6$<  %G*  !;30  7-   %Y3  !?
+MU@  85D  &+<  !D7P  9>(  &=E  !HZ   :FL  &ON  !M<0  ;O0  '!W
+M  !Q^@  <WT  '4   !V at P  > 8  'F)  ![#   ?(\  'X2  !_E0  @1@ 
+M (*;  "$&@  A9L  (<>  "(H0  BB0  (NC  "-)@  CJD  ) L  "1KP  
+MDS(  )2U  "6.   E[<  )DW  ":M0  G#0  )VU  "?-   H+4  *(K  "C
+MJ0  I28  *:G  "H(P  J:(   "     @0   ($   !_    ?P   'X   " 
+M    @0   (    "!    @0   ($   "!    @0   ($   "     @0   '\ 
+M  "     @0   ($   "!    @0   (    "!    @0   ($   "!    @0  
+M (    "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    
+M at 0   ($   "!    @    ($   "!    @0   ($   "!    @0   ($   "!
+M    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($ 
+M  "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0  
+M ($   "!    @0   ($   "!    ?P   ($   "!    @0   ($   "!    
+M at 0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($   "!
+M    ?P   ($   !^    @0   '\   "!    ?@   '\   "!    @0   ($ 
+M  !_    ?P   '\   "     @0   (    "!    @0   '\   "!    @0  
+M ($   "!    @0   '\   "!    @0   ($   "!    @0   ($   "!    
+M at 0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($   "!
+M    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($ 
+M  "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0  
+M ($   "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    
+M at 0   ($   "!    @0   ($   "!    @0   ($   "!    ?P   ($   "!
+M    @0   ($   !_    @0   ($   "!    @0   ($   "!    @0   ($ 
+M  "!    @0   ($   "!    @0   ($   !_    @0   '\   "!    ?P  
+M ($   "!    @0   ($   !_    ?P   '\   "     @0   (    "!    
+M at 0   (    "!    @0   ($   "!    ?P   ($   "!    @0   ($   "!
+M    @0   ($   "!    @0   ($   "!    @0   ($   "!    @    ($ 
+M  "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0  
+M ($   "!    @0   ($   "!    @0   ($   "!    @0   ($   "!    
+M at 0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($   "!
+M    @0   ($   "!    @0   ($   "!    @0   ($   "!    @0   ($ 
+M  "!    ?P   ($   "!    @0   '\   "!    @0   ($   "!    @0  
+M ($   "!    ?0   'X   !\    ?0   '\   !_    ?P   'D   !\    
+M?P   '\   !_    ?P   ('J^!0C'!P6&1T7&!08%A<6'!\;&A48-CU*3%(A
+M)B1,2D4Z14]2'!XM6UQ<8V,9'BA>75Y>&R0<455/4Q\B2#A$*B8I7%96?'U@
+M5EQ:6U=43TQ/4$Y.55%.3%)03DM,2DQ.34Q(3$A(1$A(00-% T&+/D 6)A at U
+M&"@5$Q4!% $6 .WY$QL4%A(7&A<5$A04%1 3$Q<:$QDW.D1)3QD<'$9$0C="
+M3$\2%"565U=>7A$6(5A87%L5'!936E91%AI&/DHG'B-;65A\?E]45U544$U(
+M14E)1T=/2D=%2TE'1$1"14E(1T-'0T(^0D,\0#]  SR+.ST3(Q4R%242$!(!
+M$0$3 .W_(2D;&Q<<(AX=%QD6%Q,7&ATA%1PZ/$5)3QTC(DE'1#E$3E$9&RE4
+M555<7!4:(EE56%D8(!=17%=3&AY(/4DI(B1:6UZ!@5Y05%-23DM&0T1(2$9*
+M24A#24A&0T) 0T9%1$!$04,_0T$Y.T ^ SJ+.S\5)1<T%R<4$A0!$P$5 /OU
+M/BLA&!44%AD7%!<3%1L?'BT?&Q8?'AU"2AXC'!8:'1\A(R<;'"114E-45QL=
+M,&9744\=*!M555-2("%84U0A)1]@1RTB)DUXCFQ:5U975E954%%04$]-2DU*
+M24E*34I&2DE,2DI&24=%1D5 1$-!/C<R,"LD%!44$18!% $5 +;N-!P9%!$0
+M$A43$!,4$Q47%"<=%Q,<&QH_1Q8:&!08&QL='R,3%!]/3U!15!,5+6E64U 7
+M'!4#6,)0%QI66EDB'1E=12L@)U-^D6I75%-23U!/2$9(24A&0T9#0D)#1D-!
+M141'0T,_0D) 04 [/SX\.S4P+BDB$A,0#1(!$ $1 /OY/B8?&185%QH8%1 at 7
+M%QH=&RPA'!4>'1Q!21H>'1@<'R B)"@9&!U+35!15!<9+VE545$<(QA86%M4
+M&QM86ULE(1Q?1RTB*%)YD6]74E%03DE(1$5&1T9$041!0$!!1$$^0D%$04$]
+M0$ ^/SXY/3PZ/3<R,"TF%A<5$A<!%0$6 /OZ+DLB&144%Q45%!<9& QT2A<3
+M%!,6%A04#ATB'!81$A85&!L8&R$>(3$_3!L=)DY13$\<)AU64DY6(!]65%(E
+M(2!A8F8<)UE-3U10=UU76%976E984U!03D]034=)1$Q(25)&2$A'2T5%1D4]
+M-3 J*R at J(R,G("P8$Q<!$@$7 ./U)3X<%1$0$Q$1$!,1$@AR1!48%1 3$Q$1
+M"Q49&!0/$!(1%!<0$QL;'R\]2A,5(U%34E(8'!=955-4%QA76EDC&QI;7& 6
+M)%A04592?6-95U%/4$I03$E)1TA)1D!"/45#1$T#095 1$! 04 Z,BTG*28H
+M(2$E'BH4#Q,!#@$3 /O_+T<A&A85&!86%1 at 7%PUV21D:&!(5%1,3#1D='1 at 3
+M%!<6&1P4%QX=(3$_3!<9)5%13U(=(QI95598&QE865 at E'!M<76$7)%=05UY6
+M>F!75DQ+2D=.2D='149'1#Y .T- 04H^/S\^0CX^/SXZ,BTG*R at J(R4I(BX9
+M%!@!$P$8 (7Q'AP:%P44C!,6%182&1\7$1(2$P,4XA4:(1H:'1D6%!03%QL?
+M'1L?(QT:'"%(3$I(&B$>4U=45QT at 95]C0QXB:6ML("I56E at C(UA8:96"55M:
+M65=44E102TU-3$I*1DA(245&1D=$0T0_,RHK*"@I)R0E)B<G)!<6 1<!$P"%
+M\1<4%A,%$(P/$A,2#A,5$Q(0#Q #$8T2$A at 6&!L7$A 0#P\3 QG2("0>$A0>
+M2T]23Q at 7&%9:6544&5]>8CP7'6)B81<C4%52'R1;7FJ3@%9855)034M-241&
+M1D5#0T%#0T0^/S] /SX_.C H*28F)R4B(R0E)2 3$@$3 0\ A?H@&QL8!16!
+M% ,7B!,8'!@5%!$2 Q/B%!8<&QP?&Q<5%103%QX?'2,G(188($M-3DX<'AM6
+M6EQ9&!I at 76$]%AMA8F$7(DY35R<E6UUKE8196%-03DM)2T="1$1#04$^0$!!
+M/#T]/CT\/3 at R*BLH*"DG)"<H*2DE&!<!& $4 (7R(!D5%P44\1,6%144&SD;
+M%!42$Q(0$!,B'Q43$1(1$A$2%QL:%18H/SD:'2Q03U!-(R4B6V%891XA8VIP
+M4!TA:&]M'21I:&@C(SXZ'RM05G!@6UQA6%=645%.34Q.2D1%245'144_/S$N
+M+BHJ)RDE*"4D)2 C)2(V 2,!%0"%\AL2$Q,%$/$/$A,3$!$J$Q /#Q /#0T0
+M'!<1$0\0#0X-#A 3$A$4*$$Z$A0F45%34R0<'%UB6V,5&%IC:446'%]D8A(;
+M8F!?&QY .!PE55MR85A97%-13DE*1T9%1T4_0$1 0#X^.CDK*"PH*"4G(R8C
+M(B,>(2$>,@$? 1$ A?LF&Q<8!16!% ,7[148-!H5%!$2$0\-$!\;%A43%!(3
+M$A,1%Q at 9&2U%/188*5)-45(G(!U;8UUG&1I:8FA%%1I?9&(2&V%>81\</CH>
+M*%I@=F165UI13$I%2$5%0T5"/#U!/3X\/#@Z+"DN*BHG*24H)28G(B4F(S<!
+M) $6 )[S%RPI&Q,4$Q04$Q88&!<9$Q44%!,3$A(3$A8?%A,#$KL1$A$3'AP?
+M$41*(1H<,E557F,S(!]I:&=I'R!F:&A;(1YJ:F0@(U P*B$>/RXD*CA )")?
+M?91H6UU74P-.BTQ(2DA'0T$W,BTL RN%*"DH*B<#)@4D at 2(!)@$0 )[P%"DG
+M%P\0#Q 0#Q(/$A,1#!$0$1 0#P\0#Q,;$Q ##[L.#PX2%Q0=#T90(A$3+%10
+M65TM&!AC86%B%A1?86%6&A=D9%X7&DTS)QH80"L<)#T^'A]E at Y5E65A02@-'
+MED9#14-"0#XT+RHI*"@I)B<F)R0C(R0%(H$@ 20!#@">_!XQ+!P4%105%107
+M$Q<8&!,7%1$2$A$1$ T5(!42 Q'4$!$0$1 at 8(1%*52,3%2U3359>,!P77F)B
+M8Q at 77F!@5!L8965?&1Q-,2<;&4$M("= 0B$?9(*69U553DI(1D5!/D ^/3X^
+M-"\L*RHJ*R at I*"DF)24F!22!(@$F 1( EOD;&Q<8%AL8%144%Q4;$QHE(RXA
+M%!0#$X42&&P6$P,2Q1$2$1Q3.!LS1$%+&QPY.D)8630C'VQN<G4=)&1I:6 ?
+M(&9H9QP:6%Y9'!X^*!XE86 @)D<>*%YPG71>7%=.2T!!-S$L+@,L at 2L#*8DH
+M*RHJ)R<H*"<$)8$H 2(!*P"6]A at 9%102%Q01$1 3$1L4%" @+!\1$0,0A0X4
+M:!(0 P_)#@\.&4LR(#M04%<5%#8[05-2+1L89F=L;A086V9H6A898VIF%1-7
+M8585&#\E%A]C7!@@11PF7'6A<EA53D9#.SPR+"DJ*"@I* ,FB24H)R<D)"4F
+M)00C at 28!( $I )[_(!X9&1<<&186%1 at 7(!<7'AXH&Q$1$! 2$QIP%Q(#$<D0
+M$1 93S<C/5=:7AH8.#P^45$L'Q=A:&UO%AM=9F=;&!IC:&46%%9?5A890"<:
+M(F=A'"%''BA>>JEW6U1.1#TX.2\I*R\M+2LH R:))2 at G)R0D)2 at G!"6!* $B
+M 2T ]?,F&QM[%1D8%Q<>)4.%A")L6E Z,"TI'Q at 7&!$3%!(2$1(2$11 &QLY
+M0T=4)3-"/4I&0CDC'UY95%T?1#A;5EDC'UU;8!T?7U]A'AQ#-QP at 9FX>(G(@
+M)%<D*$U6>X)"-"<D(!<E*2PS-SP^/T _/3PW,2TL*@,H at R<G)@$H 1< ]? D
+M&1EW$144$Q,:(46+B1]G6$XW+2HF'!44% T0$0\/#@\/#@XX%R-%4EE@(2U 
+M0%!,0C4;&%A23E86."]97%87&%QC8A<986)>%Q9$-!0:9F at 5&VP:(54@)$U6
+M>X! ,B0>&A$B)BDP-#D[/#LZ.#<T+BHI* ,F at R4E) $F 14 ]?@I'1M\%AH9
+M&!@?)DN0BQ]B4DHW*R at D&A44&1,2$Q$1$!$1$!$^'2=*7FQU*3)$0E511SH?
+M%U-33U<8.S-;6U@:&5M?8!@:7V!>&!=%-A@=:VT9'&\=(U<F+%);@(5$-"0?
+M&Q(B*"LR,C<Y.CDX-C4R+"@G*@,H at R<G)@$H 1D ^/<D&1B3'A<7%A46&#B'
+MD<6^M;>]96MA-RDF)"4B'AD7$1(2$2$])$M60%59(BDU-D])130>'T1"(2$6
+M*R<=4U0D(EU661P>8&]G'B$_+!TA/R >'38>'6 at K*4]$'AQ%8(=92AI13D1(
+M1DI-2$E'0D1 0$(T+2PI*0,H 1T!% "@]"(7%Y :$Q,2$A,4/)&8Q+FSM+EC
+M:F V*28C)" ;%A0##I at -'3DF5V%/9&0>(S,[6E=,-A88/SP;&PT#(H1<41 at 9
+M URS&1AB<607&T$I%1D]&A46,!@::1\=2T8B($IEA59'%$Q)/T-!14A#1$ [
+M/3L]/S$L*B<G R8!&P$2 /C\)QD6DA\8&!<4%1E$EYG!M*^VP65G73,D(2 C
+M(AT8%A 3$Q(B/RQ>:UU\?BTH-SYE85<\&A<\/1X<#R8M)5I3&QE95UH9&5YO
+M9!@</RD9'3\=&1<Q&QQJ(B!03"HH3VB)6$D5248\0#Y"14!!/SH\.3L]+RLL
+M*2D#* $= 18 ]? Z+2J;%Q<8&A4>&1F/CMK5T\K 5T)_7E _,"LU,RLF&!(0
+M%AD\2%5C5DY-3U\P/5),140G&U(<%1P>'2$J3$LG(6UL9B F3#TS'QL]$AT?
+M'4P;&TP;)F8Q(VA@)D<B)4@]1QYI55%.4$U&1U(Z-3(P,"\N*@,K at RPJ*P$<
+M 1< PO(Z*R>8$Q,4&!,=%AN5E-G3T,B_5D*!7U-",RX[-BTC&1,,$!E#4V-R
+M95Q;3E<K0EM94$8?&%4=%187&2XX4T4;&0-GL!H;1#HL%A5!#Q$6&4<5%DX8
+M(&0H&V9:($0?(DH_1!5D3TM(2TA!0DPT+RPJ*BDH)P,J at RDG* $9 14 ]?8_
+M+2>:&!@9'!4:&!^:F=;/T,W+8D=_8E-",RXX-"LC'!81%2!%5FV <VYO75LH
+M16=F6THC&E4>&AD8'TA*4D8@%UYB:!T;0C at M&!8\#108$448%$P:(V8L'VA=
+M(T8A)%!%1A5B4$Q)248_0$TU,"TK*RHI)P,I at RLI*@$; 1D ^_(\(8Y0%Q at 6
+M'Q at A%AV+E<W*Q<2_DQ$;&1,4&1$@+ST_+RL?$1L[-D1>-R8U2Q\H1U=+4551
+M2F,3%!T9'RPF6&PA)E]GAQ<<@WYF'QE'$AX>)%L6)UL8+F<Y(V=D%E@?)&8A
+M'U4>7WU83TU224=)141%0$5$0$)!0D$_-@$C 1@ ^_4^'XI-%!43&Q0@&!^1
+MF\_+R<K'D@\:%Q,4&1$F-4-%,BX@#QM"05)K1#-"2QDE36)87EQ-2V at 1$A<3
+M'CTX7VH7'UMEB!$3@(%C%A-,$Q47(%80(EH2)V0P&V5>$%4<(6,;%DH<7WU8
+M3$I/1D)$0#] .T _/3\^/SX\,P$@ 18 ^_,\(8]/%A<5(QD=%AN0HM76U=/-
+MGA09&Q at 9'A8E,D!",BXA$2!$1%QZ4T)36AXC4FUE:V523FH5%AH6*EA-:&X>
+M(%-AB107 at G]C&!1.%AD8&%03(%D3*&0T'V=A$U<>(V4<%D<@9()=3DI/1D!"
+M/CT^.3X]/3\^/SX\,P$@ 1H ]?<G&!PP&QD7)!L9(2"-D,G)Q\2]F!8<%QH8
+M&A43'QHA&#TD%1A 2%%</BT\4Q889%%32EM+8%$0%!@<,2P=?7\D+&J BQH<
+M=6AA(!P[1QT<)D <&TP7)5<](V%;%ED?($ =%QD8$"A(4G1<6%)/3DI'0T [
+M-0,S at S P,P$P 2  ]?HH%ADM&!84'188)A^.F,[+R\K%EQ(9$Q at 6&!,2'ATG
+M($,G%QI'4U]F2#A'4Q(:;%M at 6F909U@.$!(:-CXPAG\=)FA_BA06=W%D&19 
+M2147(CL6%DD0'%(Q&U]5$%8<'3H7$1,4#"1$4'):5DU*245"/CLV, 4M at 3 !
+M+0$> /3\*Q@;+1 at 6%!X4%2D>CY[3T]73RZ,8&1@<&AP7$1T;)!Q )14>259I
+M>%A%4F(:&')E;6IS56E:$Q45'T5;2I:&)"EB?(D7&WMO8AH714\;%1HY&11'
+M#QQ0-A]A6!-8'A\[&!06&1(J2E)T7%A+1T9"/SLX,P0N at RLK+@$K 2  ^_4J
+M''^5&B!G<AHH$"J+C,;0S,Z\G1X>&!<5&!(3(1H>)SLL+"DV1U!&-B](31(3
+M8UAA7&I65&89%A,9.2TE=84K95943 at PD/TTE(QU 3AP<-%T>&TD;*$LT(DY2
+M%UD?%Q 8%1H<%Q\A'2$@4W1;4U!-24A)2$E(2$1#0 $\ 14 ^_0G&7Z4%QUE
+M;!4G%"B+E,W1S-# FA<8$A84%Q$-&QD at +4$P,"L\45Y00#I34A 0:6)N:W=<
+M7&P6$! 904,W?8<D7U13308A058H'1=&4!86,%@8%D86'48K'$Q,$58<% P6
+M$QH6#Q<9&AX=4'%944Y(1$-$04)!03]!/@$Z 1, ^_\O&7N3%QUI;1(F'"R0
+MF,[4T=;*IAX;%143%A .'!@>*#PK*R]!5VAB4$5=7Q40;FQ[>81C8'$>%1(@
+M5&%4D(\K8%!03 <C15(F'AA-6!L7*%8;%$04'40M'TY/%%@>%A$:&"$;$QL=
+M'" ?4F]534I#/SX_0$-"0CT].@$V 14 ^_4U*6J0'2R4>QL?'1^0CL7/S\;'
+ME!<;%1<8&A,3&QP<)CPO*"TD/4EJ.#$[5A4M/5AC9W!>5V at T'QLB'BP>JE\O
+M:6UA7#]#-%46$"$TB",F.V49($PC("<B'4E+%U0B$Q47$QH<&!DA'1<>'B4E
+M1W5B55%*2$5#0C8V.0$T 1H ^_(O(VJ2&2B5?Q<9&QZ0E<K/S\K,E!49$Q46
+M&!$1&1H:+#XO+#0J15-S03M%8QDK1&5P<GMH8G R&1DH*$(OKV$M9VQ?6CU!
+M,E,3#1XYB!L at -F 3&TL=&"0?&D9($4X<#1$3#Q86$A,;%Q$8&",C17-?4$Q%
+M0T ^/3,S-@$Q 1< ^_HT)&^6'BV at BQT:'1V5GL_6V-38G1H=%1<8&A,3&QP<
+M)SDJ)C4O2UM_345-<"$O3W)]?89P97(V'!LM.F)(OFDQ8V%95#<[+$\1#2 \
+MC2$C,5X6&4H@'"8A'$A*%%$?$!88%!L9%18>&A0;&R4E1W5?3DI#03X\.S$Q
+M- $O 1D ^_-(,!IE(R4=&18?&BZ(CL7+SL[%ER$;%A<9&A,2&AP:)CHO)RXC
+M-#D]$AP9)#A-65%;8&AG6& P(2(B&!H5PXH87&=<86M?/T\6$A<O1R(;/F-<
+M-U(5)!@7'!<A&"<7(1D9%1,:&1L8&1<9'!@D&2<W4V5*3TE&0T=$0 %# 3P 
+M^_!"*AIG'R(@'A,9%RR(E<K+SM+*EQ\9%!47&!$0&!H8+#PO*S$F.3\_$AP9
+M*CQ-7UIF;7=Q8VLS'B D'BLAQHP:7VQ at 96]C0T\3#Q0T1QH5.5Y6,E$/'!44
+M&10>$B$1&Q45$0\4$Q42$Q$3%A <$1\V56=,3$9#0$1!/0%  3D ^_A'*Q]O
+M)" @(14:'RZ-GL_2U]S6H"0=%A<9&A,2&AP:)S<J)2\H/$1'&R,>,T12:&9Q
+M>H5Y9FPU("(H+48XU)0>76%:7VE=/4H1#Q8W3" 8-%Q9,% 2(!<6&Q8@%204
+M'AH:%A07%A at 5%A06&10@%2,U4V5*2D1!/D(_.P$^ 3D KO8H)HEM*1FUL1L=
+M%Q6-CL#/U,[!EQP5%189'!,3&AL;(S at S*"\D0#M534 Q+B4$&<D?;%Q:83P<
+M&2\;)QV\=1DL.DPK8W\3N" 9'2&X8AX\/R(O0AHK% TS%Q,B&Q84%Q at 5%!4=
+M%A4<%!T6&AL9&B M+E-+24A'1$-& 4<!10#[\R(@B6\E%K>U%Q<5$XV5Q<_4
+MTL:7&A,3%!<:$1$8&1DI.C,L-2E&0UE1138L(A86%QLE=EM<9CT:%S(<,R6]
+M>1DJ.DLJ8GX2M1T6&B:X6A at W.APJ010C$0HP%! <%1 .$Q01$ \7$ \6#A<0
+M$A,1$ATJ*U!*2$=&04!# 40!0@#[^R<ACG4J&+V]'!@9%9*>RM;=W-*@'Q<5
+M%AD<$Q,:&QLD-2XF-"Q+26-92CDP)!@8&Q\L@&=B:4 >&30G2CG*@R L,T at I
+M87T1LQL6'"F]8!LR.!\H0!<G$PPR%A(?&!,1&!D6%1(:$Q(9$1H3%A<5%A\L
+M+5))141#/SY! 4(!0 #[^R4=&S0?(1\4'AH50H.-O<_4S<&7&104$!4=$Q<9
+M,!HF-3 at F,!QG5'%<7&!C75M33%!376$_&BP9%2(<'Q94<2 J+",I'&TEH1P=
+M'AG ;28?'A4=+1XD%!<V#A<T/!X?%AD5%!D:%QD4%!D8&1P:&147/"XL=%).
+M2DE( 4D!2P#[^!\6'#8;'1\7&A4408:5PL_4T<67%Q(2#A,;$147+A at J-S@J
+M."1L7'5C:&IJ9F9<3DY89%XZ&BH5$R,:)1E2<QTD*B F%F<?GAD:&Q[!9R :
+M&0\7+!L<$10S"Q0N-!87$A41$!,4$1,.#A,2$QD7%PT/-"@I=E1-2$9% 48!
+M2 #[_R(5'SP@(RXG(A,10(B;Q];=V]&>'!84$!4=$Q<9,!HE,C,D/"AO8G]L
+M;7!U<G-J4E)=;6I%'RX;%R0?-"=7>R4E)B H&6@@G!<:'2/$:",7%Q(8*QT@
+M$Q8U#18Q.!H;%QH6%187%!81$185%AL9&1$3."LG<E)*1$1# D8 X?,I*4 Y
+M,QJ.D"D>'"6'C+W,R<[!FR,4%A07%A45&",;)CPR)S at M8&I33EU,/D!/:F9O
+M<V]>E4]$'1HH4S)NM24 at 0$M32TZW(+,>(QP<LU<<%1,1%R 6%A44$A03.S<B
+M'Q4#&(40(1 at 1&P,5C1D8%Q4<%U(;<&%Z4E(#2P%( .'R)2(^.R\4BY(E&1HF
+MCI7"SLK/PITD$A(1%103$Q8A&2@^-"D[(T9/.CI//"PR1&)G9WEZ9)=-1QL8
+M)4HM8J at E(#]-5DQ-M!NO'"(:'[53%0\0#A0=$Q,2$0\1$#,N&18/ Q6%#1X5
+M#A@%$HT4%!4.3QQO9(!34$E( 4@!10#A_RH at 0D,U%Y.:*A<<*8^5Q-;5VLVA
+M)Q87$Q<6%148(QLD.2\D.QLV0S4P/2PG*SI>:&Z A'.?3TD?'",^%D^.'"4\
+M2%9-3+(8M!X?'B&Y6!82$A 6'Q45%!,1$Q(W,AT:$@,7A0\@%Q : Q2/$Q,4
+M$Q0041]N9H543$5( 4H!1P#J[#0K12LT'519)B(F,H>/O<_+SKFD2Q,5%!<6
+M%146'AHD.S,F-RDR-S)J=GV!GHR,D(U>@F(P045E9H9QQ."RA'M!("]&>L(C
+MOAP>'!*FAA44%141%104$A,6%!4A%Q44$AL:&1,>%1L5%008C1H>&1 at R&IR#
+M5UU>7$@!3P%3 );P-"I'+3 745LB'20SCIC"T<S/NJ9, Q'1%103$Q0<&"8]
+M-2 at Y'Q<3$SPY2U!L6EMH9E2-:S) 2&YN at UVOQZ*$>S\C,TAZP!^[&AT:&*F 
+M#Q$2$@X2$1$/$!,1$AL/#0P/&!<6$!L2& \/ Q*.$Q49$A(P'9R$7%U<6D8!
+M30%0 /O_.RE--38:66,G&R8VCYC$V=?:Q:I/%183%Q85%18>&B(X,",U%PL,
+M""L>*2<^+#)%3DR7=S at _2&IJ=3M]H'AUA$,;+4-UN!6]'!H>%;>;%!$4%! 4
+M$Q,1$A43%!X3$1 1&AD8$AT4&A(2%143$1,7$1,R'Z&/:&9 at 7DH!40%2 -/T
+M*24=)$HF(2 <'QQR>X^USM+-N:1#%!88&A<8%Q<;'2$T-B Q'R(?*%557'>-
+M at F]Y;XF!5S\R*UIZ;9.VV[FK+#98?6HMIFC#$Q0<&JE\%1<5%0,4I1,3%!,4
+M&TH6$1,3(2,D&$45$1 at 6'BLT.5!D&QT at 0:JH:F6"9V4!5P%" -/T)R >)D8@
+M'B(8&AIS at IBZT-/.NJ9$$A(5&!46%149&R,V."(V(AP3$3<Q-E1J6D552(.0
+M8$$Q+V-_9GR>P:6F+#5:@&LLI&/ $1,:(:]\$102$@,1I1 0$1 1&$<3#A 0
+M'B A%4(2#A .%B,N,TA:#A$8/:BJ;V6"9V4!5P$^ -/_+!XA+DPC)BH=&!QV
+M at YB\V-[9Q:I'%A<7&A<8%Q<;'1\Q,QTX)!\6%# ?("\[+1XL+GR>;$4N*E^!
+M75EOGX.A,S15?FPIH%["$Q >([B+%Q84% ,3I1(2$Q(3&DD5$!(2("(C%T04
+M$!02&B<Q.$YA$10<0JVR>VR+<&P!7 %# -/Z+3LZ(%4ADZH]%1H:=XNUS\_+
+MMJ<@&!04&1<8%Q<;'R$T.Q\Z)AX<+"-2E[R@@V5Q9'EV6D5&,HAV)G'"V<FY
+M-#YGQXH:D6BG%C0A at 5D:&1@>%@,3I1(2$Q,.'%5-%1,0$1L8$@D3&4I&11 at P
+M.H-1;&M[;Z"50E7+44,!%0$: -/Y)B\T(E$;D:PY$!@;?I.ZT=#,MZDA%A 1
+M%Q46%149'2,W/2$X(1T6&B)8C[&8>%=E1WN$94=%-H]W&5JPQ;>Y-3YIR8H8
+MCF.D$S,BAE\=%Q0;$P,0I0\/$! +&4]'$A -#A at 5#P80%4(].@\I-7Y'75IN
+M:)Z71U/*4T,!%0$6 -/_)RHU*%<>EK0^#AH>@)6\U]O7PJTD&A43&1<8%Q<;
+M'Q\O.!PT'QH1%Q=#>99\7D-(,GF6<$M",8YZ'$&!F8RJ.#EDS8\<CF&F$S E
+MDF0;&QD=%0,2I1$1$A(-&U!(%!(/$!H7$0 at 2&D8_.A$R0(E8:61W;Z.=2E76
+M6TH!&@$< /OS1FH5(6D:%1P4'!B*<H^FS<S'HZ85&1D4&!(5&!<6-R(T-QLP
+M*!PB.R%6G::=;U]L<GMS6D<F(HUQ14.ZY^3"1REBQI0;OE&^'F02&R$6%1DC
+M'A,2$Q<4$1$.'TL?$Q03&248&!<6%QT6$@X2%Q-G<DE7/X%1/C3#,4X!' $:
+M /OX/5 at 2)&04%!T0%A:*>)>KT-#+J*D6%Q41%0\2$A04.",X.1TT(A\:&QY9
+MF*.9;6)O0GUU94DE)91L+BNER\S 1R=DR)09NTRW&V,4'2$4#Q8@&Q /$!01
+M#P\+&3L2#Q,0%B(5%A44%1</# <.&AMI8BX]-H)1.R_",TT!& $4 /O_05,4
+M)&\7$R at 6%QJ/?YVMTMC7K:L9&QH3%Q$4%186.20I-1DO)1T4%QQ;BXF%95A7
+M)7B+;T\D(YU='Q!]FZ&\4"E at SID;NTJX&6 8(289%!@B'1(1$A83$1$-'#,2
+M%1(0&"07&!<8&AP-!P at 3'!]^?4A30HU6.2W.+DP!'0$7 /OQ.B0E*'<=C*9N
+M&!@2:HVQNKF=E9<A'AL4&Q08&A@@%QTU.1HR)A\U2R$YCJZ8A6EO<7Q]8D K
+M(HLG<JBOLINZ6"P6R)L>LTFS'7 at 8)"L7$A<;&!,6%!$2$1$0'CPE&B03&BT>
+M%Q5+)A84$"(<("@6;2J5?3P=/C##,TD!'P$; -?T-!<A*W(7BZ=J$A82<)6V
+MO;ZAFIHB'!<1&!$5%!4>&!XY.QPV(B8I)AL[B:R6AFUT07Y_;4(J)9 C7XV3
+MD7VP52H8RIL<L$2P&G42(BD4#A08%1 3$0X##Z$-&"86&" 0%RH;%1-))!(2
+M$1\6'BD36A* =CP>0#+$-4@!&P$8 )O_.1<F*7T:BK)P$QH7=YNXO<&IG)PE
+M(!P3&A,#%[D@&1\J-Q at N)R<D'!0_A)>!>&5=)'F5=T at I(Y\9-EME75"J7RP4
+MT* >L$*R&G,5'B44$Q8:%Q(5$Q #$:$/&Q\4'242&2P=%Q5-*1H6$B$1&"P?
+M9!N, at 44A/##/,$<!( $: -;R(R<</FX3)RDE&1E66I*>@9>+<W43%A82%Q48
+M&!<C&1HR.AHX(28\4",<6V>)B&%K>WJ!8&XL+X-@,L#-M\"\8R0PQYHLJ3&K
+M%VHQ12D9%!07$Q0;& 02H1$?01\+)1L:)2$=(F at C'R,=,#I%'2=J#I\K'!8_
+M1;5*, $8 1D ^_,@(1Q!:0TF*B$3%U9 at FJ.%G9!Y>!04$@\4$A42%"$:&S8\
+M'#LB,"PG&QY;9XJ.:G!+?(-K<"LRCF(GJ+&6HK-B(C+)FBJF+*T88R0Y(A,1
+M$100$1 at 5#P\0$ X9(PT+(Q47(AX;(&8A#Q at 8)28](B]K#:(P'!A#2KE,+P$4
+M 18 ^_\H)"$_=! E-2<4&UMGH*6 G))V>!<8%Q$6%!<5%B,;'"<X&"\E-B8>
+M%2)45'1[8%DN=YEU=BHPCVH,<8-B>;-N)"[/GRRF*K$;8B<\(101$Q82$QH7
+M$1$2$A <' H0*!@9)" =(FHF$Q at 5&AXY)S5L#*0U(18^3,5'+@$9 1@ ^^XB
+M%1H:D2LJ)R<8(GE0DXUND'=N;1 8&!<9&1H:%R 5&3 W&$ <$5Y<)ALSG8MU
+M77!Z?H)-?B$O at HUBJ<#HD[UW(3_'H3$R+U892!4B(AP4&!42%R 7$A(3$A,8
+M0S 2%2XJ,R\Q)AP_/F<L374=%4HY%9\L&Q5&2)X;&@$I 3  ENT?$1L=CB4I
+M*242('E6FI)UEGUU<Q$#%+\6%A<7%!X6&C,Y&CX?%TXM&QXTFXEW9'5/?(=:
+M at B QCI58E*C-?+=V(#[)H2\O*5@:0@@6&A83%1(/%!T4#P\#$* 2(Q8/$24D
+M+2PO)!H])5$>,U8,&5P^$YTD%1=,3Z(=&0$F 2X V/DG%AX=FB at H,2H5(GY;
+MHY1ND7IN<A09&188&!D9%B 7&R<T%C at A'D8D&B FAG-A6UXN>)QGBA\OAZE0
+M9'F;6[R"'SO/J#$O*EP=0PD=(!D0%101%A\6$1$#$J 5'1$1%RDG,"XQ)AY"
+M(DH4(TH+(6%#&*(J&A5)4*P9& $H 3  ^_DG%QYTF!4OLYD9'A)!EGYF at VIE
+M91<=&Q@;'AP8&AH8&2\[&3P='S!L)!XTIZ1Y3V]\BW)6C2,=AI*WO-CB2(.3
+M(5#"J3H\3T,>@!H<'Q87&1D=0"(<(A$6$Q(9/58V(6='+R%^3A at F)#% @Z V
+M%B8=%J4N(Q1=&"P7%@$K 1@ ^_HC$1UWEQ,MM9<3&Q))G8-OC71O;147&!89
+M'!H6&!@6&"\[&3 F%BDV%!\MI)UT4FI5>8!@DB(;BIVJHL3-.X.8'DK IC@[
+M3D,=?AD9'!,4%A8:/1\9'PX.#P\0(BH<&$P^)QJ!3Q8B$B,U;8$D%BT?%J<H
+M&Q)C&S$8$P$M 18 ^_\K$AQYIA4INYP8'1=%J(5EA6QG:1<:&A@;'AP8&AH8
+M%R at R%#4B(!TA%B(ABH)=1E$M=)%SGB$?DJ>9=IBI*HB='DO%KCHX2SX:@A at 9
+M'!,4%A8:/1\9'PX4%!$2&"(6&%1 *QN#4AHG#1PR9W8?'3 at C':TK'Q1B&S,;
+M%0$I 1@ ^_<_)!<BIAL>$1,='(U!D'A>95]@8!@<%Q8:'185$AD7&2T]&3P>
+M&1EA)Q\=2J1\5FAW?6UNGB(?A96O:>3<)W][)&"]NS$_/5<HCQ07'1,;&"$A
+M1BTC+146$P\:/%1*3GJ'%1X,3!<?2%9<JG87$B$:%*$7%P]=F#H=' $H 1@ 
+M^_@['A8EI1D;$Q$7&8U)EWUE;6=H:!86%!08&Q03$!<5&"T]&3 H$!(I%R$:
+M2Z%X5FE8<H!WH!\=BJ&B3\_*'86 (EN[N3% /5 at GC!$4&A 8%1X>0RH@*A(.
+M#PP1(24G-51^$!(-314;0E1<HF\4$B4:%*,1#PUCFST;&0$N 18 ^_]#'Q4E
+ML1L9&18<&Y)%HG]<9F!A9!@9%A8:'185$AD7%R8T%#,A&@D;&R<8-(5D35(S
+M<9>%J"$?C:B1);.U$XJ#'EF_O2PT-$PDE!D6&A 8%1X>0RH@*A(4% X3%1H<
+M,F* #1404!D at 0U9CIFX6&3$?&ZD4$P]BFST=&P$M 1H F?=(*B PMADGJ;<<
+M$AD;CWDX.R\W6A at 7%1,#%=\3%!D9&"@Z%S at A(BUE1C(^,J2I=6-YD'Z!JAP>
+MB(QFB.6C7H&4+SJPP6EG>XF1HQ46'1,:$1D@(2(<'!(5$A$;-E!*-WR%&B<;
+M428O)1LJ=S59%!(4$HT8&!=8FS,?'0%# 8, F?A$)!\TMA<DJ[46#QDCEGX\
+M/S,[8!81$A$#$X(1$@,7VB at Z%RTK&24M-3A!.*6E=659?("!IA8<CIA9;<^.
+M5(2:+36NOVEH>XF/H!,3&A 7#A8='A\9&0\-#@X2'B,E&4]Y&!H94B0K(Q at E
+M<#96#A 2$H\2$!5>GC(<&P%* 80 F?],)1XOOQDDL[H;$1X?H8 T.2TU71 at 4
+M%!,#%=\3%!D9%B$Q$BTC(Q\?-C<_,YF9;D\S=(B&K!L>C9U(1Z]T1(29)S"P
+MPV1<=(21J!@5&A 7#A8='A\9&0\3$Q 4%1D;%&5^$!H=52 at P)R(R>4%@$106
+M&945%!==GC$<'0%+ 8< GO(^(QIWM1H6'Q at 9(H8DC7PT*QLO41 at 7%144$Q05
+M% ,9VB<U(#\:&2=N+R(J)C9Q3V.&<WIWMB,>AI1K at X!80BF>QX[%NU[#0LM<
+MDADH'!8:$R$F*QP>&!,4$A,A,$M!.WE]-BMY0Q8L2$0_AE2=&Q at 3%X\.&!9@
+M9C<A% %* ;D ^_4Z'!EZM1 at 3'Q83'(8KE($W+1\S6181$A,2$1(3$A<7&"<U
+M(#<>%1\V'RHP+39P3U%0.T]HLAT<CY]@;&M"-BNAQHG$O%W#0<E9CQ<E&1,7
+M$!XC*!D;%1 .#A :'"$='$]P-2!Z1!0H0CLT?%.9$A41%Y$+$A=F:38<$@%2
+M ;H ^_] &QAXO!H5)AH6'8LJG8,O*!DM4Q at 4%!44$Q05%!D9%R(L&S46'1DD
+M&28M-CMM1C ;&SA<NB(@C:)32T<A(2F?PX3#OUJ^/LUAEQDE&1,7$!XC*!D;
+M%1 3$Q(;%1H6&V9S*A]]1Q at M1T5#A&&A%!,3')<-%1AE:S4:% %4 ;T F/!3
+M1BL>K1<95545&QTBB8(L)B,N31H7% ,7 Q3=%AD7(S(2,B 3+&4S&"4K'3R$
+M9(6+EGVR)!R!E9.KLF5*@J38F*N\N->TPFZ!+1T?%1D/7%4F(24>%!03$A<^
+M44(\.G)>:HY+%C C,CA_2Z 9&1 at 6G1<6&'B1-B07 4,!O "8]D\Z)2"N%156
+M61<<'"F4B"PF)C)6&1$1 Q3@$1(2%!<5(C,2-1$8(3 A'RHZ(D>(0E= 3F>N
+M'QF0G(ZDHUA&AJ75F*J_N=.VPFZ"+AH<$A8.6U0E'B(;$1 /#Q0F(QT@'F9>
+M;9--$B@=*S%Y2Z 7%A(4GQ,3&8&6."(8 4T!O0"8_U4U*"2Q&1IA8QL=*BB5
+MAR4?'BI*&!83 Q;@$Q04%AD7'R<++0\;%QD>(2P\)TJ"(RP<+4G &B.0GHR9
+MET<ZCJC=DZG!NMBZR7.%+Q@:$A at -6%$B("0=$Q84$10='!0<+F%9:Y51%RP>
+M,CI^5*D<&!,9I1 at 5&H&9/"89 5,!P "8\'8A$8*F'!QH9!4=*ADS/BTG&RI'
+M'1H6 QG@%185%AD5(BX7,"43*6A(0R$C,RLZ8H61DW>I&AQ^E&ZXU28\HZ32
+M at I_ LM6ZQ'F%'1P:%!@6$$$@*"8A%103$!(]44 T.7II9X),(T!+$3B-6ID<
+M%Q at 9HC,;)79U,"$4 4$!Q "8]'8:#H2G&AAI9A4;)R ]0BTF'2Y0'!03 Q;@
+M$A03%!<3(2\7,Q88'C$S1B0K,#,]0%=&2U^E&A>0H'"URQX[J*3/@I[%M-*^
+MQGF&'AD7$145#T ?)2,>$A /#0\E(1X;(&YI:H=.'SA%"C&'6ID:%!(7I"\8
+M)G]Z-B8: 4P!P0"8_WT9$(BJ'AUT;AH=+Q<V.B8C&2E&&QD5 QC@%!85%AD5
+M'B,0*Q0;%!DO2"0M.CD_)"P>*D2W$R*-HWC!TAPXM:G9?9O*NMS(SGZ)'Q<5
+M$1<4#C\>)R4@%!84#P\<&Q,6+VED:(E2)#Q&$3J,8Z(?%A,<JC0:)W]]/RL?
+M 58!RP#[\946'#.R%!X?&" B=R")?$!"0$%%%!H6%A<8%187%AD8(2TG,204
+M(&LU/D J(BTV6820DF^A&B!^EUO9W#9VQ)[/DH._L]>[M85M+!\;%QD/$Q,D
+M'QT:%1,3$18K13$M8G5I57Y+&5:$?WZW?)T9'2$9I84\+7V5/1,5 2<!O #[
+M])87&C2S$AH9$AD:<"2/?D%"0T9.$Q03$Q05$A05%!<6("XG-!49%3(?/T(P
+M'#0Y-E=&2E*?'AN+GUK9V#1YR9[,DH/&N-?!MX5N+1P8%!8-$1$B'!H7$@\/
+M#A,?)1<726EI6(--%4Y^>'>Q?)T7&AL7IX$Y+H::11 at 8 2T!N #N_Z$:'C>V
+M%A\>%1<4=QR*>34Y.3M"$AD5%187%!87%AD8'2(@+!,<"QH90$ K'S8W'RD:
+M*SFQ%B:*I6CJZ#EYU:78C7S(O>#*OXIQ+AH6%!@/$Q,D'AP9%!44$!,8'Q$:
+M6&1D5H51&E)_?X"VA:8$'(FMACLOAIU)&AH!- &^ /OZL"(B'XD-%*2G%1 at 8
+M(I-C045%2$85'!09&A at 5&18<%1 at A,2DL+!DA=#@E*C0Y*S!3A(Z5=)@?%W26
+M5]C=*+NCH=&+:LVUU[ZRBEPQ'QT7&1,3%"8A'1<4$Q,4%1LN("(M-3E334X8
+M5#$C+&]$I146%!.G>C at MC99#&R,!(P&X /OZL2(@'XH+$*BK%QD5))9B0D9)
+M34\4%A$6%Q42%Q0:$Q8A,BDL'1L5.Q\F+3LX,S8Q5D1-49 at B%'N:5-?<*L&I
+MH<V+:M2ZV<2WB%TR'!@4%A 0$20>&A01#P\1$AH=$1$:+3M64E 43BL=)6E$
+MI1,3#0^G>#8PEIQ*'B$!(P&U /O_O"<D)(T/%;*W'QP?()9?-#@Z/D,1&Q,8
+M&1<4&18<%1@<)B(E&1\.(1HG*S0U+S$<*Q8N.*L:'GRB8.7K+L"PJ-R&8=6]
+MW\N\C6 S&A84&!(2$R8>&A01%!03$A<:#1 at E)S94550942XB+FY-KA at 5#A2L
+M?#@PEJ%,'B,!* &W /OMH20=@+$0&18D%QIF')X^/T)$2$P6&1H:'Q84%Q4=
+M&Q@@,"0N)R I=4$@1BY,*!U,='V4;ZL@&W&=.]O='97'MM":7LBPT+BOI$T=
+M)!L8&A,6%"LN(!L8)C= 32D;,692.TU;;&@=3B(2+FY3I!<8&AXK&1H9G9Q!
+M/4,!1 %S /OQGAX:@+$.%A8D%1=B')T]/T)&3%,5$Q87'!,1%!(:&!4>+R(J
+M'1X=.B8E43M9-" O2C9+/Z\8(72C/]O<()?'N<^97,BRTKJSFTT@'185& T0
+M$2DK&Q83)38_3"88,6A4/E)@<VH92AP*)F93I!45#Q$?#Q08G9Y(1$D!3 %W
+M /O_JB$:A;82&!TK&15H%9HZ+C$T.DH4%AP9'A43%A0<&A4:(AHB$R(:(20J
+M43M8,1X8'PDJ(L04)G2H2>KR(I_6N=N85L^ZVL*[I5(B'A07'! 3$RLK&101
+M)#4^2R at 6+&90/%=L?W(>4"$0+&Q<K1H7#Q(B%A47HJ9*1DX!4@%_ /OLJ"$>
+M(' 7$J.K%1@<)JV\@55-2TD7%A,;%184%Q4<'1LA+BPK+",Q>SXA,#U+1B%)
+M=7V0:9 at A&U"1,M_:*\_0=LZ?2\^HT+>9J"\7/2,8-S$8;$$F-*(C7DE /EA+
+M5XBAL+.NISTA5WI[@;&?IA<8(" S+BTB)1H4,#,!/ $O /OPI1L;(' 5#Z.K
+M$Q4:*:Z\?%%+2T\5$! 8$A,1%!(9&A@?+2HI)24F0B0C-D563R,L2S9'.9P9
+M(5.7-M_9+M'0><V=2="KT[J>GR\:.",:.B\6;40G-J0E84Q#05M.7(VEM+BU
+MM$,:3W)Q=Z>?IA45&14B%Q<0%0P,,3X!2 $V /O_L1X;)749$:JR%Q,>(;'!
+M=TE#0DH7$Q(:%!43%!(9&A@;("(C&B,E)A\G-4=632$5( DF'+$5)E.<0.[O
+M,-G?>]N?1=.MU;RCJ30<-1X6.#,:<$8H-*(C7TI!/UE.89FWR-'1S50C57AX
+M?JZHKQH7%Q0C&AH3&100-#\!2P$X /OOKA@:<#@4&1\4&1I(.+G;TM3(P)0=
+M&!89%!H6$Q4>'!@?+BTL+R<V=T4A)#9(5QI$;GJ2:Z(L&DB6+-;;,<[/E-"C
+M3\FDS+BXO"TM2)B1-*,MIS>5-' B03) /D1(KZFHH):LHC,=2!4=(T UFA at 8
+M%QP9)3,R+R at T,"T!*P%3 /OSJQ(7<#@2%A\4%Q=)/+O;T=#(PYH;$A,6$1<3
+M$!(;&14=+2LK*BLN02L?)CU17ADD1#-).Z8D($N<,-;:--#/EL^A3,FFSKJ\
+MLRTP1IN6.Z4OK#Z:.G8H23I(1DQ0M[*QJ:&XK#<90A,;(3XWFA85$1,,$AD8
+M&!(:(S(!,@%5 /O_MQ47=3T6&"8;&Q5*-,/JX.+9TY\?%1,8$QD5$! 9%Q49
+M(",H'24L)"4C)#]171 at .&08H'KL@)4NA.N7P-MC>FMZE2M"LU,#$O3(R2)N8
+M/:DSKT"<.74G139$0D94RLK)P[S5OT,>1Q<?)4(_HQL7%!<-%!T<&Q4?)C0!
+M- %0 /ORLR07%Q\?%B at W'!U$-+7:U]'+QI$?'1TC(",E*RTN,C(Z,S(M,2DN
+M=4LD*41/3RM ;WR/;:<U(2Z5*-W:*,W,;\:K3=&NOK>XR"JZ<J<R>D58?C:9
+M**(D5SL^/%.2II^IJ:2G*SP@*4-%34U&1A<8%!@>'BHK+S0\+!L!(P&' /OV
+MM!X4&1\=$RDX&AI&.+G<V]70S9<@&!LB'2 B*2LL,# X,C$O*C G/S$B*$=5
+M52 at D0S)'/J<O)C&;+-S9*<W,<\:K2M.OO[B\ORB\=*PX at 4Q>AD&@+ZDK8$1'
+M15^>KZJRLK"S+SH=*DE+4U-(1A45$@X0%1,1%1L:&!P!) &# /O_P1\2'20A
+M%31#'QI$,,/QY^'=V9XC%A<?'2 B)24F*BPR)28J'R4E)"DD)T=44B8/& 8F
+M([@P*S. at -.OO+-;;>]>P2MNZRL7&R2S >K$]BDYEC$2A+JHL7D!#05BER<7*
+MRL?*.SX?+4A*4E),2QD5%Q86%Q85%QHA'AT!)0&( /ORMB8A/B,6&WZ-&B$_
+M-;7<W-+'SJ4_.SDX.#DZ.3I /CP]-"XD-"DZ=$PA)29'-C at P9X!_9Z,W*""6
+M*=W9*LO/JKNO2LRHP,*]R2N_PKLUD2:B,X*75*$U5D!73$"NLHZ?P\"_%!M:
+M'STP/SL[7F%.&!L0*BTM,3=+.40!0P'( /ORO!\;0204&'^,%QL_-[KAW]7*
+MT:M!.CHY.3H[.#D_/3L\,RTN)2HX/3,@)BE-/C<A.SQ /9LX)2:=+MS8)L?.
+MK[^Q2]*LP,? Q2C!Q;T]F#*L/8V>6Z<[7TQC6$RWN9:FR,&]%!Q=)#\O/CH]
+M8&!+$ \4%Q$3%!(U,D8!10'( /O_S2 60R46&HR:(2 Z-;_P[>/:X;(_+RXM
+M+2XO+2XT,BXO)B =(1LP)2@?*2E*.C87'Q >*I]#)RN?,^OO*\_DO,FW3MNT
+MR=;0S2C)T\$YH2NT0XZ at 7:Q"7TE at 54G#U*R_X=K3&Q]?)CLL.S<Y7E]+%!0/
+M&187%1,Y,T0!0P'1 /OWN2DK2!H9&AXB'1E,/;78U[?1RZ- /S(U-#,R,"\O
+M+C(W-C,M62<D?D\F*B9,5U8G47R"7[,H(QF3*-W<.LC,I[.X3LNPL[W%RBC"
+MR5$QM1MR,+!2F(EH.TE!/%:OS,"_T,,?%A8A*A(S01 at A4W*)&#M:03(M.D9 
+M*RH!*@'+ /OSO!P?21L7%Q8:%Q9,/[K=W;W7T:I .S U-30S+RXN+3$V-3(V
+M11XA2#0C*"U78E at 6)CI$.:TF'1^<+^+;-\3,K+>Z3]&TL\/(QB7$S%,XO2=]
+M/+Q8GH]M1%5-2&&XT\?*U\,6$!0B+1,Q/Q8A4W"%'10?&AD0%B4P)2<!*0'*
+M /O_SA\<3!P9&1P@'!A'/;_L\-#HX*LY+R at L*2 at G)",C(B8K*B<L-1 ?+2HC
+M+"]88UP-#!$>)[(K'B2<,>[Q.<K?N<' 4MJ\NLS6SB7,VELYP2* .;==I9AY
+M1%)*16'&[^#E\-(@%1 at E+Q8U0QHF6'6+$A at L'!@3&2<R)B<!* '8 /OPL$4P
+M$Q85%I^J&!DJ-:O(T89E6TLW/SHS-3$M,BXN+R E)RHH6R\?<4L?(ADP0$<N
+M46-J1%@K)QPD(]C<1FK)K:F\3<2OI+^AP#"MS2>62U^L+Y8XLSBC2S)-24*^
+MS\BZDS\6&!@5+!(;)188*$N9/\R_V#P_0VX\*2H!+P'( /OOM#PH%A<3$YVG
+M%!8J-[#-T89E7$\V.C8S-C(N,2TM+AXC)2 at Q0!<=.RT9'ALW2$<9+3= *50E
+M(!HB)MS<0V;)LJV^3LJSI<6EO"VOT"B<5&JV.Z(_NC^I5#Y954W'U<^_ES\0
+M$A03+1,9(Q09*$F5"Z*SHQ$>)F,[)B,!+ '* /O^QCXD&!@5%:*O&18E-;7<
+MX(]J74DK+BXJ*B8B)B(B(Q8;'2 O) PA("0:(Q\Y2DP5'!TI'UHH'QXD*.;O
+M0VO8O[?$4=.]J,JQQ"VWWC.A5&K -IU!NT&N5#M64D[7].K6J4 at 3%1D7,!8=
+M)Q@<+4Z;#Z>VKA0@*6(Z)B0!+ '2 /OM:!<;5D0:&Q0,%A<8*Z;+JS8T.3,]
+M.S<R-"\O-#0J)"0H,"XC72X8<%(H'R,;(BDC)"P?*"<K,#,M.,S3D)C%LZ"\
+M6;Z(2< HR">LQ\!W&[XUHZ,HI3.)-#AD6H._OXX8( \,'!\='2TT,S,H%1A/
+M.[9<NC\_;C(U)30!:@'  /OR<A0674<8%Q41%A08+:;0JS0R.#8\-C,R-3 P
+M,C(H(R(F+BPL.Q,5-S,E&1\<(R8<&R,@'2,E*"PI-<S6CY3%N*2^6<**2,4J
+MQ"2PRL%]),8\K*POK#N2/T5Q9XO%PI$8'0T/*S4T,S S,C(I$Q9,&)QAHALB
+M6S@]'R@!9 '" /O_A!847DD<'"(=&Q03*:O?LC J*RHO*BLI*20D*BH@&!H@
+M*"8L' <='2<G'B0?)"@C'2,C'"@F+#,N-]7HCIG4QZ[&7LR01\<RS"2XV,Z$
+M(LI(K*PPJS>0/T1Q9I'8WJDG*1(1*S(Q+RXR,3$J%1A.%:%FIQPC6S,Y("L!
+M90'( .3T1BDE&QH>("$?'!HA&14KRE)!/STY-#$I)2,C)B$?(R8J+2TI92D8
+M3D4@(R8F)RTS-CE!.4YZJ[Z42MW6=-7(N7:Y(\!1.[]?N"FLRU<NO5\TM#"=
+M7B]G6&E;820;$A85&Q8, R&4+!4D)"$8$Q8D,&/9:V0E1AXOIHD!,@&Z /OJ
+M1B$='!<8%Q at 7%A<>&A at HSE(]/3PW,B\G(R$A)!X=(24I+"PU21D9+#0B&B$E
+M)BTU-3,X-E-^J+R;1-W;=-+(NWJX(<%.-KU=L2>RSEDUQ68ZNC>B9CIT9WMO
+M<"D;#A 4%1(2,3<T-!8B(A\5$!,A"BZC.$P902 QG7D!*P&_ /OT5R4;'1D;
+M&QP;&1D@&QHRVDDU-3$O*B<?&QD9(!P7&1H>(2$N-Q$:(BL@'A\B(R at P,C0\
+M/EB&M,&F1^3R>=K9T(+$)L)0-+]?O"N[WF$VR6<_PSFN;#IT:8!W="X@$Q$3
+M&!@7-SLT-A<D)"$7$A,?!C>T15$</ALMG7L!*@'$ /OO)B0E)S @*3 V.$A.
+M3%%US,@^*RTT+RTJ(BL7*!0C(R4I+"\N>S,M04HR/CUJG+W)T<_2ULR_(S"0
+M5=O1=<S'QA^\47R4F\5QN"BKS::Y7#&A2"VU)K-K:FEF,S41(ATB*"4A,!4>
+M1A,3&!,7%A0=/C9SC%U%E3Q)FBP!, &( .'F+BLK+38K.#U"2%M<9VY\S\8Y
+M*2LR+2LH("D5)1$A(2,G*BT];R,F*3,A.#]LGK[*U]?3V-;))"N74]O4=<G'
+MQB"Z3H&6F<=WN":QT*C 9#>H3C2[+KYW>7EV.CD5)!P@ R:7-1H at 1Q 0%1 4
+M$Q$:0B=>D6-%E#Q+D1P!*0&/ /OP0C<T/$4X1$Y566MV at X.%X<HW(R,L)R4B
+M&B,/(P\;&1L?(B4I:"<=("46/4-PILO5YNWN[>G9)RB at 6.3L>M'8V2._3H.:
+MF\MVP2JZX+#!:#RJ4S; ,L%Z>W]\/#0/(!LB*"<H-QTD2A(2%Q(6%1$8/2-<
+MG6Q*D3-&D1X!* &0 .OB:F%<55]95UE;5%-75EZ[RRX:&!P8+14L'BL9*A8C
+M)"<I*S$Z*4=L9836R<O1UM;9U,_?GLLX-RF.9]S5>,7(QG_#<KYQB+AFLQ^M
+MRW5'+K1T*:A%;F]F:V9<'TI'&1LM.4TX(2(6&AP<'009C!U92%FQ*)JA)RJ2
+M-0$O 38 [.^2CXF#BXF+BXZ*BXF ><K1+146&A4J$BD;*!8G$R$B)2<I+SLB
+M+DDY7\C-T-39V=;6U-N/QSPQ(IAKV=AXPLC*A<=UQG>*OV^W(+'.=TXVNGLP
+MKDQV>7)Z=&8=3$L;&RD\4S<C'0X8&187$P,6C!I$/EZG(YN@)BR))0$H 3@ 
+M^_^\N:VNM[2TM[RWM[VID=CD+!,2%A,H$"<9)A0E$1L:'R$C*2P@(#HI4\[?
+MZ?#U]?+TZ^.-STXR%YYUY?!]RMG>BL]WRGZ.R&^_([O>?T\ZP7TQLTUX at 7=^
+M>VP?4%,A("X\4#0?&Q(<&QD:%A at 8%AA016"Y+IZ?(RB))P$G 30 ^^-M9V5:
+M6U=85UQ64TL88<7-)QHD(ATL&BT?+AHO&24D*#$Z-4NM6H_?VLO:V-;8V]C7
+MA9=\TT=5+X]OW-)./\G,A+<AKWYLM%%R&Z3+>)VI*"ZUFAV-7&)::!@B%#8M
+M3V=?12XI,AX;E9N>J:JRM;^]6F"O,)HP-'>4,@$E 34 ^_"=FIR4E)"/DX^,
+MD'TO8\71)Q4?(!HI&"L=*Q<J%" B*#$X,$.B/6;,V=79VMG<X.'895%,PT1,
+M*)EUV=-//<G1C\$HN85ROEYZ(*K.>:.Q,#6\GR2596YF=!T?%STS4V5B2RXI
+M+1<9DIB;IJJRM;_.8%FS+9(L,G6)(@$> 3, ^__,RLO#QL+!P,#!OJE&8<S=
+M'A ='!@G%"<9*14H$AT>(RPR*SVA*%W9\/7P\/'N[^WC73XMO5!.'9]^Y>Q2
+M0MCFFLLJOXYWQEZ (K'@A*BU,C:]I".9<75K>2 =&3XP36=B1BDB*A@;FJ"C
+MKK.[O,3$6UBK*Y8Q-G>)) $= 2T A^YA<4-=5EL#5_%210U35\#/,SA%*RLU
+M+"X>,ADP&3DB("TR-C;.U]31U=;9V=/&WY5 CH&*S5<C>HU]ULX?F<''5,$F
+M/74U4S<,!Z#"N$<[3*4])J1J@'Y/*D!F7TTJ0RDR*A\@'"0RB8^1B8:"?7UW
+M*&J[-I$7$(.!.@$R 58 ^_2;GH63D)61D8R1;!M38,?.,3) *2 at S*BP<,!8K
+M$S,?'BLN,#3)S]?9VMG:W-?)XYDS5#-3PE 8>)>"T\D=E\#'6<DG0GL\6CP1
+M"J+#O$Y$5*I#+;!XBXE9+3MC950Q2BPT*AT=&2$OC926CHR(@X-])F&R+HL4
+M#G]U*0$I 5$ ^__4S+["O\3 O[_!DBU<8-+=*RT^)28M(B06+!0I%#0A&",F
+M*SC4X.GM\?'S^//A^*,V)1XKP5<G=)^'W=X9F<[89L\H18(^6T$3#*K.Q%! 
+M6K9*++>"E91A+SAA8E,R3"XR(QD;%Q\MEZ&CFY61C(R$*F&V,HX6$(5Z*@$I
+M 4P ^^M2951=8F%>5FD@%2 H3+S6-2 B*2 K.B\=+QHV0S G+S$YR-+4VMK4
+MU]C7T:\<,6\ID8B(S%]G)Y"&ULU!P<N^-AXA)B(<&R,;&&J&F6ZCN" EKGQ]
+M at 5M62TI$+C,_.R$S,3,C*2DY9W-N:69 at 7%A3576W?8N'J8B&-0%! 7$ ^_&,
+MDI:3F9B5EYE%&AP?3\/6,QD<)QTI."\=+AD\23 C(R8SR-;:W]_<W-O7U+$9
+M*6 at 45SM,O5A7(IJ+T\E!PLN]-QT;(B :&AT8%6>#F7"HO"@RO8^.B%Q103\^
+M,3=#/B(S+RXA)R<X9'!K9F1>6E923F:I=(6$IX1Z) $X 6X ^__%P,_"R<C%
+MRLA@)B(A3\_G+Q<=(QLC,"@8*Q@[1BD9'",VT>CM]OCR\_?V[,8E+V80(20C
+MO5]H'Z*0W=DZQ=[).AP>)R0<&2 8%W&+GG2KSBXOQY^;D5U..34Y*2\^/",N
+M)RD=(1\K8G)M:&!85%!13V:O>(B&J8I_)0$X 6P ^^YD<UMJ9&9G7C\C'TAH
+M.<#5/2,<,SLO,S(6-4,P%R O5,G8V-72T]K2T]&1=#0W?G(OA8F*RG<A&(A]
+MR<N;P\'%-AX;)B$F+S%6%X? O90]/4Y\A6%=3E5;65Q#,BD\&RHZ0S@^.",N
+MU=+8U=K7V-?CSG^Y?H.TK8"+-P$N 6P ^_2>H)V=F9N<CS\0(5!D0,?3.AP6
+M,3 at M,3(8-T0O%!TL3L38W=S=W-W7V-64<2XO=&\@3#Q,OW<3$Y*"QLF=Q<3*
+M,Q<8(QXC+2I1%HG O9A#1%>$BV-:1$1'0DD\,"<Z&"<X/S(Z-R$LTM+8U=G5
+MUM7CR7.M=GVQJWQ_)@$E 6H ^__7SM;-RLS-MF ='U9J/]/G.!T9+38G*2T4
+M-44N%!LJ4\_K]/?W]OGP[^JF>3,M;FT>&"$DOGPC$)J'T-^;R=39.Q48)1XA
+M*2A.%9'3T*1,0U.*G&E:/#<W,3HS*B$V%B<R,RLN+!DHVMOAWNCGZ.?TU'BP
+M>H"SK8*$)P$E 6P ^^EN;69I:%MI-T$4'8!\0,3<."4=,AHG(S%#+Q4N'VS2
+MW-S8W-C5UMG45B(?*AX:*QHH at I"(R(T@'W]_PL6UP+^_2RXG(QLD-AT;%RR\
+MM3)WB!T2'$Y586-?7EE&&QT]'!X[+3$T,34IS];5W=[AV]W;NFJPE8&[J&N#
+M-0$P 2D ^_*HF::;FI"2'R(4&XF!2\G;-Q\7,!4E(S5),A at G%V7.WM_<X-S>
+MW]C76B at G*"0A,"(=3$9-PI09&HF$PL2UPL3$3"PG(AHD-A at 9&3+!N3N$C2 4
+M'$!%2T=#1D(\&!8W%ADW*2HQ,#(ET]O:XN+EX.+BN6*KC7NUI6=W) $I 2< 
+M^__?Q-W'SL._0#$-'9F-3M;Q-" :+!,A'#!$,AHH&V[=]/GX__CX]_'Q;C$M
+M+"TJ-2@<'2 at FO98D&(^'R]V\R-'962 at B'Q<?+Q,3%3?.P3E[CRX<#2\U-S(N
+M,3$T%A0Z&1<O'2$C(S KY_+O]_?Z]??]SVBID7ZXIVUZ(P$H 2D ^^ML?&%@
+M7F, .D8P.44N,<'5.2TG,A at Q/3(H+!YDLM/4V-G8VM/8TTXD2"Q;220F4C1&
+MG)F#U9L8&GF"N\6T8\&X92LM/BQ004P_(A_-OQ@<.R at B35A-8F!&'UA$&3PO
+M9#Y%*SLV.RXMT]79W>+>W=S at U7BTFG*QJ7]D- $X 8T ^_6EIJ2AE90-&"HI
+M/$HT-<39.R at D,AHS/C(F*1AFM];7W=[>W]O<TTH;-R)B+"0I/2<M:%A+QIX5
+M%X6%O\BO8,6]9R at G.2I./TP^(1W*O!8:-B0>/S]#4$0S%T<Y&C L831!+#4Q
+M-RPKV-K=W^'@X>'GU7&OE&RKHWU<)@$Q 8P ^__9S=K*Q;T?(# J/$\Y/=3M
+M-B,D+18Q/RTB)QULQ.[Q]O/Q^.[YXE(=+!QK("DG,R<8/3,MQ)X=&8*'R=:\
+M:M',;28B-B9*.T<[(!_6QAH>,2H6+C$Y/3 F$3HN'2LL82PW("PD*R8MY^_R
+M]?CV]OC_YG*MF7&PJ():'P$P 8L ^^MQ<UYK:Q87%1DN+R<5)KK8/B<E+T(N
+M&"<@?+[/U-?4U=78V,O+5%5:;"Z$560D6DE-C9F&S:T[&'A[N,&Q?;V^0A<A
+M*1D:'A4@,3<W0$ B+R<W5E$O-V2;I(I(K4E#SSQ!)#DR02 7R]C8V]W>VMC7
+MD8VPGV6KI7 =-@$W 9\ ^_"KHZ.GA1L3"1,S-"D7*[W;/B$?-TLT&24@?\/8
+MW=_=WM[BXL_%1$%%02"3,5T>,S L5EE-OK X%(1^O,:N>;V]0!4:)Q<8'!0?
+M+S4Q.3D;+R$J/CXJ*$^5J7XZK3]"S30])#,M/1X6T-W;W-O=V=K9CH:KFV&G
+MH6X2)@$N 9T ^_WASMW:K2 at 8$!@V.3$;,,OM.1P@,4<S'"DGC=CN]_OS^/3R
+M]>'8/"T[)Q>?(%L7&R,2)RPQOJY &8& QM6Z@<;)1 \8(Q,4&!$>,3DV.#<<
+M)B(=+3$E'D66KG<PLCE!T3 U&RP@,1 at 5W_+U^??V\._OFH2HH&:LIG,1( $N
+M :$ ^^MM<EUZ,S5",QT:&1H<([W7.CE:-1DB*H6-M,O&S\G-T\B=HS;865Q)
+M at B)]8DP_5#1.;8UZR88B%VYQI[ZP7T^NCRLD'S,X+2<>'98]43H;(B0[4$.B
+ML69^WJ=-MT,\QD%$(3L@*"TDS]/5X=S6U\C92XR at I4BBH'<T.@$W :@ ^_"G
+MH)>0(A<D(1<:&1,8)<+</3=:.!PE*HR6O=//V-'6ULF at ICK.1TX^5A^:.T4_
+M,2 R.$Q N8H@$WITJ\>Q6TVJBB8?'3$V*R8<&Y0W2S03)!XN0#NBHE=[X)D_
+MN#T^R#D_(34:(RHAT]C9Y.3=W,S70X6;HT:@GG,G* $L :4 ^_S=RL:N.1XK
+M*!P?(!H=*<[S/SEA-APG+YBFT^_E\.WL\N*RN$;6-#0[.!^Q)4,Z'!8@%R<L
+MNX(D&'=VM=>^8%*PB"$<&2TR)R4>'YDX1B\7("$A,#FIH$U[YI(VPS at YS#<Z
+M&C 3'BPIY^WK\OKV\=[I27R8I4JDHGDG(P$L :T ^^EJ;3 at 6&QXE0AD7&145
+M)[?-FV,5)!H0%PT?'R0>(2$9&A\@=T/3>RHDABM27GQ.1VQ&3'MTQU\Y%6U-
+M6[RN2H2'438U*RXF%QL?'80V.3$7#Q\P,[O:CE9^WDU+MTP[T#HZ,30S(KA[
+MR74O/]1XC&[41VU(FQ>8I6\Q.@$X :H ^_*CEUP;#A$1+ at T4$PT/*+O6HF49
+M(!0.%PT=&!P?)",<&1TB>471:R893A]B-'-5,&(P'SPZM6,X$G907\*O2(6#
+M3#$P*2LC%AT=&X0S-"\3$ALF++?;?4QXVCT]NT8_TC,U+RTJ';1XSGTU1.&'
+MFG?00&9#F166HVLB* $O :8 ^__9P'T=$109-! 6&!02*\7PJVLC)1D3'!(B
+M'R,J-#@N)R(==4/66AH6)AIR'7-6(EPI"R(HN%TW%'129]&Z2HB(2BPM)2DA
+M$QL?'XLS,2L8%" <(+WH?#QQWS<VQT$YV#(R*RLJ&[R$XY$^1O*?K(/?05U 
+MFQF:IW B)0$O :X ^]9S!!48%!85%!<6%A09&*[-6!P\11<4&$\S)#<5%A at 9
+M'!M!4$/.6"$FC2HX1BX=,3 Y.61WQH\T&F\O/[BH*36OA'\O(2$D/104*GPP
+M&Q0L93(E1#8K54J/VTE/LT _QS@^1S4R*(2UR&5')<MMB%S&2$)(F""3E6LP
+M,@$W :X ^^Z;'A44$!(1$103$Q(3%++571Q 1142%DLO(#,1$A05&1E$5C?;
+M0R,66AM *1P:'24@'#<]L8TO&7(U1KNE*SNSA'XM'QLB0QH1)WXM%A,N9C$@
+M/C,L0T&0V31 MSI!R3$W/BHE(H*NTW%2+-!YF&3"03I"EAZ1DV at E(P$T :@ 
+M^__'/AP9%1<6$Q85%104&;[H:B5*2AD6&E$U)CD6%QD:&Q4Z43+L-2<0-!M3
+M'1D:%1L=$", at K9(M%G T2,NO*4"[B7TI&QXG2!D1*88O$Q J9S =.34M0#&"
+MW3 VOS4\S2\V/B<E&8>UWH93+=^.J6C2/S9#F"*5EVHE(0$T :T ^^ .(B =
+M'1 at 8%R$P.$!'5[33>3H9#Q$66FE;1EE4%!T1A:.SK*ND?!\CBR-(,2L5*SHL
+M*3UYOX8K%F0?+G^:%R)\A1P:%A<4'UE7-7Y/?X4X(&9IKKEN0T2%VE)*LCY!
+MR3M*0#LS,;.PQF [3==N?EO(041#D!Y<,FXQ,@$V ;  ANT?'!H>&0,4\AXM
+M-3M 4;G;?#87#P\46&570E50$!D-AZ6XL*2Q8B 96QM3&!P2'3,7%1M!J8(A
+M$V4B-(6<%2> A1H7%!05)EU3,H!1 at H@\&V-IK+IW/D.(V#T[MCA#RS1#-S H
+M+;.KTVY)6-QZCV/$.CP]CAQ:,&LF(P$S :H ^_\L'QTA'AD9%B O-SD_5L;N
+MBD4I%A,87&M=2%M5%1X2C:V]N*7"4B,31AE>$QH2%BH5#A CHXHI%68B,XZB
+M%RJ(C!\9%A08,658-(A5@(8W&&-NL+UW.SA\W#DQOC,^SS)"-RTE([RVX(A2
+M8O&/G&G4.#@^D"!>-&TF(0$S :\ ^^@8&Q<4%!(7.D!+1DE-2+G/=AP9%18M
+M2EE*$B$P/QD3J*F$:&:89!H<4!U)*R@:(#(E'C%KJ)M"&D :&D"9&"H]+A$9
+M&BIF2:6^)%V3W'%1/5514%9"21U^VU5#LT\[R3)%23DT);6VSE0[0\]@<5C&
+M2D9*?B$=(&HC)@%/ ;$ ^]<:$@X2$ X3-SU(0T)%0K_7=1 at 7$Q0K2%5&#ATL
+M.Q4/K[&);&.F1AD7)AA8&!T8&"P6%APZEYL^%CP8'4B>%BM +P\5&RQI4JJZ
+M(%^7WG-/.%I635)-2AQ^V4 TMTD]RRL^0"XL)+:RU%U&4=1L at F#"0SY$?!\;
+M'F<8%P%, :T ^_ at 8%A at 4%1,8.3]*14!#1]#MA" <%Q at O3%M,%",Q0!H4NL6@
+M@&NX,Q at 2(Q5<&AT:%B,6%!@BE*1#'$$:&TJA&BQ ,A,:'BAI8K?")6>?XG%+
+M-E]B3UA-2QEUW3PJOT0XSRD]0"LF&<'"XW506^M_BV;403I%?B,?(FD8%0%,
+M ;( ^_<5%1 at 5%!<8#31'4U)D3:_.?AL9%188% \,'AD3'Q,=,Z:IKKN.>58?
+M2B%')B8:'3(B%"=!4;,F'149%A].(2$E0B%3:8@=;K#+(5*[W80Q(6%L01A.
+M,#MSU5%)MDL\PRU#3C8^([6VR#TV/M%475/'344Y<",<'G:,/@$X :\ ^]@1
+M$0\2$!,2"3!$34=91[72?QD8$Q06$@L(&A4/&P\9/K2RMK^;6U ?)AY6&!T7
+M%"P6#1H:1KDH&1$6%293'R,I1"-1;(D?>+3''52]VGXJ&V5S/!)6,3AQUC\[
+MND4^PRM 1RLV(KFTSD,^2-1A<EO#2#TS;2$:''-_+0$S :L ^_D6(1T4%1 at 7
+M#C5&4$963,;GC!L5%1@:%A$.(!L4(!0>/[W O,>J2%,:*QQ8'A\9%"43#!H,
+M1L F'A86$B56(R$D/Q]3;HH=B\C6(UK#XH$H('%^.A=8-#IMV3HQPD YR"<^
+M1B at R%\/&UU)"4.QP=F'513DV;R4>('6 + $Q ;  @NDB Q?V%A(6&"Q)/U16
+M/JM>/V19%D2B&"-'@FUE97X<))5X65J(92<<-"A;&BHA*38\(2 N*8,='B at Q
+M'APK,D L*G>7AMH=3\/:+3RUVX<^)&A at 4&(\72&,VT]4K4Q!H3="3D0U++BY
+MR#TS0--(4T7&3(EW6E09'FY]0@$V 6T ^]L8$!(5%! 4&BY%-TA)-:Q=0VI;
+M$D">%!]#?FEA87H8*I^!8V"-4QD9(2%A$A\6'BHP%!,9(808&"4N&QTL,T$M
+M+7V>B-4<7,C5*3RWV(0Z&V1E5F=!7R*)W4!&LD9 GCI&2#DJ*KFUS40[1]-2
+M9$K 1X1Q4E :&&IM*0$N 6P ^^TB#Q :&149(#)*.TM).;EK2V]?%T6C&25)
+MA&]G9X >+ZF/:W&20"$9(R!F%A\6'B<M%!07'X<6&2<P'1XM-$(N*WB at CN ;
+M;=WM,4&UXH8R'6IH46E$8R6)XSP\MT$]H#! 238I(L3$V4U!4N1<9U?51(%V
+M6%4='6]Q* $L 6D ^^$G'BPQ.T5*.SE!/# ^/RO'F6A?&A\1;QL_&5<4(V<;
+M&%R#DWB87B,>,2(@-#@V,"PR'QPH*& H'RX^.CD;/$ D'T!?P^ J*\#@(R^T
+MV)I9'REE1UPE8%:0V5)4L$Q$S$%&4#\R+K>SQCTV1M-$347*6;YJIB5FHG"#
+M1@$X 5\ ^]@C(#$S/4I003Y$.BDS-BO(GW!D&!L-:Q<[%5,0'V,7'F.)FGN<
+M4!,;'ADC*RTK)2$G%! 7'5TB(C-#/SP>/T,B(41DR-LG-<;;'R^UU9=5&"AL
+M3V,K9%F.VT-&M49#R4%&230J++BOQC\Y2-!'4T;"5+EDGB%GG&QS+0$P 6  
+M^^(H'C,W04]51D%&/"HR.#+5J'9I'2 2<!U!&UD6)6D='660FXVF/Q49(!DE
+M+2TK)2 F%!,8'%LC)#9&0CX>/T,D'S]GS>HG/=?S)S2XX9=+%B=M2V(J7U&*
+MX3\\ND% RSA!2C$H),.^ST,[3MA'4$G34;9II"9JH7%W+ $N 6$ ^]DV-#E'
+M+" A*S(Y.38V02M<2&1Y&Q<<'!E:'!@<)20@#I D&!I!3B06)S<V+R at L)QX9
+M&A@@)3,].C8V.#$?&QT=&AR%Q-LL+L+<*BRQW9MIKAM+>EDP-TR-TU-1 at 4E*
+MS#Y*1#<_,K6SSD!#5\=.65K%4KQKGB6BGFF!0P$\ 8@ ^]HX-3<_(1H?*S W
+M.C,M.2A=3VY_&1,8&!56&!08(2 <#I$D&1E'0Q,5%"PS)!TA'!80$0\3&BTZ
+M/3L[/2\<&!H:%QN'R-<F,\?7)BRQV9AFI!9.?5 at M,T>(U41#AD-)R3A%/2PW
+M,+:OT4-'6LI175V_3;=EEB&CF&5Q*@$T 80 ^^<^-CD]'ALC,C4[.S,M-RIJ
+M6G:&'A@='1M<'AH>)R8B%9XS)"=..!H4%RDQ)!TA'!00$Q,6%R@\/ST]/S$>
+M&AP<%QB+T.DI-=3O+C&VZ9A8G!-.>U<K*3B#VT YBSY&RS%"/BDU*,&^TT-!
+M6LQ/6%O02K1JG":FG6IU*0$R 8D ^]XG7#ME12PG(BTQ-#-&0#O)MV-H%U]0
+M%B C3%A^+A0U&1H?'Q(C3QD4(B\H)2 E(RLD'1P>(2%*22X@'A\>'B8K/2$\
+MM-DH,+'@)BJLRJ%OKJ$L(RYMPFF1TU)2J4P_OSQ%1S,X+W>RQL7'P<*VP+7+
+M6ZU8CB2>FF1^00$] 7D ^] 61QY,+QT<&B8K,2]!.#C)O6YM%5M,$AP?2%1Z
+M*A Q%Q@='1 I10H1$"0A'!<<&B(;%!,2%!E)2RT?'1H7%A\E.B$[N=4@,K;=
+M(BJLRYUKHIDF'29FMEV+U49$KD4^O#8]/BLP+7FORLO-R<F[Q+K)5JA2AB"?
+MEF!N* $U 7, ^]@3.1%!*!L<(2TP,S0_-CK8S'EY&F!1%R(E3EJ ,!8W&1PB
+M'Q0N/103$R$@'!<<&B(;%A<5%!5(22H<&A at 8&AXF.!PZO.<F+L+U*B^SV*)7
+MFY<G'B)EL4:"VT,ZLT,[OC$[0"<N)8&[W-[>W>+0V-'?4Z57BB6BFV5P)P$S
+M 6X H>0K2#(X+" <%QHE,SPW-RQ;8SY&'Q<5$A8A%Q09$Q49$P,AC!(2'2(7
+M'QTC(QT<'P,FR" 9&ALN,QXH*RHP,#$K,B1-2,4M*IW@)2FSRJURBF2%7VQ1
+MI7:"SW1&ED9#O#1"4CDS,WS!BY:(EY^AEZ;#7[A;>2:6C5IZ10$] 6( H=0=
+M.B F&Q46%1 at C,3PQ,"A=:4E+'1,1#A(=$Q 4#Q$5$0,?UQ 7$1,.$Q$7&A85
+M&!T;&Q40$14M,!LE*"@N+2\J+R)(2K\F*:+=(2>PT:)GAEQW4EQ*DFB TV<[
+MGC]!NB\[22XH+WR\DZ.6J+&RI;+'7+!5<2*9BUAK+ $T 5T H><C,!LA&!4;
+M&1PG-4,V,2YE<$Q.(AD7%!@C&18?%Q<:%@,DUQ(<# \2&!8<'!<6&1T;&Q42
+M$Q8L+ADC)B J+2LI+QY#1= O**[U*2R\TK-*<DEN4E0^>$YZVV at PH$ [OBPY
+M2RLE(X''I[VPP<C-P<G;6JY8=2>;CUUK*0$T 6@ ^^,A(R,K&A<?(AP:&!<F
+M0R2<K($H'!*M&6XF63638( 2$B,D'!$4,U(>&QT@)"<H(R<F(1D7&!@9)B,_
+M/QP\3XYK at T-CH-LI*[S=(B:HO;E=>Y"9BJ:<GTJ'S%I/54(Y=#A%2$ W,K+%
+MI)"+F96:DI_$=K%>:B20BF*$20$Y :@ ^]D8'!PC$A,=(!H8%A<@/""9KH8I
+M& ZI%6HB53&.7'P.$"$B&@X2'C41#Q$4&!L<%QX=&! 0$1(3(R \/!HY2XED
+M?4!AH]HE+L':'B2 at P*E,=82&>)"0C3R$T$U$7CLW<S,^/S4L+K+ K)V9JJJO
+MI+#)=:I:8B"3B&!U, $P :@ ^^TB'1TI&!@?)!X<&AXE/2:CM(DL'A2O&W H
+M6S>99((3%28G'PX6$B at 2$A07&QX?&B ?&A(1$A,4(1XZ.A0W4)1K at D)CI>DK
+M+LWR)BFPP+HS9F]N67)O8BB"V$XY7#HS<C \03(K)+O-P+>SP\3-P\G@=*E?
+M9B65C&5U+0$P :\ J><:)A\C)2HP%QL5$Q =0R(Z)A8@&AM+&!4:*!H8$!H6
+M$QHD'!P8*C$B R;/(R0E,#$S&A86%1L;*4,O+S%92!,;.C!&G- at H);;6*2^I
+M>KMBC\IIA(?&;$]ZTE5#*E5 ,S%"44 W)Y'"IY>5E9N8EJ#!;[9.6R6+AF-U
+M1P$[ :, J=P1'QL='R0J$QD3$1 7/!XR(!4;$Q='%!$6)!83#!82$1 at B&AD:
+M&AT7 QO/&AL<)RLM%! 3$A02)$ L+"Y610\3,RQ&HM at F*KW3)2VD at K%4B+U7
+M<GV_6$-YUD at X-$X],BP[2#4L(Y"]KZ2CIJZKIJW&<+-,4R&.A&%F+@$R :  
+MJ>D;)B B)"<M&!T7%1<</21",2$F'!U-&A<<*AP>%!P7%ATG'QL>$A87 QO/
+M'!T>*2XP%Q,3$A44(CXJ*BY40Q<7-#%+J>LJ+<GK+3*S?+M D[Y"1T>T8"YN
+MWDDM+$T[+R<Y2C(K&YS,P[Z]O\;%Q,3?<;-162:0B&9F*P$R :@ K^PV+R$Q
+M+"@U(!P5&",F1B49&!@;#Q at 7%Q 8$1D7#0\0$QD@(!H6(RDB)B8H,C<E QW)
+M'!H:(BPF'&$S3RPA1!X?)1==M]<H'Y_.+"ZS<+E<DLE-58?*=U5$:%A$$BD]
+M&C!!3SY *J*\E8N'?7M[8'/-8*Y%2Q]^9V-L/0%$ 98 J=LL)QLL)1\I&AH3
+M%B$C0",3%1<6"Q03$PP4#143"0L,$1<>'A@<'B,: QV#*S$? Q?)%A07'"4:
+M%EXP3"H at 0AL7'!9?O=HF(:;+*"RW=[52D<0]08',:$9";$LY&R0Z&BL\2#8U
+M):*WFY23BXB%:'S28ZM%11M_95]=)P$] 9  G>4T+1XJ(R$N'QX8&B8E02<8
+M%Q84$!H9&1(:$QL; Q';%APC(QTC'"@>'1T<+#(@&!H:&1<7'28=%UXP3"X=
+M.AD5'!-CQ.HJ)[+C,###;KL_D,(W.7K4:#P\=$PN$2(X%28Y23(T(*O&K*JH
+MGY>7>XKG8ZM*2""":61=(P$\ 94 ^^<L0#\L'Q]'+"XS5EPX,B0?(A\B)Q\D
+M)B8@)2,A'!\?%1TB'QX<(C(I)R\I)QT>'AT@)"8C("4\6BL1&UUW0!HE1B=>
+MLM$O)H*Y+S&YA<!9D<5-2G[1?E4Z2UY*'QLX0$5"43PU*6>UQ[_&SM77U-G>
+M7'$@.R!'&V$P. $Z 8( ^]$F-C,C%!0\)"PT4EDW,B4=(!T@)!PA(R0>(R$A
+M'!\?%1TB'QH<'R$7'BD@&Q$2$A07&QT:%QPS5"@.&&!Z/Q<@0"9;LM4G(XNW
+M*B_"=<%(CL4^.W[6;T([4E$\'1DV/C\_3C0K)&BPR,/*TMG:U]SB7FXA.1U$
+M&%H?)P$X 8  ^^0I,#8C$Q,[*RX_6EDT+28B)2(E+"0I*RDC*"8F(20D&B(G
+M)"(C(1 at 0'BP@'A87%Q89'1\:%QPS5RH0&F!X/!4>0QE9Q?(W(XG+-3'.;\P[
+MD,HT+WG;;3(M5%(R&14R.CH]3# E(6N[T\W4W.7HY>KV7&PD.Q]&&EL>) $T
+M 8( ^^\U7S\H.3 C6FZQP&Y83B<>&AP<(1HB)RH?(R8E(B at F+R0@'!\^/XR-
+M*" >(1PA)B0I(RH[+TXE'Q00#!@[$!<0+C-C2,,N)#A:,S:U.\-4FLA95'-K
+MCE,_/F(Z1#]%0T<\0SPW*26 at S]W:W.'>W-W:;2DPD24B(5E . $\ 4\ ^],M
+M43 =*B$44&NPN650224>&AP<(1HB)RH?(R8E(B at F+R0@'"$_-6=L%!05%1 5
+M&A<<%ATR*$<>'!$-"1(X%!H7-3A?0<@G'T)9+S2^*\1#E\A*17-P?T! 154L
+M0CU#04$Y0#0M)"><T>+?X>;CX>+8:R8UDB,@'U(O)P$Z 4T ^^$K2"X:)AT0
+M5VF^Q&5.1BHE(2,C*"$I+C$F*BTJ)RTK-"DE(1],/$-4#1<7&!,8'1H?&2 T
+M*4@?'A,/"Q="'A at 6-B%-/]<R'#AE-3C*)<\VF<U .6IS?3$T1U8B/CD_/3PW
+M/C G(26BYOOX^OWZ^/GT;28XDR4B(5,N) $V 4\ ^^DZ:"PB*2 at H-R/%TFQ@
+M2"8?(A\A)"<A'AHC)",H+QXH,!DH'R1Y.%4[*A\B*B4I(QHX)28?'QTH*A0B
+M)R%2K&LZ)TR1BJDS+K*\02FU6LE6C,-;4C5%E$(Q.%LS-C U+30T,#DZ*28X
+MN[:QK:"6EHY^239(:Q at D8%XZ. $Z 4@ ^]$S7!\;(!\?+A[!R%M//R,@)"$C
+M)BDC(!PE)B4H+QXH,!DH'R)^-S0>&1,5'1H>&!,Q'A\<'!HE)Q$?)!M6LFDY
+M)$>%?:XQ*+JZ/">^2LI%B<-,0S9*A2XR/TXE-"XS*RXQ+3$P)"@UNK2OJY^5
+ME8UX131.;ADE85<I)P$X 48 ^^(T52 <(B$A,AO0V5Q,/RLK+"DK+"\I)B0M
+M+BTO-",M-1XM)"2+1282&!86'AH>&!0R'R >'APG*1,A)AQBPV,>"AYD6J,V
+M+;;,1RG*1-4XB\A"-RI, at R0F04\;,"HO)RDO*RTJ(20UR,;!O:VAH9F32C93
+M;AHF8E at H) $T 4@ ^^,P*C8B$QTP1R_$TFE;3B4>*"(?)"(J'B8F(R(B)1<2
+M&AH8)AUS7$,Z*",F)2$R.BTA(1\@'1 at Z&A at A+25$<'JGIX^(1ELG)ZFZ62^X
+M=M%:4<!?62)!DCXU'5=#04!+.T$V%CPR*R TKL[2T-36MJ?8V&AI*R5Z9$LZ
+M+@$W 4< H,\L(RX@#QDL/RJ_R%9)0R$?*2,@*"<O(RHG)",B)1<2 QC8)A9Z
+M:3 at I'AH9&A at I,2PB'QT=&A4T%!4@*21*=G21B'!F,%HB(:^Y5"W!:]%+3KY0
+M2B-$@BTU(DHU/#Y).3PS$S8G)B SKL[/T-#2L*/5TV9O+B9_:44K'0$U 48 
+MH.$Q(C(E%!XQ0RC,V5A&0RDJ-"XK,"PT*#(R+RXI*AP7 QW8*QV%=D<R)1X:
+M&1 at I,2LC(1\?'!<U%Q4?+R-3A6U72# Z*G W)+;082_-:MI!4,!&/A=&A"0L
+M)4LL.3I%,S<Q$3$D(QLPL]79U^#DP;+OWFMT+">!:T8I&@$Q 44 ^^,J'R4C
+M&AXG'A>>UDY+2#$@'"0?(RPO)RLD("0G%!@9'!L;(B-G+Y#09R4L."8A'Q\=
+M("(F'2([F*%.:$2+KF'GDETRFKHQ+J2O>2.PC<Y5%HA;52A3G$8E3"@\1#I*
+M0S8V,3(Q,#DFK9O$;X6H64'0RXHZ/1V"<2DT/P%8 8( GM$@&QT=%AHC&!6>
+MST,\.BH@'"0?(RPO)RLD("0C$ ,5VA<8(B%M/9/%6QHA,B,>'" >'!P?&1LS
+MD:%/7S>#JEK=@4DJEL L**FN<B.YD<=.%(%-1BQ.BC(C2R K/3A(03$Q+"TK
+M*S8EN*/)>8VK6TO4S8LZ.!Z*>28H+@%2 84 L=XH("$B&Q\H&QFEVD,\0#$G
+M(RLF+#4X,#(K)RLK%14;'!P@*R-R1Z'28AD>,R4@'B,#(<<F'APOFIPV13J3
+MN6'D?D$NF\\W*[:]?2C%F\A,%G]#.AU,CR@?2AXH.S1".RXN*2HF*#0DPK?@
+MBZ"[8U/FXI8U,R&0>R8E*P%3 84 ^]TP,",?)2 S.AHPV41.1C,?("8?,2TQ
+M'1<C(B(D(1\@*2HF)")@6QRN=2XC(1\=("0C(!PC("PWF:REGU5HJ6'.E55#
+MD, X*HZ3C1NS+,5*7CU241]"FSED)4 P3#Q%&AXU,38L*C,[J9'&<'ZM;(G4
+MPC\W1CR 7B at XA@%' ;0 ^\LF+!L7'QHM,10PU#@_."P?("8?+BHN&A<C(B(@
+M&QD:(R8C)"-E81VG;2<<'1L9'!X=&A0?'"4OF:F9ASM3GE[/CDM"C\8S)).4
+MBAFZ,+Y#7#9'2RE(E"]?(#8B1CI"%QDP+#$F)3 ZM)G+>HBT<Y77OCHR1D&-
+M9RDM= $_ ;$ ^]@N,1\;(ATP,1<WXSL_/C,F)RTF-C(V(AXJ*2DE'AP=*"LK
+M+21G9BBR<R8=(B >(2,B'QHE(28KHJ=\7$!IKVC<C3LWCM4^)YB?EA[&.K]!
+M7C0\/!]%E2E<&RX8/31 &1<M*2XA(BXYOJWBC)C ?ZCGSD4O04.-92HM=P%&
+M ;L ^^8T)!H?)2,F*R]>T3X_3$(F'AX;+A at 4%A4;)"0Q('5<(R F)B5??I,H
+M'1L<'B =(1<;&4^,3B5)6,7AR2U1V-S8J58T=:Y%)&HFC2>F&F]-)TY at 3AH2
+M+ADP)$ ]+1Q,%R(Q-S8O*RL_J9?*=W.M6TG:T\E$.QY:+BM11P$[ <\ JM0J
+M(!(8'AP?'BA>T#4P/CLF'AX;*A00$A(;)"0Q'7)9'QPC)B9A@),E&P,:SAP9
+M'1,7&E**2AY!7,+1MQQ#SMW6G$(C;K1 'FLKCB.M'FA&'T!/0 at T %P<D&S0O
+M)AE)%!TL,C$I)B@^M)_/@7NR7U+?T\A .B%?,"U)- $O <8 ^^$R)189'QT@
+M'B=EWS<P1$(M)24B,!D5%QHB*RLV)7YA)"$K+R==?)HO'QD?("(?(Q@<'52/
+M3Q\]5\#3O"-)U>KJITH\=\-+(6PPFRFX*&E$&S=&/Q )(@XI&S$H)!E)%!LI
+M+RXD(R8]OK/FDX[!:5[NY-=%.2-A*RE'- $T =( ^]A&)BA"15\O,RA#STA2
+M1D$?("DJ%QL?'!89'B$D at L-:)!\C*!]C:(5&'!\<(1\D)4>)D:@Q+X5 9+K>
+MT;^SU]O4MU8D>:A6'QN(-2*4>3D\158C.)Z0<4HM/R0B%QXDJS,S/C4N)BHS
+MK)?#=VZ?;'76S)^R*"XP*&8^20$[ =4 ^\0^(!\W.E0F*!] SS]#.3L<("DJ
+M$Q49&!,9'B$HB,A>(AL@*")C9H5('1T<'1L@(DF.E[ Q*WXX9K;6U,.QT=O<
+MM5 D>*U3'!R*.1Z9>3$T-44/'61..!L2*1$7$Q at AJ"XN.2XH(2<QMI_&@72;
+M:GO7SI^R)2HQ)F8T-P$P <L ^]1%(R$W.5,F*!](WD-#/$ D*3(S&1H>'1L@
+M)2 at PD==J)R$H,21>8HI0(!\C)2$F*D^1G+0V,'\T9+O<U+VKU.SRN3\5=[I=
+M'AV012.E at B\R+3H()6I1.!@:+Q07&!LAJ"LL-BPC'B4SP++>DX6M;(+DX["[
+M)R\T(%\L,@$P =, ^^)-/"T]*B8G&!LOV4I,0U G(1\D&" ?)2 B)"(F8-)Q
+M(" E(B5B8QFX'B8@#AAG&IJWLZ\S(1]%2L?BV%5<V-_;PTTK-%%G'V,?(R2(
+M-UU,32>89BP>&1TS@'D>(#Q#TC T0#DR*2PWJKW6 at W3/5R.%QS-R.S*OD$5H
+M2@$T :D ^\E+,QXP'QL=$1,GU4 ]-DH>'",C%!P=(B B)"(J9M-O'1TE)"=?
+M7A>\'B <#A1C'J"_N+(T'Q<Z4,/7UU]@U-_=O$$D-E-G'6<@'QV',$P]2 MC
+M-A ,$!895U(/(#1!TBDM."XJ)"DUL,/8BX# 3AUVQS!L-#"SET1=. $K 9T 
+M^]5')Q0P'AHD&!DNY$<]-D\J*2TO&B(B*B<I*RDT9>"!)24L*BM?61S()2,B
+M%1EH**_!Q, U(QLW3<GDYF5HX_+SQ3PE/EEL(7$K)1Z3,4$Y/ 9F.1@;'A\;
+M5U(/)2Y#VRHN-BLF(2LZO];MGH780"YVUCAO-36[D$%:,P$O :0 ^]DZ*!\9
+M%AXC'18LQU9/3E$E(D K3(TM(1\D)"$DA--['B D)2=C722-'1TBAYI0'*Z!
+MJ-0R(R-$.;O4T4<VQMS=RDDA*"\]%QH>'B-P16=30C*$'1L6&!X=%D$>'$M)
+MT24S.CH[+3$><J70>V7,-R at UO2I%,3&+?8EO/@%) << AL$U(!(7$P,7\@XD
+MQ$Q 04LB'T$L38\M'Q\D)"$HBM1Y&QTD)RE at 6"*1'1<>C)M0(+*#I<\P(1LY
+M/[_3UD\XP=C?PST:*C$]%1H>'!UR/E=%0A96#!0.$!$6!BL5'$-'T1\M,S(U
+M*"X<>ZW4A6VV*1XEOR at _*3&5A(IF+0%! ;T ^\8R'!,;$Q8<'!(JT%- 05 L
+M*TPW6)<T)"8K*R at RB>&+(R4K+2U at 4R>;)!HCF*97++Z!K]HR)1\V/LGBXU$\
+MS.?US#@;,C="&2$E("!Z/$<[.1MI%1,2%A07"C$9)3])UB N,2XP)3 AB\'I
+MF''+%R8?QRU$+3B?A8UF*@%% <0 ^^ C&!HA)# [*C19ST5*8%0F&"$_K,H\
+M)AXB'B(EA-"0(R(A)"IB9R*94YR8L)0E)4B/U=@M(B9%0;++W%0U8)W8S$TB
+M6$T5(2$<'Q],4&16,"V!5SQN5U\2?Y,#)DQ+T2$N1SLW+!T9;*+,=&72."\]
+MGBH],C:%<WYC-0%  ;T ^\,<$1 :&R at P)"U1S#L[4TXH%2!%L\\^(ALB'B(I
+MBM&.(!\A)BQ?8B"B6)Z<MY8C(DB2TM,N(!XZ0+3+WEHS69C7Q4$;6D\5'Q\:
+M'1U-25-('2N26CUU86(8 at 9$&*41*TAXM0C4Q)QH7=JO4@&^]*B4PH"@W*#B4
+M>7U;*0$Z ;< ^]HE&"$A&R(M)2Y5UD([4U,P(2Q.O]Q&*",I)2DSB=Z@*"<H
+M+#!?726P9::FPYXH*E>4WN(O)"(W/;S:XE4U8*/MSCP<8E4:(R$<'Q]82$9!
+M)SVK=EJ8A80KEJ<4-T))U1PL/S L)!P<B,'JE7?5&"PCI"P\+T"@@HE?)@$[
+M ;P ^]@Q)B0M-DX@*C \&$)*658A+2$>@L5-*!TC(R$BA-"4(R(C(R=O:B.%
+MI;2PMJ,K)C6JT-PP(B)%4F'*>WY]:ZC>U%HA(!\A'Q\A(1HW0U]G50P?)Q<?
+M'Q8B&"<BBTM)SS%,'2I',T?9:Z++=FG/."P\CR4T*SM[?X1*, $F :X ^[PJ
+M'QTB*D0:)"HV&#D[2E C+!XDB\Q.)ALC(R$FBM&2(!\C)2EN92&1K;NYNZ,E
+M(SBPT-LQ(!HZ3E['>7U[9:/AS4X:(A\A'1P>'QLW.T];3@,4(0H4%PT:"Q@:
+MBD)(T"Y.'"5!+D/6>*O3A'2[+B4QD"8P(S^+AH-$)0$C :P ^\PI%!LB)3P3
+M)2LY'ST[2E,K."HKE]A9*B J*B at PB=ZD*"<J*R]M8":DP,?%R*HH)4:WW^HT
+M)!XW1F;3>WIU:*[SV$D9*"8F(1P@(1Y&04=63 TK,A4A)QDA&"8 at ED1'T2Q,
+M&2 \*4C>B<'IF'_7'"PFD2DV*4>;D9%)(@$C :X ^^8S6#,S/3D1)R]=T4 \
+M1&@E)Q\@-<=;'QT@)B(GB]"B)!\C)2YE:2&!LG9BVJ(E("VNQ-HT(B1 1;.^
+MXW9<8ZS4U5(A(20A'AX?(!PH3&ME73=OE#D?&A8Z458HHEY&TC X-#H\-+O=
+M=JO+=V?3/R,_:"4N)T1L=H-++ $X 9P ^\XK22LV0#L3+3-ATCDP-V0B)!\B
+M/,M;'QX@)B$HCM&B)!\C)3!G:1V,LGQGUJ$F(#.TR-PU(!PU1;._Y&I37:O:
+MT$4:'R(?'!P='AHG1F!;32A=;B(0#0D9)S0AI55%TRTZ-S4Q,+K8?[#,AG' 
+M,QLU9B<L($9Y?8)$)0$U 9P ^]TG.R<V0C\;+"U9U3 at M.FDL+B8F/==L)B$G
+M+RTUCMZS*20H*CAE8B*9PX-IY:TI)3B]U.0X)" R/L+,[W%'7KGIY448)"8C
+M(" A(AXU2U511RA@<B,0#0H;+#D?IUE"UBLX-3(P*,;GE<GEGGO7("$O8B4Q
+M(4Z(B)%+(P$U :$ ^] 9'QD7-S4M&RV Q$I'-SDC'B0?'+YP)1T@*B(BA,RN
+M)!\H*RQE9B)A7(.)W*4K(2. at M]\M(1\^0ZZPY'9A7JG.UTPI(B$A'AX?(!PA
+M3EML6DX_3F-O?)I)BUU+DUU0U3<R23D],[C:;*G);F?0,R=.1R8T+TIA9(%$
+M*P$_ :H ^\,<&QLE0T(Z+D*/Q40^+SDB&R0A),-P)!T@*B$CA\VN)!\H*RYG
+M9AYL78J/V*0L(2JFNN N'Q<S0ZZPY6A65Z;5TC\B(!\?'!P='AHC2U)E3T,Q
+M,2<J-E$57SU!E%1/UC(Q2S(R+[?5<:O)<FZ])Q] /R,L)$AJ:X$]) $\ :L 
+M^\0>(Q<?/CDM(R)IQDE -4 N)2LE(,B#,B0G,RTPA]J_*20M,#9E7R-V:HR,
+MY[ O)BNMR.LQ(QLP/+^_\FY)5K+AYS\@)2,C(" A(AXK249:1#DJ,C T/UL8
+M94$YE5A,V3 P23 Q)\/D?L#<AG?4%"4W.2$P(4QX=I)&(P$^ ;8 Q\HW,S!0
+M0Q=-/4JAS4] -D ='B A2,.)'1T@(R,D>,FW(R B)"0]9!YE6:64V:PO(ANC
+MG-<N(RA'/JV6WVM63Z7/UDHI)20C!2"O%T],D8M13%)_3C at S57O)2HM41L4W
+M+)I&0SNSV&RWS8N/VE0H3#AJ03M*HEB'.RX!2 &D ,?$2$) 8U0I7TY:K=%'
+M-2H^'1L@(T_'B1T=(",B)7O*MR,@(B0F/V0:<%FKF=6K,"(BJ9_8+B$@/#ZM
+MEM]=24>@U-$](B,B(04>KQM018J)3T=(>D<N)55ZP3Z)2T7&,"F9/C at WLM-R
+MOM*/E<=(($(S:3TT3*E8A#4H 44!I #'OSXV+TPY##XS/)C92S0M0R8E)R=.
+MT9PF(B<L+C)[U\@H)2<I+CU='WMHLIODMS,G(:ZMY3,E)#DYP*?N8SA#J^'F
+M/2 H)B4%(J\C43R(BTD[0'A&*!Q,><<YBT]"R2\IF#PW+[[B=\G?F9K>-28Z
+M+FA"-5*U79 Z(0%# :D ^\\R5WHL*S,Y/53%TD1/64HG)AXH5LV8(AT@)2,C
+M/\B[(R ?(2<K/QY6;(*:VJ\I'AZA@]0Y(2922JF"V(M6;9C)UD(W)B4@(2 @
+M'R$<15; XV=46X"R[&5PB]A$CE=&RC PCC\X.KG:N*ROK(;70"DQ=9*+>WM:
+M9)0M1P%$ 6L ^\A%;X\_+#I$-$+"V#M 24(D(QXJ7-"8(QX@)2(D1<B[(R ?
+M(2<I/QI?;XB?V:XJ'B6IA]4Y'R!'2*R"V'U(8I3.U#<P)",>'QX>'1\=0U"Y
+MV59"27:IW5)A at M,_D$Y%RRDJB#0P-KC6P;FWL(W$,2$K>)B+>X%@79 J0P%$
+M 6L ^\8N3'4H(#$W*#_*YSL\2T8N+2<P8]ZI)B$G+BXQ1-?,*"4D)BXK.A]O
+M?Y&DZ+HM(R2OD^(^(R-$1+R3YX,W7YS:YC8N*"<B(R(B(2,H1U' ZET]0GVU
+MYU1AC. \CE!"SB at KB3,N+L3FT=/7S)G9(R4F>)V0@(AG7)4H-P$[ 68 ^\\I
+M-SJ+BJ]+O9C,T$]+7T,A)"4P7\.=)ATB*B,@@,C!(1\C)B<H)1Y@>W*?T+XK
+M(B:G:=XU)25 at 4Z5DW))8:IZ8UDX\*"0A(B @'B 8-,NXTV0S4W6.TEUU<--2
+MBVA"P2TQFD,_-JJ[EIJ2EWK702LW=K<V)5E_BY-R4P%: 8P ^]LJ+CY^0WLL
+MBEC"VD8^4CPC(R0T9LB=(QL?*B0EA\O!(1\C)B,D(1IC@'>AT[\L("BV;=XU
+M(R)51ZMGVXE*7YN8VT,Q)B(?(!X>'!X0+L6TTU4I1VZ.S5%N;<Y'BU]!P28J
+MD3 at Z+*NYHJB>I8K(+B,U?,,U(UZ%B(]Q4P%: 8D ^]XM,#A_9J-%JGO3[$H^
+M4STK+S ^<M>L*R G,R\QB=W2)B0H*R at I)A]SC'JGX<HO)2S$=><Z)R121+IU
+MZ95 7*.?Z$,N*B8C)"(B("(7*<J\W%$A0&R7W$YL=]U$AF$^QB<KDS<W)KC-
+MM\2[P9O4+B$YC=I!)V"$B)1P2@%5 8< ^]^@T=)1,SLH>93$U%=$8&$@("0O
+M<,"U)ATD*R4K@<?#*2$>)289(!M!A5NARLDM)2>H4MLW)1Y.2IM.UZI87U=Z
+MTEH[(B,A(B A)!\A+$Z:S6(]4V6&W%A;7-!(@VA-V34UFTA%/JBNBGV,E'+/
+M/B> ?<4]'B);;7!]>@%I 6$ ^[M.9)Q(&3 N?8G(VD\W4UHB'R,S=\6U(QLA
+M*R8PB,K#*2$>)2(5'!=$BF"CS<HN(RFW5MLW(QM#/J%1UJ%-5U=ZUT\P("$?
+M(!X?(ATC,4Z>SU,S1UZ&UTQ46<L]@U],V2XNDCU -*FLD8B4GW_#,R9_@<L\
+M%A]>;W%^? %K 6  ^^B(IL=<-4<UB9K4[5,W5%LJ*R\]@]3$*R I-#$\BMS4
+M+B8C*B<:(1Q4EF.IV]4Q*"W%7N0\)QU .[!?Y*U"4UZ!Y$\M)"4C)"(C)B$K
+M,UVHUT\K0%R/YDE28]HZ?F%)WB\OE#P]+K; JJ*JN8[*,".-D]Y*'"%<:W1_
+M=P%G 5T ^]9IV-U-+#) 9WVZS$I 1%P?)R at F?L6V)A\@)1\E>;W!)" A'1L5
+M%AHRED:>S,LL(RRG2=DU)!LQ49%!U+-189&)UU%&)24C)"NL(295:WM8T&8_
+M6&-TWU186=12DWM)Q#HRH3] -)V<=UTU>V#(02Y:.2HV(SAV<I5C<0%3 4\ 
+M^\5(M>)((B8U<XV_T$0S-U4A)B<JA<JV(QT=)2 J@,#!)" A'1<1$A8UFTN@
+MS\PM(2ZV3=DU(A at F19=$TZQ(6Y.)W$8[(R,A(BFJ'R1@>X=CTE<U3%QTVDA1
+M5L]'DW)(Q#,KF#0[*IZ:?60[A&B_/S-;/"PT'3=X;99F=0%6 54 ^^9NW/M5
+M*2DU>I7+Y$<S.%8I,C,TD=G%*R(E+BLV at M+2*24F(AP6%QM%ITZFW=<P)C+$
+M5>(Z)AHC0J92X;<\5IF0Z48X)R<E)BVN(RAM at YYPVE,M15I]Z45/8-Y$CG1%
+MR30LFC,X)*NNCG!"DGO+03AF/BHV(#9T:IEH< )4 /O61]+74R4F0F5WN\5!
+M/E%.*!\H*(;$PB4@(20D)V/%OB8@'R,3$A47(J ZK,/0+R0RI#O:/R0?,4Z7
+M)-"^46.+E-E;2R,E)U1D42(I)$1<A]-J75M@:]Y284'24'1O2YP[,JM$-SZ1
+MH5I18V)GN,B\7Z$I.R&(:EUO=U8!2P%) /O;4=?.4C$H.VZ%O,D_,41'*AXG
+M+HW)PB(>'B0F+6G(OB8@'R,/$!$3)J5!L,?1+R(TLS_9/2(;)D.<*-&Y2%V0
+ME-Y00"$C)5)B3QXG)D=8@=!@5D]::]E&6S[/1G-H2IHV+*(Y,C22GU]7:6IK
+MLLR_8:$C-R2.;%5P?%D"40#[Z%ODXEXT+$1ZCLG=03%$2# J,S>8V-,J(R8M
+M+C9NVL\K)20H%!06&#"R0[K3W#0G.+])Y4(F("-"J#3>QCQ6E9OK4#TF)RE6
+M9U,C*RI'9X386$U(573F0U1&VT!P:4>>-"VF-B\NG[%K7&AN?</6T7:J)#\F
+MBV=1<7Y7 5 !4P#[RE/2RV$Q.T%[;JW$34Q:/RLE)22#NKTE'R(C(1]5KKLC
+M'QTC(QPA*R"A<*>\T2TA,Y1%V4 at G("<TA#+1P&)5C9S0848K+J<V?)DE(!LJ
+M+A_*95Y<7VC83U0TQDU&8DU/,S"L344[4,6. at J9C/D(G**(G*4EYF6AX?HU_
+M 4(!4 #[TUC2T& J,SB"=;+,1CU+-B at E*BJ+P< E'R(C(21;L\ E'QTC&1P9
+M(22F>ZO!TRT?-:1)V$0C'!XOA3K4N%1)CYS453LG*J4V>Y(='!TM+AO%85=0
+M6VC30TLRQD%'84A+,2VD14 Q4L2-?YU=-STG**<E($>$I&QT=HB" 44!50#[
+MXV?AY6PQ.3R+?K[@33M).C L+R^/S=(N)"DJ*"=DPLPM)B0J(R,@*!^]>\#&
+MVS0D/:I5YTHI(B MB$#BR4M$E:/F7#HM,*H[AYLA(B,K-R#05TM)4VW>0#\T
+MS3Q*8$-0+2VJ03LI6M"*?Y]6+CLN,;0J($R'I6=L8X:" 4<!6@#[OES,RFDQ
+M/T5:89[%;E5H3B at F(2M at MK\B'Q\F(R4^'+LB'1LD'Q<B*1JN9Z:NTR\@-7]@
+MS&,G(2=$0:?'R%AEB)G-?%(E)Z6*?98F)7-:O;*V7UI at 5&S<6TZ$TTH_7$8M
+M&B>]3DM#A]1W8U5=<"\B)$HN'I>>@6UZC&)+ 4D!3@#[R&+/TFHM.#U at 9Z++
+M9D170B4F)C%HO<(B'Q\F(R="(+\B'1LD%QL;)B&R<JFSU2\>-X]DRU\C'1X_
+M0J_*P$I9BIG1<$<C):6,?I$@(G1<O;.N54Q.3FS73T6"TSX^63\G&"2U1D8Y
+MB=-I33Q%7"$?)44G&I^EAV]V:%%: 4P!3@#[V''=YG<U/T-G;JS>;4-61RTM
+M*S9LR=0K)"8M*BU**LLK)"(K)2,B+B#.<\.XW38C/Y5PVF4I(R ]1;78T4%4
+MD*#C=T8H*JR4BYPE*G]:Q+:\34))1W'B3#F$VCD]5STH%"2[0D$QD=]7-R<P
+M2!<=)E P'Z6PCFUN5DA0 4P!4P#[NV/&R&\A/4E=746_8$=R1B<D)!UGJL$B
+M'!PC)"H<'\$D'B A'!HC+1>I9:VDU2\A)F)TT&HF)"1%.*?0S&-5?(/ CU,J
+M**.:=)<M(95,P+"M9V!44TW68EB$S4XE-E at N0BO 5CQ#<\Y03VQ6<R8E(5PN
+MOI"#44)]6690 4<!40#[R&S-TG(A.4)C8D?$7#QF0"<D*2-OL<0B'!PC)"H<
+M'\ D'B A'2$6)1VM<+"HUR\?*')XSV8B(!M .:_3Q%5)?H/$@T at H**2==Y8I
+M(Y))O["G85=&3TW15D^"S4(C,4\G0"BX3C<Y=<U#.U0_8APE(5 at JPI>(4T)[
+M2V!< 4X!6P#[UWK9Y8 J04EJ9T_383EC0RXK+BASO=8K(2,J*S,A),PM)2<H
+M*"(6*1S*<\RPWS8D,'B$WFPH)AT^/+7AU4Q$A(K6BD<L+Z^KAZ(O*ZI1R[^X
+M6$L_1U+<4T.$U#T=+T\F.BB^2C(Q?=DT)T$Q82,N*& RRJ**3SUS.F%9 5 !
+M8P#[MU3"Q70C+D->*Q[$@F*;9"8G(2%5D+\@'1TD*"4<2+\D'"0F'F-17Q^G
+M8*J*T"\A($AES8 F'B)&-Z?!S5A5=%2TBUTE)YF?AIX?)BM)H*985U946#[<
+M559KPTDW&5%222VY2#8^<\\J)V(P(1T=)5ACS*13/9!F;'Z! 6P!DP#[QEW(
+MSW<C*S]C+1[)@V"78R8G)29=E\(@'1TD*"4>3< D'"0F%5 at D,!RK:JV/T2\?
+M(E9JS'TB&AD_.*_&QTU+=E2Y at U(C)YVDBYT<)B9)HJM:6%%(5#_924QIPSTQ
+M$DA+1RJQ0#$V=<XE(58D'2 B)UEET:A3/I!A>(B& 7,!H@#[TFG7XH4L,T5H
+M,276AEJ/8BLN+2MAH]0I(B0K+RXB3\LM(RLM(54 at -Q['<L>4W#@D*EUVVX4H
+M(!D^.;73V$-%?EG(B4\E+*FSFJLD+3%"GJUB3$5!3$+E1D)KRC at J$4A)02JW
+M/"PP>]HC'%,I(B(D(EIMW;),,HE><9*2 7X!K@#[NTJ.P(@L.T4A'BZP at T1V
+M>"HE'QY2,[\F&QXI(2,A>[@D'2,D*5]89QRC2:5LS#4C(S8N7(LD'R-(/):P
+MS5I>2R<^?T\>()6^EZ<H)3Y25IPV*UA=7(S88$MAND4W24]-4"RQ4CXW1L(B
+M'; E(R$9*:;&TK5'<E1#T]31 =@!WP#[QU&2R8 at I-3TA'RRV at T)R<2HF("!9
+M.<(H&QXI(2,IB;TD'2,D(F(I/!ZH4JIQS#4A(#@T6XLD&1I .INYR5!01R%%
+M>T(;'9C%FJ<H(SQ05)XZ*U%/48G44$%@NSDK04A(3BFI23 at U2<(@&JTC'1L6
+M**S+UK='=E@^U]S9 > !YP#[S%RFX9DQ.$0F("Z_B#QF;R\Q*R1;0M P(B4P
+M*"HOD,DK(B at I*U@@/!R]8K=TW3XC*#X[9Y0K'AH\/*?'VTA)3R)0 at 4(=)Z;1
+MJK8Q)S9,6*8U(DA&4(?C4C=?OC8H/T9%2"FM23$Q2\LE(KDH(!P6);/8XKM"
+M<%(YX^_L ?,!^P#[M3=IM8<F0#8>.HZ@;#18=" B(1\>%,$F&!LB*"8B?KLD
+M'2 H(EYS912H,)=:S3LB'2HL<IDF(B=12HVBRF1@*R!&9S F+97"@FHB)3=6
+M.H,@%UE34X[04TLD-#M,3%532BBU2CL^28T?*"\]2&6'F=;#3&MF4D-0K=;7
+M =P!W0#[NSMLOHHF/2\=.Y"E:"=(9!TC(B$C&,(F&!LB*"8JC, D'2 H(F1E
+M6AFM.9U?S3L@&2LN;YDF'!Y'1Y&IR%I,'AM,7R0B*9?(A6HB(S)1-X$?%5%'
+M2(O,0T$C-2] 1$Y.2"6M03,Y2XL=*"\]1F6)G=S(4&UG5$92LMW> >,!Y #[
+MQ$5ZU)HM/S8</)2Q<"="9B4N+24E(,\M'R(I+RTPD\PK(B4M*U]>6AO"1Z9A
+MWD0B'BXR=Z(M(1Y!1YNUVE)$'Q9392DJ.*S9EWDK)R].-X8<#TM 1XG;13<B
+M."P]0DQ+0B6Q02\V29 A+SA"2&J1J>O56G-H3SY.Q_CY ?X!_0"BLR89D),H
+M/S--1H*)B#I<?28A)!@7$K\D'1H@)"<?<KXA&P,AUEYJ8ARC(Y(VS5 at C)2YU
+M0* C(2)0185)TE98+TAR+#XE*HZKDZ8C)$!7'U$E2%A+6&_)23U):S1"2&53
+M/!^B3SLW-5"DS=C=T:E^7,W7Q\K-0V_6U]W> =X!X #[LB,;FI at J0#%+2(B,
+M at RI%;2,B)1H<%+\B'1H@)"<G@,,A&R$A)&!L8QZH+9@[S5 at A(S-[1*(C&QE$
+M/H=-SDQ(*49X(3(C)Y&QEJ8C(CE0&DX at 0U!!4&S%.3-(;"@V0%Y..AR:1B\P
+M-$JDSMC>U*V$9-+;R\O/0G';WN3E >4!YP"BP"LAK:<P039%1)&:CBQ$<2LM
+M,!X>&L at G(B$G*RXMA\\H( ,FU%QG9"*]-YT]WF$C*#: 3*HJ(!D]/8]9X$1 
+M+DJ!(34H,:' J+4L)C=.&$X>0$P[3FK4.RE';R4S/EQ+-!R>1BPN,4NKV^GK
+MXK^5=^?MT];3/V_J^03_ /N7'Q1NBR Z/T5%?'Z at 2%5@)R(A(!5=P"<>&" K
+M)2!PMB(<'BDE96=G':8M>Q;,9R<D0&-(HRP=,TTQ=A[.6%-[5AY:1!PHD:"F
+MKB,D/6 I(Y132')4<Z13.FT[-#5*7TH_,&U'/#Y,-FZ)MES26"LATLLX0DI[
+MSM/AW^ !WP'= /N1&Q1[E" [/T%)@X&@/$!2)R$B)1IBOR4<&" K)2A\NR(<
+M'BDH8FMD':XT at AS+9B4H2G%/I"P7*D,J=B#)34A\6"!//!HFDJ>JKB,B-E at B
+M'HU,06=)<*-#,&HX*"E"6$4]+65 ,3=*,7*+N6+*3B$;U,\^1DU\S]GIYN<!
+MY@'D /BB(QF,I"4^1#=$CH^I/SY8+BTM*AQGRRHA'R<R+"R!QRDA(RXH8&5D
+M(L(_ at QO:<BDP4GI:KS$<+#DI?R;62D6'7BA,.AXKG;*\O2PF-58@'(Q*0&9&
+M;J]%*&@Z)29 5D,W+6D_,#5&+WZ at T7786"@@Z>%%3DU]W/@$_P'] /N-(CE>
+M82,W.#XV>V>P/&18)B0D&AQFP"L>'"8B)2%?J2$>'B8C9&ME&YL;>1:]AQX:
+M1TY6JRDF'D at M+B9Q1$M<5%-<0!PFHY:<I"<C-58N>FEE6459/"9+/T1"+2A.
+M0SA$*BU&0#\_*8>JPW_(/R<BUL5!;6&OW.#>W-T!W0'A /N$&CML:Q\Y.C at U
+M at FRT-5)((R$C(21PQR@:&28C)2-JKR$<'B at C8VMD'JD=>!>]AB >45Q>K"H?
+M%4$F*BAS.SM544M2.1 at CIYVDIR(?+T\F;UY?435*/"M +S8V'AU&/C4_)"=!
+M.#@Z)I:MT)/'-Q\BT\,_:EVQX>7GY>8!Y 'H /B<*D%]>R1!0B\RBWF^-D],
+M*RLO+"IXTC(@(2\N+B=KN"@A(RPL8F)A(+HE=QC$E"@F6V5BN2LF&3 at G,"9[
+M/S543T5,.AXKL:BXMRTE,$XD;%U653= -RT_)R\Q'!I$/#4\)2H_-#8X)J#%
+MYZ/3/B4IZ]5#:%.U^/P%_P#[H"$Y56,A+#4M)'%<G#9M6"XA)2$@6[<L'APG
+M(R8;4W\@(!HD)%MC;2.>&CH[LY at E(44[9+ at L("Y<-QDY7#E>7$Q244P='I^#
+MF* C(CM>.7!K;%T]3#NC,3U!+B<X01X=/R\E03<[-CJ"J,%\OCLH)\R^7S]Y
+MVM79X=_> =P!W0#[G1\]86T=+C<F(GEAH"Y;22P>)"@H9;XI&ADG)"8=7H4@
+M'AHF)%IC;!VE%S= M9<B(TQ':[DM&25,+AT_8#-03D9(140;'J2,H*,>'C57
+M,69 at 9E4M0CVI*R at M(B(V/!L:.BD?/"\T,3>3K="3OS0?(=#!7#UYX.+DY>/E
+M >4!Y #WMS%)=G\B-C\D)'UMK#)623$H,#,N;<DS("$P+R\A7XXG(Q\J+5E:
+M:1ZQ'S=#O:,J*55.;<8N("E0+A5$;#A)14%"/D =);"<M+,I)#95*UY?75DO
+M.#FH)!XF'1TP.1D:-RHB.BLR+S><QNBDS#LA)N7/6CF \?L%_P'] /N-'C)%
+M7!DV)BTE7TRG,F-7+20I'AI[M"<=&B,A(R N4!\<(QHD56=F(Y,J4F.FG"0B
+M3#-OO2<A)4I%>(HC9%ME35)D7R(GEH..D",B,59;7UQKNT\YFR5 /BTF@% X
+M'CY)(S at H-"Y#07N?NFZZB3$DQK=;:=';V]_=I]P!W@'? /N1(CA19A4X*"4C
+M:%*J*E%(*R$H)2*%NR09%R,B(R(Y5A\:(QPD5&=E'9DK5&NJFR C4CUTOB@:
+M'#Y"@(X=7$]7149652$IG8R6DQX>+%!455%ELS\ODQ\U+AX@@%(Z&SA$'3(C
+M+"<^/H^FRX;#B"TBTKU79\S>Y^C>I> !YP'F /NF+C]D=AI ,"DE:%NX,$Y&
+M,"LT,"B-QBX?'RPM+"8Z7R8?*" M4UYB(*HX6G&TIR8F64-VRRDA(#E @I@@
+M6DA-04-,32 OJ:*LHRDD*DE+2U!<MT$IER(K)AHAA4XU&3E!'C4A*"4\/IG!
+MY)G3EC,GY\9/8>'V_O_[P_P"_P#[GAHN/5H6*1TI&T5!LR=U5#,@*B$=7JTI
+M'AHB)2$?(#(;'2(?(U%F92**5T=;F:8F(C\G;K\H(24V.E@@55936%U<9&(E
+M(XZ9AS,@)#%)5V^K9#%!1BA'0B at CAE:&AW\^5B(M/2LX.D)GF*I8I(TJ(\_'
+M4E1=9VQW<GO? =T!X0#[I!XT2F,2*1TA&4Q(MAYD2#,=)R at E9;0I'!<B)B$A
+M*#@8&R(A(U!F9!Z475!FGJ4B($0M<< I'1TO/%T>24I)35)055<D*)>ECS8=
+M("Y#4&>B72LT.A at W,B @B%N.BWPY41TG.",R-3]ZH[MPK9(J(\VZ.3]$4UQG
+M9G;B >@!Z@#[LR8Y5W$8,B8H'4M1R"AA13 at G,S,K;K\R(1\K,2HG*C\@("<E
+M*D]=82.G;EQPJK,G)$DR<\LL(R$C.FHC1$-!0T]+2TPA+:>ZI40G)BX\1V.B
+M5"XU-1HY*AP at C&>*AGPW3ALJ-A\M,S^'OM:#Q:$Q*NG'-#]066!K>X[\ O\ 
+M^Y(=+C15$B8A*2 V,+,J67 E)"0<&%RA+1P7)"$D'ADC%!TD'2(_:6DA at 48F
+M2W^G(1\R+E^^)2 B(CY93CI955Q56U->*!V.FU=W&R0C/U*GH%I-0CI40B at H
+MFT>1?EC905,H-#0V-D!$4(:31)TF(B/3LWE/X.'BWN34X0+; /N2'# ^6A(C
+M'24>.S>U)DYD*2,<(AYCJ"T:%"0B)!X>*A(9(1TD06EG'HU0+56$J!X<,"Q=
+MOB4>'QTW440R3TE.2E!(4R,AF*5:>!LB(SE(IZ!.2SHK0S$=))U3G(E;T3Q.
+M(R\O+C [05^8HE*<(R$AT*YR2-C9VM;9S.4!Z 'F )JC*#A(9ALK(RLB/D/*
+M+DUG,2\J*R5LLS8?' ,MWB4A*Q8>*28H/V!C(*!@.5^0M28F-2YAQRXC(1LV
+M33XN24)%0$8^22$IJ+AJA2(D*#1"L*5)33 at A."@=*:%8IXE=USE+(2TM*BLX
+M/VNMNF.S+R F[+MG1^;IZN;FW/H"_P#[E1PH,%(1(1XB'"@7K3-,<BPC*"07
+M(8$P'AHA)"8>$A<4&B$?)$5L9B5N.1LQ9*PD(B\A$TD@'B$B,4A6.6I28&)8
+M5V8H((TS?0\A($=)4UUM3B])944Q2*Y4GX=1:< Y5B at W,BLV03E4:65J(2 C
+M)=.W7RW9V]O<W^+C > !W@#[E!DH-U81'AH>&BL;LS5'<"XB)2H=*(@P'!<A
+M)28>%!H0%AX?)D=L9!]X0A\[::TA&R@>$DL@'!X=*D!,,6!&4E=-3%LC(I8\
+M?@\?'$=$2U]M0BU!4C8H2+!;K(]8;;TT42,R+2,O/#9;<VYQ(!TA(]:X72W=
+MX>'BYN;J >D!Y0#UHR,O0& :)B D'BTCPCE";#8N,3,D,9,Y(1\J,"\E&!P5
+M&R8H*D5C8""*4"M%=;HI)B\<#U$I(2 ;*3Q&+5H_24U#0E$A*J1*B1 at D(5!"
+M16=R/2\_12PH4;1DO:-7:+\R3R$P*R$M.C9D at WY\+B4C*/#%5S+Y"/\ ^)4>
+M)B5)$R <&ADF&ZLL98PS*2,B$C)!(A\9(1XA'A$6%A,@(B4J<&\E8T<E-$NT
+M)R0A)&]@&!H9(QY,82Y<4EY26EE>+AY%=1DE(B)AC4Q3G4TO1%PR6)Q0J(9Z
+M1E=P/E8O03,H*$ E&#9$'R,@(230FW9/W>#AY /B N8 ^Y$:)"M-$QT8%A<E
+M&; O88<P*"0H%CE((AT6(1\C'@\2$ \=(B<L<&T>;$XF.U"U)!T=)W5D&!@6
+M'A=$5R921E!'3TY3)1U,>QDC'AQAB498GT$M/$XK6IU6LY*#2EMR-T\J/"X@
+M(3HB&CI%'2$>'R+4G71-W^'BY>GIZ at +K /B@(BDR5QPE'AX;)!Z],5F".#0O
+M,1Y"4RLB'BHJ*R44%Q44)2LK*F=I'WQ:,41<PBPF(RE\;B$=&!P60%$B3#]'
+M/45$22<I6(0>)R,A<(Y!7:,\+SI%*F"H6\"ECT)3<#9.*#HL'B [)!Y"4"$E
+M(B,FZ:-L4O7Z^_X%_P#[C1\@($ .'QL;&B9,IC)N52LG("$3("T1)ALB)2 C
+M%!X4$QX>)3AR:2%94"<O)[8L(C%[3PT@)S4>+%5F5&!-66987&4G)6<1)"$@
+M'8S;1E:20S-7)8:C6IQ^BUQ)-%!'6B(_,2LP1"DC,KPK(1\C)*MO:2W8V-W@
+MW^?F ><!Y #[B!@<)4(.'!<8&R1,IS!H3R<H)"45*#<3)!DB)B @$!8.#QP>
+M)3IR9QIB52<W+KDI(#:"61$@)3(9)4U<2E1!2UI-45<>(FL6(AX<%HW805N4
+M.S!/'XFG6J2+F&5*-U1"51TY+"0G02<A,KDH'B C)*MP92G at X^CKY.OJ >T!
+MZ@#YE2$A*DH5)!T@'BA3M#)A2B\S+B\;+C\;*1XK,2DH%AP3%"$E*CAI8QMN
+M83 [-\<Q)3N+:1TI*C07)$E61$\Z0E-#1DXB+'4;)B A'9K@/F"8-3!+&HNQ
+M8ZJ:IV4\*T] 4ALZ*B,G0RLE.<$J'B$H*;IS72_V_?__^P/_ ?T ^X8>'APV
+M%1H<'6FE/D at I3DTM(Q\B%AL8$"(<)"(D)A44$AHB(B4M<60A0V$A.ABR*A]S
+M%1LA'R$E(2I376->75)N9V-F+2,?(R8<&12TUDI.CSQW66E\=*6-A5A)0S8[
+MA5$J-2 at O+SG@+AZ:'"(JF93(J=/CV][>W^+CX at +C /M\%QL:-!,8&A]NJT-*
+M)TQ(*R,?(A(@)1(>&B0D(B(1$! 8(" B*W%D&T]D'3X>MR<@=18='1PA'1 at A
+M2%%33DU"7UE66"8?&Q\D&A<2MM%$4I$Y<%!H?WBKE(Q<34(U.XE/)"\C+20\
+MX2@<FQD<*Z&9R:W7YN+GY^CJZ^H"ZP#UC2 ='SD7'!XC<[104BE$0S J)BD8
+M(R46(QXK+"<G%A44'"0D*BUH71Y6<B(X(\0O(WTC)2(D*"$8(4=.349%.EM/
+M1T\G)" D)AX;%[K>15J7.6]097^ O)^79$<W*C2#224R(2<C.N0K(:8A'RZC
+MG-:YX?C[!?^!_@'_ ?X ^W8<&14J%A@:2BDL/D(C36$E)!\E%!85$Q09'R0?
+M(A,8("0A(2(F<&HA.UX:11E<'R 6'B(D'!L?*D-/75E176U98E]A/"<?'WPD
+M&1F6I$I-BD;=X9A7HYUT1D)&-T*5-3\^&S@]-DO5+1 at P)1<=9'''K]O5V=W>
+MW^+CX@'B >, ^VP5%A,H%!884"\N/$ @25LC)!\E$!<:$1(7'R8?(!$6("0A
+M(1\D<&H;1V$611IB)!\2&A\>&1L7(CM%44M#3U],5517-B,;&WDA%Q>8GT52
+MBD+5VH]4K*E[3$1'-T*5-3TX%3,[*T[5)A<P(1,@:G;0N.3>XN;GZ.KKZ@'J
+M >L LWT>&!@M&!H<63 at T044B058H*R8L%AH<%18;)BXF)18;)2DF)B<F9V,>
+M3F\;0!MG)AX7(@,AOR(;(#D_2D(Z1E8_2$E--R@@(($I'!N>KD-7D4CCY9%<
+MO+Q]23\Y+CF.+C<Y&#$U*E#<)18W)Q@>9WG at SOSX_ 7_@?X!_@'_ )]G'Q06
+M'1,8&!H?/CM!(59:-"(D(Q05$Q00'APJ'R(=!"'8)"4N:VPC*'47%%A1'!LA
+M'Q\C&R(I+CQ-45=?65Q65&>80B$H)$8=&1S2Y4I>@E[:WMKAH7A-04<]2HXY
+M34LHHCHV/5?92)"/C8AG9%[3T=78W=W<W^/@W at +B *!=&!$4&Q$6%AP?/CP]
+M&U!2,"(D(Q(3$1(.'!PL'R(=(0,CV"8B+&ML'31X$Q==6!P8'1L;'1 at B(28T
+M0T9,5$Y12$A>DS\=)"!#&18:U-]%8X!:UMW2X*Z"3T%'.4J1.4Q'(IPU-#)9
+MV$&2E(Z+:61EW=S?X^;FY>CKZ.8"Z@"@;B$3&2 5&AHD*$,]0QY)3C8I*RH6
+M%1,4$B C-"8I)"@#*=$L*BYB92 [AA at 77UHC&B(C("(@*24D+CL\0DI$1SY!
+M7I%!(BDE3R@>'MSP0V6"8.CIV.Z]BDTX."]%B3!!/R.?,RPQ7>9*F)F/C61=
+M9NWV__X%_X+\^@'^ ?\ M%,?'!H5$Q04'2,W/1@>6U8](!LA$109$A$;'B@?
+M)B B'",B'B,C8VHH'8<T:C<7$AT9&QL#'L0I/D)54DE48TQ)3:NB.B4@&T<G
+M&P^=V&)5=F#4U>2H<$L_23)<BCA-2D8DVSTX.U?93<N)655=86?4UM77V=W;
+MWN+?X at +A /M)&!D8$Q$2$A8?-T(4%E-+.2 ;(1(1%A /&1XJ(2 at B)" G)B(C
+M(6-J(BF*,'!"&PT:&QP=&AL>(38Z2T<_2EE".SZAGC8A'!=#(Q at -GM)=6G9<
+MTMG at K7Y1/T<O5HP\2T9"'M4X-C!9UTO1E&!<76)NW]_=W^+FY.?JY^H!Z0'H
+M /M:(1L=&!46%ATE/$4<&DU(/R<B*!43&!(3'24R*3 J+"@O+BHJ(UIC)3"8
+M-75%(QPB(2<A'R,E)30T0STU0$\X,3ZKI#LF(1Q2,B(/J>-;7'%AY./HNHA,
+H.C\A48 at V0SHZ']@V+B]=Z5W at GE]=6%9O[/?]^_S__O__^_X!_0'_    
+ 
+end
diff --git a/lib-python/2.2/test/tokenize_tests.py b/lib-python/2.2/test/tokenize_tests.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/test/tokenize_tests.py
@@ -0,0 +1,175 @@
+# Tests for the 'tokenize' module.
+# Large bits stolen from test_grammar.py. 
+
+# Comments
+"#"
+#'
+#"
+#\
+       #
+    # abc
+'''#
+#'''
+
+x = 1  #
+
+# Balancing continuation
+
+a = (3, 4,
+  5, 6)
+y = [3, 4,
+  5]
+z = {'a':5,
+  'b':6}
+x = (len(`y`) + 5*x - a[
+   3 ]
+   - x + len({
+   }
+    )
+  )
+
+# Backslash means line continuation:
+x = 1 \
++ 1
+
+# Backslash does not means continuation in comments :\
+x = 0
+
+# Ordinary integers
+0xff <> 255
+0377 <> 255
+2147483647   != 017777777777
+-2147483647-1 != 020000000000
+037777777777 != -1
+0xffffffff != -1
+
+# Long integers
+x = 0L
+x = 0l
+x = 0xffffffffffffffffL
+x = 0xffffffffffffffffl
+x = 077777777777777777L
+x = 077777777777777777l
+x = 123456789012345678901234567890L
+x = 123456789012345678901234567890l
+
+# Floating-point numbers
+x = 3.14
+x = 314.
+x = 0.314
+# XXX x = 000.314
+x = .314
+x = 3e14
+x = 3E14
+x = 3e-14
+x = 3e+14
+x = 3.e14
+x = .3e14
+x = 3.1e4
+
+# String literals
+x = ''; y = "";
+x = '\''; y = "'";
+x = '"'; y = "\"";
+x = "doesn't \"shrink\" does it"
+y = 'doesn\'t "shrink" does it'
+x = "does \"shrink\" doesn't it"
+y = 'does "shrink" doesn\'t it'
+x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+''';
+y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+";
+y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+';
+x = r'\\' + R'\\'
+x = r'\'' + ''
+y = r'''
+foo bar \\
+baz''' + R'''
+foo'''
+y = r"""foo
+bar \\ baz
+""" + R'''spam
+'''
+x = u'abc' + U'ABC'
+y = u"abc" + U"ABC"
+x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'
+y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"
+x = ur'\\' + UR'\\'
+x = ur'\'' + ''
+y = ur'''
+foo bar \\
+baz''' + UR'''
+foo'''
+y = Ur"""foo
+bar \\ baz
+""" + uR'''spam
+'''
+
+# Indentation
+if 1:
+    x = 2
+if 1:
+        x = 2
+if 1:
+    while 0:
+     if 0:
+           x = 2
+     x = 2
+if 0:
+  if 2:
+   while 0:
+        if 1:
+          x = 2
+
+# Operators
+
+def d22(a, b, c=1, d=2): pass
+def d01v(a=1, *restt, **restd): pass
+
+(x, y) <> ({'a':1}, {'b':2})
+
+# comparison
+if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+# binary
+x = 1 & 1
+x = 1 ^ 1
+x = 1 | 1
+
+# shift
+x = 1 << 1 >> 1
+
+# additive
+x = 1 - 1 + 1 - 1 + 1
+
+# multiplicative
+x = 1 / 1 * 1 % 1
+
+# unary
+x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+x = -1*1/1 + 1*1 - ---1*1
+
+# selector
+import sys, time
+x = sys.modules['time'].time()
+
diff --git a/lib-python/2.2/this.py b/lib-python/2.2/this.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/this.py
@@ -0,0 +1,28 @@
+# ROT13-encoded text of "The Zen of Python, by Tim Peters";
+# decoded and printed below.
+s = """Gur Mra bs Clguba, ol Gvz Crgref
+
+Ornhgvshy vf orggre guna htyl.
+Rkcyvpvg vf orggre guna vzcyvpvg.
+Fvzcyr vf orggre guna pbzcyrk.
+Pbzcyrk vf orggre guna pbzcyvpngrq.
+Syng vf orggre guna arfgrq.
+Fcnefr vf orggre guna qrafr.
+Ernqnovyvgl pbhagf.
+Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
+Nygubhtu cenpgvpnyvgl orngf chevgl.
+Reebef fubhyq arire cnff fvyragyl.
+Hayrff rkcyvpvgyl fvyraprq.
+Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
+Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
+Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
+Abj vf orggre guna arire.
+Nygubhtu arire vf bsgra orggre guna *evtug* abj.
+Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
+Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
+Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
+
+# Build a ROT13 translation table for upper (65='A') and lower (97='a')
+# case: each of the 26 letters maps to the letter 13 places ahead, mod 26.
+d = {}
+for c in (65, 97):
+    for i in range(26):
+        d[chr(i+c)] = chr((i+13) % 26 + c)
+
+# Decode: map letters through the table, pass everything else through.
+print "".join([d.get(c, c) for c in s])
diff --git a/lib-python/2.2/threading.py b/lib-python/2.2/threading.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/threading.py
@@ -0,0 +1,698 @@
+"""Proposed new threading module, emulating a subset of Java's threading model."""
+
+import sys
+import time
+import thread
+import traceback
+import StringIO
+
+# Rename some stuff so "from threading import *" is safe
+
+_sys = sys
+del sys
+
+_time = time.time
+_sleep = time.sleep
+del time
+
+_start_new_thread = thread.start_new_thread
+_allocate_lock = thread.allocate_lock
+_get_ident = thread.get_ident
+ThreadError = thread.error
+del thread
+
+_print_exc = traceback.print_exc
+del traceback
+
+_StringIO = StringIO.StringIO
+del StringIO
+
+
+# Debug support (adapted from ihooks.py)
+
+_VERBOSE = 0
+
+if __debug__:
+
+    # Mixin that writes "ThreadName: message" lines to stderr when the
+    # instance was created with a true `verbose` flag (default _VERBOSE).
+    class _Verbose:
+
+        def __init__(self, verbose=None):
+            if verbose is None:
+                verbose = _VERBOSE
+            self.__verbose = verbose
+
+        def _note(self, format, *args):
+            # Format lazily: only when verbose output is enabled.
+            if self.__verbose:
+                format = format % args
+                format = "%s: %s\n" % (
+                    currentThread().getName(), format)
+                _sys.stderr.write(format)
+
+else:
+    # Disable this when using "python -O"
+    class _Verbose:
+        def __init__(self, verbose=None):
+            pass
+        def _note(self, *args):
+            pass
+
+
+# Synchronization classes
+
+# Lock is just the low-level (non-recursive) lock from the thread module.
+Lock = _allocate_lock
+
+def RLock(*args, **kwargs):
+    # Factory function: keeps the _RLock class itself private.
+    return apply(_RLock, args, kwargs)
+
+class _RLock(_Verbose):
+    # Re-entrant lock: the owning thread may acquire() repeatedly; the
+    # lock is only really released once release() has been called as
+    # many times as acquire().
+
+    def __init__(self, verbose=None):
+        _Verbose.__init__(self, verbose)
+        self.__block = _allocate_lock()   # underlying non-recursive lock
+        self.__owner = None               # thread currently owning us
+        self.__count = 0                  # recursion depth
+
+    def __repr__(self):
+        return "<%s(%s, %d)>" % (
+                self.__class__.__name__,
+                self.__owner and self.__owner.getName(),
+                self.__count)
+
+    def acquire(self, blocking=1):
+        # Fast path: a recursive acquire by the current owner just bumps
+        # the depth counter without touching the real lock.
+        me = currentThread()
+        if self.__owner is me:
+            self.__count = self.__count + 1
+            if __debug__:
+                self._note("%s.acquire(%s): recursive success", self, blocking)
+            return 1
+        rc = self.__block.acquire(blocking)
+        if rc:
+            self.__owner = me
+            self.__count = 1
+            if __debug__:
+                # NOTE(review): "succes" typo is in the original source.
+                self._note("%s.acquire(%s): initial succes", self, blocking)
+        else:
+            if __debug__:
+                self._note("%s.acquire(%s): failure", self, blocking)
+        return rc
+
+    def release(self):
+        me = currentThread()
+        assert self.__owner is me, "release() of un-acquire()d lock"
+        self.__count = count = self.__count - 1
+        if not count:
+            # Depth hit zero: give up ownership and free the real lock.
+            self.__owner = None
+            self.__block.release()
+            if __debug__:
+                self._note("%s.release(): final release", self)
+        else:
+            if __debug__:
+                self._note("%s.release(): non-final release", self)
+
+    # Internal methods used by condition variables
+
+    def _acquire_restore(self, (count, owner)):
+        # Re-acquire the lock and restore the (depth, owner) state that
+        # _release_save() returned.  (Tuple parameter is Python 2 syntax.)
+        self.__block.acquire()
+        self.__count = count
+        self.__owner = owner
+        if __debug__:
+            self._note("%s._acquire_restore()", self)
+
+    def _release_save(self):
+        # Fully release the lock regardless of recursion depth and return
+        # the state needed to restore it later.
+        if __debug__:
+            self._note("%s._release_save()", self)
+        count = self.__count
+        self.__count = 0
+        owner = self.__owner
+        self.__owner = None
+        self.__block.release()
+        return (count, owner)
+
+    def _is_owned(self):
+        return self.__owner is currentThread()
+
+
+def Condition(*args, **kwargs):
+    # Factory function: keeps the _Condition class itself private.
+    return apply(_Condition, args, kwargs)
+
+class _Condition(_Verbose):
+    # Condition variable layered on top of a lock (an RLock by default).
+
+    def __init__(self, lock=None, verbose=None):
+        _Verbose.__init__(self, verbose)
+        if lock is None:
+            lock = RLock()
+        self.__lock = lock
+        # Export the lock's acquire() and release() methods
+        self.acquire = lock.acquire
+        self.release = lock.release
+        # If the lock defines _release_save() and/or _acquire_restore(),
+        # these override the default implementations (which just call
+        # release() and acquire() on the lock).  Ditto for _is_owned().
+        try:
+            self._release_save = lock._release_save
+        except AttributeError:
+            pass
+        try:
+            self._acquire_restore = lock._acquire_restore
+        except AttributeError:
+            pass
+        try:
+            self._is_owned = lock._is_owned
+        except AttributeError:
+            pass
+        self.__waiters = []               # one waiting lock per waiting thread
+
+    def __repr__(self):
+        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
+
+    def _release_save(self):
+        self.__lock.release()           # No state to save
+
+    def _acquire_restore(self, x):
+        self.__lock.acquire()           # Ignore saved state
+
+    def _is_owned(self):
+        # Heuristic for plain locks: if a non-blocking acquire succeeds,
+        # nobody (including us) held the lock, so we did not own it.
+        if self.__lock.acquire(0):
+            self.__lock.release()
+            return 0
+        else:
+            return 1
+
+    def wait(self, timeout=None):
+        # NOTE(review): `me` is assigned but never used in this method.
+        me = currentThread()
+        assert self._is_owned(), "wait() of un-acquire()d lock"
+        # Each waiter blocks on its own freshly-acquired lock until a
+        # notify() releases that lock.
+        waiter = _allocate_lock()
+        waiter.acquire()
+        self.__waiters.append(waiter)
+        saved_state = self._release_save()
+        try:    # restore state no matter what (e.g., KeyboardInterrupt)
+            if timeout is None:
+                waiter.acquire()
+                if __debug__:
+                    self._note("%s.wait(): got it", self)
+            else:
+                # Balancing act:  We can't afford a pure busy loop, so we
+                # have to sleep; but if we sleep the whole timeout time,
+                # we'll be unresponsive.  The scheme here sleeps very
+                # little at first, longer as time goes on, but never longer
+                # than 20 times per second (or the timeout time remaining).
+                endtime = _time() + timeout
+                delay = 0.0005 # 500 us -> initial delay of 1 ms
+                while 1:
+                    gotit = waiter.acquire(0)
+                    if gotit:
+                        break
+                    remaining = endtime - _time()
+                    if remaining <= 0:
+                        break
+                    delay = min(delay * 2, remaining, .05)
+                    _sleep(delay)
+                if not gotit:
+                    if __debug__:
+                        self._note("%s.wait(%s): timed out", self, timeout)
+                    # Timed out: take our waiter back out of the list.
+                    try:
+                        self.__waiters.remove(waiter)
+                    except ValueError:
+                        pass
+                else:
+                    if __debug__:
+                        self._note("%s.wait(%s): got it", self, timeout)
+        finally:
+            self._acquire_restore(saved_state)
+
+    def notify(self, n=1):
+        # NOTE(review): `me` is assigned but never used in this method.
+        me = currentThread()
+        assert self._is_owned(), "notify() of un-acquire()d lock"
+        __waiters = self.__waiters
+        waiters = __waiters[:n]
+        if not waiters:
+            if __debug__:
+                self._note("%s.notify(): no waiters", self)
+            return
+        # NOTE(review): unlike the other calls in this class, this _note()
+        # is not guarded by `if __debug__`.
+        self._note("%s.notify(): notifying %d waiter%s", self, n,
+                   n!=1 and "s" or "")
+        # Wake up to n waiters by releasing the locks they block on.
+        for waiter in waiters:
+            waiter.release()
+            try:
+                __waiters.remove(waiter)
+            except ValueError:
+                pass
+
+    def notifyAll(self):
+        self.notify(len(self.__waiters))
+
+
+def Semaphore(*args, **kwargs):
+    # Factory function: keeps the _Semaphore class itself private.
+    return apply(_Semaphore, args, kwargs)
+
+class _Semaphore(_Verbose):
+
+    # After Tim Peters' semaphore class, but not quite the same (no maximum)
+
+    def __init__(self, value=1, verbose=None):
+        assert value >= 0, "Semaphore initial value must be >= 0"
+        _Verbose.__init__(self, verbose)
+        self.__cond = Condition(Lock())
+        self.__value = value              # current counter
+
+    def acquire(self, blocking=1):
+        rc = 0
+        self.__cond.acquire()
+        while self.__value == 0:
+            if not blocking:
+                break
+            if __debug__:
+                self._note("%s.acquire(%s): blocked waiting, value=%s",
+                           self, blocking, self.__value)
+            self.__cond.wait()
+        else:
+            # while/else: runs only when the loop exited because the
+            # counter is positive (not via the non-blocking `break`).
+            self.__value = self.__value - 1
+            if __debug__:
+                self._note("%s.acquire: success, value=%s",
+                           self, self.__value)
+            rc = 1
+        self.__cond.release()
+        return rc
+
+    def release(self):
+        # Bump the counter and wake one waiter, if any.
+        self.__cond.acquire()
+        self.__value = self.__value + 1
+        if __debug__:
+            self._note("%s.release: success, value=%s",
+                       self, self.__value)
+        self.__cond.notify()
+        self.__cond.release()
+
+
+def BoundedSemaphore(*args, **kwargs):
+    # Factory function: keeps the _BoundedSemaphore class itself private.
+    return apply(_BoundedSemaphore, args, kwargs)
+
+class _BoundedSemaphore(_Semaphore):
+    """Semaphore that checks that # releases is <= # acquires"""
+    def __init__(self, value=1, verbose=None):
+        _Semaphore.__init__(self, value, verbose)
+        self._initial_value = value
+
+    def release(self):
+        # Name-mangled access to the base class's private counter.
+        if self._Semaphore__value >= self._initial_value:
+            raise ValueError, "Semaphore released too many times"
+        return _Semaphore.release(self)
+
+
+def Event(*args, **kwargs):
+    # Factory function: keeps the _Event class itself private.
+    return apply(_Event, args, kwargs)
+
+class _Event(_Verbose):
+
+    # After Tim Peters' event class (without is_posted())
+
+    def __init__(self, verbose=None):
+        _Verbose.__init__(self, verbose)
+        self.__cond = Condition(Lock())
+        self.__flag = 0                   # 0 = cleared, 1 = set
+
+    def isSet(self):
+        return self.__flag
+
+    def set(self):
+        # Set the flag and wake every thread blocked in wait().
+        self.__cond.acquire()
+        try:
+            self.__flag = 1
+            self.__cond.notifyAll()
+        finally:
+            self.__cond.release()
+
+    def clear(self):
+        self.__cond.acquire()
+        try:
+            self.__flag = 0
+        finally:
+            self.__cond.release()
+
+    def wait(self, timeout=None):
+        # Returns immediately if the flag is already set.
+        self.__cond.acquire()
+        try:
+            if not self.__flag:
+                self.__cond.wait(timeout)
+        finally:
+            self.__cond.release()
+
+# Helper to generate new thread names
+_counter = 0
+def _newname(template="Thread-%d"):
+    # NOTE(review): the increment is not protected by a lock, so two
+    # threads constructing Thread objects at once could draw the same
+    # number.
+    global _counter
+    _counter = _counter + 1
+    return template % _counter
+
+# Active thread administration
+_active_limbo_lock = _allocate_lock()
+_active = {}          # thread id (from _get_ident()) -> running Thread
+_limbo = {}           # Thread -> Thread, for started-but-not-yet-running
+
+
+# Main class for threads
+
+class Thread(_Verbose):
+    # Java-style thread object: construct, then start(); run() executes
+    # the `target` callable in the new thread.
+
+    __initialized = 0      # class-level default, in case __init__ was skipped
+
+    def __init__(self, group=None, target=None, name=None,
+                 args=(), kwargs={}, verbose=None):
+        # NOTE(review): kwargs={} is a shared mutable default; harmless
+        # here because it is only read (passed to apply() in run()).
+        assert group is None, "group argument must be None for now"
+        _Verbose.__init__(self, verbose)
+        self.__target = target
+        self.__name = str(name or _newname())
+        self.__args = args
+        self.__kwargs = kwargs
+        self.__daemonic = self._set_daemon()
+        self.__started = 0
+        self.__stopped = 0
+        self.__block = Condition(Lock())   # signalled by __stop() for join()
+        self.__initialized = 1
+
+    def _set_daemon(self):
+        # Overridden in _MainThread and _DummyThread
+        return currentThread().isDaemon()
+
+    def __repr__(self):
+        assert self.__initialized, "Thread.__init__() was not called"
+        status = "initial"
+        if self.__started:
+            status = "started"
+        if self.__stopped:
+            status = "stopped"
+        if self.__daemonic:
+            status = status + " daemon"
+        return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
+
+    def start(self):
+        assert self.__initialized, "Thread.__init__() not called"
+        assert not self.__started, "thread already started"
+        if __debug__:
+            self._note("%s.start(): starting thread", self)
+        # Park the thread in _limbo until __bootstrap() moves it to _active.
+        _active_limbo_lock.acquire()
+        _limbo[self] = self
+        _active_limbo_lock.release()
+        _start_new_thread(self.__bootstrap, ())
+        # __started is also set (first) inside __bootstrap in the new thread.
+        self.__started = 1
+        _sleep(0.000001)    # 1 usec, to let the thread run (Solaris hack)
+
+    def run(self):
+        # Default behaviour: call the target passed to the constructor.
+        if self.__target:
+            apply(self.__target, self.__args, self.__kwargs)
+
+    def __bootstrap(self):
+        # First code executed in the new thread: bookkeeping, run(), and
+        # reporting of any exception that escapes run().
+        try:
+            self.__started = 1
+            _active_limbo_lock.acquire()
+            _active[_get_ident()] = self
+            del _limbo[self]
+            _active_limbo_lock.release()
+            if __debug__:
+                self._note("%s.__bootstrap(): thread started", self)
+            try:
+                self.run()
+            except SystemExit:
+                if __debug__:
+                    self._note("%s.__bootstrap(): raised SystemExit", self)
+            except:
+                if __debug__:
+                    self._note("%s.__bootstrap(): unhandled exception", self)
+                # Render the traceback into a string so it can be written
+                # to stderr in a single call.
+                s = _StringIO()
+                _print_exc(file=s)
+                _sys.stderr.write("Exception in thread %s:\n%s\n" %
+                                 (self.getName(), s.getvalue()))
+            else:
+                if __debug__:
+                    self._note("%s.__bootstrap(): normal return", self)
+        finally:
+            self.__stop()
+            try:
+                self.__delete()
+            except:
+                pass
+
+    def __stop(self):
+        # Mark the thread stopped and wake any join()ers.
+        self.__block.acquire()
+        self.__stopped = 1
+        self.__block.notifyAll()
+        self.__block.release()
+
+    def __delete(self):
+        # Remove this thread from the _active registry.
+        _active_limbo_lock.acquire()
+        del _active[_get_ident()]
+        _active_limbo_lock.release()
+
+    def join(self, timeout=None):
+        assert self.__initialized, "Thread.__init__() not called"
+        assert self.__started, "cannot join thread before it is started"
+        assert self is not currentThread(), "cannot join current thread"
+        if __debug__:
+            if not self.__stopped:
+                self._note("%s.join(): waiting until thread stops", self)
+        self.__block.acquire()
+        if timeout is None:
+            while not self.__stopped:
+                self.__block.wait()
+            if __debug__:
+                self._note("%s.join(): thread stopped", self)
+        else:
+            deadline = _time() + timeout
+            while not self.__stopped:
+                delay = deadline - _time()
+                if delay <= 0:
+                    if __debug__:
+                        self._note("%s.join(): timed out", self)
+                    break
+                self.__block.wait(delay)
+            else:
+                # while/else: only reached when the thread stopped before
+                # the deadline (loop exited without break).
+                if __debug__:
+                    self._note("%s.join(): thread stopped", self)
+        self.__block.release()
+
+    def getName(self):
+        assert self.__initialized, "Thread.__init__() not called"
+        return self.__name
+
+    def setName(self, name):
+        assert self.__initialized, "Thread.__init__() not called"
+        self.__name = str(name)
+
+    def isAlive(self):
+        assert self.__initialized, "Thread.__init__() not called"
+        return self.__started and not self.__stopped
+
+    def isDaemon(self):
+        assert self.__initialized, "Thread.__init__() not called"
+        return self.__daemonic
+
+    def setDaemon(self, daemonic):
+        assert self.__initialized, "Thread.__init__() not called"
+        assert not self.__started, "cannot set daemon status of active thread"
+        self.__daemonic = daemonic
+
+# The timer class was contributed by Itamar Shtull-Trauring
+
+def Timer(*args, **kwargs):
+    # Factory function: keeps the _Timer class itself private.
+    return _Timer(*args, **kwargs)
+
+class _Timer(Thread):
+    """Call a function after a specified number of seconds:
+
+    t = Timer(30.0, f, args=[], kwargs={})
+    t.start()
+    t.cancel() # stop the timer's action if it's still waiting
+    """
+
+    def __init__(self, interval, function, args=[], kwargs={}):
+        Thread.__init__(self)
+        self.interval = interval
+        self.function = function
+        self.args = args
+        self.kwargs = kwargs
+        self.finished = Event()   # set by cancel() or at the end of run()
+
+    def cancel(self):
+        """Stop the timer if it hasn't finished yet"""
+        self.finished.set()
+
+    def run(self):
+        # Wait out the interval; fire only if cancel() didn't set the event.
+        self.finished.wait(self.interval)
+        if not self.finished.isSet():
+            self.function(*self.args, **self.kwargs)
+        self.finished.set()
+
+# Special thread class to represent the main thread
+# This is garbage collected through an exit handler
+
+class _MainThread(Thread):
+    # Thread object representing the interpreter's main thread; registers
+    # an atexit handler that waits for non-daemon threads before exit.
+
+    def __init__(self):
+        Thread.__init__(self, name="MainThread")
+        self._Thread__started = 1      # name-mangled access to base state
+        _active_limbo_lock.acquire()
+        _active[_get_ident()] = self
+        _active_limbo_lock.release()
+        import atexit
+        atexit.register(self.__exitfunc)
+
+    def _set_daemon(self):
+        return 0
+
+    def __exitfunc(self):
+        self._Thread__stop()
+        # Keep joining until no live non-daemon thread is left.
+        t = _pickSomeNonDaemonThread()
+        if t:
+            if __debug__:
+                self._note("%s: waiting for other threads", self)
+        while t:
+            t.join()
+            t = _pickSomeNonDaemonThread()
+        if __debug__:
+            self._note("%s: exiting", self)
+        self._Thread__delete()
+
+def _pickSomeNonDaemonThread():
+    # Any live non-daemon thread will do; order does not matter.
+    for t in enumerate():
+        if not t.isDaemon() and t.isAlive():
+            return t
+    return None
+
+
+# Dummy thread class to represent threads not started here.
+# These aren't garbage collected when they die,
+# nor can they be waited for.
+# Their purpose is to return *something* from currentThread().
+# They are marked as daemon threads so we won't wait for them
+# when we exit (conform previous semantics).
+
+class _DummyThread(Thread):
+    # Stand-in for threads not started through this module (see the
+    # comment block above): daemonic, registered in _active, not joinable.
+
+    def __init__(self):
+        Thread.__init__(self, name=_newname("Dummy-%d"))
+        self._Thread__started = 1      # name-mangled access to base state
+        _active_limbo_lock.acquire()
+        _active[_get_ident()] = self
+        _active_limbo_lock.release()
+
+    def _set_daemon(self):
+        return 1
+
+    def join(self, timeout=None):
+        assert 0, "cannot join a dummy thread"
+
+
+# Global API functions
+
+def currentThread():
+    """Return the Thread object for the calling thread."""
+    try:
+        return _active[_get_ident()]
+    except KeyError:
+        # Thread was not started via this module: wrap it in a dummy.
+        ##print "currentThread(): no current thread for", _get_ident()
+        return _DummyThread()
+
+def activeCount():
+    """Return the number of Thread objects currently alive."""
+    _active_limbo_lock.acquire()
+    count = len(_active) + len(_limbo)
+    _active_limbo_lock.release()
+    return count
+
+def enumerate():
+    """Return a list of all Thread objects currently alive."""
+    # NOTE(review): shadows the `enumerate` builtin (added in Python 2.3).
+    _active_limbo_lock.acquire()
+    active = _active.values() + _limbo.values()
+    _active_limbo_lock.release()
+    return active
+
+
+# Create the main thread object
+
+_MainThread()
+
+
+# Self-test code
+
+def _test():
+    # Self-test: NP producer threads feed a bounded queue that a single
+    # consumer drains; exercises RLock, Condition and Thread together.
+
+    class BoundedQueue(_Verbose):
+
+        def __init__(self, limit):
+            _Verbose.__init__(self)
+            self.mon = RLock()
+            self.rc = Condition(self.mon)   # signalled when queue non-empty
+            self.wc = Condition(self.mon)   # signalled when queue below limit
+            self.limit = limit
+            self.queue = []
+
+        def put(self, item):
+            self.mon.acquire()
+            while len(self.queue) >= self.limit:
+                self._note("put(%s): queue full", item)
+                self.wc.wait()
+            self.queue.append(item)
+            self._note("put(%s): appended, length now %d",
+                       item, len(self.queue))
+            self.rc.notify()
+            self.mon.release()
+
+        def get(self):
+            self.mon.acquire()
+            while not self.queue:
+                self._note("get(): queue empty")
+                self.rc.wait()
+            item = self.queue[0]
+            del self.queue[0]
+            self._note("get(): got %s, %d left", item, len(self.queue))
+            self.wc.notify()
+            self.mon.release()
+            return item
+
+    class ProducerThread(Thread):
+
+        def __init__(self, queue, quota):
+            Thread.__init__(self, name="Producer")
+            self.queue = queue
+            self.quota = quota
+
+        def run(self):
+            from random import random
+            counter = 0
+            while counter < self.quota:
+                counter = counter + 1
+                self.queue.put("%s.%d" % (self.getName(), counter))
+                _sleep(random() * 0.00001)
+
+
+    class ConsumerThread(Thread):
+
+        def __init__(self, queue, count):
+            Thread.__init__(self, name="Consumer")
+            self.queue = queue
+            self.count = count
+
+        def run(self):
+            while self.count > 0:
+                item = self.queue.get()
+                print item
+                self.count = self.count - 1
+
+    NP = 3                      # number of producers
+    QL = 4                      # queue capacity
+    NI = 5                      # items per producer
+
+    Q = BoundedQueue(QL)
+    P = []
+    for i in range(NP):
+        t = ProducerThread(Q, NI)
+        t.setName("Producer-%d" % (i+1))
+        P.append(t)
+    # One consumer must drain everything the producers put in.
+    C = ConsumerThread(Q, NI*NP)
+    for t in P:
+        t.start()
+        _sleep(0.000001)
+    C.start()
+    for t in P:
+        t.join()
+    C.join()
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/toaiff.py b/lib-python/2.2/toaiff.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/toaiff.py
@@ -0,0 +1,106 @@
+"""Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format).
+
+Input may be compressed.
+Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
+An exception is raised if the file is not of a recognized type.
+Returned filename is either the input filename or a temporary filename;
+in the latter case the caller must ensure that it is removed.
+Other temporary files used are removed by the function.
+"""
+
+import os
+import tempfile
+import pipes
+import sndhdr
+
+__all__ = ["error", "toaiff"]
+
+# Map uncompressed sound-file type (as reported by sndhdr) to a
+# pipes.Template that converts stdin of that type to AIFF on stdout.
+table = {}
+
+t = pipes.Template()
+t.append('sox -t au - -t aiff -r 8000 -', '--')
+table['au'] = t
+
+# XXX The following is actually sub-optimal.
+# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
+# XXX We must force the output sampling rate else the SGI won't play
+# XXX files sampled at 5.5k or 7.333k; however this means that files
+# XXX sampled at 11k are unnecessarily expanded.
+# XXX Similar comments apply to some other file types.
+t = pipes.Template()
+t.append('sox -t hcom - -t aiff -r 22050 -', '--')
+table['hcom'] = t
+
+t = pipes.Template()
+t.append('sox -t voc - -t aiff -r 11025 -', '--')
+table['voc'] = t
+
+t = pipes.Template()
+t.append('sox -t wav - -t aiff -', '--')
+table['wav'] = t
+
+t = pipes.Template()
+t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
+table['8svx'] = t
+
+t = pipes.Template()
+t.append('sox -t sndt - -t aiff -r 16000 -', '--')
+table['sndt'] = t
+
+t = pipes.Template()
+t.append('sox -t sndr - -t aiff -r 16000 -', '--')
+table['sndr'] = t
+
+# Pipeline used to uncompress a .Z input file.
+uncompress = pipes.Template()
+uncompress.append('uncompress', '--')
+
+
+class error(Exception):
+    """Raised for unrecognized or unconvertible input files."""
+    pass
+
+def toaiff(filename):
+    """Convert `filename` to AIFF and return the result's filename.
+
+    Any temporary file created along the way, other than the one
+    returned, is removed before returning -- even on error.
+    """
+    temps = []
+    ret = None
+    try:
+        ret = _toaiff(filename, temps)
+    finally:
+        # Clean up every intermediate file except the one we return.
+        for temp in temps[:]:
+            if temp != ret:
+                try:
+                    os.unlink(temp)
+                except os.error:
+                    pass
+                temps.remove(temp)
+    return ret
+
+def _toaiff(filename, temps):
+    # Worker for toaiff(): appends every temp file it creates to `temps`
+    # so the caller can clean them up.
+    if filename[-2:] == '.Z':
+        # Compressed input: uncompress into a temp file first.
+        fname = tempfile.mktemp()
+        temps.append(fname)
+        sts = uncompress.copy(filename, fname)
+        if sts:
+            raise error, filename + ': uncompress failed'
+    else:
+        fname = filename
+    try:
+        ftype = sndhdr.whathdr(fname)
+        if ftype:
+            ftype = ftype[0] # All we're interested in
+    except IOError, msg:
+        # Massage the IOError payload into a plain string for the message.
+        if type(msg) == type(()) and len(msg) == 2 and \
+                type(msg[0]) == type(0) and type(msg[1]) == type(''):
+            msg = msg[1]
+        if type(msg) != type(''):
+            msg = `msg`
+        raise error, filename + ': ' + msg
+    if ftype == 'aiff':
+        # Already AIFF: return the input (or uncompressed temp) file as is.
+        return fname
+    if ftype is None or not table.has_key(ftype):
+        raise error, \
+                filename + ': unsupported audio file type ' + `ftype`
+    # Run the sox pipeline for this file type into a fresh temp file.
+    temp = tempfile.mktemp()
+    temps.append(temp)
+    sts = table[ftype].copy(fname, temp)
+    if sts:
+        raise error, filename + ': conversion to aiff failed'
+    return temp
diff --git a/lib-python/2.2/token.py b/lib-python/2.2/token.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/token.py
@@ -0,0 +1,140 @@
+#! /usr/bin/env python
+
+"""Token constants (from "token.h")."""
+
+#  This file is automatically generated; please don't muck it up!
+#
+#  To update the symbols in this file, 'cd' to the top directory of
+#  the python source tree after building the interpreter and run:
+#
+#    python Lib/token.py
+
+#--start constants--
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+BACKQUOTE = 25
+LBRACE = 26
+RBRACE = 27
+EQEQUAL = 28
+NOTEQUAL = 29
+LESSEQUAL = 30
+GREATEREQUAL = 31
+TILDE = 32
+CIRCUMFLEX = 33
+LEFTSHIFT = 34
+RIGHTSHIFT = 35
+DOUBLESTAR = 36
+PLUSEQUAL = 37
+MINEQUAL = 38
+STAREQUAL = 39
+SLASHEQUAL = 40
+PERCENTEQUAL = 41
+AMPEREQUAL = 42
+VBAREQUAL = 43
+CIRCUMFLEXEQUAL = 44
+LEFTSHIFTEQUAL = 45
+RIGHTSHIFTEQUAL = 46
+DOUBLESTAREQUAL = 47
+DOUBLESLASH = 48
+DOUBLESLASHEQUAL = 49
+OP = 50
+ERRORTOKEN = 51
+N_TOKENS = 52
+NT_OFFSET = 256
+#--end constants--
+
+# Reverse mapping: token value -> token name, built from the integer
+# constants defined in the generated section above.
+tok_name = {}
+for _name, _value in globals().items():
+    if type(_value) is type(0):
+        tok_name[_value] = _name
+
+
+def ISTERMINAL(x):
+    # Terminal (token) values lie below NT_OFFSET.
+    return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+    # Grammar non-terminal values start at NT_OFFSET.
+    return x >= NT_OFFSET
+
+def ISEOF(x):
+    return x == ENDMARKER
+
+
+def main():
+    # Regenerate the "#--start/end constants--" section of Lib/token.py
+    # from Include/token.h (or the files named on the command line).
+    import re
+    import sys
+    args = sys.argv[1:]
+    inFileName = args and args[0] or "Include/token.h"
+    outFileName = "Lib/token.py"
+    if len(args) > 1:
+        outFileName = args[1]
+    try:
+        fp = open(inFileName)
+    except IOError, err:
+        sys.stdout.write("I/O error: %s\n" % str(err))
+        sys.exit(1)
+    lines = fp.read().split("\n")
+    fp.close()
+    # Match C lines of the form "#define NAME 12".
+    prog = re.compile(
+        "#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
+        re.IGNORECASE)
+    tokens = {}
+    for line in lines:
+        match = prog.match(line)
+        if match:
+            name, val = match.group(1, 2)
+            val = int(val)
+            tokens[val] = name          # reverse so we can sort them...
+    keys = tokens.keys()
+    keys.sort()
+    # load the output skeleton from the target:
+    try:
+        fp = open(outFileName)
+    except IOError, err:
+        sys.stderr.write("I/O error: %s\n" % str(err))
+        sys.exit(2)
+    format = fp.read().split("\n")
+    fp.close()
+    try:
+        start = format.index("#--start constants--") + 1
+        end = format.index("#--end constants--")
+    except ValueError:
+        sys.stderr.write("target does not contain format markers")
+        sys.exit(3)
+    lines = []
+    for val in keys:
+        lines.append("%s = %d" % (tokens[val], val))
+    # Splice the regenerated constants between the markers and rewrite.
+    format[start:end] = lines
+    try:
+        fp = open(outFileName, 'w')
+    except IOError, err:
+        sys.stderr.write("I/O error: %s\n" % str(err))
+        sys.exit(4)
+    fp.write("\n".join(format))
+    fp.close()
diff --git a/lib-python/2.2/tokenize.py b/lib-python/2.2/tokenize.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/tokenize.py
@@ -0,0 +1,287 @@
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens.  It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF).  It generates
+5-tuples with these members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators
+
+Older entry points
+    tokenize_loop(readline, tokeneater)
+    tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
+
+from __future__ import generators
+
+__author__ = 'Ka-Ping Yee <ping at lfw.org>'
+__credits__ = \
+    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
+
+import string, re
+from token import *
+
+import token
+__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
+           "generate_tokens", "NL"]
+del token
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+N_TOKENS += 2
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return apply(group, choices) + '*'
+def maybe(*choices): return apply(group, choices) + '?'
+
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'[a-zA-Z_]\w*'
+
+Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+Octnumber = r'0[0-7]*[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
+Intnumber = group(Hexnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
+# Single-line ' or " string.
+String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+                 r"//=?",
+                 r"[+\-*/%&|^=<>]=?",
+                 r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'[:;.,`]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+                group("'", r'\\\r?\n'),
+                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+    re.compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+            "'''": single3prog, '"""': double3prog,
+            "r'''": single3prog, 'r"""': double3prog,
+            "u'''": single3prog, 'u"""': double3prog,
+            "ur'''": single3prog, 'ur"""': double3prog,
+            "R'''": single3prog, 'R"""': double3prog,
+            "U'''": single3prog, 'U"""': double3prog,
+            "uR'''": single3prog, 'uR"""': double3prog,
+            "Ur'''": single3prog, 'Ur"""': double3prog,
+            "UR'''": single3prog, 'UR"""': double3prog,
+            'r': None, 'R': None, 'u': None, 'U': None}
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+    print "%d,%d-%d,%d:\t%s\t%s" % \
+        (srow, scol, erow, ecol, tok_name[type], repr(token))
+
+def tokenize(readline, tokeneater=printtoken):
+    """
+    The tokenize() function accepts two parameters: one representing the
+    input stream, and one providing an output mechanism for tokenize().
+
+    The first parameter, readline, must be a callable object which provides
+    the same interface as the readline() method of built-in file objects.
+    Each call to the function should return one line of input as a string.
+
+    The second parameter, tokeneater, must also be a callable object. It is
+    called once for each token, with five arguments, corresponding to the
+    tuples generated by generate_tokens().
+    """
+    try:
+        tokenize_loop(readline, tokeneater)
+    except StopTokenizing:
+        pass
+
+# backwards compatible interface
+def tokenize_loop(readline, tokeneater):
+    for token_info in generate_tokens(readline):
+        apply(tokeneater, token_info)
+
+def generate_tokens(readline):
+    """
+    The generate_tokens() generator requires one argument, readline, which
+    must be a callable object which provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as a string.
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
+    """
+    lnum = parenlev = continued = 0
+    namechars, numchars = string.ascii_letters + '_', '0123456789'
+    contstr, needcont = '', 0
+    contline = None
+    indents = [0]
+
+    while 1:                                   # loop over lines in stream
+        line = readline()
+        lnum = lnum + 1
+        pos, max = 0, len(line)
+
+        if contstr:                            # continued string
+            if not line:
+                raise TokenError, ("EOF in multi-line string", strstart)
+            endmatch = endprog.match(line)
+            if endmatch:
+                pos = end = endmatch.end(0)
+                yield (STRING, contstr + line[:end],
+                           strstart, (lnum, end), contline + line)
+                contstr, needcont = '', 0
+                contline = None
+            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+                yield (ERRORTOKEN, contstr + line,
+                           strstart, (lnum, len(line)), contline)
+                contstr = ''
+                contline = None
+                continue
+            else:
+                contstr = contstr + line
+                contline = contline + line
+                continue
+
+        elif parenlev == 0 and not continued:  # new statement
+            if not line: break
+            column = 0
+            while pos < max:                   # measure leading whitespace
+                if line[pos] == ' ': column = column + 1
+                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+                elif line[pos] == '\f': column = 0
+                else: break
+                pos = pos + 1
+            if pos == max: break
+
+            if line[pos] in '#\r\n':           # skip comments or blank lines
+                yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+                           (lnum, pos), (lnum, len(line)), line)
+                continue
+
+            if column > indents[-1]:           # count indents or dedents
+                indents.append(column)
+                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+            while column < indents[-1]:
+                indents = indents[:-1]
+                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+        else:                                  # continued statement
+            if not line:
+                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+            continued = 0
+
+        while pos < max:
+            pseudomatch = pseudoprog.match(line, pos)
+            if pseudomatch:                                # scan for tokens
+                start, end = pseudomatch.span(1)
+                spos, epos, pos = (lnum, start), (lnum, end), end
+                token, initial = line[start:end], line[start]
+
+                if initial in numchars or \
+                   (initial == '.' and token != '.'):      # ordinary number
+                    yield (NUMBER, token, spos, epos, line)
+                elif initial in '\r\n':
+                    yield (parenlev > 0 and NL or NEWLINE,
+                               token, spos, epos, line)
+                elif initial == '#':
+                    yield (COMMENT, token, spos, epos, line)
+                elif token in ("'''", '"""',               # triple-quoted
+                               "r'''", 'r"""', "R'''", 'R"""',
+                               "u'''", 'u"""', "U'''", 'U"""',
+                               "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+                               "uR'''", 'uR"""', "UR'''", 'UR"""'):
+                    endprog = endprogs[token]
+                    endmatch = endprog.match(line, pos)
+                    if endmatch:                           # all on one line
+                        pos = endmatch.end(0)
+                        token = line[start:pos]
+                        yield (STRING, token, spos, (lnum, pos), line)
+                    else:
+                        strstart = (lnum, start)           # multiple lines
+                        contstr = line[start:]
+                        contline = line
+                        break
+                elif initial in ("'", '"') or \
+                    token[:2] in ("r'", 'r"', "R'", 'R"',
+                                  "u'", 'u"', "U'", 'U"') or \
+                    token[:3] in ("ur'", 'ur"', "Ur'", 'Ur"',
+                                  "uR'", 'uR"', "UR'", 'UR"' ):
+                    if token[-1] == '\n':                  # continued string
+                        strstart = (lnum, start)
+                        endprog = (endprogs[initial] or endprogs[token[1]] or
+                                   endprogs[token[2]])
+                        contstr, needcont = line[start:], 1
+                        contline = line
+                        break
+                    else:                                  # ordinary string
+                        yield (STRING, token, spos, epos, line)
+                elif initial in namechars:                 # ordinary name
+                    yield (NAME, token, spos, epos, line)
+                elif initial == '\\':                      # continued stmt
+                    continued = 1
+                else:
+                    if initial in '([{': parenlev = parenlev + 1
+                    elif initial in ')]}': parenlev = parenlev - 1
+                    yield (OP, token, spos, epos, line)
+            else:
+                yield (ERRORTOKEN, line[pos],
+                           (lnum, pos), (lnum, pos+1), line)
+                pos = pos + 1
+
+    for indent in indents[1:]:                 # pop remaining indent levels
+        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+if __name__ == '__main__':                     # testing
+    import sys
+    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
+    else: tokenize(sys.stdin.readline)
diff --git a/lib-python/2.2/traceback.py b/lib-python/2.2/traceback.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/traceback.py
@@ -0,0 +1,301 @@
+"""Extract, format and print information about Python stack traces."""
+
+import linecache
+import sys
+import types
+
+__all__ = ['extract_stack', 'extract_tb', 'format_exception',
+           'format_exception_only', 'format_list', 'format_stack',
+           'format_tb', 'print_exc', 'print_exception', 'print_last',
+           'print_stack', 'print_tb', 'tb_lineno']
+
+def _print(file, str='', terminator='\n'):
+    file.write(str+terminator)
+
+
+def print_list(extracted_list, file=None):
+    """Print the list of tuples as returned by extract_tb() or
+    extract_stack() as a formatted stack trace to the given file."""
+    if not file:
+        file = sys.stderr
+    for filename, lineno, name, line in extracted_list:
+        _print(file,
+               '  File "%s", line %d, in %s' % (filename,lineno,name))
+        if line:
+            _print(file, '    %s' % line.strip())
+
+def format_list(extracted_list):
+    """Format a list of traceback entry tuples for printing.
+
+    Given a list of tuples as returned by extract_tb() or
+    extract_stack(), return a list of strings ready for printing.
+    Each string in the resulting list corresponds to the item with the
+    same index in the argument list.  Each string ends in a newline;
+    the strings may contain internal newlines as well, for those items
+    whose source text line is not None.
+    """
+    list = []
+    for filename, lineno, name, line in extracted_list:
+        item = '  File "%s", line %d, in %s\n' % (filename,lineno,name)
+        if line:
+            item = item + '    %s\n' % line.strip()
+        list.append(item)
+    return list
+
+
+def print_tb(tb, limit=None, file=None):
+    """Print up to 'limit' stack trace entries from the traceback 'tb'.
+
+    If 'limit' is omitted or None, all entries are printed.  If 'file'
+    is omitted or None, the output goes to sys.stderr; otherwise
+    'file' should be an open file or file-like object with a write()
+    method.
+    """
+    if not file:
+        file = sys.stderr
+    if limit is None:
+        if hasattr(sys, 'tracebacklimit'):
+            limit = sys.tracebacklimit
+    n = 0
+    while tb is not None and (limit is None or n < limit):
+        f = tb.tb_frame
+        lineno = tb_lineno(tb)
+        co = f.f_code
+        filename = co.co_filename
+        name = co.co_name
+        _print(file,
+               '  File "%s", line %d, in %s' % (filename,lineno,name))
+        line = linecache.getline(filename, lineno)
+        if line: _print(file, '    ' + line.strip())
+        tb = tb.tb_next
+        n = n+1
+
+def format_tb(tb, limit = None):
+    """A shorthand for 'format_list(extract_tb(tb, limit))'."""
+    return format_list(extract_tb(tb, limit))
+
+def extract_tb(tb, limit = None):
+    """Return list of up to limit pre-processed entries from traceback.
+
+    This is useful for alternate formatting of stack traces.  If
+    'limit' is omitted or None, all entries are extracted.  A
+    pre-processed stack trace entry is a quadruple (filename, line
+    number, function name, text) representing the information that is
+    usually printed for a stack trace.  The text is a string with
+    leading and trailing whitespace stripped; if the source is not
+    available it is None.
+    """
+    if limit is None:
+        if hasattr(sys, 'tracebacklimit'):
+            limit = sys.tracebacklimit
+    list = []
+    n = 0
+    while tb is not None and (limit is None or n < limit):
+        f = tb.tb_frame
+        lineno = tb_lineno(tb)
+        co = f.f_code
+        filename = co.co_filename
+        name = co.co_name
+        line = linecache.getline(filename, lineno)
+        if line: line = line.strip()
+        else: line = None
+        list.append((filename, lineno, name, line))
+        tb = tb.tb_next
+        n = n+1
+    return list
+
+
+def print_exception(etype, value, tb, limit=None, file=None):
+    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
+
+    This differs from print_tb() in the following ways: (1) if
+    traceback is not None, it prints a header "Traceback (most recent
+    call last):"; (2) it prints the exception type and value after the
+    stack trace; (3) if type is SyntaxError and value has the
+    appropriate format, it prints the line where the syntax error
+    occurred with a caret on the next line indicating the approximate
+    position of the error.
+    """
+    if not file:
+        file = sys.stderr
+    if tb:
+        _print(file, 'Traceback (most recent call last):')
+        print_tb(tb, limit, file)
+    lines = format_exception_only(etype, value)
+    for line in lines[:-1]:
+        _print(file, line, ' ')
+    _print(file, lines[-1], '')
+
+def format_exception(etype, value, tb, limit = None):
+    """Format a stack trace and the exception information.
+
+    The arguments have the same meaning as the corresponding arguments
+    to print_exception().  The return value is a list of strings, each
+    ending in a newline and some containing internal newlines.  When
+    these lines are concatenated and printed, exactly the same text is
+    printed as does print_exception().
+    """
+    if tb:
+        list = ['Traceback (most recent call last):\n']
+        list = list + format_tb(tb, limit)
+    else:
+        list = []
+    list = list + format_exception_only(etype, value)
+    return list
+
+def format_exception_only(etype, value):
+    """Format the exception part of a traceback.
+
+    The arguments are the exception type and value such as given by
+    sys.last_type and sys.last_value. The return value is a list of
+    strings, each ending in a newline.  Normally, the list contains a
+    single string; however, for SyntaxError exceptions, it contains
+    several lines that (when printed) display detailed information
+    about where the syntax error occurred.  The message indicating
+    which exception occurred is always the last string in the list.
+    """
+    list = []
+    if type(etype) == types.ClassType:
+        stype = etype.__name__
+    else:
+        stype = etype
+    if value is None:
+        list.append(str(stype) + '\n')
+    else:
+        if etype is SyntaxError:
+            try:
+                msg, (filename, lineno, offset, line) = value
+            except:
+                pass
+            else:
+                if not filename: filename = "<string>"
+                list.append('  File "%s", line %d\n' %
+                            (filename, lineno))
+                if line is not None:
+                    i = 0
+                    while i < len(line) and line[i].isspace():
+                        i = i+1
+                    list.append('    %s\n' % line.strip())
+                    if offset is not None:
+                        s = '    '
+                        for c in line[i:offset-1]:
+                            if c.isspace():
+                                s = s + c
+                            else:
+                                s = s + ' '
+                        list.append('%s^\n' % s)
+                    value = msg
+        s = _some_str(value)
+        if s:
+            list.append('%s: %s\n' % (str(stype), s))
+        else:
+            list.append('%s\n' % str(stype))
+    return list
+
+def _some_str(value):
+    try:
+        return str(value)
+    except:
+        return '<unprintable %s object>' % type(value).__name__
+
+
+def print_exc(limit=None, file=None):
+    """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
+    (In fact, it uses sys.exc_info() to retrieve the same information
+    in a thread-safe way.)"""
+    if not file:
+        file = sys.stderr
+    try:
+        etype, value, tb = sys.exc_info()
+        print_exception(etype, value, tb, limit, file)
+    finally:
+        etype = value = tb = None
+
+def print_last(limit=None, file=None):
+    """This is a shorthand for 'print_exception(sys.last_type,
+    sys.last_value, sys.last_traceback, limit, file)'."""
+    if not file:
+        file = sys.stderr
+    print_exception(sys.last_type, sys.last_value, sys.last_traceback,
+                    limit, file)
+
+
+def print_stack(f=None, limit=None, file=None):
+    """Print a stack trace from its invocation point.
+
+    The optional 'f' argument can be used to specify an alternate
+    stack frame at which to start. The optional 'limit' and 'file'
+    arguments have the same meaning as for print_exception().
+    """
+    if f is None:
+        try:
+            raise ZeroDivisionError
+        except ZeroDivisionError:
+            f = sys.exc_info()[2].tb_frame.f_back
+    print_list(extract_stack(f, limit), file)
+
+def format_stack(f=None, limit=None):
+    """Shorthand for 'format_list(extract_stack(f, limit))'."""
+    if f is None:
+        try:
+            raise ZeroDivisionError
+        except ZeroDivisionError:
+            f = sys.exc_info()[2].tb_frame.f_back
+    return format_list(extract_stack(f, limit))
+
+def extract_stack(f=None, limit = None):
+    """Extract the raw traceback from the current stack frame.
+
+    The return value has the same format as for extract_tb().  The
+    optional 'f' and 'limit' arguments have the same meaning as for
+    print_stack().  Each item in the list is a quadruple (filename,
+    line number, function name, text), and the entries are in order
+    from oldest to newest stack frame.
+    """
+    if f is None:
+        try:
+            raise ZeroDivisionError
+        except ZeroDivisionError:
+            f = sys.exc_info()[2].tb_frame.f_back
+    if limit is None:
+        if hasattr(sys, 'tracebacklimit'):
+            limit = sys.tracebacklimit
+    list = []
+    n = 0
+    while f is not None and (limit is None or n < limit):
+        lineno = f.f_lineno     # XXX Too bad if -O is used
+        co = f.f_code
+        filename = co.co_filename
+        name = co.co_name
+        line = linecache.getline(filename, lineno)
+        if line: line = line.strip()
+        else: line = None
+        list.append((filename, lineno, name, line))
+        f = f.f_back
+        n = n+1
+    list.reverse()
+    return list
+
+def tb_lineno(tb):
+    """Calculate correct line number of traceback given in tb.
+
+    Even works with -O on.
+    """
+    # Coded by Marc-Andre Lemburg from the example of PyCode_Addr2Line()
+    # in compile.c.
+    # Revised version by Jim Hugunin to work with JPython too.
+
+    c = tb.tb_frame.f_code
+    if not hasattr(c, 'co_lnotab'):
+        return tb.tb_lineno
+
+    tab = c.co_lnotab
+    line = c.co_firstlineno
+    stopat = tb.tb_lasti
+    addr = 0
+    for i in range(0, len(tab), 2):
+        addr = addr + ord(tab[i])
+        if addr > stopat:
+            break
+        line = line + ord(tab[i+1])
+    return line
diff --git a/lib-python/2.2/tty.py b/lib-python/2.2/tty.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/tty.py
@@ -0,0 +1,36 @@
+"""Terminal utilities."""
+
+# Author: Steen Lumholt.
+
+from termios import *
+
+__all__ = ["setraw", "setcbreak"]
+
+# Indexes for termios list.
+IFLAG = 0
+OFLAG = 1
+CFLAG = 2
+LFLAG = 3
+ISPEED = 4
+OSPEED = 5
+CC = 6
+
+def setraw(fd, when=TCSAFLUSH):
+    """Put terminal into a raw mode."""
+    mode = tcgetattr(fd)
+    mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
+    mode[OFLAG] = mode[OFLAG] & ~(OPOST)
+    mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB)
+    mode[CFLAG] = mode[CFLAG] | CS8
+    mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG)
+    mode[CC][VMIN] = 1
+    mode[CC][VTIME] = 0
+    tcsetattr(fd, when, mode)
+
+def setcbreak(fd, when=TCSAFLUSH):
+    """Put terminal into a cbreak mode."""
+    mode = tcgetattr(fd)
+    mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON)
+    mode[CC][VMIN] = 1
+    mode[CC][VTIME] = 0
+    tcsetattr(fd, when, mode)
diff --git a/lib-python/2.2/types.py b/lib-python/2.2/types.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/types.py
@@ -0,0 +1,86 @@
+"""Define names for all type symbols known in the standard interpreter.
+
+Types that are part of optional modules (e.g. array) are not listed.
+"""
+from __future__ import generators
+
+import sys
+
+# Iterators in Python aren't a matter of type but of protocol.  A large
+# and changing number of builtin types implement *some* flavor of
+# iterator.  Don't check the type!  Use hasattr to check for both
+# "__iter__" and "next" attributes instead.
+
+NoneType = type(None)
+TypeType = type
+ObjectType = object
+
+IntType = int
+LongType = long
+FloatType = float
+try:
+    ComplexType = complex
+except NameError:
+    pass
+
+StringType = str
+try:
+    UnicodeType = unicode
+    StringTypes = (StringType, UnicodeType)
+except NameError:
+    StringTypes = (StringType,)
+
+BufferType = type(buffer(''))
+
+TupleType = tuple
+ListType = list
+DictType = DictionaryType = dict
+
+def _f(): pass
+FunctionType = type(_f)
+LambdaType = type(lambda: None)         # Same as FunctionType
+try:
+    CodeType = type(_f.func_code)
+except RuntimeError:
+    # Execution in restricted environment
+    pass
+
+def g():
+    yield 1
+GeneratorType = type(g())
+del g
+
+class _C:
+    def _m(self): pass
+ClassType = type(_C)
+UnboundMethodType = type(_C._m)         # Same as MethodType
+_x = _C()
+InstanceType = type(_x)
+MethodType = type(_x._m)
+
+BuiltinFunctionType = type(len)
+BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType
+
+ModuleType = type(sys)
+FileType = file
+XRangeType = type(xrange(0))
+
+try:
+    raise TypeError
+except TypeError:
+    try:
+        tb = sys.exc_info()[2]
+        TracebackType = type(tb)
+        FrameType = type(tb.tb_frame)
+    except AttributeError:
+        # In the restricted environment, exc_info returns (None, None,
+        # None) Then, tb.tb_frame gives an attribute error
+        pass
+    tb = None; del tb
+
+SliceType = type(slice(0))
+EllipsisType = type(Ellipsis)
+
+DictProxyType = type(TypeType.__dict__)
+
+del sys, _f, _C, _x, generators                  # Not for export
diff --git a/lib-python/2.2/tzparse.py b/lib-python/2.2/tzparse.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/tzparse.py
@@ -0,0 +1,98 @@
+"""Parse a timezone specification."""
+
+# XXX Unfinished.
+# XXX Only the typical form "XXXhhYYY;ddd/hh,ddd/hh" is currently supported.
+
+import warnings
+warnings.warn(
+    "The tzparse module is obsolete and will disappear in the future",
+    DeprecationWarning)
+
+tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);'
+          '([0-9]+)/([0-9]+),([0-9]+)/([0-9]+)$')
+
+tzprog = None
+
+def tzparse(tzstr):
+    """Given a timezone spec, return a tuple of information
+    (tzname, delta, dstname, daystart, hourstart, dayend, hourend),
+    where 'tzname' is the name of the timezone, 'delta' is the offset
+    in hours from GMT, 'dstname' is the name of the daylight-saving
+    timezone, and 'daystart'/'hourstart' and 'dayend'/'hourend'
+    specify the starting and ending points for daylight saving time."""
+    global tzprog
+    if tzprog is None:
+        import re
+        tzprog = re.compile(tzpat)
+    match = tzprog.match(tzstr)
+    if not match:
+        raise ValueError, 'not the TZ syntax I understand'
+    subs = []
+    for i in range(1, 8):
+        subs.append(match.group(i))
+    for i in (1, 3, 4, 5, 6):
+        subs[i] = eval(subs[i])
+    [tzname, delta, dstname, daystart, hourstart, dayend, hourend] = subs
+    return (tzname, delta, dstname, daystart, hourstart, dayend, hourend)
+
+def tzlocaltime(secs, params):
+    """Given a Unix time in seconds and a tuple of information about
+    a timezone as returned by tzparse(), return the local time in the
+    form (year, month, day, hour, min, sec, yday, wday, tzname)."""
+    import time
+    (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = params
+    year, month, days, hours, mins, secs, yday, wday, isdst = \
+            time.gmtime(secs - delta*3600)
+    if (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend):
+        tzname = dstname
+        hours = hours + 1
+    return year, month, days, hours, mins, secs, yday, wday, tzname
+
+def tzset():
+    """Determine the current timezone from the "TZ" environment variable."""
+    global tzparams, timezone, altzone, daylight, tzname
+    import os
+    tzstr = os.environ['TZ']
+    tzparams = tzparse(tzstr)
+    timezone = tzparams[1] * 3600
+    altzone = timezone - 3600
+    daylight = 1
+    tzname = tzparams[0], tzparams[2]
+
+def isdst(secs):
+    """Return true if daylight-saving time is in effect for the given
+    Unix time in the current timezone."""
+    import time
+    (tzname, delta, dstname, daystart, hourstart, dayend, hourend) = \
+            tzparams
+    year, month, days, hours, mins, secs, yday, wday, isdst = \
+            time.gmtime(secs - delta*3600)
+    return (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend)
+
+tzset()
+
+def localtime(secs):
+    """Get the local time in the current timezone."""
+    return tzlocaltime(secs, tzparams)
+
+def test():
+    from time import asctime, gmtime
+    import time, sys
+    now = time.time()
+    x = localtime(now)
+    tm = x[:-1] + (0,)
+    print 'now =', now, '=', asctime(tm), x[-1]
+    now = now - now % (24*3600)
+    if sys.argv[1:]: now = now + eval(sys.argv[1])
+    x = gmtime(now)
+    tm = x[:-1] + (0,)
+    print 'gmtime =', now, '=', asctime(tm), 'yday =', x[-2]
+    jan1 = now - x[-2]*24*3600
+    x = localtime(jan1)
+    tm = x[:-1] + (0,)
+    print 'jan1 =', jan1, '=', asctime(tm), x[-1]
+    for d in range(85, 95) + range(265, 275):
+        t = jan1 + d*24*3600
+        x = localtime(t)
+        tm = x[:-1] + (0,)
+        print 'd =', d, 't =', t, '=', asctime(tm), x[-1]
diff --git a/lib-python/2.2/unittest.py b/lib-python/2.2/unittest.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/unittest.py
@@ -0,0 +1,723 @@
+#!/usr/bin/env python
+'''
+Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
+Smalltalk testing framework.
+
+This module contains the core framework classes that form the basis of
+specific test cases and suites (TestCase, TestSuite etc.), and also a
+text-based utility class for running the tests and reporting the results
+(TextTestRunner).
+
+Simple usage:
+
+    import unittest
+
+    class IntegerArithmeticTestCase(unittest.TestCase):
+        def testAdd(self):  ## test method names begin 'test*'
+            self.assertEquals((1 + 2), 3)
+            self.assertEquals(0 + 1, 1)
+        def testMultiply(self):
+            self.assertEquals((0 * 10), 0)
+            self.assertEquals((5 * 8), 40)
+
+    if __name__ == '__main__':
+        unittest.main()
+
+Further information is available in the bundled documentation, and from
+
+  http://pyunit.sourceforge.net/
+
+Copyright (c) 1999, 2000, 2001 Steve Purcell
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE.  THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+'''
+
+__author__ = "Steve Purcell"
+__email__ = "stephen_purcell at yahoo dot com"
+# Slice strips the leading '#Revision: ' and trailing ' $' of the CVS
+# keyword, yielding the bare revision number ('1.43').
+__version__ = "#Revision: 1.43 $"[11:-2]
+
+import time
+import sys
+import traceback
+import string
+import os
+import types
+
+##############################################################################
+# Test framework core
+##############################################################################
+
+class TestResult:
+    """Holder for test result information.
+
+    Test results are automatically managed by the TestCase and TestSuite
+    classes, and do not need to be explicitly manipulated by writers of tests.
+
+    Each instance holds the total number of tests run, and collections of
+    failures and errors that occurred among those test runs. The collections
+    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
+    formatted traceback of the error that occurred.
+    """
+    def __init__(self):
+        # failures/errors hold (test, formatted-traceback-string) pairs.
+        self.failures = []
+        self.errors = []
+        self.testsRun = 0
+        # Polled by TestSuite.__call__ to abort the run early; set by stop().
+        self.shouldStop = 0
+
+    def startTest(self, test):
+        "Called when the given test is about to be run"
+        self.testsRun = self.testsRun + 1
+
+    def stopTest(self, test):
+        "Called when the given test has been run"
+        pass
+
+    def addError(self, test, err):
+        """Called when an error has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info().
+        """
+        self.errors.append((test, self._exc_info_to_string(err)))
+
+    def addFailure(self, test, err):
+        """Called when an error has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info()."""
+        self.failures.append((test, self._exc_info_to_string(err)))
+
+    def addSuccess(self, test):
+        "Called when a test has completed successfully"
+        pass
+
+    def wasSuccessful(self):
+        "Tells whether or not this result was a success"
+        return len(self.failures) == len(self.errors) == 0
+
+    def stop(self):
+        "Indicates that the tests should be aborted"
+        self.shouldStop = 1
+
+    def _exc_info_to_string(self, err):
+        """Converts a sys.exc_info()-style tuple of values into a string."""
+        # apply() and string.join are pre-Python-2.3 idioms, kept as-is
+        # for the 2.2 target.
+        return string.join(apply(traceback.format_exception, err), '')
+
+    def __repr__(self):
+        return "<%s run=%i errors=%i failures=%i>" % \
+               (self.__class__, self.testsRun, len(self.errors),
+                len(self.failures))
+
+
+class TestCase:
+    """A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+    """
+
+    # This attribute determines which exception will be raised when
+    # the instance's assertion methods fail; test methods raising this
+    # exception will be deemed to have 'failed' rather than 'errored'
+
+    failureException = AssertionError
+
+    def __init__(self, methodName='runTest'):
+        """Create an instance of the class that will use the named test
+           method when executed. Raises a ValueError if the instance does
+           not have a method with the specified name.
+        """
+        try:
+            # Double-underscore attributes are name-mangled
+            # (_TestCase__testMethodName etc.), keeping them private.
+            self.__testMethodName = methodName
+            testMethod = getattr(self, methodName)
+            self.__testMethodDoc = testMethod.__doc__
+        except AttributeError:
+            # Missing method: re-report as ValueError per the docstring.
+            raise ValueError, "no such test method in %s: %s" % \
+                  (self.__class__, methodName)
+
+    def setUp(self):
+        "Hook method for setting up the test fixture before exercising it."
+        pass
+
+    def tearDown(self):
+        "Hook method for deconstructing the test fixture after testing it."
+        pass
+
+    def countTestCases(self):
+        # A TestCase is a leaf in the composite; always exactly one test.
+        return 1
+
+    def defaultTestResult(self):
+        return TestResult()
+
+    def shortDescription(self):
+        """Returns a one-line description of the test, or None if no
+        description has been provided.
+
+        The default implementation of this method returns the first line of
+        the specified test method's docstring.
+        """
+        doc = self.__testMethodDoc
+        return doc and string.strip(string.split(doc, "\n")[0]) or None
+
+    def id(self):
+        return "%s.%s" % (self.__class__, self.__testMethodName)
+
+    def __str__(self):
+        return "%s (%s)" % (self.__testMethodName, self.__class__)
+
+    def __repr__(self):
+        return "<%s testMethod=%s>" % \
+               (self.__class__, self.__testMethodName)
+
+    def run(self, result=None):
+        return self(result)
+
+    def __call__(self, result=None):
+        if result is None: result = self.defaultTestResult()
+        result.startTest(self)
+        testMethod = getattr(self, self.__testMethodName)
+        try:
+            try:
+                self.setUp()
+            except KeyboardInterrupt:
+                raise
+            except:
+                # A broken fixture is an error (not a failure); the test
+                # body and tearDown are skipped entirely.
+                result.addError(self, self.__exc_info())
+                return
+
+            # 'ok' tracks whether the test body passed; a tearDown error
+            # revokes the success below.
+            ok = 0
+            try:
+                testMethod()
+                ok = 1
+            except self.failureException, e:
+                # ('e' is bound but unused -- kept as in upstream PyUnit.)
+                result.addFailure(self, self.__exc_info())
+            except KeyboardInterrupt:
+                raise
+            except:
+                result.addError(self, self.__exc_info())
+
+            try:
+                self.tearDown()
+            except KeyboardInterrupt:
+                raise
+            except:
+                result.addError(self, self.__exc_info())
+                ok = 0
+            if ok: result.addSuccess(self)
+        finally:
+            result.stopTest(self)
+
+    def debug(self):
+        """Run the test without collecting errors in a TestResult"""
+        # Exceptions propagate to the caller, making pdb post-mortem easy.
+        self.setUp()
+        getattr(self, self.__testMethodName)()
+        self.tearDown()
+
+    def __exc_info(self):
+        """Return a version of sys.exc_info() with the traceback frame
+           minimised; usually the top level of the traceback frame is not
+           needed.
+        """
+        exctype, excvalue, tb = sys.exc_info()
+        if sys.platform[:4] == 'java': ## tracebacks look different in Jython
+            return (exctype, excvalue, tb)
+        # Drop the outermost frame (this framework's own call) so the
+        # reported traceback starts in user code.
+        newtb = tb.tb_next
+        if newtb is None:
+            return (exctype, excvalue, tb)
+        return (exctype, excvalue, newtb)
+
+    def fail(self, msg=None):
+        """Fail immediately, with the given message."""
+        raise self.failureException, msg
+
+    def failIf(self, expr, msg=None):
+        "Fail the test if the expression is true."
+        if expr: raise self.failureException, msg
+
+    def failUnless(self, expr, msg=None):
+        """Fail the test unless the expression is true."""
+        if not expr: raise self.failureException, msg
+
+    def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
+        """Fail unless an exception of class excClass is thrown
+           by callableObj when invoked with arguments args and keyword
+           arguments kwargs. If a different type of exception is
+           thrown, it will not be caught, and the test case will be
+           deemed to have suffered an error, exactly as for an
+           unexpected exception.
+        """
+        try:
+            apply(callableObj, args, kwargs)
+        except excClass:
+            return
+        else:
+            # No exception raised: report the expected exception's name.
+            if hasattr(excClass,'__name__'): excName = excClass.__name__
+            else: excName = str(excClass)
+            raise self.failureException, excName
+
+    def failUnlessEqual(self, first, second, msg=None):
+        """Fail if the two objects are unequal as determined by the '!='
+           operator.
+        """
+        if first != second:
+            raise self.failureException, \
+                  (msg or '%s != %s' % (`first`, `second`))
+
+    def failIfEqual(self, first, second, msg=None):
+        """Fail if the two objects are equal as determined by the '=='
+           operator.
+        """
+        if first == second:
+            raise self.failureException, \
+                  (msg or '%s == %s' % (`first`, `second`))
+
+    # JUnit-style synonyms for the 'fail*' assertion methods above.
+    assertEqual = assertEquals = failUnlessEqual
+
+    assertNotEqual = assertNotEquals = failIfEqual
+
+    assertRaises = failUnlessRaises
+
+    assert_ = failUnless
+
+
+
+class TestSuite:
+    """A test suite is a composite test consisting of a number of TestCases.
+
+    For use, create an instance of TestSuite, then add test case instances.
+    When all tests have been added, the suite can be passed to a test
+    runner, such as TextTestRunner. It will run the individual test cases
+    in the order in which they were added, aggregating the results. When
+    subclassing, do not forget to call the base class constructor.
+    """
+    def __init__(self, tests=()):
+        # The default is an immutable empty tuple, so there is no shared
+        # mutable-default hazard here.
+        self._tests = []
+        self.addTests(tests)
+
+    def __repr__(self):
+        return "<%s tests=%s>" % (self.__class__, self._tests)
+
+    __str__ = __repr__
+
+    def countTestCases(self):
+        # Composite: sums over children, which may themselves be suites.
+        cases = 0
+        for test in self._tests:
+            cases = cases + test.countTestCases()
+        return cases
+
+    def addTest(self, test):
+        self._tests.append(test)
+
+    def addTests(self, tests):
+        for test in tests:
+            self.addTest(test)
+
+    def run(self, result):
+        return self(result)
+
+    def __call__(self, result):
+        for test in self._tests:
+            # Honour result.stop(): abort between tests, never mid-test.
+            if result.shouldStop:
+                break
+            test(result)
+        return result
+
+    def debug(self):
+        """Run the tests without collecting errors in a TestResult"""
+        for test in self._tests: test.debug()
+
+
+class FunctionTestCase(TestCase):
+    """A test case that wraps a test function.
+
+    This is useful for slipping pre-existing test functions into the
+    PyUnit framework. Optionally, set-up and tidy-up functions can be
+    supplied. As with TestCase, the tidy-up ('tearDown') function will
+    always be called if the set-up ('setUp') function ran successfully.
+    """
+
+    def __init__(self, testFunc, setUp=None, tearDown=None,
+                 description=None):
+        # TestCase.__init__ defaults methodName to 'runTest', which is
+        # overridden below to call the wrapped function.
+        TestCase.__init__(self)
+        self.__setUpFunc = setUp
+        self.__tearDownFunc = tearDown
+        self.__testFunc = testFunc
+        self.__description = description
+
+    def setUp(self):
+        if self.__setUpFunc is not None:
+            self.__setUpFunc()
+
+    def tearDown(self):
+        if self.__tearDownFunc is not None:
+            self.__tearDownFunc()
+
+    def runTest(self):
+        self.__testFunc()
+
+    def id(self):
+        return self.__testFunc.__name__
+
+    def __str__(self):
+        return "%s (%s)" % (self.__class__, self.__testFunc.__name__)
+
+    def __repr__(self):
+        return "<%s testFunc=%s>" % (self.__class__, self.__testFunc)
+
+    def shortDescription(self):
+        # An explicit description wins; otherwise fall back to the first
+        # line of the wrapped function's docstring, if any.
+        if self.__description is not None: return self.__description
+        doc = self.__testFunc.__doc__
+        return doc and string.strip(string.split(doc, "\n")[0]) or None
+
+
+
+##############################################################################
+# Locating and loading tests
+##############################################################################
+
+class TestLoader:
+    """This class is responsible for loading tests according to various
+    criteria and returning them wrapped in a Test
+    """
+    # Class-level knobs; instances may override them (see _makeLoader
+    # below) to customise the prefix, ordering or suite type.
+    testMethodPrefix = 'test'
+    sortTestMethodsUsing = cmp
+    suiteClass = TestSuite
+
+    def loadTestsFromTestCase(self, testCaseClass):
+        """Return a suite of all tests cases contained in testCaseClass"""
+        # One TestCase instance per matching method name.
+        return self.suiteClass(map(testCaseClass,
+                                   self.getTestCaseNames(testCaseClass)))
+
+    def loadTestsFromModule(self, module):
+        """Return a suite of all tests cases contained in the given module"""
+        tests = []
+        for name in dir(module):
+            obj = getattr(module, name)
+            # Classic classes only (types.ClassType); new-style classes
+            # would not be matched by this check.
+            if type(obj) == types.ClassType and issubclass(obj, TestCase):
+                tests.append(self.loadTestsFromTestCase(obj))
+        return self.suiteClass(tests)
+
+    def loadTestsFromName(self, name, module=None):
+        """Return a suite of all tests cases given a string specifier.
+
+        The name may resolve either to a module, a test case class, a
+        test method within a test case class, or a callable object which
+        returns a TestCase or TestSuite instance.
+
+        The method optionally resolves the names relative to a given module.
+        """
+        parts = string.split(name, '.')
+        if module is None:
+            if not parts:
+                raise ValueError, "incomplete test name: %s" % name
+            else:
+                # Import the longest importable prefix of the dotted name,
+                # trimming trailing components until one imports (or all
+                # fail, in which case the last ImportError propagates).
+                parts_copy = parts[:]
+                while parts_copy:
+                    try:
+                        module = __import__(string.join(parts_copy,'.'))
+                        break
+                    except ImportError:
+                        del parts_copy[-1]
+                        if not parts_copy: raise
+                parts = parts[1:]
+        # Walk the remaining dotted components down from the module.
+        obj = module
+        for part in parts:
+            obj = getattr(obj, part)
+
+        # NOTE(review): 'unittest' is imported here by name, presumably so
+        # the isinstance/issubclass checks use the canonically-imported
+        # classes even if this file was loaded under another name -- confirm.
+        import unittest
+        if type(obj) == types.ModuleType:
+            return self.loadTestsFromModule(obj)
+        elif type(obj) == types.ClassType and issubclass(obj, unittest.TestCase):
+            return self.loadTestsFromTestCase(obj)
+        elif type(obj) == types.UnboundMethodType:
+            # 'Class.method' specifier: instantiate the class for that method.
+            return obj.im_class(obj.__name__)
+        elif callable(obj):
+            test = obj()
+            if not isinstance(test, unittest.TestCase) and \
+               not isinstance(test, unittest.TestSuite):
+                raise ValueError, \
+                      "calling %s returned %s, not a test" % (obj,test)
+            return test
+        else:
+            raise ValueError, "don't know how to make test from: %s" % obj
+
+    def loadTestsFromNames(self, names, module=None):
+        """Return a suite of all tests cases found using the given sequence
+        of string specifiers. See 'loadTestsFromName()'.
+        """
+        suites = []
+        for name in names:
+            suites.append(self.loadTestsFromName(name, module))
+        return self.suiteClass(suites)
+
+    def getTestCaseNames(self, testCaseClass):
+        """Return a sorted sequence of method names found within testCaseClass
+        """
+        # Prefix match on attribute names; the default-argument lambda
+        # binds the prefix at definition time (pre-closure idiom).
+        testFnNames = filter(lambda n,p=self.testMethodPrefix: n[:len(p)] == p,
+                             dir(testCaseClass))
+        # Recurse into base classes so inherited test methods are found;
+        # the membership check avoids duplicating overridden methods.
+        for baseclass in testCaseClass.__bases__:
+            for testFnName in self.getTestCaseNames(baseclass):
+                if testFnName not in testFnNames:  # handle overridden methods
+                    testFnNames.append(testFnName)
+        if self.sortTestMethodsUsing:
+            testFnNames.sort(self.sortTestMethodsUsing)
+        return testFnNames
+
+
+
+defaultTestLoader = TestLoader()  # shared module-level loader, used as the TestProgram default
+
+
+##############################################################################
+# Patches for old functions: these functions should be considered obsolete
+##############################################################################
+
+def _makeLoader(prefix, sortUsing, suiteClass=None):
+    # Build a throwaway TestLoader configured for the legacy helper
+    # functions below; suiteClass is only overridden when truthy.
+    loader = TestLoader()
+    loader.sortTestMethodsUsing = sortUsing
+    loader.testMethodPrefix = prefix
+    if suiteClass: loader.suiteClass = suiteClass
+    return loader
+
+def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
+    # Obsolete module-level helper kept for backward compatibility;
+    # prefer TestLoader.getTestCaseNames.
+    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
+
+def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
+    # Obsolete helper; prefer TestLoader.loadTestsFromTestCase.
+    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
+
+def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
+    # Obsolete helper; prefer TestLoader.loadTestsFromModule.
+    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
+
+
+##############################################################################
+# Text UI
+##############################################################################
+
+class _WritelnDecorator:
+    """Used to decorate file-like objects with a handy 'writeln' method"""
+    def __init__(self,stream):
+        self.stream = stream
+
+    def __getattr__(self, attr):
+        # Delegate every other attribute (write, flush, ...) to the
+        # wrapped stream.
+        return getattr(self.stream,attr)
+
+    def writeln(self, *args):
+        if args: apply(self.write, args)
+        self.write('\n') # text-mode streams translate to \r\n if needed
+
+
+class _TextTestResult(TestResult):
+    """A test result class that can print formatted text results to a stream.
+
+    Used by TextTestRunner.
+    """
+    separator1 = '=' * 70
+    separator2 = '-' * 70
+
+    def __init__(self, stream, descriptions, verbosity):
+        TestResult.__init__(self)
+        self.stream = stream
+        # verbosity > 1: one labelled line per test; verbosity == 1: a
+        # single progress character ('.', 'E' or 'F') per test; otherwise
+        # no per-test output.
+        self.showAll = verbosity > 1
+        self.dots = verbosity == 1
+        self.descriptions = descriptions
+
+    def getDescription(self, test):
+        if self.descriptions:
+            return test.shortDescription() or str(test)
+        else:
+            return str(test)
+
+    def startTest(self, test):
+        TestResult.startTest(self, test)
+        if self.showAll:
+            self.stream.write(self.getDescription(test))
+            self.stream.write(" ... ")
+
+    def addSuccess(self, test):
+        TestResult.addSuccess(self, test)
+        if self.showAll:
+            self.stream.writeln("ok")
+        elif self.dots:
+            self.stream.write('.')
+
+    def addError(self, test, err):
+        TestResult.addError(self, test, err)
+        if self.showAll:
+            self.stream.writeln("ERROR")
+        elif self.dots:
+            self.stream.write('E')
+
+    def addFailure(self, test, err):
+        TestResult.addFailure(self, test, err)
+        if self.showAll:
+            self.stream.writeln("FAIL")
+        elif self.dots:
+            self.stream.write('F')
+
+    def printErrors(self):
+        if self.dots or self.showAll:
+            # Terminate the in-progress output line before the listings.
+            self.stream.writeln()
+        self.printErrorList('ERROR', self.errors)
+        self.printErrorList('FAIL', self.failures)
+
+    def printErrorList(self, flavour, errors):
+        for test, err in errors:
+            self.stream.writeln(self.separator1)
+            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
+            self.stream.writeln(self.separator2)
+            self.stream.writeln("%s" % err)
+
+
+class TextTestRunner:
+    """A test runner class that displays results in textual form.
+
+    It prints out the names of tests as they are run, errors as they
+    occur, and a summary of the results at the end of the test run.
+    """
+    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
+        self.stream = _WritelnDecorator(stream)
+        self.descriptions = descriptions
+        self.verbosity = verbosity
+
+    def _makeResult(self):
+        return _TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+    def run(self, test):
+        "Run the given test case or test suite."
+        result = self._makeResult()
+        startTime = time.time()
+        test(result)
+        stopTime = time.time()
+        timeTaken = float(stopTime - startTime)
+        result.printErrors()
+        self.stream.writeln(result.separator2)
+        run = result.testsRun
+        # Pluralise 'test' and report the wall-clock duration.
+        self.stream.writeln("Ran %d test%s in %.3fs" %
+                            (run, run != 1 and "s" or "", timeTaken))
+        self.stream.writeln()
+        if not result.wasSuccessful():
+            # Summarise failure/error counts, e.g. "FAILED (failures=1, errors=2)".
+            self.stream.write("FAILED (")
+            failed, errored = map(len, (result.failures, result.errors))
+            if failed:
+                self.stream.write("failures=%d" % failed)
+            if errored:
+                if failed: self.stream.write(", ")
+                self.stream.write("errors=%d" % errored)
+            self.stream.writeln(")")
+        else:
+            self.stream.writeln("OK")
+        return result
+
+
+
+##############################################################################
+# Facilities for running tests from the command line
+##############################################################################
+
+class TestProgram:
+    """A command-line program that runs a set of tests; this is primarily
+       for making test modules conveniently executable.
+    """
+    USAGE = """\
+Usage: %(progName)s [options] [test] [...]
+
+Options:
+  -h, --help       Show this message
+  -v, --verbose    Verbose output
+  -q, --quiet      Minimal output
+
+Examples:
+  %(progName)s                               - run default set of tests
+  %(progName)s MyTestSuite                   - run suite 'MyTestSuite'
+  %(progName)s MyTestCase.testSomething      - run MyTestCase.testSomething
+  %(progName)s MyTestCase                    - run all 'test*' test methods
+                                               in MyTestCase
+"""
+    def __init__(self, module='__main__', defaultTest=None,
+                 argv=None, testRunner=None, testLoader=defaultTestLoader):
+        # NOTE: constructing a TestProgram parses argv, runs the tests,
+        # and terminates the process via sys.exit() in runTests().
+        if type(module) == type(''):
+            # Resolve a dotted module name to the actual submodule object.
+            self.module = __import__(module)
+            for part in string.split(module,'.')[1:]:
+                self.module = getattr(self.module, part)
+        else:
+            self.module = module
+        if argv is None:
+            argv = sys.argv
+        self.verbosity = 1
+        self.defaultTest = defaultTest
+        self.testRunner = testRunner
+        self.testLoader = testLoader
+        self.progName = os.path.basename(argv[0])   # used in USAGE text
+        self.parseArgs(argv)
+        self.runTests()
+
+    def usageExit(self, msg=None):
+        if msg: print msg
+        print self.USAGE % self.__dict__
+        sys.exit(2)
+
+    def parseArgs(self, argv):
+        import getopt
+        try:
+            options, args = getopt.getopt(argv[1:], 'hHvq',
+                                          ['help','verbose','quiet'])
+            # Flags are applied in command-line order, so when both -q and
+            # -v are given the last one wins.
+            for opt, value in options:
+                if opt in ('-h','-H','--help'):
+                    self.usageExit()
+                if opt in ('-q','--quiet'):
+                    self.verbosity = 0
+                if opt in ('-v','--verbose'):
+                    self.verbosity = 2
+            if len(args) == 0 and self.defaultTest is None:
+                # No test names given: run everything found in the module.
+                self.test = self.testLoader.loadTestsFromModule(self.module)
+                return
+            if len(args) > 0:
+                self.testNames = args
+            else:
+                self.testNames = (self.defaultTest,)
+            self.createTests()
+        except getopt.error, msg:
+            self.usageExit(msg)
+
+    def createTests(self):
+        self.test = self.testLoader.loadTestsFromNames(self.testNames,
+                                                       self.module)
+
+    def runTests(self):
+        if self.testRunner is None:
+            self.testRunner = TextTestRunner(verbosity=self.verbosity)
+        result = self.testRunner.run(self.test)
+        # Exit status 0 on success, 1 on any failure or error.
+        sys.exit(not result.wasSuccessful())
+
+main = TestProgram  # alias: the documented 'unittest.main()' entry point
+
+
+##############################################################################
+# Executing this module from the command line
+##############################################################################
+
+if __name__ == "__main__":
+    # module=None: resolve test names given on the command line rather
+    # than loading tests from a default module.
+    main(module=None)
diff --git a/lib-python/2.2/urllib.py b/lib-python/2.2/urllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/urllib.py
@@ -0,0 +1,1465 @@
+"""Open an arbitrary URL.
+
+See the following document for more info on URLs:
+"Names and Addresses, URIs, URLs, URNs, URCs", at
+http://www.w3.org/pub/WWW/Addressing/Overview.html
+
+See also the HTTP spec (from which the error codes are derived):
+"HTTP - Hypertext Transfer Protocol", at
+http://www.w3.org/pub/WWW/Protocols/
+
+Related standards and specs:
+- RFC1808: the "relative URL" spec. (authoritative status)
+- RFC1738 - the "URL standard". (authoritative status)
+- RFC1630 - the "URI spec". (informational status)
+
+The object returned by URLopener().open(file) will differ per
+protocol.  All you know is that it has methods read(), readline(),
+readlines(), fileno(), close() and info().  The read*(), fileno()
+and close() methods work like those of open files.
+The info() method returns a mimetools.Message object which can be
+used to query various info about the object, if available.
+(mimetools.Message objects are queried with the getheader() method.)
+"""
+
+import string
+import socket
+import os
+import stat
+import time
+import sys
+import types
+
+# Public names exported by ``from urllib import *``.
+__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
+           "urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
+           "urlencode", "url2pathname", "pathname2url", "splittag",
+           "localhost", "thishost", "ftperrors", "basejoin", "unwrap",
+           "splittype", "splithost", "splituser", "splitpasswd", "splitport",
+           "splitnport", "splitquery", "splitattr", "splitvalue",
+           "splitgophertype", "getproxies"]
+
+__version__ = '1.15'    # XXX This version is not always updated :-(
+
+MAXFTPCACHE = 10        # Trim the ftp cache beyond this size
+
+# Helper for non-unix systems
+if os.name == 'mac':
+    from macurl2path import url2pathname, pathname2url
+elif os.name == 'nt':
+    from nturl2path import url2pathname, pathname2url
+elif os.name == 'riscos':
+    from rourl2path import url2pathname, pathname2url
+else:
+    # Posix-style systems: URL paths and filesystem paths differ only by
+    # %xx quoting, so the conversions are just unquote/quote.
+    def url2pathname(pathname):
+        return unquote(pathname)
+    def pathname2url(pathname):
+        return quote(pathname)
+
+# This really consists of two pieces:
+# (1) a class which handles opening of all sorts of URLs
+#     (plus assorted utilities etc.)
+# (2) a set of functions for parsing URLs
+# XXX Should these be separated out into different modules?
+
+
+# Shortcut for basic usage
+_urlopener = None
+def urlopen(url, data=None):
+    """urlopen(url [, data]) -> open file-like object"""
+    # Lazily create one module-wide FancyURLopener shared by urlopen(),
+    # urlretrieve() and urlcleanup().
+    global _urlopener
+    if not _urlopener:
+        _urlopener = FancyURLopener()
+    if data is None:
+        return _urlopener.open(url)
+    else:
+        return _urlopener.open(url, data)
+def urlretrieve(url, filename=None, reporthook=None, data=None):
+    # Retrieve url to a local file via the shared opener; returns the
+    # (filename, headers) pair produced by URLopener.retrieve().
+    global _urlopener
+    if not _urlopener:
+        _urlopener = FancyURLopener()
+    return _urlopener.retrieve(url, filename, reporthook, data)
+def urlcleanup():
+    # Delete temporary files and clear caches held by the shared opener.
+    if _urlopener:
+        _urlopener.cleanup()
+
+
+ftpcache = {}
+class URLopener:
+    """Class to open URLs.
+    This is a class rather than just a subroutine because we may need
+    more than one set of global protocol-specific options.
+    Note -- this is a base class for those who don't want the
+    automatic handling of errors type 302 (relocated) and 401
+    (authorization needed)."""
+
+    __tempfiles = None
+
+    version = "Python-urllib/%s" % __version__
+
+    # Constructor
+    def __init__(self, proxies=None, **x509):
+        if proxies is None:
+            proxies = getproxies()
+        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
+        self.proxies = proxies
+        self.key_file = x509.get('key_file')
+        self.cert_file = x509.get('cert_file')
+        self.addheaders = [('User-agent', self.version)]
+        self.__tempfiles = []
+        self.__unlink = os.unlink # See cleanup()
+        self.tempcache = None
+        # Undocumented feature: if you assign {} to tempcache,
+        # it is used to cache files retrieved with
+        # self.retrieve().  This is not enabled by default
+        # since it does not work for changing documents (and I
+        # haven't got the logic to check expiration headers
+        # yet).
+        self.ftpcache = ftpcache
+        # Undocumented feature: you can use a different
+        # ftp cache by assigning to the .ftpcache member;
+        # in case you want logically independent URL openers
+        # XXX This is not threadsafe.  Bah.
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        self.cleanup()
+
+    def cleanup(self):
+        # This code sometimes runs when the rest of this module
+        # has already been deleted, so it can't use any globals
+        # or import anything.
+        if self.__tempfiles:
+            for file in self.__tempfiles:
+                try:
+                    self.__unlink(file)
+                except OSError:
+                    pass
+            del self.__tempfiles[:]
+        if self.tempcache:
+            self.tempcache.clear()
+
+    def addheader(self, *args):
+        """Add a header to be used by the HTTP interface only
+        e.g. u.addheader('Accept', 'sound/basic')"""
+        self.addheaders.append(args)
+
+    # External interface
+    def open(self, fullurl, data=None):
+        """Use URLopener().open(file) instead of open(file, 'r')."""
+        fullurl = unwrap(toBytes(fullurl))
+        if self.tempcache and self.tempcache.has_key(fullurl):
+            filename, headers = self.tempcache[fullurl]
+            fp = open(filename, 'rb')
+            return addinfourl(fp, headers, fullurl)
+        urltype, url = splittype(fullurl)
+        if not urltype:
+            urltype = 'file'
+        if self.proxies.has_key(urltype):
+            proxy = self.proxies[urltype]
+            urltype, proxyhost = splittype(proxy)
+            host, selector = splithost(proxyhost)
+            url = (host, fullurl) # Signal special case to open_*()
+        else:
+            proxy = None
+        name = 'open_' + urltype
+        self.type = urltype
+        if '-' in name:
+            # replace - with _
+            name = '_'.join(name.split('-'))
+        if not hasattr(self, name):
+            if proxy:
+                return self.open_unknown_proxy(proxy, fullurl, data)
+            else:
+                return self.open_unknown(fullurl, data)
+        try:
+            if data is None:
+                return getattr(self, name)(url)
+            else:
+                return getattr(self, name)(url, data)
+        except socket.error, msg:
+            raise IOError, ('socket error', msg), sys.exc_info()[2]
+
+    def open_unknown(self, fullurl, data=None):
+        """Overridable interface to open unknown URL type."""
+        type, url = splittype(fullurl)
+        raise IOError, ('url error', 'unknown url type', type)
+
+    def open_unknown_proxy(self, proxy, fullurl, data=None):
+        """Overridable interface to open unknown URL type."""
+        type, url = splittype(fullurl)
+        raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
+
+    # External interface
+    def retrieve(self, url, filename=None, reporthook=None, data=None):
+        """retrieve(url) returns (filename, headers) for a local object
+        or (tempfilename, headers) for a remote object."""
+        url = unwrap(toBytes(url))
+        if self.tempcache and self.tempcache.has_key(url):
+            return self.tempcache[url]
+        type, url1 = splittype(url)
+        if not filename and (not type or type == 'file'):
+            try:
+                fp = self.open_local_file(url1)
+                hdrs = fp.info()
+                del fp
+                return url2pathname(splithost(url1)[1]), hdrs
+            except IOError, msg:
+                pass
+        fp = self.open(url, data)
+        headers = fp.info()
+        if not filename:
+            import tempfile
+            garbage, path = splittype(url)
+            garbage, path = splithost(path or "")
+            path, garbage = splitquery(path or "")
+            path, garbage = splitattr(path or "")
+            suffix = os.path.splitext(path)[1]
+            filename = tempfile.mktemp(suffix)
+            self.__tempfiles.append(filename)
+        result = filename, headers
+        if self.tempcache is not None:
+            self.tempcache[url] = result
+        tfp = open(filename, 'wb')
+        bs = 1024*8
+        size = -1
+        blocknum = 1
+        if reporthook:
+            if headers.has_key("content-length"):
+                size = int(headers["Content-Length"])
+            reporthook(0, bs, size)
+        block = fp.read(bs)
+        if reporthook:
+            reporthook(1, bs, size)
+        while block:
+            tfp.write(block)
+            block = fp.read(bs)
+            blocknum = blocknum + 1
+            if reporthook:
+                reporthook(blocknum, bs, size)
+        fp.close()
+        tfp.close()
+        del fp
+        del tfp
+        return result
+
+    # Each method named open_<type> knows how to open that type of URL
+
+    def open_http(self, url, data=None):
+        """Use HTTP protocol."""
+        import httplib
+        user_passwd = None
+        if type(url) is types.StringType:
+            host, selector = splithost(url)
+            if host:
+                user_passwd, host = splituser(host)
+                host = unquote(host)
+            realhost = host
+        else:
+            host, selector = url
+            urltype, rest = splittype(selector)
+            url = rest
+            user_passwd = None
+            if urltype.lower() != 'http':
+                realhost = None
+            else:
+                realhost, rest = splithost(rest)
+                if realhost:
+                    user_passwd, realhost = splituser(realhost)
+                if user_passwd:
+                    selector = "%s://%s%s" % (urltype, realhost, rest)
+                if proxy_bypass(realhost):
+                    host = realhost
+
+            #print "proxy via http:", host, selector
+        if not host: raise IOError, ('http error', 'no host given')
+        if user_passwd:
+            import base64
+            auth = base64.encodestring(user_passwd).strip()
+        else:
+            auth = None
+        h = httplib.HTTP(host)
+        if data is not None:
+            h.putrequest('POST', selector)
+            h.putheader('Content-type', 'application/x-www-form-urlencoded')
+            h.putheader('Content-length', '%d' % len(data))
+        else:
+            h.putrequest('GET', selector)
+        if auth: h.putheader('Authorization', 'Basic %s' % auth)
+        if realhost: h.putheader('Host', realhost)
+        for args in self.addheaders: apply(h.putheader, args)
+        h.endheaders()
+        if data is not None:
+            h.send(data)
+        errcode, errmsg, headers = h.getreply()
+        fp = h.getfile()
+        if errcode == 200:
+            return addinfourl(fp, headers, "http:" + url)
+        else:
+            if data is None:
+                return self.http_error(url, fp, errcode, errmsg, headers)
+            else:
+                return self.http_error(url, fp, errcode, errmsg, headers, data)
+
+    def http_error(self, url, fp, errcode, errmsg, headers, data=None):
+        """Handle http errors.
+        Derived class can override this, or provide specific handlers
+        named http_error_DDD where DDD is the 3-digit error code."""
+        # First check if there's a specific handler for this error
+        name = 'http_error_%d' % errcode
+        if hasattr(self, name):
+            method = getattr(self, name)
+            if data is None:
+                result = method(url, fp, errcode, errmsg, headers)
+            else:
+                result = method(url, fp, errcode, errmsg, headers, data)
+            if result: return result
+        return self.http_error_default(url, fp, errcode, errmsg, headers)
+
+    def http_error_default(self, url, fp, errcode, errmsg, headers):
+        """Default error handler: close the connection and raise IOError."""
+        void = fp.read()
+        fp.close()
+        raise IOError, ('http error', errcode, errmsg, headers)
+
+    if hasattr(socket, "ssl"):
+        def open_https(self, url, data=None):
+            """Use HTTPS protocol."""
+            import httplib
+            user_passwd = None
+            if type(url) is types.StringType:
+                host, selector = splithost(url)
+                if host:
+                    user_passwd, host = splituser(host)
+                    host = unquote(host)
+                realhost = host
+            else:
+                host, selector = url
+                urltype, rest = splittype(selector)
+                url = rest
+                user_passwd = None
+                if urltype.lower() != 'https':
+                    realhost = None
+                else:
+                    realhost, rest = splithost(rest)
+                    if realhost:
+                        user_passwd, realhost = splituser(realhost)
+                    if user_passwd:
+                        selector = "%s://%s%s" % (urltype, realhost, rest)
+                #print "proxy via https:", host, selector
+            if not host: raise IOError, ('https error', 'no host given')
+            if user_passwd:
+                import base64
+                auth = base64.encodestring(user_passwd).strip()
+            else:
+                auth = None
+            h = httplib.HTTPS(host, 0,
+                              key_file=self.key_file,
+                              cert_file=self.cert_file)
+            if data is not None:
+                h.putrequest('POST', selector)
+                h.putheader('Content-type',
+                            'application/x-www-form-urlencoded')
+                h.putheader('Content-length', '%d' % len(data))
+            else:
+                h.putrequest('GET', selector)
+            if auth: h.putheader('Authorization: Basic %s' % auth)
+            if realhost: h.putheader('Host', realhost)
+            for args in self.addheaders: apply(h.putheader, args)
+            h.endheaders()
+            if data is not None:
+                h.send(data)
+            errcode, errmsg, headers = h.getreply()
+            fp = h.getfile()
+            if errcode == 200:
+                return addinfourl(fp, headers, "https:" + url)
+            else:
+                if data is None:
+                    return self.http_error(url, fp, errcode, errmsg, headers)
+                else:
+                    return self.http_error(url, fp, errcode, errmsg, headers,
+                                           data)
+
+    def open_gopher(self, url):
+        """Use Gopher protocol."""
+        import gopherlib
+        host, selector = splithost(url)
+        if not host: raise IOError, ('gopher error', 'no host given')
+        host = unquote(host)
+        type, selector = splitgophertype(selector)
+        selector, query = splitquery(selector)
+        selector = unquote(selector)
+        if query:
+            query = unquote(query)
+            fp = gopherlib.send_query(selector, query, host)
+        else:
+            fp = gopherlib.send_selector(selector, host)
+        return addinfourl(fp, noheaders(), "gopher:" + url)
+
+    def open_file(self, url):
+        """Use local file or FTP depending on form of URL."""
+        if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
+            return self.open_ftp(url)
+        else:
+            return self.open_local_file(url)
+
+    def open_local_file(self, url):
+        """Use local file."""
+        import mimetypes, mimetools, rfc822, StringIO
+        host, file = splithost(url)
+        localname = url2pathname(file)
+        try:
+            stats = os.stat(localname)
+        except OSError, e:
+            raise IOError(e.errno, e.strerror, e.filename)
+        size = stats[stat.ST_SIZE]
+        modified = rfc822.formatdate(stats[stat.ST_MTIME])
+        mtype = mimetypes.guess_type(url)[0]
+        headers = mimetools.Message(StringIO.StringIO(
+            'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
+            (mtype or 'text/plain', size, modified)))
+        if not host:
+            urlfile = file
+            if file[:1] == '/':
+                urlfile = 'file://' + file
+            return addinfourl(open(localname, 'rb'),
+                              headers, urlfile)
+        host, port = splitport(host)
+        if not port \
+           and socket.gethostbyname(host) in (localhost(), thishost()):
+            urlfile = file
+            if file[:1] == '/':
+                urlfile = 'file://' + file
+            return addinfourl(open(localname, 'rb'),
+                              headers, urlfile)
+        raise IOError, ('local file error', 'not on local host')
+
+    def open_ftp(self, url):
+        """Use FTP protocol."""
+        import mimetypes, mimetools, StringIO
+        host, path = splithost(url)
+        if not host: raise IOError, ('ftp error', 'no host given')
+        host, port = splitport(host)
+        user, host = splituser(host)
+        if user: user, passwd = splitpasswd(user)
+        else: passwd = None
+        host = unquote(host)
+        user = unquote(user or '')
+        passwd = unquote(passwd or '')
+        host = socket.gethostbyname(host)
+        if not port:
+            import ftplib
+            port = ftplib.FTP_PORT
+        else:
+            port = int(port)
+        path, attrs = splitattr(path)
+        path = unquote(path)
+        dirs = path.split('/')
+        dirs, file = dirs[:-1], dirs[-1]
+        if dirs and not dirs[0]: dirs = dirs[1:]
+        if dirs and not dirs[0]: dirs[0] = '/'
+        key = user, host, port, '/'.join(dirs)
+        # XXX thread unsafe!
+        if len(self.ftpcache) > MAXFTPCACHE:
+            # Prune the cache, rather arbitrarily
+            for k in self.ftpcache.keys():
+                if k != key:
+                    v = self.ftpcache[k]
+                    del self.ftpcache[k]
+                    v.close()
+        try:
+            if not self.ftpcache.has_key(key):
+                self.ftpcache[key] = \
+                    ftpwrapper(user, passwd, host, port, dirs)
+            if not file: type = 'D'
+            else: type = 'I'
+            for attr in attrs:
+                attr, value = splitvalue(attr)
+                if attr.lower() == 'type' and \
+                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
+                    type = value.upper()
+            (fp, retrlen) = self.ftpcache[key].retrfile(file, type)
+            mtype = mimetypes.guess_type("ftp:" + url)[0]
+            headers = ""
+            if mtype:
+                headers += "Content-Type: %s\n" % mtype
+            if retrlen is not None and retrlen >= 0:
+                headers += "Content-Length: %d\n" % retrlen
+            headers = mimetools.Message(StringIO.StringIO(headers))
+            return addinfourl(fp, headers, "ftp:" + url)
+        except ftperrors(), msg:
+            raise IOError, ('ftp error', msg), sys.exc_info()[2]
+
+    def open_data(self, url, data=None):
+        """Use "data" URL."""
+        # ignore POSTed data
+        #
+        # syntax of data URLs:
+        # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
+        # mediatype := [ type "/" subtype ] *( ";" parameter )
+        # data      := *urlchar
+        # parameter := attribute "=" value
+        import StringIO, mimetools, time
+        try:
+            [type, data] = url.split(',', 1)
+        except ValueError:
+            raise IOError, ('data error', 'bad data URL')
+        if not type:
+            type = 'text/plain;charset=US-ASCII'
+        semi = type.rfind(';')
+        if semi >= 0 and '=' not in type[semi:]:
+            encoding = type[semi+1:]
+            type = type[:semi]
+        else:
+            encoding = ''
+        msg = []
+        msg.append('Date: %s'%time.strftime('%a, %d %b %Y %T GMT',
+                                            time.gmtime(time.time())))
+        msg.append('Content-type: %s' % type)
+        if encoding == 'base64':
+            import base64
+            data = base64.decodestring(data)
+        else:
+            data = unquote(data)
+        msg.append('Content-length: %d' % len(data))
+        msg.append('')
+        msg.append(data)
+        msg = '\n'.join(msg)
+        f = StringIO.StringIO(msg)
+        headers = mimetools.Message(f, 0)
+        f.fileno = None     # needed for addinfourl
+        return addinfourl(f, headers, url)
+
+
+class FancyURLopener(URLopener):
+    """Derived class with handlers for errors we can handle (perhaps)."""
+
+    def __init__(self, *args):
+        apply(URLopener.__init__, (self,) + args)
+        # auth_cache maps 'realm@host' -> (user, passwd); tries/maxtries
+        # bound the number of consecutive redirects followed.
+        self.auth_cache = {}
+        self.tries = 0
+        self.maxtries = 10
+
+    def http_error_default(self, url, fp, errcode, errmsg, headers):
+        """Default error handling -- don't raise an exception."""
+        # Unlike the base class, hand the error page back to the caller.
+        return addinfourl(fp, headers, "http:" + url)
+
+    def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
+        """Error 302 -- relocated (temporarily)."""
+        # Guard against redirect loops: after maxtries consecutive
+        # redirects, report a synthesized 500 instead of recursing forever.
+        self.tries += 1
+        if self.maxtries and self.tries >= self.maxtries:
+            if hasattr(self, "http_error_500"):
+                meth = self.http_error_500
+            else:
+                meth = self.http_error_default
+            self.tries = 0
+            return meth(url, fp, 500,
+                        "Internal Server Error: Redirect Recursion", headers)
+        result = self.redirect_internal(url, fp, errcode, errmsg, headers,
+                                        data)
+        self.tries = 0
+        return result
+
+    def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
+        # Follow a redirect to the Location: (or URI:) header, if present.
+        if headers.has_key('location'):
+            newurl = headers['location']
+        elif headers.has_key('uri'):
+            newurl = headers['uri']
+        else:
+            return
+        void = fp.read()
+        fp.close()
+        # In case the server sent a relative URL, join with original:
+        newurl = basejoin(self.type + ":" + url, newurl)
+        if data is None:
+            return self.open(newurl)
+        else:
+            return self.open(newurl, data)
+
+    def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
+        """Error 301 -- also relocated (permanently)."""
+        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
+
+    def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
+        """Error 303 -- also relocated (essentially identical to 302)."""
+        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
+
+    def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
+        """Error 401 -- authentication required.
+        See this URL for a description of the basic authentication scheme:
+        http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt"""
+        # Note: URLopener.http_error_default (the base class version called
+        # here) raises IOError, so the fall-through cases do not return.
+        if not headers.has_key('www-authenticate'):
+            URLopener.http_error_default(self, url, fp,
+                                         errcode, errmsg, headers)
+        stuff = headers['www-authenticate']
+        import re
+        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
+        if not match:
+            URLopener.http_error_default(self, url, fp,
+                                         errcode, errmsg, headers)
+        scheme, realm = match.groups()
+        if scheme.lower() != 'basic':
+            URLopener.http_error_default(self, url, fp,
+                                         errcode, errmsg, headers)
+        # Dispatch to retry_http_basic_auth or retry_https_basic_auth
+        # depending on the scheme of the current request.
+        name = 'retry_' + self.type + '_basic_auth'
+        if data is None:
+            return getattr(self,name)(url, realm)
+        else:
+            return getattr(self,name)(url, realm, data)
+
+    def retry_http_basic_auth(self, url, realm, data=None):
+        # Re-open the URL with user:password credentials embedded in the
+        # host part; any previous credentials before '@' are stripped.
+        host, selector = splithost(url)
+        i = host.find('@') + 1
+        host = host[i:]
+        user, passwd = self.get_user_passwd(host, realm, i)
+        if not (user or passwd): return None
+        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
+        newurl = 'http://' + host + selector
+        if data is None:
+            return self.open(newurl)
+        else:
+            return self.open(newurl, data)
+
+    def retry_https_basic_auth(self, url, realm, data=None):
+        host, selector = splithost(url)
+        i = host.find('@') + 1
+        host = host[i:]
+        user, passwd = self.get_user_passwd(host, realm, i)
+        if not (user or passwd): return None
+        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
+        newurl = '//' + host + selector
+        return self.open_https(newurl, data)
+
+    def get_user_passwd(self, host, realm, clear_cache = 0):
+        # Return cached credentials for realm@host, or prompt for them;
+        # clear_cache forces a fresh prompt (used after an auth failure).
+        key = realm + '@' + host.lower()
+        if self.auth_cache.has_key(key):
+            if clear_cache:
+                del self.auth_cache[key]
+            else:
+                return self.auth_cache[key]
+        user, passwd = self.prompt_user_passwd(host, realm)
+        if user or passwd: self.auth_cache[key] = (user, passwd)
+        return user, passwd
+
+    def prompt_user_passwd(self, host, realm):
+        """Override this in a GUI environment!"""
+        import getpass
+        try:
+            user = raw_input("Enter username for %s at %s: " % (realm,
+                                                                host))
+            passwd = getpass.getpass("Enter password for %s in %s at %s: " %
+                (user, realm, host))
+            return user, passwd
+        except KeyboardInterrupt:
+            # Ctrl-C at the prompt means "no credentials".
+            print
+            return None, None
+
+
+# Utility functions
+
+_localhost = None
+def localhost():
+    """Return the IP address of the magic hostname 'localhost'."""
+    # Resolved once and cached in the module-level _localhost.
+    global _localhost
+    if not _localhost:
+        _localhost = socket.gethostbyname('localhost')
+    return _localhost
+
+_thishost = None
+def thishost():
+    """Return the IP address of the current host."""
+    # Resolved once and cached in the module-level _thishost.
+    global _thishost
+    if not _thishost:
+        _thishost = socket.gethostbyname(socket.gethostname())
+    return _thishost
+
+_ftperrors = None
+def ftperrors():
+    """Return the set of errors raised by the FTP class."""
+    # Imported lazily and cached so this module need not import ftplib
+    # unless FTP is actually used.
+    global _ftperrors
+    if not _ftperrors:
+        import ftplib
+        _ftperrors = ftplib.all_errors
+    return _ftperrors
+
+_noheaders = None
+def noheaders():
+    """Return an empty mimetools.Message object."""
+    # Built once and cached; all callers share the same empty message.
+    global _noheaders
+    if not _noheaders:
+        import mimetools
+        import StringIO
+        _noheaders = mimetools.Message(StringIO.StringIO(), 0)
+        _noheaders.fp.close()   # Recycle file descriptor
+    return _noheaders
+
+
+# Utility classes
+
+class ftpwrapper:
+    """Class used by open_ftp() for cache of open FTP connections."""
+
+    def __init__(self, user, passwd, host, port, dirs):
+        # Remember the connection parameters so init() can reconnect
+        # after the server drops an idle cached connection.
+        self.user = user
+        self.passwd = passwd
+        self.host = host
+        self.port = port
+        self.dirs = dirs
+        self.init()
+
+    def init(self):
+        # (Re)connect, log in, and change into the requested directory path.
+        import ftplib
+        self.busy = 0
+        self.ftp = ftplib.FTP()
+        self.ftp.connect(self.host, self.port)
+        self.ftp.login(self.user, self.passwd)
+        for dir in self.dirs:
+            self.ftp.cwd(dir)
+
+    def retrfile(self, file, type):
+        # Retrieve 'file' in the given transfer type ('A'/'I'), or a
+        # directory listing when no file / type 'D' is requested.
+        import ftplib
+        self.endtransfer()
+        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
+        else: cmd = 'TYPE ' + type; isdir = 0
+        try:
+            self.ftp.voidcmd(cmd)
+        except ftplib.all_errors:
+            # Connection probably timed out -- reconnect and retry once.
+            self.init()
+            self.ftp.voidcmd(cmd)
+        conn = None
+        if file and not isdir:
+            # Use nlst to see if the file exists at all
+            try:
+                self.ftp.nlst(file)
+            except ftplib.error_perm, reason:
+                raise IOError, ('ftp error', reason), sys.exc_info()[2]
+            # Restore the transfer mode!
+            self.ftp.voidcmd(cmd)
+            # Try to retrieve as a file
+            try:
+                cmd = 'RETR ' + file
+                conn = self.ftp.ntransfercmd(cmd)
+            except ftplib.error_perm, reason:
+                # 550 means "not a plain file"; fall through to LIST below.
+                if str(reason)[:3] != '550':
+                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
+        if not conn:
+            # Set transfer mode to ASCII!
+            self.ftp.voidcmd('TYPE A')
+            # Try a directory listing
+            if file: cmd = 'LIST ' + file
+            else: cmd = 'LIST'
+            conn = self.ftp.ntransfercmd(cmd)
+        self.busy = 1
+        # Pass back both a suitably decorated object and a retrieval length
+        return (addclosehook(conn[0].makefile('rb'),
+                             self.endtransfer), conn[1])
+    def endtransfer(self):
+        # Drain the server's end-of-transfer response, best effort.
+        if not self.busy:
+            return
+        self.busy = 0
+        try:
+            self.ftp.voidresp()
+        except ftperrors():
+            pass
+
+    def close(self):
+        self.endtransfer()
+        try:
+            self.ftp.close()
+        except ftperrors():
+            pass
+
+class addbase:
+    """Base class for addinfo and addclosehook."""
+
+    def __init__(self, fp):
+        # Delegate the file-object protocol by binding the wrapped
+        # object's bound methods directly onto this instance.
+        self.fp = fp
+        self.read = self.fp.read
+        self.readline = self.fp.readline
+        if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
+        if hasattr(self.fp, "fileno"): self.fileno = self.fp.fileno
+
+    def __repr__(self):
+        return '<%s at %s whose fp = %s>' % (self.__class__.__name__,
+                                             `id(self)`, `self.fp`)
+
+    def close(self):
+        # Drop the delegated methods so further use fails loudly, then
+        # close and release the wrapped file object.
+        self.read = None
+        self.readline = None
+        self.readlines = None
+        self.fileno = None
+        if self.fp: self.fp.close()
+        self.fp = None
+
+class addclosehook(addbase):
+    """Class to add a close hook to an open file."""
+
+    def __init__(self, fp, closehook, *hookargs):
+        addbase.__init__(self, fp)
+        self.closehook = closehook
+        self.hookargs = hookargs
+
+    def close(self):
+        # Close the wrapped file first, then run the hook exactly once.
+        addbase.close(self)
+        if self.closehook:
+            apply(self.closehook, self.hookargs)
+            self.closehook = None
+            self.hookargs = None
+
+class addinfo(addbase):
+    """class to add an info() method to an open file."""
+
+    def __init__(self, fp, headers):
+        addbase.__init__(self, fp)
+        self.headers = headers
+
+    def info(self):
+        # Return the header object (a mimetools.Message) given at creation.
+        return self.headers
+
+class addinfourl(addbase):
+    """class to add info() and geturl() methods to an open file."""
+
+    def __init__(self, fp, headers, url):
+        addbase.__init__(self, fp)
+        self.headers = headers
+        self.url = url
+
+    def info(self):
+        # Return the header object given at creation.
+        return self.headers
+
+    def geturl(self):
+        # Return the URL this response was actually opened from.
+        return self.url
+
+
+def basejoin(base, url):
+    """Utility to combine a URL with a base URL to form a new URL."""
+    type, path = splittype(url)
+    if type:
+        # if url is complete (i.e., it contains a type), return it
+        return url
+    host, path = splithost(path)
+    type, basepath = splittype(base) # inherit type from base
+    if host:
+        # if url contains host, just inherit type
+        if type: return type + '://' + host + path
+        else:
+            # no type inherited, so url must have started with //
+            # just return it
+            return url
+    host, basepath = splithost(basepath) # inherit host
+    basepath, basetag = splittag(basepath) # remove extraneous cruft
+    basepath, basequery = splitquery(basepath) # idem
+    if path[:1] != '/':
+        # non-absolute path name
+        if path[:1] in ('#', '?'):
+            # path is just a tag or query, attach to basepath
+            i = len(basepath)
+        else:
+            # else replace last component
+            i = basepath.rfind('/')
+        if i < 0:
+            # basepath not absolute
+            if host:
+                # host present, make absolute
+                basepath = '/'
+            else:
+                # else keep non-absolute
+                basepath = ''
+        else:
+            # remove last file component
+            basepath = basepath[:i+1]
+        # Interpret ../ (important because of symlinks)
+        while basepath and path[:3] == '../':
+            path = path[3:]
+            i = basepath[:-1].rfind('/')
+            if i > 0:
+                basepath = basepath[:i+1]
+            elif i == 0:
+                basepath = '/'
+                break
+            else:
+                basepath = ''
+
+        path = basepath + path
+    if host and path and path[0] != '/':
+        path = '/' + path
+    # Reassemble from whichever of type/host survived the merge above.
+    if type and host: return type + '://' + host + path
+    elif type: return type + ':' + path
+    elif host: return '//' + host + path # don't know what this means
+    else: return path
+
+
+# Utilities to parse URLs (most of these return None for missing parts):
+# unwrap('<URL:type://host/path>') --> 'type://host/path'
+# splittype('type:opaquestring') --> 'type', 'opaquestring'
+# splithost('//host[:port]/path') --> 'host[:port]', '/path'
+# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
+# splitpasswd('user:passwd') -> 'user', 'passwd'
+# splitport('host:port') --> 'host', 'port'
+# splitquery('/path?query') --> '/path', 'query'
+# splittag('/path#tag') --> '/path', 'tag'
+# splitattr('/path;attr1=value1;attr2=value2;...') ->
+#   '/path', ['attr1=value1', 'attr2=value2', ...]
+# splitvalue('attr=value') --> 'attr', 'value'
+# splitgophertype('/Xselector') --> 'X', 'selector'
+# unquote('abc%20def') -> 'abc def'
+# quote('abc def') -> 'abc%20def')
+
+def toBytes(url):
+    """toBytes(u"URL") --> 'URL'.
+
+    Encode a unicode URL to an ASCII byte string; other types pass
+    through unchanged.
+    """
+    # Most URL schemes require ASCII. If that changes, the conversion
+    # can be relaxed
+    if type(url) is types.UnicodeType:
+        try:
+            url = url.encode("ASCII")
+        except UnicodeError:
+            # Re-raise with a message that identifies the offending URL.
+            raise UnicodeError("URL " + repr(url) +
+                               " contains non-ASCII characters")
+    return url
+
+def unwrap(url):
+    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
+    url = url.strip()
+    # Strip one pair of angle brackets, then an optional 'URL:' prefix.
+    if url[:1] == '<' and url[-1:] == '>':
+        url = url[1:-1].strip()
+    if url[:4] == 'URL:': url = url[4:].strip()
+    return url
+
+_typeprog = None
+def splittype(url):
+    """splittype('type:opaquestring') --> 'type', 'opaquestring'.
+
+    Returns (None, url) when no scheme is present.
+    """
+    # Compile the pattern lazily on first use and cache it at module level.
+    global _typeprog
+    if _typeprog is None:
+        import re
+        _typeprog = re.compile('^([^/:]+):')
+
+    match = _typeprog.match(url)
+    if match:
+        scheme = match.group(1)
+        # Scheme names are case-insensitive: normalize to lower case.
+        return scheme.lower(), url[len(scheme) + 1:]
+    return None, url
+
+_hostprog = None
+def splithost(url):
+    """splithost('//host[:port]/path') --> 'host[:port]', '/path'.
+
+    Returns (None, url) when url does not start with '//'.
+    """
+    # Lazily compiled, cached at module level.
+    global _hostprog
+    if _hostprog is None:
+        import re
+        _hostprog = re.compile('^//([^/]*)(.*)$')
+
+    match = _hostprog.match(url)
+    if match: return match.group(1, 2)
+    return None, url
+
+_userprog = None
+def splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.
+
+    Returns (None, host) when there is no '@'.  The greedy '(.*)@' splits
+    at the LAST '@', and both parts are passed through unquote().
+    """
+    global _userprog
+    if _userprog is None:
+        import re
+        _userprog = re.compile('^(.*)@(.*)$')
+
+    match = _userprog.match(host)
+    if match: return map(unquote, match.group(1, 2))
+    return None, host
+
+_passwdprog = None
+def splitpasswd(user):
+    """splitpasswd('user:passwd') -> 'user', 'passwd'.
+
+    Splits at the FIRST ':' ('[^:]*'); returns (user, None) if none.
+    """
+    global _passwdprog
+    if _passwdprog is None:
+        import re
+        _passwdprog = re.compile('^([^:]*):(.*)$')
+
+    match = _passwdprog.match(user)
+    if match: return match.group(1, 2)
+    return user, None
+
+# splitport('host:port') --> 'host', 'port'
+_portprog = None
+def splitport(host):
+    """splitport('host:port') --> 'host', 'port'.
+
+    The port must be all digits; otherwise returns (host, None).
+    """
+    global _portprog
+    if _portprog is None:
+        import re
+        _portprog = re.compile('^(.*):([0-9]+)$')
+
+    match = _portprog.match(host)
+    if match: return match.group(1, 2)
+    return host, None
+
+_nportprog = None
+def splitnport(host, defport=-1):
+    """Split host and port, returning numeric port.
+    Return given default port if no ':' found; defaults to -1.
+    Return numerical port if a valid number is found after ':'.
+    Return None if ':' but not a valid number."""
+    global _nportprog
+    if _nportprog is None:
+        import re
+        _nportprog = re.compile('^(.*):(.*)$')
+
+    match = _nportprog.match(host)
+    if match:
+        host, port = match.group(1, 2)
+        try:
+            # An empty port ('host:') is treated like a non-numeric one.
+            if not port: raise ValueError, "no digits"
+            nport = int(port)
+        except ValueError:
+            nport = None
+        return host, nport
+    return host, defport
+
+_queryprog = None
+def splitquery(url):
+    """splitquery('/path?query') --> '/path', 'query'.
+
+    Splits at the LAST '?' (the query part contains no '?');
+    returns (url, None) if there is no '?'.
+    """
+    global _queryprog
+    if _queryprog is None:
+        import re
+        _queryprog = re.compile('^(.*)\?([^?]*)$')
+
+    match = _queryprog.match(url)
+    if match: return match.group(1, 2)
+    return url, None
+
+_tagprog = None
+def splittag(url):
+    """splittag('/path#tag') --> '/path', 'tag'.
+
+    Splits at the LAST '#' (the tag part contains no '#');
+    returns (url, None) if there is no '#'.
+    """
+    global _tagprog
+    if _tagprog is None:
+        import re
+        _tagprog = re.compile('^(.*)#([^#]*)$')
+
+    match = _tagprog.match(url)
+    if match: return match.group(1, 2)
+    return url, None
+
+def splitattr(url):
+    """splitattr('/path;attr1=value1;attr2=value2;...') ->
+        '/path', ['attr1=value1', 'attr2=value2', ...].
+
+    The attribute list is empty when there is no ';'.
+    """
+    words = url.split(';')
+    return words[0], words[1:]
+
+_valueprog = None
+def splitvalue(attr):
+    """splitvalue('attr=value') --> 'attr', 'value'.
+
+    Splits at the FIRST '='; returns (attr, None) if there is no '='.
+    """
+    global _valueprog
+    if _valueprog is None:
+        import re
+        _valueprog = re.compile('^([^=]*)=(.*)$')
+
+    match = _valueprog.match(attr)
+    if match: return match.group(1, 2)
+    return attr, None
+
+def splitgophertype(selector):
+    """splitgophertype('/Xselector') --> 'X', 'selector'.
+
+    Requires a leading '/' followed by at least one character;
+    otherwise returns (None, selector).
+    """
+    if selector[:1] == '/' and selector[1:2]:
+        return selector[1], selector[2:]
+    return None, selector
+
+def unquote(s):
+    """unquote('abc%20def') -> 'abc def'.
+
+    Replaces each %XX escape by the character with that hex value;
+    malformed escapes (too short or non-hex) are left as literal text.
+    """
+    # Bind builtins and the append method to locals for speed in the loop.
+    mychr = chr
+    myatoi = int
+    list = s.split('%')
+    res = [list[0]]
+    myappend = res.append
+    del list[0]
+    # After the split, every item was preceded by a '%' in the input.
+    for item in list:
+        if item[1:2]:
+            try:
+                myappend(mychr(myatoi(item[:2], 16))
+                     + item[2:])
+            except ValueError:
+                # Non-hex digits: keep the '%' and the text literally.
+                myappend('%' + item)
+        else:
+            # Fewer than two characters follow the '%': keep it literal.
+            myappend('%' + item)
+    return "".join(res)
+
+def unquote_plus(s):
+    """unquote_plus('%7e/abc+def') -> '~/abc def'"""
+    if '+' in s:
+        # replace '+' with ' '
+        s = ' '.join(s.split('+'))
+    return unquote(s)
+
+# Characters that quote() never escapes, regardless of the 'safe' argument.
+always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+               'abcdefghijklmnopqrstuvwxyz'
+               '0123456789' '_.-')
+
+# Safe set for quote()'s default safe='/'; _fast_safe is the lazily built
+# character-membership dict used by _fast_quote().
+_fast_safe_test = always_safe + '/'
+_fast_safe = None
+
+def _fast_quote(s):
+    # Fast path of quote() for the default safe set ('/' plus always_safe):
+    # membership is tested against a dict instead of a string scan.
+    global _fast_safe
+    if _fast_safe is None:
+        # Build the safe-character dict once, on first use.
+        _fast_safe = {}
+        for c in _fast_safe_test:
+            _fast_safe[c] = c
+    res = list(s)
+    for i in range(len(res)):
+        c = res[i]
+        if not _fast_safe.has_key(c):
+            res[i] = '%%%02X' % ord(c)
+    return ''.join(res)
+
+def quote(s, safe = '/'):
+    """quote('abc def') -> 'abc%20def'
+
+    Each part of a URL, e.g. the path info, the query, etc., has a
+    different set of reserved characters that must be quoted.
+
+    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
+    the following reserved characters.
+
+    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
+                  "$" | ","
+
+    Each of these characters is reserved in some component of a URL,
+    but not necessarily in all of them.
+
+    By default, the quote function is intended for quoting the path
+    section of a URL.  Thus, it will not encode '/'.  This character
+    is reserved, but in typical usage the quote function is being
+    called on a path where the existing slash characters are used as
+    reserved characters.
+    """
+    safe = always_safe + safe
+    # Common case (safe == '/'): use the dict-based fast path.
+    if _fast_safe_test == safe:
+        return _fast_quote(s)
+    # General case: escape every character not in the combined safe set.
+    res = list(s)
+    for i in range(len(res)):
+        c = res[i]
+        if c not in safe:
+            res[i] = '%%%02X' % ord(c)
+    return ''.join(res)
+
+def quote_plus(s, safe = ''):
+    """Quote the query fragment of a URL; replacing ' ' with '+'"""
+    if ' ' in s:
+        # Quote each space-separated piece, then join with '+'.
+        l = s.split(' ')
+        for i in range(len(l)):
+            l[i] = quote(l[i], safe)
+        return '+'.join(l)
+    else:
+        return quote(s, safe)
+
+def urlencode(query,doseq=0):
+    """Encode a sequence of two-element tuples or dictionary into a URL query string.
+
+    If any values in the query arg are sequences and doseq is true, each
+    sequence element is converted to a separate parameter.
+
+    If the query arg is a sequence of two-element tuples, the order of the
+    parameters in the output will match the order of parameters in the
+    input.
+
+    Raises TypeError for anything that is neither a mapping nor a
+    sequence of 2-tuples.
+    """
+
+    if hasattr(query,"items"):
+        # mapping objects
+        query = query.items()
+    else:
+        # it's a bother at times that strings and string-like objects are
+        # sequences...
+        try:
+            # non-sequence items should not work with len()
+            x = len(query)
+            # non-empty strings will fail this
+            if len(query) and type(query[0]) != types.TupleType:
+                raise TypeError
+            # zero-length sequences of all types will get here and succeed,
+            # but that's a minor nit - since the original implementation
+            # allowed empty dicts that type of behavior probably should be
+            # preserved for consistency
+        except TypeError:
+            # Re-raise with a clearer message but the original traceback.
+            ty,va,tb = sys.exc_info()
+            raise TypeError, "not a valid non-string sequence or mapping object", tb
+
+    l = []
+    if not doseq:
+        # preserve old behavior: every value is stringified whole.
+        for k, v in query:
+            k = quote_plus(str(k))
+            v = quote_plus(str(v))
+            l.append(k + '=' + v)
+    else:
+        for k, v in query:
+            k = quote_plus(str(k))
+            if type(v) == types.StringType:
+                v = quote_plus(v)
+                l.append(k + '=' + v)
+            elif type(v) == types.UnicodeType:
+                # is there a reasonable way to convert to ASCII?
+                # encode generates a string, but "replace" or "ignore"
+                # lose information and "strict" can raise UnicodeError
+                v = quote_plus(v.encode("ASCII","replace"))
+                l.append(k + '=' + v)
+            else:
+                try:
+                    # is this a sufficient test for sequence-ness?
+                    x = len(v)
+                except TypeError:
+                    # not a sequence
+                    v = quote_plus(str(v))
+                    l.append(k + '=' + v)
+                else:
+                    # loop over the sequence: one k=elt pair per element.
+                    for elt in v:
+                        l.append(k + '=' + quote_plus(str(elt)))
+    return '&'.join(l)
+
+# Proxy handling
+def getproxies_environment():
+    """Return a dictionary of scheme -> proxy server URL mappings.
+
+    Scan the environment for variables named <scheme>_proxy;
+    this seems to be the standard convention.  If you need a
+    different way, you can pass a proxies dictionary to the
+    [Fancy]URLopener constructor.
+
+    """
+    proxies = {}
+    for name, value in os.environ.items():
+        # Variable names are matched case-insensitively; empty values
+        # are ignored.
+        name = name.lower()
+        if value and name[-6:] == '_proxy':
+            proxies[name[:-6]] = value
+    return proxies
+
+# Platform-specific definitions of getproxies() and proxy_bypass().
+if os.name == 'mac':
+    def getproxies():
+        """Return a dictionary of scheme -> proxy server URL mappings.
+
+        By convention the mac uses Internet Config to store
+        proxies.  An HTTP proxy, for instance, is stored under
+        the HttpProxy key.
+
+        """
+        try:
+            import ic
+        except ImportError:
+            return {}
+
+        try:
+            config = ic.IC()
+        except ic.error:
+            return {}
+        proxies = {}
+        # HTTP:
+        if config.has_key('UseHTTPProxy') and config['UseHTTPProxy']:
+            try:
+                value = config['HTTPProxyHost']
+            except ic.error:
+                pass
+            else:
+                proxies['http'] = 'http://%s' % value
+        # FTP: XXXX To be done.
+        # Gopher: XXXX To be done.
+        return proxies
+
+    def proxy_bypass(x):
+        # No bypass list on the mac: never bypass the proxy.
+        return 0
+
+elif os.name == 'nt':
+    def getproxies_registry():
+        """Return a dictionary of scheme -> proxy server URL mappings.
+
+        Win32 uses the registry to store proxies.
+
+        """
+        proxies = {}
+        try:
+            import _winreg
+        except ImportError:
+            # Std module, so should be around - but you never know!
+            return proxies
+        try:
+            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
+                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+            proxyEnable = _winreg.QueryValueEx(internetSettings,
+                                               'ProxyEnable')[0]
+            if proxyEnable:
+                # Returned as Unicode but problems if not converted to ASCII
+                proxyServer = str(_winreg.QueryValueEx(internetSettings,
+                                                       'ProxyServer')[0])
+                if '=' in proxyServer:
+                    # Per-protocol settings
+                    for p in proxyServer.split(';'):
+                        protocol, address = p.split('=', 1)
+                        # See if address has a type:// prefix
+                        import re
+                        if not re.match('^([^/:]+)://', address):
+                            address = '%s://%s' % (protocol, address)
+                        proxies[protocol] = address
+                else:
+                    # Use one setting for all protocols
+                    if proxyServer[:5] == 'http:':
+                        proxies['http'] = proxyServer
+                    else:
+                        proxies['http'] = 'http://%s' % proxyServer
+                        proxies['ftp'] = 'ftp://%s' % proxyServer
+            internetSettings.Close()
+        except (WindowsError, ValueError, TypeError):
+            # Either registry key not found etc, or the value in an
+            # unexpected format.
+            # proxies already set up to be empty so nothing to do
+            pass
+        return proxies
+
+    def getproxies():
+        """Return a dictionary of scheme -> proxy server URL mappings.
+
+        Returns settings gathered from the environment, if specified,
+        or the registry.
+
+        """
+        return getproxies_environment() or getproxies_registry()
+
+    def proxy_bypass(host):
+        # Return 1 if host matches the registry's ProxyOverride list.
+        try:
+            import _winreg
+            import re
+            import socket
+        except ImportError:
+            # Std modules, so should be around - but you never know!
+            return 0
+        try:
+            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
+                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+            proxyEnable = _winreg.QueryValueEx(internetSettings,
+                                               'ProxyEnable')[0]
+            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
+                                                     'ProxyOverride')[0])
+            # ^^^^ Returned as Unicode but problems if not converted to ASCII
+        except WindowsError:
+            return 0
+        if not proxyEnable or not proxyOverride:
+            return 0
+        # try to make a host list from name and IP address.
+        host = [host]
+        try:
+            addr = socket.gethostbyname(host[0])
+            # NOTE(review): host is a list here, so 'addr != host' is
+            # always true and addr is always appended -- confirm intent.
+            if addr != host:
+                host.append(addr)
+        except socket.error:
+            pass
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(';')
+        i = 0
+        while i < len(proxyOverride):
+            if proxyOverride[i] == '<local>':
+                proxyOverride[i:i+1] = ['localhost',
+                                        '127.0.0.1',
+                                        socket.gethostname(),
+                                        socket.gethostbyname(
+                                            socket.gethostname())]
+            i += 1
+        # print proxyOverride
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            # Convert the glob-style override entry to a regex.
+            test = test.replace(".", r"\.")     # mask dots
+            test = test.replace("*", r".*")     # change glob sequence
+            test = test.replace("?", r".")      # change glob char
+            for val in host:
+                # print "%s <--> %s" %( test, val )
+                if re.match(test, val, re.I):
+                    return 1
+        return 0
+
+else:
+    # By default use environment variables
+    getproxies = getproxies_environment
+
+    def proxy_bypass(host):
+        # No platform bypass mechanism: never bypass the proxy.
+        return 0
+
+# Test and time quote() and unquote()
+def test1():
+    import time
+    # Round-trip all 256 byte values (repeated 4x) through quote/unquote.
+    s = ''
+    for i in range(256): s = s + chr(i)
+    s = s*4
+    t0 = time.time()
+    qs = quote(s)
+    uqs = unquote(qs)
+    t1 = time.time()
+    if uqs != s:
+        print 'Wrong!'
+    print `s`
+    print `qs`
+    print `uqs`
+    print round(t1 - t0, 3), 'sec'
+
+
+def reporthook(blocknum, blocksize, totalsize):
+    # Report during remote transfers; progress-callback signature used
+    # by urlretrieve() below.
+    print "Block number: %d, Block size: %d, Total size: %d" % (
+        blocknum, blocksize, totalsize)
+
+# Test program
+def test(args=[]):
+    # Retrieve each URL in args (or a built-in sample list), printing
+    # headers and body; temporary files are removed in the finally clause.
+    # NOTE: the mutable default [] is shared across calls, but it is only
+    # read here, never mutated (a fresh list is assigned when empty).
+    if not args:
+        args = [
+            '/etc/passwd',
+            'file:/etc/passwd',
+            'file://localhost/etc/passwd',
+            'ftp://ftp.python.org/pub/python/README',
+##          'gopher://gopher.micro.umn.edu/1/',
+            'http://www.python.org/index.html',
+            ]
+        if hasattr(URLopener, "open_https"):
+            args.append('https://synergy.as.cmu.edu/~geek/')
+    try:
+        for url in args:
+            print '-'*10, url, '-'*10
+            fn, h = urlretrieve(url, None, reporthook)
+            print fn
+            if h:
+                print '======'
+                for k in h.keys(): print k + ':', h[k]
+                print '======'
+            fp = open(fn, 'rb')
+            data = fp.read()
+            del fp
+            if '\r' in data:
+                # Strip carriage returns for display.
+                table = string.maketrans("", "")
+                data = data.translate(table, "\r")
+            print data
+            fn, h = None, None
+        print '-'*40
+    finally:
+        urlcleanup()
+
+def main():
+    # Command-line driver: -h prints usage, -t runs the self-test
+    # (-tt also runs the quote/unquote timing test); otherwise the
+    # contents of each URL argument are printed.
+    import getopt, sys
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "th")
+    except getopt.error, msg:
+        print msg
+        print "Use -h for help"
+        return
+    t = 0
+    for o, a in opts:
+        if o == '-t':
+            t = t + 1
+        if o == '-h':
+            print "Usage: python urllib.py [-t] [url ...]"
+            print "-t runs self-test;",
+            print "otherwise, contents of urls are printed"
+            return
+    if t:
+        if t > 1:
+            test1()
+        test(args)
+    else:
+        if not args:
+            print "Use -h for help"
+        for url in args:
+            print urlopen(url).read(),
+
+# Run test program when run as a script (not when imported as a module)
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/2.2/urllib2.py b/lib-python/2.2/urllib2.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/urllib2.py
@@ -0,0 +1,1144 @@
+"""An extensible library for opening URLs using a variety of protocols
+
+The simplest way to use this module is to call the urlopen function,
+which accepts a string containing a URL or a Request object (described
+below).  It opens the URL and returns the results as file-like
+object; the returned object has some extra methods described below.
+
+The OpenerDirector manages a collection of Handler objects that do
+all the actual work.  Each Handler implements a particular protocol or
+option.  The OpenerDirector is a composite object that invokes the
+Handlers needed to open the requested URL.  For example, the
+HTTPHandler performs HTTP GET and POST requests and deals with
+non-error returns.  The HTTPRedirectHandler automatically deals with
+HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
+deals with digest authentication.
+
+urlopen(url, data=None) -- basic usage is the same as the original
+urllib.  pass the url and optionally data to post to an HTTP URL, and
+get a file-like object back.  One difference is that you can also pass
+a Request instance instead of URL.  Raises a URLError (subclass of
+IOError); for HTTP errors, raises an HTTPError, which can also be
+treated as a valid response.
+
+build_opener -- function that creates a new OpenerDirector instance.
+will install the default handlers.  accepts one or more Handlers as
+arguments, either instances or Handler classes that it will
+instantiate.  if one of the arguments is a subclass of the default
+handler, the argument will be installed instead of the default.
+
+install_opener -- installs a new opener as the default opener.
+
+objects of interest:
+OpenerDirector --
+
+Request -- an object that encapsulates the state of a request.  the
+state can be as simple as the URL.  it can also include extra HTTP
+headers, e.g. a User-Agent.
+
+BaseHandler --
+
+exceptions:
+URLError-- a subclass of IOError, individual protocols have their own
+specific subclass
+
+HTTPError-- also a valid HTTP response, so you can treat an HTTP error
+as an exceptional event or valid response
+
+internals:
+BaseHandler and parent
+_call_chain conventions
+
+Example usage:
+
+import urllib2
+
+# set up authentication info
+authinfo = urllib2.HTTPBasicAuthHandler()
+authinfo.add_password('realm', 'host', 'username', 'password')
+
+proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
+
+# build a new opener that adds authentication and caching FTP handlers
+opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
+
+# install it
+urllib2.install_opener(opener)
+
+f = urllib2.urlopen('http://www.python.org/')
+
+
+"""
+
+# XXX issues:
+# If an authentication error handler that tries to perform
+# authentication for some reason but fails, how should the error be
+# signalled?  The client needs to know the HTTP error code.  But if
+# the handler knows what the problem was, e.g., that it didn't know
+# the hash algo that was requested in the challenge, it would be good to
+# pass that information along to the client, too.
+
+# XXX to do:
+# name!
+# documentation (getting there)
+# complex proxies
+# abstract factory for opener
+# ftp errors aren't handled cleanly
+# gopher can return a socket.error
+# check digest against correct (i.e. non-apache) implementation
+
+import socket
+import httplib
+import inspect
+import re
+import base64
+import types
+import urlparse
+import md5
+import mimetypes
+import mimetools
+import rfc822
+import ftplib
+import sys
+import time
+import os
+import stat
+import gopherlib
+import posixpath
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+try:
+    import sha
+except ImportError:
+    # need 1.5.2 final
+    sha = None
+
+# not sure how many of these need to be gotten rid of
+from urllib import unwrap, unquote, splittype, splithost, \
+     addinfourl, splitport, splitgophertype, splitquery, \
+     splitattr, ftpwrapper, noheaders
+
+# support for proxies via environment variables
+from urllib import getproxies
+
+# support for FileHandler
+from urllib import localhost, url2pathname
+
+__version__ = "2.0a1"
+
+# The module-wide default opener, created lazily by urlopen().
+_opener = None
+def urlopen(url, data=None):
+    """Open url (a string or a Request object) using the default opener.
+
+    The opener is built on first use with build_opener() unless one has
+    been installed via install_opener().  data, if given, is passed on
+    to the opener (POST data for HTTP).
+    """
+    global _opener
+    if _opener is None:
+        _opener = build_opener()
+    return _opener.open(url, data)
+
+def install_opener(opener):
+    """Install opener as the default opener used by urlopen()."""
+    global _opener
+    _opener = opener
+
+# do these error classes make sense?
+# make sure all of the IOError stuff is overridden.  we just want to be
+# subtypes.
+
+class URLError(IOError):
+    """Error raised while opening a URL; 'reason' describes the cause."""
+    # URLError is a sub-type of IOError, but it doesn't share any of
+    # the implementation.  need to override __init__ and __str__
+    def __init__(self, reason):
+        self.reason = reason
+
+    def __str__(self):
+        return '<urlopen error %s>' % self.reason
+
+class HTTPError(URLError, addinfourl):
+    """Raised when HTTP error occurs, but also acts like non-error return"""
+    __super_init = addinfourl.__init__
+
+    def __init__(self, url, code, msg, hdrs, fp):
+        # Initialize the file-like side (fp, headers, url) so the
+        # exception can also be read as a response object.
+        self.__super_init(fp, hdrs, url)
+        self.code = code
+        self.msg = msg
+        self.hdrs = hdrs
+        self.fp = fp
+        # XXX
+        self.filename = url
+
+    def __str__(self):
+        return 'HTTP Error %s: %s' % (self.code, self.msg)
+
+    def __del__(self):
+        # XXX is this safe? what if user catches exception, then
+        # extracts fp and discards exception?
+        if self.fp:
+            self.fp.close()
+
+class GopherError(URLError):
+    # Raised for errors from the gopher protocol handler.
+    pass
+
+
+class Request:
+    """Encapsulates the state of a single request: URL, optional POST
+    data, and extra headers.  The scheme/host/selector parts are parsed
+    lazily by the get_* accessors."""
+
+    def __init__(self, url, data=None, headers={}):
+        # unwrap('<URL:type://host/path>') --> 'type://host/path'
+        self.__original = unwrap(url)
+        self.type = None
+        # self.__r_type is what's left after doing the splittype
+        self.host = None
+        self.port = None
+        self.data = data
+        # Copy the headers dict so the (shared) default is never mutated.
+        self.headers = {}
+        self.headers.update(headers)
+
+    def __getattr__(self, attr):
+        # XXX this is a fallback mechanism to guard against these
+        # methods getting called in a non-standard order.  this may be
+        # too complicated and/or unnecessary.
+        # XXX should the __r_XXX attributes be public?
+        if attr[:12] == '_Request__r_':
+            name = attr[12:]
+            if hasattr(Request, 'get_' + name):
+                # Run the matching get_* method, which sets the attribute
+                # as a side effect, then return it.
+                getattr(self, 'get_' + name)()
+                return getattr(self, attr)
+        raise AttributeError, attr
+
+    def get_method(self):
+        # POST when data is present, GET otherwise.
+        if self.has_data():
+            return "POST"
+        else:
+            return "GET"
+
+    def add_data(self, data):
+        self.data = data
+
+    def has_data(self):
+        return self.data is not None
+
+    def get_data(self):
+        return self.data
+
+    def get_full_url(self):
+        return self.__original
+
+    def get_type(self):
+        # Parse and cache the scheme on first call.
+        if self.type is None:
+            self.type, self.__r_type = splittype(self.__original)
+            if self.type is None:
+                raise ValueError, "unknown url type: %s" % self.__original
+        return self.type
+
+    def get_host(self):
+        # Parse and cache the (unquoted) host on first call.
+        if self.host is None:
+            self.host, self.__r_host = splithost(self.__r_type)
+            if self.host:
+                self.host = unquote(self.host)
+        return self.host
+
+    def get_selector(self):
+        return self.__r_host
+
+    def set_proxy(self, host, type):
+        # Redirect the request through a proxy: the full original URL
+        # becomes the selector sent to the proxy host.
+        self.host, self.type = host, type
+        self.__r_host = self.__original
+
+    def add_header(self, key, val):
+        # useful for something like authentication
+        self.headers[key] = val
+
+class OpenerDirector:
+    """Manages a chain of handler objects and dispatches open/error
+    calls to them by protocol name (derived from handler method names
+    like 'http_open' and 'http_error_404')."""
+
+    def __init__(self):
+        server_version = "Python-urllib/%s" % __version__
+        self.addheaders = [('User-agent', server_version)]
+        # manage the individual handlers
+        self.handlers = []
+        # protocol -> [handlers with <protocol>_open]
+        self.handle_open = {}
+        # protocol -> {kind -> [handlers with <protocol>_error_<kind>]}
+        self.handle_error = {}
+
+    def add_handler(self, handler):
+        # Register handler under every *_open and *_error_* method it has.
+        added = 0
+        for meth in dir(handler):
+            if meth[-5:] == '_open':
+                protocol = meth[:-5]
+                if self.handle_open.has_key(protocol):
+                    self.handle_open[protocol].append(handler)
+                else:
+                    self.handle_open[protocol] = [handler]
+                added = 1
+                continue
+            i = meth.find('_')
+            j = meth[i+1:].find('_') + i + 1
+            # NOTE(review): after the '+ i + 1' adjustment j can never be
+            # -1 (a failed find yields j == i); the meth[i+1:j] == 'error'
+            # test is what actually filters here -- confirm intent.
+            if j != -1 and meth[i+1:j] == 'error':
+                proto = meth[:i]
+                kind = meth[j+1:]
+                try:
+                    # Numeric kinds (HTTP status codes) are stored as ints.
+                    kind = int(kind)
+                except ValueError:
+                    pass
+                dict = self.handle_error.get(proto, {})
+                if dict.has_key(kind):
+                    dict[kind].append(handler)
+                else:
+                    dict[kind] = [handler]
+                self.handle_error[proto] = dict
+                added = 1
+                continue
+        if added:
+            self.handlers.append(handler)
+            handler.add_parent(self)
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        # Break the parent reference cycles created by add_parent().
+        for handler in self.handlers:
+            handler.close()
+        self.handlers = []
+
+    def _call_chain(self, chain, kind, meth_name, *args):
+        # XXX raise an exception if no one else should try to handle
+        # this url.  return None if you can't but someone else could.
+        handlers = chain.get(kind, ())
+        for handler in handlers:
+            func = getattr(handler, meth_name)
+
+            # First non-None result wins; falls through to return None.
+            result = func(*args)
+            if result is not None:
+                return result
+
+    def open(self, fullurl, data=None):
+        # accept a URL or a Request object
+        if isinstance(fullurl, (types.StringType, types.UnicodeType)):
+            req = Request(fullurl, data)
+        else:
+            req = fullurl
+            if data is not None:
+                req.add_data(data)
+        assert isinstance(req, Request) # really only care about interface
+
+        # Try default_open handlers first, then the scheme-specific
+        # <type>_open handlers, then unknown_open as a last resort.
+        result = self._call_chain(self.handle_open, 'default',
+                                  'default_open', req)
+        if result:
+            return result
+
+        type_ = req.get_type()
+        result = self._call_chain(self.handle_open, type_, type_ + \
+                                  '_open', req)
+        if result:
+            return result
+
+        return self._call_chain(self.handle_open, 'unknown',
+                                'unknown_open', req)
+
+    def error(self, proto, *args):
+        if proto in ['http', 'https']:
+            # XXX http[s] protocols are special-cased
+            dict = self.handle_error['http'] # https is not different than http
+            proto = args[2]  # YUCK!
+            meth_name = 'http_error_%d' % proto
+            http_err = 1
+            orig_args = args
+        else:
+            dict = self.handle_error
+            meth_name = proto + '_error'
+            http_err = 0
+        args = (dict, proto, meth_name) + args
+        result = self._call_chain(*args)
+        if result:
+            return result
+
+        if http_err:
+            # No code-specific handler matched: fall back to the
+            # http_error_default handlers.
+            args = (dict, 'default', 'http_error_default') + orig_args
+            return self._call_chain(*args)
+
+# XXX probably also want an abstract factory that knows things like
+# the fact that a ProxyHandler needs to get inserted first.
+# would also know when it makes sense to skip a superclass in favor of
+# a subclass and when it might make sense to include both
+
+def build_opener(*handlers):
+    """Create an opener object from a list of handlers.
+
+    The opener will use several default handlers, including support
+    for HTTP and FTP.  If there is a ProxyHandler, it must be at the
+    front of the list of handlers.  (Yuck.)
+
+    If any of the handlers passed as arguments are subclasses of the
+    default handlers, the default handlers will not be used.
+    """
+
+    opener = OpenerDirector()
+    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
+                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
+                       FTPHandler, FileHandler]
+    if hasattr(httplib, 'HTTPS'):
+        default_classes.append(HTTPSHandler)
+    skip = []
+    for klass in default_classes:
+        for check in handlers:
+            if inspect.isclass(check):
+                if issubclass(check, klass):
+                    skip.append(klass)
+            elif isinstance(check, klass):
+                skip.append(klass)
+    for klass in skip:
+        default_classes.remove(klass)
+
+    for klass in default_classes:
+        opener.add_handler(klass())
+
+    for h in handlers:
+        if inspect.isclass(h):
+            h = h()
+        opener.add_handler(h)
+    return opener
+
+class BaseHandler:
+    def add_parent(self, parent):
+        self.parent = parent
+    def close(self):
+        self.parent = None
+
+class HTTPDefaultErrorHandler(BaseHandler):
+    def http_error_default(self, req, fp, code, msg, hdrs):
+        raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+
+class HTTPRedirectHandler(BaseHandler):
+    def redirect_request(self, req, fp, code, msg, headers, newurl):
+        """Return a Request or None in response to a redirect.
+
+        This is called by the http_error_30x methods when a redirection
+        response is received.  If a redirection should take place, return a new
+        Request to allow http_error_30x to perform the redirect.  Otherwise,
+        raise HTTPError if no-one else should try to handle this url.  Return
+        None if you can't but another Handler might.
+
+        """
+        m = req.get_method()
+        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
+            or code in (301, 302, 303) and m == "POST"):
+            # Strictly (according to RFC 2616), 301 or 302 in response
+            # to a POST MUST NOT cause a redirection without confirmation
+            # from the user (of urllib2, in this case).  In practice,
+            # essentially all clients do redirect in this case, so we
+            # do the same.
+            return Request(newurl, headers=req.headers)
+        else:
+            raise HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+    # Implementation note: To avoid the server sending us into an
+    # infinite loop, the request object needs to track what URLs we
+    # have already seen.  Do this by adding a handler-specific
+    # attribute to the Request object.
+    def http_error_302(self, req, fp, code, msg, headers):
+        if headers.has_key('location'):
+            newurl = headers['location']
+        elif headers.has_key('uri'):
+            newurl = headers['uri']
+        else:
+            return
+        newurl = urlparse.urljoin(req.get_full_url(), newurl)
+
+        # XXX Probably want to forget about the state of the current
+        # request, although that might interact poorly with other
+        # handlers that also use handler-specific request attributes
+        new = self.redirect_request(req, fp, code, msg, headers, newurl)
+        if new is None:
+            return
+
+        # loop detection
+        new.error_302_dict = {}
+        if hasattr(req, 'error_302_dict'):
+            if len(req.error_302_dict)>10 or \
+               req.error_302_dict.has_key(newurl):
+                raise HTTPError(req.get_full_url(), code,
+                                self.inf_msg + msg, headers, fp)
+            new.error_302_dict.update(req.error_302_dict)
+        new.error_302_dict[newurl] = newurl
+
+        # Don't close the fp until we are sure that we won't use it
+        # with HTTPError.
+        fp.read()
+        fp.close()
+
+        return self.parent.open(new)
+
+    http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+    inf_msg = "The HTTP server returned a redirect error that would " \
+              "lead to an infinite loop.\n" \
+              "The last 30x error message was:\n"
+
+class ProxyHandler(BaseHandler):
+    def __init__(self, proxies=None):
+        if proxies is None:
+            proxies = getproxies()
+        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
+        self.proxies = proxies
+        for type, url in proxies.items():
+            setattr(self, '%s_open' % type,
+                    lambda r, proxy=url, type=type, meth=self.proxy_open: \
+                    meth(r, proxy, type))
+
+    def proxy_open(self, req, proxy, type):
+        orig_type = req.get_type()
+        type, r_type = splittype(proxy)
+        host, XXX = splithost(r_type)
+        if '@' in host:
+            user_pass, host = host.split('@', 1)
+            if ':' in user_pass:
+                user, password = user_pass.split(':', 1)
+                user_pass = base64.encodestring('%s:%s' % (unquote(user),
+                                                           unquote(password)))
+                req.add_header('Proxy-Authorization', 'Basic ' + user_pass)
+        host = unquote(host)
+        req.set_proxy(host, type)
+        if orig_type == type:
+            # let other handlers take care of it
+            # XXX this only makes sense if the proxy is before the
+            # other handlers
+            return None
+        else:
+            # need to start over, because the other handlers don't
+            # grok the proxy's URL type
+            return self.parent.open(req)
+
+# feature suggested by Duncan Booth
+# XXX custom is not a good name
+class CustomProxy:
+    # either pass a function to the constructor or override handle
+    def __init__(self, proto, func=None, proxy_addr=None):
+        self.proto = proto
+        self.func = func
+        self.addr = proxy_addr
+
+    def handle(self, req):
+        if self.func and self.func(req):
+            return 1
+
+    def get_proxy(self):
+        return self.addr
+
+class CustomProxyHandler(BaseHandler):
+    def __init__(self, *proxies):
+        self.proxies = {}
+
+    def proxy_open(self, req):
+        proto = req.get_type()
+        try:
+            proxies = self.proxies[proto]
+        except KeyError:
+            return None
+        for p in proxies:
+            if p.handle(req):
+                req.set_proxy(p.get_proxy())
+                return self.parent.open(req)
+        return None
+
+    def do_proxy(self, p, req):
+        return self.parent.open(req)
+
+    def add_proxy(self, cpo):
+        if self.proxies.has_key(cpo.proto):
+            self.proxies[cpo.proto].append(cpo)
+        else:
+            self.proxies[cpo.proto] = [cpo]
+
+class HTTPPasswordMgr:
+    def __init__(self):
+        self.passwd = {}
+
+    def add_password(self, realm, uri, user, passwd):
+        # uri could be a single URI or a sequence
+        if isinstance(uri, (types.StringType, types.UnicodeType)):
+            uri = [uri]
+        uri = tuple(map(self.reduce_uri, uri))
+        if not self.passwd.has_key(realm):
+            self.passwd[realm] = {}
+        self.passwd[realm][uri] = (user, passwd)
+
+    def find_user_password(self, realm, authuri):
+        domains = self.passwd.get(realm, {})
+        authuri = self.reduce_uri(authuri)
+        for uris, authinfo in domains.items():
+            for uri in uris:
+                if self.is_suburi(uri, authuri):
+                    return authinfo
+        return None, None
+
+    def reduce_uri(self, uri):
+        """Accept netloc or URI and extract only the netloc and path"""
+        parts = urlparse.urlparse(uri)
+        if parts[1]:
+            return parts[1], parts[2] or '/'
+        else:
+            return parts[2], '/'
+
+    def is_suburi(self, base, test):
+        """Check if test is below base in a URI tree
+
+        Both args must be URIs in reduced form.
+        """
+        if base == test:
+            return 1
+        if base[0] != test[0]:
+            return 0
+        common = posixpath.commonprefix((base[1], test[1]))
+        if len(common) == len(base[1]):
+            return 1
+        return 0
+
+
+class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
+
+    def find_user_password(self, realm, authuri):
+        user, password = HTTPPasswordMgr.find_user_password(self,realm,authuri)
+        if user is not None:
+            return user, password
+        return HTTPPasswordMgr.find_user_password(self, None, authuri)
+
+
+class AbstractBasicAuthHandler:
+
+    rx = re.compile('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"')
+
+    # XXX there can actually be multiple auth-schemes in a
+    # www-authenticate header.  should probably be a lot more careful
+    # in parsing them to extract multiple alternatives
+
+    def __init__(self, password_mgr=None):
+        if password_mgr is None:
+            password_mgr = HTTPPasswordMgr()
+        self.passwd = password_mgr
+        self.add_password = self.passwd.add_password
+
+    def http_error_auth_reqed(self, authreq, host, req, headers):
+        # XXX could be multiple headers
+        authreq = headers.get(authreq, None)
+        if authreq:
+            mo = AbstractBasicAuthHandler.rx.match(authreq)
+            if mo:
+                scheme, realm = mo.groups()
+                if scheme.lower() == 'basic':
+                    return self.retry_http_basic_auth(host, req, realm)
+
+    def retry_http_basic_auth(self, host, req, realm):
+        user,pw = self.passwd.find_user_password(realm, host)
+        if pw:
+            raw = "%s:%s" % (user, pw)
+            auth = 'Basic %s' % base64.encodestring(raw).strip()
+            if req.headers.get(self.auth_header, None) == auth:
+                return None
+            req.add_header(self.auth_header, auth)
+            return self.parent.open(req)
+        else:
+            return None
+
+class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
+
+    auth_header = 'Authorization'
+
+    def http_error_401(self, req, fp, code, msg, headers):
+        host = urlparse.urlparse(req.get_full_url())[1]
+        return self.http_error_auth_reqed('www-authenticate',
+                                          host, req, headers)
+
+
+class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
+
+    auth_header = 'Proxy-Authorization'
+
+    def http_error_407(self, req, fp, code, msg, headers):
+        host = req.get_host()
+        return self.http_error_auth_reqed('proxy-authenticate',
+                                          host, req, headers)
+
+
+class AbstractDigestAuthHandler:
+
+    def __init__(self, passwd=None):
+        if passwd is None:
+            passwd = HTTPPasswordMgr()
+        self.passwd = passwd
+        self.add_password = self.passwd.add_password
+
+    def http_error_auth_reqed(self, authreq, host, req, headers):
+        authreq = headers.get(self.auth_header, None)
+        if authreq:
+            kind = authreq.split()[0]
+            if kind == 'Digest':
+                return self.retry_http_digest_auth(req, authreq)
+
+    def retry_http_digest_auth(self, req, auth):
+        token, challenge = auth.split(' ', 1)
+        chal = parse_keqv_list(parse_http_list(challenge))
+        auth = self.get_authorization(req, chal)
+        if auth:
+            auth_val = 'Digest %s' % auth
+            if req.headers.get(self.auth_header, None) == auth_val:
+                return None
+            req.add_header(self.auth_header, auth_val)
+            resp = self.parent.open(req)
+            return resp
+
+    def get_authorization(self, req, chal):
+        try:
+            realm = chal['realm']
+            nonce = chal['nonce']
+            algorithm = chal.get('algorithm', 'MD5')
+            # mod_digest doesn't send an opaque, even though it isn't
+            # supposed to be optional
+            opaque = chal.get('opaque', None)
+        except KeyError:
+            return None
+
+        H, KD = self.get_algorithm_impls(algorithm)
+        if H is None:
+            return None
+
+        user, pw = self.passwd.find_user_password(realm,
+                                                  req.get_full_url())
+        if user is None:
+            return None
+
+        # XXX not implemented yet
+        if req.has_data():
+            entdig = self.get_entity_digest(req.get_data(), chal)
+        else:
+            entdig = None
+
+        A1 = "%s:%s:%s" % (user, realm, pw)
+        A2 = "%s:%s" % (req.has_data() and 'POST' or 'GET',
+                        # XXX selector: what about proxies and full urls
+                        req.get_selector())
+        respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
+        # XXX should the partial digests be encoded too?
+
+        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
+               'response="%s"' % (user, realm, nonce, req.get_selector(),
+                                  respdig)
+        if opaque:
+            base = base + ', opaque="%s"' % opaque
+        if entdig:
+            base = base + ', digest="%s"' % entdig
+        if algorithm != 'MD5':
+            base = base + ', algorithm="%s"' % algorithm
+        return base
+
+    def get_algorithm_impls(self, algorithm):
+        # lambdas assume digest modules are imported at the top level
+        if algorithm == 'MD5':
+            H = lambda x, e=encode_digest:e(md5.new(x).digest())
+        elif algorithm == 'SHA':
+            H = lambda x, e=encode_digest:e(sha.new(x).digest())
+        # XXX MD5-sess
+        KD = lambda s, d, H=H: H("%s:%s" % (s, d))
+        return H, KD
+
+    def get_entity_digest(self, data, chal):
+        # XXX not implemented yet
+        return None
+
+
+class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
+    """An authentication protocol defined by RFC 2069
+
+    Digest authentication improves on basic authentication because it
+    does not transmit passwords in the clear.
+    """
+
+    header = 'Authorization'
+
+    def http_error_401(self, req, fp, code, msg, headers):
+        host = urlparse.urlparse(req.get_full_url())[1]
+        self.http_error_auth_reqed('www-authenticate', host, req, headers)
+
+
+class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
+
+    header = 'Proxy-Authorization'
+
+    def http_error_407(self, req, fp, code, msg, headers):
+        host = req.get_host()
+        self.http_error_auth_reqed('proxy-authenticate', host, req, headers)
+
+
+def encode_digest(digest):
+    hexrep = []
+    for c in digest:
+        n = (ord(c) >> 4) & 0xf
+        hexrep.append(hex(n)[-1])
+        n = ord(c) & 0xf
+        hexrep.append(hex(n)[-1])
+    return ''.join(hexrep)
+
+
+class AbstractHTTPHandler(BaseHandler):
+
+    def do_open(self, http_class, req):
+        host = req.get_host()
+        if not host:
+            raise URLError('no host given')
+
+        h = http_class(host) # will parse host:port
+        if req.has_data():
+            data = req.get_data()
+            h.putrequest('POST', req.get_selector())
+            if not req.headers.has_key('Content-type'):
+                h.putheader('Content-type',
+                            'application/x-www-form-urlencoded')
+            if not req.headers.has_key('Content-length'):
+                h.putheader('Content-length', '%d' % len(data))
+        else:
+            h.putrequest('GET', req.get_selector())
+
+        scheme, sel = splittype(req.get_selector())
+        sel_host, sel_path = splithost(sel)
+        h.putheader('Host', sel_host or host)
+        for args in self.parent.addheaders:
+            h.putheader(*args)
+        for k, v in req.headers.items():
+            h.putheader(k, v)
+        # httplib will attempt to connect() here.  be prepared
+        # to convert a socket error to a URLError.
+        try:
+            h.endheaders()
+        except socket.error, err:
+            raise URLError(err)
+        if req.has_data():
+            h.send(data)
+
+        code, msg, hdrs = h.getreply()
+        fp = h.getfile()
+        if code == 200:
+            return addinfourl(fp, hdrs, req.get_full_url())
+        else:
+            return self.parent.error('http', req, fp, code, msg, hdrs)
+
+
+class HTTPHandler(AbstractHTTPHandler):
+
+    def http_open(self, req):
+        return self.do_open(httplib.HTTP, req)
+
+
+if hasattr(httplib, 'HTTPS'):
+    class HTTPSHandler(AbstractHTTPHandler):
+
+        def https_open(self, req):
+            return self.do_open(httplib.HTTPS, req)
+
+
+class UnknownHandler(BaseHandler):
+    def unknown_open(self, req):
+        type = req.get_type()
+        raise URLError('unknown url type: %s' % type)
+
+def parse_keqv_list(l):
+    """Parse list of key=value strings where keys are not duplicated."""
+    parsed = {}
+    for elt in l:
+        k, v = elt.split('=', 1)
+        if v[0] == '"' and v[-1] == '"':
+            v = v[1:-1]
+        parsed[k] = v
+    return parsed
+
+def parse_http_list(s):
+    """Parse lists as described by RFC 2068 Section 2.
+
+    In particular, parse comma-separated lists where the elements of
+    the list may include quoted-strings.  A quoted-string could
+    contain a comma.
+    """
+    # XXX this function could probably use more testing
+
+    list = []
+    end = len(s)
+    i = 0
+    inquote = 0
+    start = 0
+    while i < end:
+        cur = s[i:]
+        c = cur.find(',')
+        q = cur.find('"')
+        if c == -1:
+            list.append(s[start:])
+            break
+        if q == -1:
+            if inquote:
+                raise ValueError, "unbalanced quotes"
+            else:
+                list.append(s[start:i+c])
+                i = i + c + 1
+                continue
+        if inquote:
+            if q < c:
+                list.append(s[start:i+c])
+                i = i + c + 1
+                start = i
+                inquote = 0
+            else:
+                i = i + q
+        else:
+            if c < q:
+                list.append(s[start:i+c])
+                i = i + c + 1
+                start = i
+            else:
+                inquote = 1
+                i = i + q + 1
+    return map(lambda x: x.strip(), list)
+
+class FileHandler(BaseHandler):
+    # Use local file or FTP depending on form of URL
+    def file_open(self, req):
+        url = req.get_selector()
+        if url[:2] == '//' and url[2:3] != '/':
+            req.type = 'ftp'
+            return self.parent.open(req)
+        else:
+            return self.open_local_file(req)
+
+    # names for the localhost
+    names = None
+    def get_names(self):
+        if FileHandler.names is None:
+            FileHandler.names = (socket.gethostbyname('localhost'),
+                                 socket.gethostbyname(socket.gethostname()))
+        return FileHandler.names
+
+    # not entirely sure what the rules are here
+    def open_local_file(self, req):
+        host = req.get_host()
+        file = req.get_selector()
+        localfile = url2pathname(file)
+        stats = os.stat(localfile)
+        size = stats[stat.ST_SIZE]
+        modified = rfc822.formatdate(stats[stat.ST_MTIME])
+        mtype = mimetypes.guess_type(file)[0]
+        stats = os.stat(localfile)
+        headers = mimetools.Message(StringIO(
+            'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
+            (mtype or 'text/plain', size, modified)))
+        if host:
+            host, port = splitport(host)
+        if not host or \
+           (not port and socket.gethostbyname(host) in self.get_names()):
+            return addinfourl(open(localfile, 'rb'),
+                              headers, 'file:'+file)
+        raise URLError('file not on local host')
+
+class FTPHandler(BaseHandler):
+    def ftp_open(self, req):
+        host = req.get_host()
+        if not host:
+            raise IOError, ('ftp error', 'no host given')
+        # XXX handle custom username & password
+        try:
+            host = socket.gethostbyname(host)
+        except socket.error, msg:
+            raise URLError(msg)
+        host, port = splitport(host)
+        if port is None:
+            port = ftplib.FTP_PORT
+        path, attrs = splitattr(req.get_selector())
+        path = unquote(path)
+        dirs = path.split('/')
+        dirs, file = dirs[:-1], dirs[-1]
+        if dirs and not dirs[0]:
+            dirs = dirs[1:]
+        user = passwd = '' # XXX
+        try:
+            fw = self.connect_ftp(user, passwd, host, port, dirs)
+            type = file and 'I' or 'D'
+            for attr in attrs:
+                attr, value = splitattr(attr)
+                if attr.lower() == 'type' and \
+                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
+                    type = value.upper()
+            fp, retrlen = fw.retrfile(file, type)
+            headers = ""
+            mtype = mimetypes.guess_type(req.get_full_url())[0]
+            if mtype:
+                headers += "Content-Type: %s\n" % mtype
+            if retrlen is not None and retrlen >= 0:
+                headers += "Content-Length: %d\n" % retrlen
+            sf = StringIO(headers)
+            headers = mimetools.Message(sf)
+            return addinfourl(fp, headers, req.get_full_url())
+        except ftplib.all_errors, msg:
+            raise IOError, ('ftp error', msg), sys.exc_info()[2]
+
+    def connect_ftp(self, user, passwd, host, port, dirs):
+        fw = ftpwrapper(user, passwd, host, port, dirs)
+##        fw.ftp.set_debuglevel(1)
+        return fw
+
+class CacheFTPHandler(FTPHandler):
+    # XXX would be nice to have pluggable cache strategies
+    # XXX this stuff is definitely not thread safe
+    def __init__(self):
+        self.cache = {}
+        self.timeout = {}
+        self.soonest = 0
+        self.delay = 60
+        self.max_conns = 16
+
+    def setTimeout(self, t):
+        self.delay = t
+
+    def setMaxConns(self, m):
+        self.max_conns = m
+
+    def connect_ftp(self, user, passwd, host, port, dirs):
+        key = user, passwd, host, port
+        if self.cache.has_key(key):
+            self.timeout[key] = time.time() + self.delay
+        else:
+            self.cache[key] = ftpwrapper(user, passwd, host, port, dirs)
+            self.timeout[key] = time.time() + self.delay
+        self.check_cache()
+        return self.cache[key]
+
+    def check_cache(self):
+        # first check for old ones
+        t = time.time()
+        if self.soonest <= t:
+            for k, v in self.timeout.items():
+                if v < t:
+                    self.cache[k].close()
+                    del self.cache[k]
+                    del self.timeout[k]
+        self.soonest = min(self.timeout.values())
+
+        # then check the size
+        if len(self.cache) == self.max_conns:
+            for k, v in self.timeout.items():
+                if v == self.soonest:
+                    del self.cache[k]
+                    del self.timeout[k]
+                    break
+            self.soonest = min(self.timeout.values())
+
+class GopherHandler(BaseHandler):
+    def gopher_open(self, req):
+        host = req.get_host()
+        if not host:
+            raise GopherError('no host given')
+        host = unquote(host)
+        selector = req.get_selector()
+        type, selector = splitgophertype(selector)
+        selector, query = splitquery(selector)
+        selector = unquote(selector)
+        if query:
+            query = unquote(query)
+            fp = gopherlib.send_query(selector, query, host)
+        else:
+            fp = gopherlib.send_selector(selector, host)
+        return addinfourl(fp, noheaders(), req.get_full_url())
+
+#bleck! don't use this yet
+class OpenerFactory:
+
+    default_handlers = [UnknownHandler, HTTPHandler,
+                        HTTPDefaultErrorHandler, HTTPRedirectHandler,
+                        FTPHandler, FileHandler]
+    proxy_handlers = [ProxyHandler]
+    handlers = []
+    replacement_handlers = []
+
+    def add_proxy_handler(self, ph):
+        self.proxy_handlers = self.proxy_handlers + [ph]
+
+    def add_handler(self, h):
+        self.handlers = self.handlers + [h]
+
+    def replace_handler(self, h):
+        pass
+
+    def build_opener(self):
+        opener = OpenerDirector()
+        for ph in self.proxy_handlers:
+            if inspect.isclass(ph):
+                ph = ph()
+            opener.add_handler(ph)
+
+if __name__ == "__main__":
+    # XXX some of the test code depends on machine configurations that
+    # are internal to CNRI.   Need to set up a public server with the
+    # right authentication configuration for test purposes.
+    if socket.gethostname() == 'bitdiddle':
+        localhost = 'bitdiddle.cnri.reston.va.us'
+    elif socket.gethostname() == 'bitdiddle.concentric.net':
+        localhost = 'localhost'
+    else:
+        localhost = None
+    urls = [
+        # Thanks to Fred for finding these!
+        'gopher://gopher.lib.ncsu.edu/11/library/stacks/Alex',
+        'gopher://gopher.vt.edu:10010/10/33',
+
+        'file:/etc/passwd',
+        'file://nonsensename/etc/passwd',
+        'ftp://www.python.org/pub/python/misc/sousa.au',
+        'ftp://www.python.org/pub/tmp/blat',
+        'http://www.espn.com/', # redirect
+        'http://www.python.org/Spanish/Inquistion/',
+        ('http://www.python.org/cgi-bin/faqw.py',
+         'query=pythonistas&querytype=simple&casefold=yes&req=search'),
+        'http://www.python.org/',
+        'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC/research-reports/00README-Legal-Rules-Regs',
+            ]
+
+##    if localhost is not None:
+##        urls = urls + [
+##            'file://%s/etc/passwd' % localhost,
+##            'http://%s/simple/' % localhost,
+##            'http://%s/digest/' % localhost,
+##            'http://%s/not/found.h' % localhost,
+##            ]
+
+##        bauth = HTTPBasicAuthHandler()
+##        bauth.add_password('basic_test_realm', localhost, 'jhylton',
+##                           'password')
+##        dauth = HTTPDigestAuthHandler()
+##        dauth.add_password('digest_test_realm', localhost, 'jhylton',
+##                           'password')
+
+
+    cfh = CacheFTPHandler()
+    cfh.setTimeout(1)
+
+##    # XXX try out some custom proxy objects too!
+##    def at_cnri(req):
+##        host = req.get_host()
+##        print host
+##        if host[-18:] == '.cnri.reston.va.us':
+##            return 1
+##    p = CustomProxy('http', at_cnri, 'proxy.cnri.reston.va.us')
+##    ph = CustomProxyHandler(p)
+
+##    install_opener(build_opener(dauth, bauth, cfh, GopherHandler, ph))
+    install_opener(build_opener(cfh, GopherHandler))
+
+    for url in urls:
+        if isinstance(url, types.TupleType):
+            url, req = url
+        else:
+            req = None
+        print url
+        try:
+            f = urlopen(url, req)
+        except IOError, err:
+            print "IOError:", err
+        except socket.error, err:
+            print "socket.error:", err
+        else:
+            buf = f.read()
+            f.close()
+            print "read %d bytes" % len(buf)
+        print
+        time.sleep(0.1)
diff --git a/lib-python/2.2/urlparse.py b/lib-python/2.2/urlparse.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/urlparse.py
@@ -0,0 +1,276 @@
+"""Parse (absolute and relative) URLs.
+
+See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
+UC Irvine, June 1995.
+"""
+
+__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
+           "urlsplit", "urlunsplit"]
+
+# A classification of schemes ('' means apply by default)
+uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'wais', 'file',
+                 'https', 'shttp',
+                 'prospero', 'rtsp', 'rtspu', '']
+uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', 'wais',
+               'file',
+               'https', 'shttp', 'snews',
+               'prospero', 'rtsp', 'rtspu', '']
+non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais',
+                    'snews', 'sip',
+                    ]
+uses_params = ['ftp', 'hdl', 'prospero', 'http',
+               'https', 'shttp', 'rtsp', 'rtspu', 'sip',
+               '']
+uses_query = ['http', 'wais',
+              'https', 'shttp',
+              'gopher', 'rtsp', 'rtspu', 'sip',
+              '']
+uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais',
+                 'https', 'shttp', 'snews',
+                 'file', 'prospero', '']
+
+# Characters valid in scheme names
+scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                '0123456789'
+                '+-.')
+
+MAX_CACHE_SIZE = 20
+_parse_cache = {}
+
+def clear_cache():
+    """Clear the parse cache."""
+    global _parse_cache
+    _parse_cache = {}
+
+
+def urlparse(url, scheme='', allow_fragments=1):
+    """Parse a URL into 6 components:
+    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
+    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
+    Note that we don't break the components up in smaller bits
+    (e.g. netloc is a single string) and we don't expand % escapes."""
+    tuple = urlsplit(url, scheme, allow_fragments)
+    scheme, netloc, url, query, fragment = tuple
+    # Params (the ';'-suffix of the last path segment) are only split
+    # off for schemes known to use them (see uses_params above).
+    if scheme in uses_params and ';' in url:
+        url, params = _splitparams(url)
+    else:
+        params = ''
+    return scheme, netloc, url, params, query, fragment
+
+def _splitparams(url):
+    # Split "path;params" into (path, params).  When the URL contains a
+    # '/', only a ';' in the *last* path segment starts the params, per
+    # RFC 1808; earlier segments may legitimately contain ';'.
+    if '/'  in url:
+        i = url.find(';', url.rfind('/'))
+        if i < 0:
+            return url, ''
+    else:
+        i = url.find(';')
+    return url[:i], url[i+1:]
+
+def urlsplit(url, scheme='', allow_fragments=1):
+    """Parse a URL into 5 components:
+    <scheme>://<netloc>/<path>?<query>#<fragment>
+    Return a 5-tuple: (scheme, netloc, path, query, fragment).
+    Note that we don't break the components up in smaller bits
+    (e.g. netloc is a single string) and we don't expand % escapes."""
+    # Results are memoized on the full argument triple; the cache is
+    # cleared wholesale once it reaches MAX_CACHE_SIZE.
+    key = url, scheme, allow_fragments
+    cached = _parse_cache.get(key, None)
+    if cached:
+        return cached
+    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
+        clear_cache()
+    netloc = query = fragment = ''
+    i = url.find(':')
+    if i > 0:
+        if url[:i] == 'http': # optimize the common case
+            scheme = url[:i].lower()
+            url = url[i+1:]
+            if url[:2] == '//':
+                # netloc runs from after '//' to the next '/' (or '#',
+                # or end of string if neither occurs).
+                i = url.find('/', 2)
+                if i < 0:
+                    i = url.find('#')
+                    if i < 0:
+                        i = len(url)
+                netloc = url[2:i]
+                url = url[i:]
+            if allow_fragments and '#' in url:
+                url, fragment = url.split('#', 1)
+            if '?' in url:
+                url, query = url.split('?', 1)
+            tuple = scheme, netloc, url, query, fragment
+            _parse_cache[key] = tuple
+            return tuple
+        # Generic case: only treat the prefix as a scheme if every
+        # character before ':' is a valid scheme character.
+        for c in url[:i]:
+            if c not in scheme_chars:
+                break
+        else:
+            scheme, url = url[:i].lower(), url[i+1:]
+    if scheme in uses_netloc:
+        if url[:2] == '//':
+            i = url.find('/', 2)
+            if i < 0:
+                i = len(url)
+            netloc, url = url[2:i], url[i:]
+    if allow_fragments and scheme in uses_fragment and '#' in url:
+        url, fragment = url.split('#', 1)
+    if scheme in uses_query and '?' in url:
+        url, query = url.split('?', 1)
+    tuple = scheme, netloc, url, query, fragment
+    _parse_cache[key] = tuple
+    return tuple
+
+# NOTE: the signature uses Python 2 tuple-parameter unpacking; the
+# single argument is a 6-tuple as returned by urlparse().
+def urlunparse((scheme, netloc, url, params, query, fragment)):
+    """Put a parsed URL back together again.  This may result in a
+    slightly different, but equivalent URL, if the URL that was parsed
+    originally had redundant delimiters, e.g. a ? with an empty query
+    (the draft states that these are equivalent)."""
+    if params:
+        url = "%s;%s" % (url, params)
+    return urlunsplit((scheme, netloc, url, query, fragment))
+
+def urlunsplit((scheme, netloc, url, query, fragment)):
+    # Reassemble a 5-tuple from urlsplit() into a URL string.
+    # The '//' is emitted when there is a netloc, or when the scheme
+    # normally takes one and the path doesn't already start with '//'.
+    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
+        if url and url[:1] != '/': url = '/' + url
+        url = '//' + (netloc or '') + url
+    if scheme:
+        url = scheme + ':' + url
+    if query:
+        url = url + '?' + query
+    if fragment:
+        url = url + '#' + fragment
+    return url
+
+def urljoin(base, url, allow_fragments = 1):
+    """Join a base URL and a possibly relative URL to form an absolute
+    interpretation of the latter."""
+    # Resolution follows RFC 1808: empty inputs short-circuit, then the
+    # relative URL inherits scheme/netloc/path pieces from the base as
+    # needed, and '.'/'..' path segments are collapsed.
+    if not base:
+        return url
+    if not url:
+        return base
+    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
+            urlparse(base, '', allow_fragments)
+    scheme, netloc, path, params, query, fragment = \
+            urlparse(url, bscheme, allow_fragments)
+    if scheme != bscheme or scheme not in uses_relative:
+        return url
+    if scheme in uses_netloc:
+        if netloc:
+            return urlunparse((scheme, netloc, path,
+                               params, query, fragment))
+        netloc = bnetloc
+    if path[:1] == '/':
+        return urlunparse((scheme, netloc, path,
+                           params, query, fragment))
+    if not path:
+        # Empty path: keep the base path; params/query also fall back
+        # to the base's when the relative URL doesn't supply them.
+        if not params:
+            params = bparams
+            if not query:
+                query = bquery
+        return urlunparse((scheme, netloc, bpath,
+                           params, query, fragment))
+    segments = bpath.split('/')[:-1] + path.split('/')
+    # XXX The stuff below is bogus in various ways...
+    if segments[-1] == '.':
+        segments[-1] = ''
+    while '.' in segments:
+        segments.remove('.')
+    # Repeatedly collapse the first "segment/.." pair found; restart the
+    # scan after each removal until no collapsible pair remains.
+    while 1:
+        i = 1
+        n = len(segments) - 1
+        while i < n:
+            if (segments[i] == '..'
+                and segments[i-1] not in ('', '..')):
+                del segments[i-1:i+1]
+                break
+            i = i+1
+        else:
+            break
+    if segments == ['', '..']:
+        segments[-1] = ''
+    elif len(segments) >= 2 and segments[-1] == '..':
+        segments[-2:] = ['']
+    return urlunparse((scheme, netloc, '/'.join(segments),
+                       params, query, fragment))
+
+def urldefrag(url):
+    """Removes any existing fragment from URL.
+
+    Returns a tuple of the defragmented URL and the fragment.  If
+    the URL contained no fragments, the second element is the
+    empty string.
+    """
+    if '#' in url:
+        # Round-trip through urlparse/urlunparse with the fragment
+        # blanked out, so the rest of the URL is normalized consistently.
+        s, n, p, a, q, frag = urlparse(url)
+        defrag = urlunparse((s, n, p, a, q, ''))
+        return defrag, frag
+    else:
+        return url, ''
+
+
+# Test table for test() below: the first line establishes the base URL,
+# then each "relative = <URL:expected>" line is checked with urljoin.
+# The cases mirror the resolution examples in RFC 1808.
+test_input = """
+      http://a/b/c/d
+
+      g:h        = <URL:g:h>
+      http:g     = <URL:http://a/b/c/g>
+      http:      = <URL:http://a/b/c/d>
+      g          = <URL:http://a/b/c/g>
+      ./g        = <URL:http://a/b/c/g>
+      g/         = <URL:http://a/b/c/g/>
+      /g         = <URL:http://a/g>
+      //g        = <URL:http://g>
+      ?y         = <URL:http://a/b/c/d?y>
+      g?y        = <URL:http://a/b/c/g?y>
+      g?y/./x    = <URL:http://a/b/c/g?y/./x>
+      .          = <URL:http://a/b/c/>
+      ./         = <URL:http://a/b/c/>
+      ..         = <URL:http://a/b/>
+      ../        = <URL:http://a/b/>
+      ../g       = <URL:http://a/b/g>
+      ../..      = <URL:http://a/>
+      ../../g    = <URL:http://a/g>
+      ../../../g = <URL:http://a/../g>
+      ./../g     = <URL:http://a/b/g>
+      ./g/.      = <URL:http://a/b/c/g/>
+      /./g       = <URL:http://a/./g>
+      g/./h      = <URL:http://a/b/c/g/h>
+      g/../h     = <URL:http://a/b/c/h>
+      http:g     = <URL:http://a/b/c/g>
+      http:      = <URL:http://a/b/c/d>
+      http:?y         = <URL:http://a/b/c/d?y>
+      http:g?y        = <URL:http://a/b/c/g?y>
+      http:g?y/./x    = <URL:http://a/b/c/g?y/./x>
+"""
+# XXX The result for //g is actually http://g/; is this a problem?
+
+def test():
+    """Self-test driver: read cases from a file named in argv[1], '-'
+    for stdin, or the built-in test_input table, and report any urljoin
+    result that disagrees with the expected value."""
+    import sys
+    base = ''
+    if sys.argv[1:]:
+        fn = sys.argv[1]
+        if fn == '-':
+            fp = sys.stdin
+        else:
+            fp = open(fn)
+    else:
+        import StringIO
+        fp = StringIO.StringIO(test_input)
+    while 1:
+        line = fp.readline()
+        if not line: break
+        words = line.split()
+        if not words:
+            continue
+        url = words[0]
+        parts = urlparse(url)
+        print '%-10s : %s' % (url, parts)
+        abs = urljoin(base, url)
+        # The first URL seen becomes the base for all later joins.
+        if not base:
+            base = abs
+        wrapped = '<URL:%s>' % abs
+        print '%-10s = %s' % (url, wrapped)
+        if len(words) == 3 and words[1] == '=':
+            if wrapped != words[2]:
+                print 'EXPECTED', words[2], '!!!!!!!!!!'
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/user.py b/lib-python/2.2/user.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/user.py
@@ -0,0 +1,45 @@
+"""Hook to allow user-specified customization code to run.
+
+As a policy, Python doesn't run user-specified code on startup of
+Python programs (interactive sessions execute the script specified in
+the PYTHONSTARTUP environment variable if it exists).
+
+However, some programs or sites may find it convenient to allow users
+to have a standard customization file, which gets run when a program
+requests it.  This module implements such a mechanism.  A program
+that wishes to use the mechanism must execute the statement
+
+    import user
+
+The user module looks for a file .pythonrc.py in the user's home
+directory and if it can be opened, execfile()s it in its own global
+namespace.  Errors during this phase are not caught; that's up to the
+program that imports the user module, if it wishes.
+
+The user's .pythonrc.py could conceivably test for sys.version if it
+wishes to do different things depending on the Python version.
+
+"""
+
+import os
+
+home = os.curdir                        # Default
+if os.environ.has_key('HOME'):
+    home = os.environ['HOME']
+elif os.name == 'posix':
+    home = os.path.expanduser("~/")
+elif os.name == 'nt':                   # Contributed by Jeff Bauer
+    if os.environ.has_key('HOMEPATH'):
+        if os.environ.has_key('HOMEDRIVE'):
+            home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
+        else:
+            home = os.environ['HOMEPATH']
+
+pythonrc = os.path.join(home, ".pythonrc.py")
+try:
+    f = open(pythonrc)
+except IOError:
+    pass
+else:
+    f.close()
+    execfile(pythonrc)
diff --git a/lib-python/2.2/uu.py b/lib-python/2.2/uu.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/uu.py
@@ -0,0 +1,195 @@
+#! /usr/bin/env python
+
+# Copyright 1994 by Lance Ellinghouse
+# Cathedral City, California Republic, United States of America.
+#                        All Rights Reserved
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Lance Ellinghouse
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
+# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
+# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# Modified by Jack Jansen, CWI, July 1995:
+# - Use binascii module to do the actual line-by-line conversion
+#   between ascii and binary. This results in a 1000-fold speedup. The C
+#   version is still 5 times faster, though.
+# - Arguments more compliant with python standard
+
+"""Implementation of the UUencode and UUdecode functions.
+
+encode(in_file, out_file [,name, mode])
+decode(in_file [, out_file, mode])
+"""
+
+import binascii
+import os
+import sys
+from types import StringType
+
+__all__ = ["Error", "encode", "decode"]
+
+class Error(Exception):
+    """Base exception raised for uuencode/uudecode failures."""
+    pass
+
+def encode(in_file, out_file, name=None, mode=None):
+    """Uuencode file"""
+    #
+    # If in_file is a pathname open it and change defaults
+    #
+    if in_file == '-':
+        in_file = sys.stdin
+    elif isinstance(in_file, StringType):
+        if name is None:
+            name = os.path.basename(in_file)
+        if mode is None:
+            try:
+                # os.stat may not exist on all platforms; in that case
+                # fall through and use the 0666 default below.
+                mode = os.stat(in_file)[0]
+            except AttributeError:
+                pass
+        in_file = open(in_file, 'rb')
+    #
+    # Open out_file if it is a pathname
+    #
+    if out_file == '-':
+        out_file = sys.stdout
+    elif isinstance(out_file, StringType):
+        out_file = open(out_file, 'w')
+    #
+    # Set defaults for name and mode
+    #
+    if name is None:
+        name = '-'
+    if mode is None:
+        mode = 0666
+    #
+    # Write the data
+    #
+    # Only the permission bits of the mode are recorded in the header.
+    out_file.write('begin %o %s\n' % ((mode&0777),name))
+    # 45 input bytes produce one 60-character uuencoded line, the
+    # maximum allowed by the format.
+    str = in_file.read(45)
+    while len(str) > 0:
+        out_file.write(binascii.b2a_uu(str))
+        str = in_file.read(45)
+    out_file.write(' \nend\n')
+
+
+def decode(in_file, out_file=None, mode=None, quiet=0):
+    """Decode uuencoded file.
+
+    in_file:  input pathname, open file object, or '-' for stdin.
+    out_file: output pathname, open file object, or '-' for stdout;
+              defaults to the name in the 'begin' header (an existing
+              file is never overwritten).
+    mode:     permission bits for a newly created output file; defaults
+              to the mode recorded in the 'begin' header.
+    quiet:    if true, suppress warnings about broken uuencoder input.
+
+    Raises Error if no valid 'begin' line is found, if the default
+    output file already exists, or if input ends before 'end'.
+    """
+    #
+    # Open the input file, if needed.
+    #
+    if in_file == '-':
+        in_file = sys.stdin
+    elif isinstance(in_file, StringType):
+        in_file = open(in_file)
+    #
+    # Read until a begin is encountered or we've exhausted the file
+    #
+    while 1:
+        hdr = in_file.readline()
+        if not hdr:
+            raise Error, 'No valid begin line found in input file'
+        if hdr[:5] != 'begin':
+            continue
+        hdrfields = hdr.split(" ", 2)
+        if len(hdrfields) == 3 and hdrfields[0] == 'begin':
+            try:
+                # The mode field must be valid octal for a real header.
+                int(hdrfields[1], 8)
+                break
+            except ValueError:
+                pass
+    if out_file is None:
+        out_file = hdrfields[2].rstrip()
+        if os.path.exists(out_file):
+            raise Error, 'Cannot overwrite existing file: %s' % out_file
+    if mode is None:
+        mode = int(hdrfields[1], 8)
+    #
+    # Open the output file
+    #
+    opened = False
+    if out_file == '-':
+        out_file = sys.stdout
+    elif isinstance(out_file, StringType):
+        fp = open(out_file, 'wb')
+        try:
+            # Bug fix: this called os.path.chmod, which does not exist,
+            # so the AttributeError guard silently swallowed it and the
+            # recorded mode was never applied.  os.chmod is the correct
+            # call; the guard is kept for platforms lacking chmod.
+            os.chmod(out_file, mode)
+        except AttributeError:
+            pass
+        out_file = fp
+        opened = True
+    #
+    # Main decoding loop
+    #
+    s = in_file.readline()
+    while s and s.strip() != 'end':
+        try:
+            data = binascii.a2b_uu(s)
+        except binascii.Error, v:
+            # Workaround for broken uuencoders by /Fredrik Lundh
+            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
+            data = binascii.a2b_uu(s[:nbytes])
+            if not quiet:
+                sys.stderr.write("Warning: %s\n" % str(v))
+        out_file.write(data)
+        s = in_file.readline()
+    if not s:
+        raise Error, 'Truncated input file'
+    if opened:
+        out_file.close()
+
+def test():
+    """uuencode/uudecode main program"""
+    import getopt
+
+    dopt = 0
+    topt = 0
+    input = sys.stdin
+    output = sys.stdout
+    ok = 1
+    try:
+        optlist, args = getopt.getopt(sys.argv[1:], 'dt')
+    except getopt.error:
+        ok = 0
+    if not ok or len(args) > 2:
+        print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
+        print ' -d: Decode (in stead of encode)'
+        print ' -t: data is text, encoded format unix-compatible text'
+        sys.exit(1)
+
+    for o, a in optlist:
+        if o == '-d': dopt = 1
+        if o == '-t': topt = 1
+
+    if len(args) > 0:
+        input = args[0]
+    if len(args) > 1:
+        output = args[1]
+
+    if dopt:
+        if topt:
+            # -t needs a real (text-mode) file; stdout is rejected.
+            if isinstance(output, StringType):
+                output = open(output, 'w')
+            else:
+                print sys.argv[0], ': cannot do -t to stdout'
+                sys.exit(1)
+        decode(input, output)
+    else:
+        if topt:
+            # Likewise -t encoding needs a named input file, not stdin.
+            if isinstance(input, StringType):
+                input = open(input, 'r')
+            else:
+                print sys.argv[0], ': cannot do -t from stdin'
+                sys.exit(1)
+        encode(input, output)
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/warnings.py b/lib-python/2.2/warnings.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/warnings.py
@@ -0,0 +1,258 @@
+"""Python part of the warnings subsystem."""
+
+import sys, re, types
+
+__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
+           "resetwarnings"]
+
+defaultaction = "default"
+filters = []
+onceregistry = {}
+
+def warn(message, category=None, stacklevel=1):
+    """Issue a warning, or maybe ignore it or raise an exception."""
+    # Check category argument
+    if category is None:
+        category = UserWarning
+    assert issubclass(category, Warning)
+    # Get context information
+    # stacklevel selects which caller frame the warning is attributed
+    # to; if the stack is too shallow, fall back to the sys module.
+    try:
+        caller = sys._getframe(stacklevel)
+    except ValueError:
+        globals = sys.__dict__
+        lineno = 1
+    else:
+        globals = caller.f_globals
+        lineno = caller.f_lineno
+    if globals.has_key('__name__'):
+        module = globals['__name__']
+    else:
+        module = "<string>"
+    filename = globals.get('__file__')
+    if filename:
+        fnl = filename.lower()
+        # Report the .py source file rather than the compiled file.
+        if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
+            filename = filename[:-1]
+    else:
+        if module == "__main__":
+            filename = sys.argv[0]
+        if not filename:
+            filename = module
+    # Per-module registry of already-issued warnings, used for
+    # "default"/"module"/"once" de-duplication in warn_explicit().
+    registry = globals.setdefault("__warningregistry__", {})
+    warn_explicit(message, category, filename, lineno, module, registry)
+
+def warn_explicit(message, category, filename, lineno,
+                  module=None, registry=None):
+    # Low-level warning machinery: match the warning against `filters`
+    # and carry out the selected action.  `registry` de-duplicates
+    # repeated warnings from the same location.
+    if module is None:
+        module = filename
+        if module[-3:].lower() == ".py":
+            module = module[:-3] # XXX What about leading pathname?
+    if registry is None:
+        registry = {}
+    key = (message, category, lineno)
+    # Quick test for common case
+    if registry.get(key):
+        return
+    # Search the filters
+    for item in filters:
+        action, msg, cat, mod, ln = item
+        # A filter matches on message regex, category subclass, module
+        # regex, and line number (0 means any line).
+        if (msg.match(message) and
+            issubclass(category, cat) and
+            mod.match(module) and
+            (ln == 0 or lineno == ln)):
+            break
+    else:
+        action = defaultaction
+    # Early exit actions
+    if action == "ignore":
+        registry[key] = 1
+        return
+    if action == "error":
+        raise category(message)
+    # Other actions
+    if action == "once":
+        registry[key] = 1
+        oncekey = (message, category)
+        if onceregistry.get(oncekey):
+            return
+        onceregistry[oncekey] = 1
+    elif action == "always":
+        pass
+    elif action == "module":
+        # Show once per module: lineno 0 acts as the per-module key.
+        registry[key] = 1
+        altkey = (message, category, 0)
+        if registry.get(altkey):
+            return
+        registry[altkey] = 1
+    elif action == "default":
+        registry[key] = 1
+    else:
+        # Unrecognized actions are errors
+        raise RuntimeError(
+              "Unrecognized action (%s) in warnings.filters:\n %s" %
+              (`action`, str(item)))
+    # Print message and context
+    showwarning(message, category, filename, lineno)
+
+def showwarning(message, category, filename, lineno, file=None):
+    """Hook to write a warning to a file; replace if you like."""
+    if file is None:
+        file = sys.stderr
+    try:
+        file.write(formatwarning(message, category, filename, lineno))
+    except IOError:
+        pass # the file (probably stderr) is invalid - this warning gets lost.
+
+def formatwarning(message, category, filename, lineno):
+    """Function to format a warning the standard way."""
+    import linecache
+    s =  "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
+    # Append the offending source line when linecache can find it.
+    line = linecache.getline(filename, lineno).strip()
+    if line:
+        s = s + "  " + line + "\n"
+    return s
+
+def filterwarnings(action, message="", category=Warning, module="", lineno=0,
+                   append=0):
+    """Insert an entry into the list of warnings filters (at the front).
+
+    Use assertions to check that all arguments have the right type."""
+    assert action in ("error", "ignore", "always", "default", "module",
+                      "once"), "invalid action: %s" % `action`
+    assert isinstance(message, types.StringType), "message must be a string"
+    assert isinstance(category, types.ClassType), "category must be a class"
+    assert issubclass(category, Warning), "category must be a Warning subclass"
+    assert type(module) is types.StringType, "module must be a string"
+    assert type(lineno) is types.IntType and lineno >= 0, \
+           "lineno must be an int >= 0"
+    # Message matching is case-insensitive; module matching is not.
+    item = (action, re.compile(message, re.I), category,
+            re.compile(module), lineno)
+    if append:
+        filters.append(item)
+    else:
+        filters.insert(0, item)
+
+def resetwarnings():
+    """Clear the list of warning filters, so that no filters are active."""
+    # In-place slice assignment so existing references to `filters`
+    # see the reset too.
+    filters[:] = []
+
+class _OptionError(Exception):
+    """Exception used by option processing helpers."""
+    pass
+
+# Helper to process -W options passed via sys.warnoptions
+def _processoptions(args):
+    # Each arg is one -W option string; a malformed option is reported
+    # on stderr and skipped rather than aborting startup.
+    for arg in args:
+        try:
+            _setoption(arg)
+        except _OptionError, msg:
+            print >>sys.stderr, "Invalid -W option ignored:", msg
+
+# Helper for _processoptions()
+def _setoption(arg):
+    # Parse "action:message:category:module:lineno" (all fields
+    # optional) and install the corresponding warnings filter.
+    parts = arg.split(':')
+    if len(parts) > 5:
+        raise _OptionError("too many fields (max 5): %s" % `arg`)
+    while len(parts) < 5:
+        parts.append('')
+    action, message, category, module, lineno = [s.strip()
+                                                 for s in parts]
+    action = _getaction(action)
+    # message/module are literal strings on the command line, so they
+    # are regex-escaped; module additionally matches to end-of-string.
+    message = re.escape(message)
+    category = _getcategory(category)
+    module = re.escape(module)
+    if module:
+        module = module + '$'
+    if lineno:
+        try:
+            lineno = int(lineno)
+            if lineno < 0:
+                raise ValueError
+        except (ValueError, OverflowError):
+            raise _OptionError("invalid lineno %s" % `lineno`)
+    else:
+        lineno = 0
+    filterwarnings(action, message, category, module, lineno)
+
+# Helper for _setoption()
+def _getaction(action):
+    # Map a (possibly abbreviated) -W action name to its full form;
+    # any unambiguous prefix of a valid action is accepted.
+    if not action:
+        return "default"
+    if action == "all": return "always" # Alias
+    for a in ['default', 'always', 'ignore', 'module', 'once', 'error']:
+        if a.startswith(action):
+            return a
+    raise _OptionError("invalid action: %s" % `action`)
+
+# Helper for _setoption()
+def _getcategory(category):
+    # Resolve a -W category field to a Warning subclass: either a bare
+    # builtin name or a dotted "module.Class" path.
+    if not category:
+        return Warning
+    if re.match("^[a-zA-Z0-9_]+$", category):
+        try:
+            # NOTE(review): eval of the option string; the regex above
+            # limits it to a bare identifier and -W options come from
+            # the process command line, but an attribute lookup would
+            # be safer than eval here.
+            cat = eval(category)
+        except NameError:
+            raise _OptionError("unknown warning category: %s" % `category`)
+    else:
+        i = category.rfind(".")
+        module = category[:i]
+        klass = category[i+1:]
+        try:
+            m = __import__(module, None, None, [klass])
+        except ImportError:
+            raise _OptionError("invalid module name: %s" % `module`)
+        try:
+            cat = getattr(m, klass)
+        except AttributeError:
+            raise _OptionError("unknown warning category: %s" % `category`)
+    if (not isinstance(cat, types.ClassType) or
+        not issubclass(cat, Warning)):
+        raise _OptionError("invalid warning category: %s" % `category`)
+    return cat
+
+# Self-test
+def _test():
+    # Exercise -W option parsing, filter installation, de-duplication,
+    # and the "error" action; output is inspected by eye.
+    import getopt
+    testoptions = []
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "W:")
+    except getopt.error, msg:
+        print >>sys.stderr, msg
+        return
+    for o, a in opts:
+        testoptions.append(a)
+    try:
+        _processoptions(testoptions)
+    except _OptionError, msg:
+        print >>sys.stderr, msg
+        return
+    for item in filters: print item
+    hello = "hello world"
+    # Repeated identical warnings: only the first should be shown
+    # under the default action.
+    warn(hello); warn(hello); warn(hello); warn(hello)
+    warn(hello, UserWarning)
+    warn(hello, DeprecationWarning)
+    for i in range(3):
+        warn(hello)
+    filterwarnings("error", "", Warning, "", 0)
+    try:
+        warn(hello)
+    except Exception, msg:
+        print "Caught", msg.__class__.__name__ + ":", msg
+    else:
+        print "No exception"
+    resetwarnings()
+    try:
+        # Invalid action must be rejected by filterwarnings' asserts.
+        filterwarnings("booh", "", Warning, "", 0)
+    except Exception, msg:
+        print "Caught", msg.__class__.__name__ + ":", msg
+    else:
+        print "No exception"
+
+# Module initialization
+if __name__ == "__main__":
+    # Running as a script: make this module *be* the warnings module so
+    # the self-test exercises the real registration machinery.
+    import __main__
+    sys.modules['warnings'] = __main__
+    _test()
+else:
+    # Normal import: apply any -W options and, matching CPython 2.2's
+    # default, ignore OverflowWarning (appended so -W options win).
+    _processoptions(sys.warnoptions)
+    filterwarnings("ignore", category=OverflowWarning, append=1)
diff --git a/lib-python/2.2/wave.py b/lib-python/2.2/wave.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/wave.py
@@ -0,0 +1,489 @@
+"""Stuff to parse WAVE files.
+
+Usage.
+
+Reading WAVE files:
+      f = wave.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+When the setpos() and rewind() methods are not used, the seek()
+method is not necessary.
+
+This returns an instance of a class with the following public methods:
+      getnchannels()  -- returns number of audio channels (1 for
+                         mono, 2 for stereo)
+      getsampwidth()  -- returns sample width in bytes
+      getframerate()  -- returns sampling frequency
+      getnframes()    -- returns number of audio frames
+      getcomptype()   -- returns compression type ('NONE' for linear samples)
+      getcompname()   -- returns human-readable version of
+                         compression type ('not compressed' linear samples)
+      getparams()     -- returns a tuple consisting of all of the
+                         above in the above order
+      getmarkers()    -- returns None (for compatibility with the
+                         aifc module)
+      getmark(id)     -- raises an error since the mark does not
+                         exist (for compatibility with the aifc module)
+      readframes(n)   -- returns at most n frames of audio
+      rewind()        -- rewind to the beginning of the audio stream
+      setpos(pos)     -- seek to the specified position
+      tell()          -- return the current position
+      close()         -- close the instance (make it unusable)
+The position returned by tell() and the position given to setpos()
+are compatible and have nothing to do with the actual position in the
+file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing WAVE files:
+      f = wave.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+      setnchannels(n) -- set the number of channels
+      setsampwidth(n) -- set the sample width
+      setframerate(n) -- set the frame rate
+      setnframes(n)   -- set the number of frames
+      setcomptype(type, name)
+                      -- set the compression type and the
+                         human-readable compression type
+      setparams(tuple)
+                      -- set all parameters at once
+      tell()          -- return current position in output file
+      writeframesraw(data)
+                      -- write audio frames without patching up the
+                         file header
+      writeframes(data)
+                      -- write audio frames and patch up the file header
+      close()         -- patch up the file header and close the
+                         output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+The close() method is called automatically when the class instance
+is destroyed.
+"""
+
+import __builtin__
+
+__all__ = ["open", "openfp", "Error"]
+
+class Error(Exception):
+    """Raised for all wave-specific error conditions."""
+    pass
+
+WAVE_FORMAT_PCM = 0x0001
+
+# array typecodes indexed by sample width in bytes: 1 -> 'b', 2 -> 'h',
+# 4 -> 'l'; widths 0 and 3 have no array representation.
+_array_fmts = None, 'b', 'h', None, 'l'
+
+# Determine endian-ness of this machine by packing a short and checking
+# which byte comes out first.  WAVE data is little-endian on disk, so
+# big-endian hosts must byte-swap multi-byte samples.
+import struct
+if struct.pack("h", 1) == "\000\001":
+    big_endian = 1
+else:
+    big_endian = 0
+
+class Wave_read:
+    """Variables used in this class:
+
+    These variables are available to the user through appropriate
+    methods of this class:
+    _file -- the open file with methods read(), close(), and seek()
+              set through the __init__() method
+    _nchannels -- the number of audio channels
+              available through the getnchannels() method
+    _nframes -- the number of audio frames
+              available through the getnframes() method
+    _sampwidth -- the number of bytes per audio sample
+              available through the getsampwidth() method
+    _framerate -- the sampling frequency
+              available through the getframerate() method
+    _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+              available through the getcomptype() method
+    _compname -- the human-readable AIFF-C compression type
+              available through the getcomptype() method
+    _soundpos -- the position in the audio stream
+              available through the tell() method, set through the
+              setpos() method
+
+    These variables are used internally only:
+    _fmt_chunk_read -- 1 iff the FMT chunk has been read
+    _data_seek_needed -- 1 iff positioned correctly in audio
+              file for readframes()
+    _data_chunk -- instantiation of a chunk class for the DATA chunk
+    _framesize -- size of one frame in the file
+    """
+
+    def initfp(self, file):
+        # Parse the RIFF/WAVE container: verify the RIFF id and the WAVE
+        # form type, then scan chunks until both the 'fmt ' chunk (format
+        # parameters) and the 'data' chunk (audio samples) are found.
+        self._convert = None
+        self._soundpos = 0
+        self._file = Chunk(file, bigendian = 0)
+        if self._file.getname() != 'RIFF':
+            raise Error, 'file does not start with RIFF id'
+        if self._file.read(4) != 'WAVE':
+            raise Error, 'not a WAVE file'
+        self._fmt_chunk_read = 0
+        self._data_chunk = None
+        while 1:
+            self._data_seek_needed = 1
+            try:
+                chunk = Chunk(self._file, bigendian = 0)
+            except EOFError:
+                break
+            chunkname = chunk.getname()
+            if chunkname == 'fmt ':
+                self._read_fmt_chunk(chunk)
+                self._fmt_chunk_read = 1
+            elif chunkname == 'data':
+                if not self._fmt_chunk_read:
+                    # _framesize is set by _read_fmt_chunk, so 'fmt '
+                    # must come first.
+                    raise Error, 'data chunk before fmt chunk'
+                self._data_chunk = chunk
+                self._nframes = chunk.chunksize // self._framesize
+                self._data_seek_needed = 0
+                break
+            chunk.skip()
+        if not self._fmt_chunk_read or not self._data_chunk:
+            raise Error, 'fmt chunk and/or data chunk missing'
+
+    def __init__(self, f):
+        self._i_opened_the_file = None
+        if type(f) == type(''):
+            # A filename was passed: open it ourselves and remember to
+            # close it in close()/__del__.
+            f = __builtin__.open(f, 'rb')
+            self._i_opened_the_file = f
+        # else, assume it is an open file object already
+        self.initfp(f)
+
+    def __del__(self):
+        self.close()
+    #
+    # User visible methods.
+    #
+    def getfp(self):
+        return self._file
+
+    def rewind(self):
+        # Positioning is lazy: readframes() does the actual seek.
+        self._data_seek_needed = 1
+        self._soundpos = 0
+
+    def close(self):
+        # Only close the file if we opened it; make the reader unusable.
+        if self._i_opened_the_file:
+            self._i_opened_the_file.close()
+            self._i_opened_the_file = None
+        self._file = None
+
+    def tell(self):
+        return self._soundpos
+
+    def getnchannels(self):
+        return self._nchannels
+
+    def getnframes(self):
+        return self._nframes
+
+    def getsampwidth(self):
+        return self._sampwidth
+
+    def getframerate(self):
+        return self._framerate
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+    def getparams(self):
+        return self.getnchannels(), self.getsampwidth(), \
+               self.getframerate(), self.getnframes(), \
+               self.getcomptype(), self.getcompname()
+
+    def getmarkers(self):
+        # For aifc-module compatibility only; WAVE has no marks.
+        return None
+
+    def getmark(self, id):
+        raise Error, 'no marks'
+
+    def setpos(self, pos):
+        # pos is a frame index, not a byte offset.
+        if pos < 0 or pos > self._nframes:
+            raise Error, 'position not in range'
+        self._soundpos = pos
+        self._data_seek_needed = 1
+
+    def readframes(self, nframes):
+        # Returns at most nframes frames as a byte string; fewer (or '')
+        # are returned at end of data.
+        if self._data_seek_needed:
+            self._data_chunk.seek(0, 0)
+            pos = self._soundpos * self._framesize
+            if pos:
+                self._data_chunk.seek(pos, 0)
+            self._data_seek_needed = 0
+        if nframes == 0:
+            return ''
+        if self._sampwidth > 1 and big_endian:
+            # unfortunately the fromfile() method does not take
+            # something that only looks like a file object, so
+            # we have to reach into the innards of the chunk object
+            import array
+            chunk = self._data_chunk
+            data = array.array(_array_fmts[self._sampwidth])
+            nitems = nframes * self._nchannels
+            if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
+                # Clamp to what is left in the data chunk.
+                nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
+            data.fromfile(chunk.file.file, nitems)
+            # "tell" data chunk how much was read
+            chunk.size_read = chunk.size_read + nitems * self._sampwidth
+            # do the same for the outermost chunk
+            chunk = chunk.file
+            chunk.size_read = chunk.size_read + nitems * self._sampwidth
+            # Samples on disk are little-endian; swap for this host.
+            data.byteswap()
+            data = data.tostring()
+        else:
+            data = self._data_chunk.read(nframes * self._framesize)
+        if self._convert and data:
+            data = self._convert(data)
+        self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
+        return data
+
+    #
+    # Internal methods.
+    #
+
+    def _read_fmt_chunk(self, chunk):
+        # '<hhllh': format tag, channels, sample rate, avg bytes/sec,
+        # block align -- all little-endian.
+        wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
+        if wFormatTag == WAVE_FORMAT_PCM:
+            # Bits per sample, rounded up to whole bytes.
+            sampwidth = struct.unpack('<h', chunk.read(2))[0]
+            self._sampwidth = (sampwidth + 7) // 8
+        else:
+            # Only uncompressed PCM is supported.
+            raise Error, 'unknown format: ' + `wFormatTag`
+        self._framesize = self._nchannels * self._sampwidth
+        self._comptype = 'NONE'
+        self._compname = 'not compressed'
+
+class Wave_write:
+    """Variables used in this class:
+
+    These variables are user settable through appropriate methods
+    of this class:
+    _file -- the open file with methods write(), close(), tell(), seek()
+              set through the __init__() method
+    _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+              set through the setcomptype() or setparams() method
+    _compname -- the human-readable AIFF-C compression type
+              set through the setcomptype() or setparams() method
+    _nchannels -- the number of audio channels
+              set through the setnchannels() or setparams() method
+    _sampwidth -- the number of bytes per audio sample
+              set through the setsampwidth() or setparams() method
+    _framerate -- the sampling frequency
+              set through the setframerate() or setparams() method
+    _nframes -- the number of audio frames written to the header
+              set through the setnframes() or setparams() method
+
+    These variables are used internally only:
+    _datalength -- the size of the audio samples written to the header
+    _nframeswritten -- the number of frames actually written
+    _datawritten -- the size of the audio samples actually written
+    """
+
+    def __init__(self, f):
+        self._i_opened_the_file = None
+        if type(f) == type(''):
+            # A filename was passed: open it ourselves and remember to
+            # close it in close()/__del__.
+            f = __builtin__.open(f, 'wb')
+            self._i_opened_the_file = f
+        self.initfp(f)
+
+    def initfp(self, file):
+        # All parameters start unset (0); each must be supplied before
+        # the first frames are written (see _ensure_header_written).
+        self._file = file
+        self._convert = None
+        self._nchannels = 0
+        self._sampwidth = 0
+        self._framerate = 0
+        self._nframes = 0
+        self._nframeswritten = 0
+        self._datawritten = 0
+        self._datalength = 0
+
+    def __del__(self):
+        self.close()
+
+    #
+    # User visible methods.
+    #
+    def setnchannels(self, nchannels):
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if nchannels < 1:
+            raise Error, 'bad # of channels'
+        self._nchannels = nchannels
+
+    def getnchannels(self):
+        if not self._nchannels:
+            raise Error, 'number of channels not set'
+        return self._nchannels
+
+    def setsampwidth(self, sampwidth):
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if sampwidth < 1 or sampwidth > 4:
+            raise Error, 'bad sample width'
+        self._sampwidth = sampwidth
+
+    def getsampwidth(self):
+        if not self._sampwidth:
+            raise Error, 'sample width not set'
+        return self._sampwidth
+
+    def setframerate(self, framerate):
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if framerate <= 0:
+            raise Error, 'bad frame rate'
+        self._framerate = framerate
+
+    def getframerate(self):
+        if not self._framerate:
+            raise Error, 'frame rate not set'
+        return self._framerate
+
+    def setnframes(self, nframes):
+        # Optional; if it turns out wrong, _patchheader fixes the sizes.
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._nframes = nframes
+
+    def getnframes(self):
+        return self._nframeswritten
+
+    def setcomptype(self, comptype, compname):
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if comptype not in ('NONE',):
+            raise Error, 'unsupported compression type'
+        self._comptype = comptype
+        self._compname = compname
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+    def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
+        # Convenience: set all six parameters in one call.
+        if self._datawritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self.setnchannels(nchannels)
+        self.setsampwidth(sampwidth)
+        self.setframerate(framerate)
+        self.setnframes(nframes)
+        self.setcomptype(comptype, compname)
+
+    def getparams(self):
+        if not self._nchannels or not self._sampwidth or not self._framerate:
+            raise Error, 'not all parameters set'
+        return self._nchannels, self._sampwidth, self._framerate, \
+              self._nframes, self._comptype, self._compname
+
+    def setmark(self, id, pos, name):
+        raise Error, 'setmark() not supported'
+
+    def getmark(self, id):
+        raise Error, 'no marks'
+
+    def getmarkers(self):
+        # For aifc-module compatibility only; WAVE has no marks.
+        return None
+
+    def tell(self):
+        return self._nframeswritten
+
+    def writeframesraw(self, data):
+        # Write frames without patching the header sizes afterwards.
+        self._ensure_header_written(len(data))
+        nframes = len(data) // (self._sampwidth * self._nchannels)
+        if self._convert:
+            data = self._convert(data)
+        if self._sampwidth > 1 and big_endian:
+            # WAVE samples are little-endian on disk; swap on this host.
+            import array
+            data = array.array(_array_fmts[self._sampwidth], data)
+            data.byteswap()
+            data.tofile(self._file)
+            # data is now an array of samples, so scale by sample width
+            # to count bytes.
+            self._datawritten = self._datawritten + len(data) * self._sampwidth
+        else:
+            self._file.write(data)
+            self._datawritten = self._datawritten + len(data)
+        self._nframeswritten = self._nframeswritten + nframes
+
+    def writeframes(self, data):
+        self.writeframesraw(data)
+        if self._datalength != self._datawritten:
+            self._patchheader()
+
+    def close(self):
+        # Flush the header/sizes, then release the file.  Closing twice
+        # is harmless because _file is cleared on the first pass.
+        if self._file:
+            self._ensure_header_written(0)
+            if self._datalength != self._datawritten:
+                self._patchheader()
+            self._file.flush()
+            self._file = None
+        if self._i_opened_the_file:
+            self._i_opened_the_file.close()
+            self._i_opened_the_file = None
+
+    #
+    # Internal methods.
+    #
+
+    def _ensure_header_written(self, datasize):
+        # The header is written lazily, just before the first data bytes;
+        # by then all mandatory parameters must have been set.
+        if not self._datawritten:
+            if not self._nchannels:
+                raise Error, '# channels not specified'
+            if not self._sampwidth:
+                raise Error, 'sample width not specified'
+            if not self._framerate:
+                raise Error, 'sampling rate not specified'
+            self._write_header(datasize)
+
+    def _write_header(self, initlength):
+        # Emit the RIFF header, the 16-byte PCM 'fmt ' chunk and the
+        # 'data' chunk header.  The offsets of the two size fields are
+        # remembered so _patchheader() can rewrite them later.
+        self._file.write('RIFF')
+        if not self._nframes:
+            # Estimate the frame count from the first write's size.
+            self._nframes = initlength / (self._nchannels * self._sampwidth)
+        self._datalength = self._nframes * self._nchannels * self._sampwidth
+        self._form_length_pos = self._file.tell()
+        self._file.write(struct.pack('<l4s4slhhllhh4s',
+            36 + self._datalength, 'WAVE', 'fmt ', 16,
+            WAVE_FORMAT_PCM, self._nchannels, self._framerate,
+            self._nchannels * self._framerate * self._sampwidth,
+            self._nchannels * self._sampwidth,
+            self._sampwidth * 8, 'data'))
+        self._data_length_pos = self._file.tell()
+        self._file.write(struct.pack('<l', self._datalength))
+
+    def _patchheader(self):
+        # Seek back and rewrite the RIFF size and data-chunk size with
+        # the byte count actually written, then restore the position.
+        if self._datawritten == self._datalength:
+            return
+        curpos = self._file.tell()
+        self._file.seek(self._form_length_pos, 0)
+        self._file.write(struct.pack('<l', 36 + self._datawritten))
+        self._file.seek(self._data_length_pos, 0)
+        self._file.write(struct.pack('<l', self._datawritten))
+        self._file.seek(curpos, 0)
+        self._datalength = self._datawritten
+
+def open(f, mode=None):
+    """Open a WAVE file for reading or writing.
+
+    f may be a filename or an open file object.  mode defaults to the
+    file object's own mode attribute, or 'rb' when it has none.
+    Returns a Wave_read for 'r'/'rb', a Wave_write for 'w'/'wb'.
+    """
+    if mode is None:
+        if hasattr(f, 'mode'):
+            mode = f.mode
+        else:
+            mode = 'rb'
+    if mode in ('r', 'rb'):
+        return Wave_read(f)
+    elif mode in ('w', 'wb'):
+        return Wave_write(f)
+    else:
+        raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
+
+openfp = open # B/W compatibility
diff --git a/lib-python/2.2/weakref.py b/lib-python/2.2/weakref.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/weakref.py
@@ -0,0 +1,280 @@
+"""Weak reference support for Python.
+
+This module is an implementation of PEP 205:
+
+http://python.sourceforge.net/peps/pep-0205.html
+"""
+
+# Naming convention: Variables named "wr" are weak reference objects;
+# they are called this instead of "ref" to avoid name collisions with
+# the module-global ref() function imported from _weakref.
+
+import UserDict
+
+from _weakref import \
+     getweakrefcount, \
+     getweakrefs, \
+     ref, \
+     proxy, \
+     CallableProxyType, \
+     ProxyType, \
+     ReferenceType
+
+from exceptions import ReferenceError
+
+
+ProxyTypes = (ProxyType, CallableProxyType)
+
+__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
+           "WeakKeyDictionary", "ReferenceType", "ProxyType",
+           "CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
+
+
+class WeakValueDictionary(UserDict.UserDict):
+    """Mapping class that references values weakly.
+
+    Entries in the dictionary will be discarded when no strong
+    reference to the value exists anymore
+    """
+    # We inherit the constructor without worrying about the input
+    # dictionary; since it uses our .update() method, we get the right
+    # checks (if the other dictionary is a WeakValueDictionary,
+    # objects are unwrapped on the way out, and we always wrap on the
+    # way in).
+
+    def __getitem__(self, key):
+        # self.data maps key -> weakref; calling the weakref yields the
+        # referent, or None once it has been garbage-collected.
+        o = self.data[key]()
+        if o is None:
+            raise KeyError, key
+        else:
+            return o
+
+    def __repr__(self):
+        return "<WeakValueDictionary at %s>" % id(self)
+
+    def __setitem__(self, key, value):
+        # Wrap the value in a weakref whose callback removes this entry
+        # when the value is collected.
+        self.data[key] = ref(value, self.__makeremove(key))
+
+    def copy(self):
+        new = WeakValueDictionary()
+        for key, wr in self.data.items():
+            o = wr()
+            if o is not None:
+                new[key] = o
+        return new
+
+    def get(self, key, default=None):
+        try:
+            wr = self.data[key]
+        except KeyError:
+            return default
+        else:
+            o = wr()
+            if o is None:
+                # The referent died after the entry was made but before
+                # its callback pruned it; report the key as missing.
+                return default
+            else:
+                return o
+
+    def items(self):
+        # Only entries whose referent is still alive are returned.
+        L = []
+        for key, wr in self.data.items():
+            o = wr()
+            if o is not None:
+                L.append((key, o))
+        return L
+
+    def iteritems(self):
+        return WeakValuedItemIterator(self)
+
+    def iterkeys(self):
+        return self.data.iterkeys()
+    __iter__ = iterkeys
+
+    def itervalues(self):
+        return WeakValuedValueIterator(self)
+
+    def popitem(self):
+        # Loop until an entry with a live referent is found; dead
+        # entries are silently dropped along the way.
+        while 1:
+            key, wr = self.data.popitem()
+            o = wr()
+            if o is not None:
+                return key, o
+
+    def setdefault(self, key, default):
+        try:
+            wr = self.data[key]
+        except KeyError:
+            self.data[key] = ref(default, self.__makeremove(key))
+            return default
+        else:
+            return wr()
+
+    def update(self, dict):
+        d = self.data
+        for key, o in dict.items():
+            d[key] = ref(o, self.__makeremove(key))
+
+    def values(self):
+        L = []
+        for wr in self.data.values():
+            o = wr()
+            if o is not None:
+                L.append(o)
+        return L
+
+    def __makeremove(self, key):
+        # Build the weakref callback for one entry.  It captures only a
+        # weak reference to self so the callback itself does not keep
+        # this dictionary alive.
+        def remove(o, selfref=ref(self), key=key):
+            self = selfref()
+            if self is not None:
+                del self.data[key]
+        return remove
+
+
+class WeakKeyDictionary(UserDict.UserDict):
+    """ Mapping class that references keys weakly.
+
+    Entries in the dictionary will be discarded when there is no
+    longer a strong reference to the key. This can be used to
+    associate additional data with an object owned by other parts of
+    an application without adding attributes to those objects. This
+    can be especially useful with objects that override attribute
+    accesses.
+    """
+
+    def __init__(self, dict=None):
+        self.data = {}
+        # Weakref callback shared by all keys: when a key dies, its
+        # wrapping weakref (k) is passed in and the entry is dropped.
+        # Only a weakref to self is captured so the callback does not
+        # keep this dictionary alive.
+        def remove(k, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                del self.data[k]
+        self._remove = remove
+        if dict is not None: self.update(dict)
+
+    def __delitem__(self, key):
+        # Weakrefs to the same live object hash and compare equal, so a
+        # fresh ref(key) locates the stored entry.
+        del self.data[ref(key)]
+
+    def __getitem__(self, key):
+        return self.data[ref(key)]
+
+    def __repr__(self):
+        return "<WeakKeyDictionary at %s>" % id(self)
+
+    def __setitem__(self, key, value):
+        self.data[ref(key, self._remove)] = value
+
+    def copy(self):
+        new = WeakKeyDictionary()
+        for key, value in self.data.items():
+            o = key()
+            if o is not None:
+                new[o] = value
+        return new
+
+    def get(self, key, default=None):
+        return self.data.get(ref(key),default)
+
+    def has_key(self, key):
+        try:
+            wr = ref(key)
+        except TypeError:
+            # Objects that cannot be weakly referenced can never have
+            # been stored as keys here.
+            return 0
+        return self.data.has_key(wr)
+
+    def items(self):
+        # Only entries whose key is still alive are returned.
+        L = []
+        for key, value in self.data.items():
+            o = key()
+            if o is not None:
+                L.append((o, value))
+        return L
+
+    def iteritems(self):
+        return WeakKeyedItemIterator(self)
+
+    def iterkeys(self):
+        return WeakKeyedKeyIterator(self)
+    __iter__ = iterkeys
+
+    def itervalues(self):
+        return self.data.itervalues()
+
+    def keys(self):
+        L = []
+        for wr in self.data.keys():
+            o = wr()
+            if o is not None:
+                L.append(o)
+        return L
+
+    def popitem(self):
+        # Skip entries whose key has already died.
+        while 1:
+            key, value = self.data.popitem()
+            o = key()
+            if o is not None:
+                return o, value
+
+    def setdefault(self, key, default):
+        return self.data.setdefault(ref(key, self._remove),default)
+
+    def update(self, dict):
+        d = self.data
+        for key, value in dict.items():
+            d[ref(key, self._remove)] = value
+
+
+class BaseIter:
+    # Mixin supplying __iter__ so the iterator classes below work
+    # directly in for-loops.
+    def __iter__(self):
+        return self
+
+
+class WeakKeyedKeyIterator(BaseIter):
+    """Iterate over the live keys of a WeakKeyDictionary."""
+    def __init__(self, weakdict):
+        self._next = weakdict.data.iterkeys().next
+
+    def next(self):
+        # Skip weakrefs whose referent has died; StopIteration
+        # propagates from the underlying iterator when exhausted.
+        while 1:
+            wr = self._next()
+            obj = wr()
+            if obj is not None:
+                return obj
+
+
+class WeakKeyedItemIterator(BaseIter):
+    """Iterate over (key, value) pairs of a WeakKeyDictionary, skipping
+    entries whose key has died."""
+    def __init__(self, weakdict):
+        self._next = weakdict.data.iteritems().next
+
+    def next(self):
+        while 1:
+            wr, value = self._next()
+            key = wr()
+            if key is not None:
+                return key, value
+
+
+class WeakValuedValueIterator(BaseIter):
+    """Iterate over the live values of a WeakValueDictionary."""
+    def __init__(self, weakdict):
+        self._next = weakdict.data.itervalues().next
+
+    def next(self):
+        while 1:
+            wr = self._next()
+            obj = wr()
+            if obj is not None:
+                return obj
+
+
+class WeakValuedItemIterator(BaseIter):
+    """Iterate over (key, value) pairs of a WeakValueDictionary,
+    skipping entries whose value has died."""
+    def __init__(self, weakdict):
+        self._next = weakdict.data.iteritems().next
+
+    def next(self):
+        while 1:
+            key, wr = self._next()
+            value = wr()
+            if value is not None:
+                return key, value
+
+
+# no longer needed: UserDict was only required to define the two
+# dictionary classes above, so drop the module-level name.
+del UserDict
diff --git a/lib-python/2.2/webbrowser.py b/lib-python/2.2/webbrowser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/webbrowser.py
@@ -0,0 +1,330 @@
+"""Interfaces for launching and remotely controlling Web browsers."""
+
+import os
+import sys
+
+__all__ = ["Error", "open", "get", "register"]
+
+class Error(Exception):
+    """Raised when no runnable browser controller can be located."""
+    pass
+
+_browsers = {}          # Dictionary of available browser controllers
+_tryorder = []          # Preference order of available browsers
+
+def register(name, klass, instance=None):
+    """Register a browser connector and, optionally, connection."""
+    # klass is stored for lazy instantiation by get(); instance, when
+    # given, is a ready-made controller returned directly.
+    _browsers[name.lower()] = [klass, instance]
+
+def get(using=None):
+    """Return a browser launcher instance appropriate for the environment."""
+    if using:
+        alternatives = [using]
+    else:
+        alternatives = _tryorder
+    for browser in alternatives:
+        if browser.find('%s') > -1:
+            # User gave us a command line, don't mess with it.
+            return GenericBrowser(browser)
+        else:
+            # User gave us a browser name.
+            try:
+                command = _browsers[browser.lower()]
+            except KeyError:
+                command = _synthesize(browser)
+            if command[1] is None:
+                # No prebuilt instance; instantiate the registered class.
+                # NOTE(review): if _synthesize returned [None, None] this
+                # calls None() and raises TypeError -- matches upstream
+                # CPython 2.2 behaviour, left as-is.
+                return command[0]()
+            else:
+                return command[1]
+    raise Error("could not locate runnable browser")
+
+# Please note: the following definition hides a builtin function.
+
+def open(url, new=0, autoraise=1):
+    # Display url with the default browser; new and autoraise are
+    # forwarded to the controller's open() method.
+    get().open(url, new, autoraise)
+
+def open_new(url):
+    # Display url with the default browser, requesting a new window.
+    get().open(url, 1)
+
+
+def _synthesize(browser):
+    """Attempt to synthesize a controller based on existing controllers.
+
+    This is useful to create a controller when a user specifies a path to
+    an entry in the BROWSER environment variable -- we can copy a general
+    controller to operate using a specific installation of the desired
+    browser in this way.
+
+    If we can't create a controller in this way, or if there is no
+    executable for the requested browser, return [None, None].
+
+    """
+    if not os.path.exists(browser):
+        return [None, None]
+    name = os.path.basename(browser)
+    try:
+        command = _browsers[name.lower()]
+    except KeyError:
+        return [None, None]
+    # now attempt to clone to fit the new name:
+    controller = command[1]
+    if controller and name.lower() == controller.basename:
+        import copy
+        controller = copy.copy(controller)
+        controller.name = browser
+        controller.basename = os.path.basename(browser)
+        # Register the clone under the full path so later lookups hit it.
+        register(browser, None, controller)
+        return [None, controller]
+    return [None, None]
+
+
+def _iscommand(cmd):
+    """Return true if cmd can be found on the executable search path."""
+    path = os.environ.get("PATH")
+    if not path:
+        return 0
+    for d in path.split(os.pathsep):
+        exe = os.path.join(d, cmd)
+        # Note: only existence is checked, not the execute permission.
+        if os.path.isfile(exe):
+            return 1
+    return 0
+
+
+# Seconds to wait after launching a browser in the background before
+# retrying the remote-control command (see Netscape/Konqueror._remote).
+PROCESS_CREATION_DELAY = 4
+
+
+class GenericBrowser:
+    """Launcher built from a command template such as "mybrowser '%s'"."""
+    def __init__(self, cmd):
+        # cmd is "<executable> <args containing a %s URL placeholder>".
+        self.name, self.args = cmd.split(None, 1)
+        self.basename = os.path.basename(self.name)
+
+    def open(self, url, new=0, autoraise=1):
+        # The command runs through the shell, so refuse single quotes in
+        # the URL to avoid breaking out of the template's quoting.
+        assert "'" not in url
+        command = "%s %s" % (self.name, self.args)
+        os.system(command % url)
+
+    def open_new(self, url):
+        self.open(url)
+
+
+class Netscape:
+    "Launcher class for Netscape browsers."
+    def __init__(self, name):
+        self.name = name
+        self.basename = os.path.basename(name)
+
+    def _remote(self, action, autoraise):
+        # Drive a running browser via the -remote protocol; if none is
+        # listening (non-zero exit status), launch one in the background,
+        # wait for it to come up, and retry once.  Returns true on success.
+        raise_opt = ("-noraise", "-raise")[autoraise]
+        cmd = "%s %s -remote '%s' >/dev/null 2>&1" % (self.name,
+                                                      raise_opt,
+                                                      action)
+        rc = os.system(cmd)
+        if rc:
+            import time
+            os.system("%s &" % self.name)
+            time.sleep(PROCESS_CREATION_DELAY)
+            rc = os.system(cmd)
+        return not rc
+
+    def open(self, url, new=0, autoraise=1):
+        if new:
+            self._remote("openURL(%s, new-window)"%url, autoraise)
+        else:
+            self._remote("openURL(%s)" % url, autoraise)
+
+    def open_new(self, url):
+        self.open(url, 1)
+
+
+class Konqueror:
+    """Controller for the KDE File Manager (kfm, or Konqueror).
+
+    See http://developer.kde.org/documentation/other/kfmclient.html
+    for more information on the Konqueror remote-control interface.
+
+    """
+    def __init__(self):
+        # Prefer the konqueror binary when present; fall back to kfm.
+        if _iscommand("konqueror"):
+            self.name = self.basename = "konqueror"
+        else:
+            self.name = self.basename = "kfm"
+
+    def _remote(self, action):
+        # Ask a running KDE file manager (via kfmclient) to perform
+        # action; if none answers, start one in the background and retry
+        # once.  Returns true on success.
+        cmd = "kfmclient %s >/dev/null 2>&1" % action
+        rc = os.system(cmd)
+        if rc:
+            import time
+            if self.basename == "konqueror":
+                os.system(self.name + " --silent &")
+            else:
+                os.system(self.name + " -d &")
+            time.sleep(PROCESS_CREATION_DELAY)
+            rc = os.system(cmd)
+        return not rc
+
+    def open(self, url, new=1, autoraise=1):
+        # XXX Currently I know no way to prevent KFM from
+        # opening a new win.
+        assert "'" not in url
+        self._remote("openURL '%s'" % url)
+
+    open_new = open
+
+
+class Grail:
+    # There should be a way to maintain a connection to Grail, but the
+    # Grail remote control protocol doesn't really allow that at this
+    # point.  It probably never will!
+    def _find_grail_rc(self):
+        # Grail advertises per-user rendezvous sockets named "<user>-*"
+        # under <tempdir>/.grail-unix; probe each candidate and return
+        # the first one that accepts a connection (None if none do).
+        import glob
+        import pwd
+        import socket
+        import tempfile
+        tempdir = os.path.join(tempfile.gettempdir(),
+                               ".grail-unix")
+        user = pwd.getpwuid(os.getuid())[0]
+        filename = os.path.join(tempdir, user + "-*")
+        maybes = glob.glob(filename)
+        if not maybes:
+            return None
+        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        for fn in maybes:
+            # need to PING each one until we find one that's live
+            try:
+                s.connect(fn)
+            except socket.error:
+                # no good; attempt to clean it out, but don't fail:
+                try:
+                    os.unlink(fn)
+                except IOError:
+                    pass
+            else:
+                return s
+
+    def _remote(self, action):
+        # Send one command to a live Grail socket; returns 1 on success,
+        # 0 when no Grail instance could be reached.
+        s = self._find_grail_rc()
+        if not s:
+            return 0
+        s.send(action)
+        s.close()
+        return 1
+
+    def open(self, url, new=0, autoraise=1):
+        if new:
+            self._remote("LOADNEW " + url)
+        else:
+            self._remote("LOAD " + url)
+
+    def open_new(self, url):
+        self.open(url, 1)
+
+
+class WindowsDefault:
+    """Open URLs with whatever the Windows shell associates with them."""
+    def open(self, url, new=0, autoraise=1):
+        # os.startfile hands the URL to the registered default handler.
+        os.startfile(url)
+
+    def open_new(self, url):
+        self.open(url)
+
+#
+# Platform support for Unix
+#
+
+# This is the right test because all these Unix browsers require either
+# a console terminal or an X display to run.  Note that we cannot split
+# the TERM and DISPLAY cases, because we might be running Python from inside
+# an xterm.
+if os.environ.get("TERM") or os.environ.get("DISPLAY"):
+    _tryorder = ["mozilla","netscape","kfm","grail","links","lynx","w3m"]
+
+    # Easy cases first -- register console browsers if we have them.
+    if os.environ.get("TERM"):
+        # The Links browser <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
+        if _iscommand("links"):
+            register("links", None, GenericBrowser("links '%s'"))
+        # The Lynx browser <http://lynx.browser.org/>
+        if _iscommand("lynx"):
+            register("lynx", None, GenericBrowser("lynx '%s'"))
+        # The w3m browser <http://ei5nazha.yz.yamagata-u.ac.jp/~aito/w3m/eng/>
+        if _iscommand("w3m"):
+            register("w3m", None, GenericBrowser("w3m '%s'"))
+
+    # X browsers have more in the way of options
+    if os.environ.get("DISPLAY"):
+        # First, the Netscape series
+        if _iscommand("mozilla"):
+            register("mozilla", None, Netscape("mozilla"))
+        if _iscommand("netscape"):
+            register("netscape", None, Netscape("netscape"))
+
+        # Next, Mosaic -- old but still in use.
+        if _iscommand("mosaic"):
+            register("mosaic", None, GenericBrowser(
+                "mosaic '%s' >/dev/null &"))
+
+        # Konqueror/kfm, the KDE browser.
+        if _iscommand("kfm") or _iscommand("konqueror"):
+            register("kfm", Konqueror, Konqueror())
+
+        # Grail, the Python browser.
+        if _iscommand("grail"):
+            register("grail", Grail, None)
+
+
+class InternetConfig:
+    """MacOS controller: hand URLs to Internet Config via the ic module."""
+    def open(self, url, new=0, autoraise=1):
+        ic.launchurl(url)
+
+    def open_new(self, url):
+        self.open(url)
+
+
+#
+# Platform support for Windows
+#
+
+if sys.platform[:3] == "win":
+    _tryorder = ["netscape", "windows-default"]
+    register("windows-default", WindowsDefault)
+
+#
+# Platform support for MacOS
+#
+
+try:
+    import ic
+except ImportError:
+    pass
+else:
+    # internet-config is the only supported controller on MacOS,
+    # so don't mess with the default!
+    _tryorder = ["internet-config"]
+    register("internet-config", InternetConfig)
+
+#
+# Platform support for OS/2
+#
+
+if sys.platform[:3] == "os2" and _iscommand("netscape.exe"):
+    _tryorder = ["os2netscape"]
+    register("os2netscape", None,
+             GenericBrowser("start netscape.exe %s"))
+
+# OK, now that we know what the default preference orders for each
+# platform are, allow user to override them with the BROWSER variable.
+#
+if os.environ.has_key("BROWSER"):
+    # It's the user's responsibility to register handlers for any unknown
+    # browser referenced by this value, before calling open().
+    _tryorder = os.environ["BROWSER"].split(os.pathsep)
+
+# Register any command named in _tryorder that exists on the PATH but
+# has no handler yet, treating the bare name as a shell command template.
+for cmd in _tryorder:
+    if not _browsers.has_key(cmd.lower()):
+        if _iscommand(cmd.lower()):
+            register(cmd.lower(), None, GenericBrowser(
+                "%s '%%s'" % cmd.lower()))
+cmd = None # to make del work if _tryorder was empty
+del cmd
+
+# Keep only names that are registered or look like '%s' command templates.
+_tryorder = filter(lambda x: _browsers.has_key(x.lower())
+                   or x.find("%s") > -1, _tryorder)
+# what to do if _tryorder is now empty?
diff --git a/lib-python/2.2/whichdb.py b/lib-python/2.2/whichdb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/whichdb.py
@@ -0,0 +1,87 @@
+"""Guess which db package to use to open a db file."""
+
+import os
+
+def whichdb(filename):
+    """Guess which db package to use to open a db file.
+
+    Return values:
+
+    - None if the database file can't be read;
+    - empty string if the file can be read but can't be recognized
+    - the module name (e.g. "dbm" or "gdbm") if recognized.
+
+    Importing the given module may still fail, and opening the
+    database using that module may still fail.
+    """
+
+    import struct
+
+    # Check for dbm first -- this has a .pag and a .dir file
+    try:
+        f = open(filename + os.extsep + "pag", "rb")
+        f.close()
+        f = open(filename + os.extsep + "dir", "rb")
+        f.close()
+        return "dbm"
+    except IOError:
+        pass
+
+    # Check for dumbdbm next -- this has a .dir and a .dat file
+    try:
+        # First check for presence of files
+        sizes = os.stat(filename + os.extsep + "dat").st_size, \
+                os.stat(filename + os.extsep + "dir").st_size
+        # dumbdbm files with no keys are empty
+        if sizes == (0, 0):
+            return "dumbdbm"
+        f = open(filename + os.extsep + "dir", "rb")
+        try:
+            # A leading quote character is taken as the dumbdbm signature
+            # (its .dir file stores repr'd key strings).
+            if f.read(1) in ["'", '"']:
+                return "dumbdbm"
+        finally:
+            f.close()
+    except (OSError, IOError):
+        pass
+
+    # See if the file exists, return None if not
+    try:
+        f = open(filename, "rb")
+    except IOError:
+        return None
+
+    # Read the start of the file -- the magic number
+    s16 = f.read(16)
+    f.close()
+    s = s16[0:4]
+
+    # Return "" if not at least 4 bytes
+    if len(s) != 4:
+        return ""
+
+    # Convert to 4-byte int in native byte order -- return "" if impossible
+    try:
+        (magic,) = struct.unpack("=l", s)
+    except struct.error:
+        return ""
+
+    # Check for GNU dbm
+    if magic == 0x13579ace:
+        return "gdbm"
+
+    # Check for BSD hash (both byte orders are accepted)
+    if magic in (0x00061561, 0x61150600):
+        return "dbhash"
+
+    # BSD hash v2 has a 12-byte NULL pad in front of the file type
+    try:
+        (magic,) = struct.unpack("=l", s16[-4:])
+    except struct.error:
+        return ""
+
+    # Check for BSD hash
+    if magic in (0x00061561, 0x61150600):
+        return "dbhash"
+
+    # Unknown
+    return ""
diff --git a/lib-python/2.2/whrandom.py b/lib-python/2.2/whrandom.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/whrandom.py
@@ -0,0 +1,140 @@
+"""Wichmann-Hill random number generator.
+
+Wichmann, B. A. & Hill, I. D. (1982)
+Algorithm AS 183:
+An efficient and portable pseudo-random number generator
+Applied Statistics 31 (1982) 188-190
+
+see also:
+        Correction to Algorithm AS 183
+        Applied Statistics 33 (1984) 123
+
+        McLeod, A. I. (1985)
+        A remark on Algorithm AS 183
+        Applied Statistics 34 (1985),198-200
+
+
+USE:
+whrandom.random()       yields double precision random numbers
+                        uniformly distributed between 0 and 1.
+
+whrandom.seed(x, y, z)  must be called before whrandom.random()
+                        to seed the generator
+
+There is also an interface to create multiple independent
+random generators, and to choose from other ranges.
+
+
+
+Multi-threading note: the random number generator used here is not
+thread-safe; it is possible that nearly simultaneous calls in
+different threads return the same random value.  To avoid this, you
+have to use a lock around all calls.  (I didn't want to slow this
+down in the serial case by using a lock here.)
+"""
+
+# Translated by Guido van Rossum from C source provided by
+# Adrian Baddeley.
+
+
+class whrandom:
+    def __init__(self, x = 0, y = 0, z = 0):
+        """Initialize an instance.
+        Without arguments, initialize from current time.
+        With arguments (x, y, z), initialize from them."""
+        self.seed(x, y, z)
+
+    def seed(self, x = 0, y = 0, z = 0):
+        """Set the seed from (x, y, z).
+        These must be integers in the range [0, 256)."""
+        if not type(x) == type(y) == type(z) == type(0):
+            raise TypeError, 'seeds must be integers'
+        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
+            raise ValueError, 'seeds must be in range(0, 256)'
+        if 0 == x == y == z:
+            # Initialize from current time
+            import time
+            t = long(time.time() * 256)
+            t = int((t&0xffffff) ^ (t>>24))
+            t, x = divmod(t, 256)
+            t, y = divmod(t, 256)
+            t, z = divmod(t, 256)
+        # Zero is a poor seed, so substitute 1
+        self._seed = (x or 1, y or 1, z or 1)
+
+    def random(self):
+        """Get the next random number in the range [0.0, 1.0)."""
+        # Wichmann-Hill: step three small linear congruential generators
+        # and combine their scaled outputs modulo 1.
+        # This part is thread-unsafe:
+        # BEGIN CRITICAL SECTION
+        x, y, z = self._seed
+        #
+        x = (171 * x) % 30269
+        y = (172 * y) % 30307
+        z = (170 * z) % 30323
+        #
+        self._seed = x, y, z
+        # END CRITICAL SECTION
+        #
+        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
+
+    def uniform(self, a, b):
+        """Get a random number in the range [a, b)."""
+        return a + (b-a) * self.random()
+
+    def randint(self, a, b):
+        """Get a random integer in the range [a, b] including
+        both end points.
+
+        (Deprecated; use randrange below.)"""
+        return self.randrange(a, b+1)
+
+    def choice(self, seq):
+        """Choose a random element from a non-empty sequence."""
+        return seq[int(self.random() * len(seq))]
+
+    def randrange(self, start, stop=None, step=1, int=int, default=None):
+        """Choose a random item from range(start, stop[, step]).
+
+        This fixes the problem with randint() which includes the
+        endpoint; in Python this is usually not what you want.
+        Do not supply the 'int' and 'default' arguments."""
+        # This code is a bit messy to make it fast for the
+        # common case while still doing adequate error checking
+        istart = int(start)
+        if istart != start:
+            raise ValueError, "non-integer arg 1 for randrange()"
+        if stop is default:
+            # One-argument form: randrange(stop).
+            if istart > 0:
+                return int(self.random() * istart)
+            raise ValueError, "empty range for randrange()"
+        istop = int(stop)
+        if istop != stop:
+            raise ValueError, "non-integer stop for randrange()"
+        if step == 1:
+            if istart < istop:
+                return istart + int(self.random() *
+                                   (istop - istart))
+            raise ValueError, "empty range for randrange()"
+        istep = int(step)
+        if istep != step:
+            raise ValueError, "non-integer step for randrange()"
+        # n = number of items in range(istart, istop, istep)
+        if istep > 0:
+            n = (istop - istart + istep - 1) / istep
+        elif istep < 0:
+            n = (istop - istart + istep + 1) / istep
+        else:
+            raise ValueError, "zero step for randrange()"
+
+        if n <= 0:
+            raise ValueError, "empty range for randrange()"
+        return istart + istep*int(self.random() * n)
+
+
+# Initialize from the current time
+# Module-level convenience API: one shared generator instance whose bound
+# methods are exported as module functions.
+_inst = whrandom()
+seed = _inst.seed
+random = _inst.random
+uniform = _inst.uniform
+randint = _inst.randint
+choice = _inst.choice
+randrange = _inst.randrange
diff --git a/lib-python/2.2/xdrlib.py b/lib-python/2.2/xdrlib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xdrlib.py
@@ -0,0 +1,285 @@
+"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
+
+See: RFC 1014
+
+"""
+
+import struct
+try:
+    from cStringIO import StringIO as _StringIO
+except ImportError:
+    from StringIO import StringIO as _StringIO
+
+__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
+
+# exceptions
+class Error:
+    """Exception class for this module. Use:
+
+    except xdrlib.Error, var:
+        # var has the Error instance for the exception
+
+    Public ivars:
+        msg -- contains the message
+
+    """
+    # NOTE(review): old-style class; it does not derive from Exception.
+    def __init__(self, msg):
+        self.msg = msg
+    def __repr__(self):
+        return repr(self.msg)
+    def __str__(self):
+        return str(self.msg)
+
+
+class ConversionError(Error):
+    """Raised when a value cannot be packed or unpacked (e.g. bad float)."""
+    pass
+
+
+
+class Packer:
+    """Pack various data representations into a buffer."""
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        # Discard any packed data and start with a fresh buffer.
+        self.__buf = _StringIO()
+
+    def get_buffer(self):
+        """Return everything packed so far as a string."""
+        return self.__buf.getvalue()
+    # backwards compatibility
+    get_buf = get_buffer
+
+    def pack_uint(self, x):
+        # 4-byte big-endian unsigned integer.
+        self.__buf.write(struct.pack('>L', x))
+
+    pack_int = pack_uint
+    pack_enum = pack_int
+
+    def pack_bool(self, x):
+        # XDR booleans are a 4-byte 0 or 1.
+        if x: self.__buf.write('\0\0\0\1')
+        else: self.__buf.write('\0\0\0\0')
+
+    def pack_uhyper(self, x):
+        # 8-byte quantity packed as two 4-byte halves, high word first.
+        self.pack_uint(x>>32 & 0xffffffffL)
+        self.pack_uint(x & 0xffffffffL)
+
+    pack_hyper = pack_uhyper
+
+    def pack_float(self, x):
+        # Translate struct errors into this module's ConversionError.
+        try: self.__buf.write(struct.pack('>f', x))
+        except struct.error, msg:
+            raise ConversionError, msg
+
+    def pack_double(self, x):
+        try: self.__buf.write(struct.pack('>d', x))
+        except struct.error, msg:
+            raise ConversionError, msg
+
+    def pack_fstring(self, n, s):
+        """Pack a fixed-length string, NUL-padded up to a 4-byte boundary."""
+        if n < 0:
+            raise ValueError, 'fstring size must be nonnegative'
+        n = ((n+3)/4)*4
+        data = s[:n]
+        data = data + (n - len(data)) * '\0'
+        self.__buf.write(data)
+
+    pack_fopaque = pack_fstring
+
+    def pack_string(self, s):
+        """Pack a variable-length string: length word, then the bytes."""
+        n = len(s)
+        self.pack_uint(n)
+        self.pack_fstring(n, s)
+
+    pack_opaque = pack_string
+    pack_bytes = pack_string
+
+    def pack_list(self, list, pack_item):
+        # Each item is preceded by a 1 word; a 0 word terminates the list.
+        for item in list:
+            self.pack_uint(1)
+            pack_item(item)
+        self.pack_uint(0)
+
+    def pack_farray(self, n, list, pack_item):
+        """Pack a fixed-size array; 'list' must have exactly n items."""
+        if len(list) != n:
+            raise ValueError, 'wrong array size'
+        for item in list:
+            pack_item(item)
+
+    def pack_array(self, list, pack_item):
+        """Pack a counted array: length word, then the items."""
+        n = len(list)
+        self.pack_uint(n)
+        self.pack_farray(n, list, pack_item)
+
+
+
+class Unpacker:
+    """Unpacks various data representations from the given buffer."""
+
+    def __init__(self, data):
+        self.reset(data)
+
+    def reset(self, data):
+        """Start unpacking from 'data', at position 0."""
+        self.__buf = data
+        self.__pos = 0
+
+    def get_position(self):
+        return self.__pos
+
+    def set_position(self, position):
+        self.__pos = position
+
+    def get_buffer(self):
+        return self.__buf
+
+    def done(self):
+        """Raise Error if any unconsumed data remains."""
+        if self.__pos < len(self.__buf):
+            raise Error('unextracted data remains')
+
+    def unpack_uint(self):
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        x = struct.unpack('>L', data)[0]
+        try:
+            # Return a plain int when it fits, else leave it as a long.
+            return int(x)
+        except OverflowError:
+            return x
+
+    def unpack_int(self):
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        return struct.unpack('>l', data)[0]
+
+    unpack_enum = unpack_int
+    unpack_bool = unpack_int
+
+    def unpack_uhyper(self):
+        # Two 4-byte halves, high word first (mirrors Packer.pack_uhyper).
+        hi = self.unpack_uint()
+        lo = self.unpack_uint()
+        return long(hi)<<32 | lo
+
+    def unpack_hyper(self):
+        # Reinterpret the unsigned 64-bit value as signed.
+        x = self.unpack_uhyper()
+        if x >= 0x8000000000000000L:
+            x = x - 0x10000000000000000L
+        return x
+
+    def unpack_float(self):
+        i = self.__pos
+        self.__pos = j = i+4
+        data = self.__buf[i:j]
+        if len(data) < 4:
+            raise EOFError
+        return struct.unpack('>f', data)[0]
+
+    def unpack_double(self):
+        i = self.__pos
+        self.__pos = j = i+8
+        data = self.__buf[i:j]
+        if len(data) < 8:
+            raise EOFError
+        return struct.unpack('>d', data)[0]
+
+    def unpack_fstring(self, n):
+        """Unpack an n-byte string, consuming the 4-byte-aligned padding."""
+        if n < 0:
+            raise ValueError, 'fstring size must be nonnegative'
+        i = self.__pos
+        j = i + (n+3)/4*4
+        if j > len(self.__buf):
+            raise EOFError
+        self.__pos = j
+        return self.__buf[i:i+n]
+
+    unpack_fopaque = unpack_fstring
+
+    def unpack_string(self):
+        """Unpack a variable-length string: length word, then the bytes."""
+        n = self.unpack_uint()
+        return self.unpack_fstring(n)
+
+    unpack_opaque = unpack_string
+    unpack_bytes = unpack_string
+
+    def unpack_list(self, unpack_item):
+        """Unpack a list: each item is preceded by a 1 word, a 0 ends it."""
+        list = []
+        while 1:
+            x = self.unpack_uint()
+            if x == 0: break
+            if x != 1:
+                raise ConversionError, '0 or 1 expected, got ' + `x`
+            item = unpack_item()
+            list.append(item)
+        return list
+
+    def unpack_farray(self, n, unpack_item):
+        """Unpack a fixed-size array of exactly n items."""
+        list = []
+        for i in range(n):
+            list.append(unpack_item())
+        return list
+
+    def unpack_array(self, unpack_item):
+        """Unpack a counted array: length word, then the items."""
+        n = self.unpack_uint()
+        return self.unpack_farray(n, unpack_item)
+
+
+# test suite
+def _test():
+    # Round-trip check: pack one sample of each supported type, then
+    # unpack the buffer and verify each value with a predicate.
+    p = Packer()
+    packtest = [
+        (p.pack_uint,    (9,)),
+        (p.pack_bool,    (None,)),
+        (p.pack_bool,    ('hello',)),
+        (p.pack_uhyper,  (45L,)),
+        (p.pack_float,   (1.9,)),
+        (p.pack_double,  (1.9,)),
+        (p.pack_string,  ('hello world',)),
+        (p.pack_list,    (range(5), p.pack_uint)),
+        (p.pack_array,   (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
+        ]
+    succeedlist = [1] * len(packtest)
+    count = 0
+    for method, args in packtest:
+        print 'pack test', count,
+        try:
+            apply(method, args)
+            print 'succeeded'
+        except ConversionError, var:
+            print 'ConversionError:', var.msg
+            succeedlist[count] = 0
+        count = count + 1
+    data = p.get_buffer()
+    # now verify
+    up = Unpacker(data)
+    unpacktest = [
+        (up.unpack_uint,   (), lambda x: x == 9),
+        (up.unpack_bool,   (), lambda x: not x),
+        (up.unpack_bool,   (), lambda x: x),
+        (up.unpack_uhyper, (), lambda x: x == 45L),
+        (up.unpack_float,  (), lambda x: 1.89 < x < 1.91),
+        (up.unpack_double, (), lambda x: 1.89 < x < 1.91),
+        (up.unpack_string, (), lambda x: x == 'hello world'),
+        (up.unpack_list,   (up.unpack_uint,), lambda x: x == range(5)),
+        (up.unpack_array,  (up.unpack_string,),
+         lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
+        ]
+    count = 0
+    for method, args, pred in unpacktest:
+        print 'unpack test', count,
+        try:
+            # Skip values whose pack step failed above.
+            if succeedlist[count]:
+                x = apply(method, args)
+                print pred(x) and 'succeeded' or 'failed', ':', x
+            else:
+                print 'skipping'
+        except ConversionError, var:
+            print 'ConversionError:', var.msg
+        count = count + 1
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/xml/__init__.py b/lib-python/2.2/xml/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/__init__.py
@@ -0,0 +1,42 @@
+"""Core XML support for Python.
+
+This package contains three sub-packages:
+
+dom -- The W3C Document Object Model.  This supports DOM Level 1 +
+       Namespaces.
+
+parsers -- Python wrappers for XML parsers (currently only supports Expat).
+
+sax -- The Simple API for XML, developed by XML-Dev, led by David
+       Megginson and ported to Python by Lars Marius Garshol.  This
+       supports the SAX 2 API.
+"""
+
+
+__all__ = ["dom", "parsers", "sax"]
+
+# When being checked-out without options, this has the form
+# "<dollar>Revision: x.y </dollar>"
+# When exported using -kv, it is "x.y".
+__version__ = "$Revision$".split()[-2:][0]
+
+
+# Minimum PyXML (_xmlplus) version allowed to replace this package.
+_MINIMUM_XMLPLUS_VERSION = (0, 6, 1)
+
+
+try:
+    import _xmlplus
+except ImportError:
+    pass
+else:
+    try:
+        v = _xmlplus.version_info
+    except AttributeError:
+        # _xmlplus is too old; ignore it
+        pass
+    else:
+        if v >= _MINIMUM_XMLPLUS_VERSION:
+            import sys
+            # Transparently substitute the richer PyXML package.
+            sys.modules[__name__] = _xmlplus
+        else:
+            del v
diff --git a/lib-python/2.2/xml/dom/__init__.py b/lib-python/2.2/xml/dom/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/dom/__init__.py
@@ -0,0 +1,125 @@
+"""W3C Document Object Model implementation for Python.
+
+The Python mapping of the Document Object Model is documented in the
+Python Library Reference in the section on the xml.dom package.
+
+This package contains the following modules:
+
+minidom -- A simple implementation of the Level 1 DOM with namespace
+           support added (based on the Level 2 specification) and other
+           minor Level 2 functionality.
+
+pulldom -- DOM builder supporting on-demand tree-building for selected
+           subtrees of the document.
+
+"""
+
+
+class Node:
+    """Class giving the NodeType constants."""
+
+    # DOM implementations may use this as a base class for their own
+    # Node implementations.  If they don't, the constants defined here
+    # should still be used as the canonical definitions as they match
+    # the values given in the W3C recommendation.  Client code can
+    # safely refer to these values in all tests of Node.nodeType
+    # values.
+
+    ELEMENT_NODE                = 1
+    ATTRIBUTE_NODE              = 2
+    TEXT_NODE                   = 3
+    CDATA_SECTION_NODE          = 4
+    ENTITY_REFERENCE_NODE       = 5
+    ENTITY_NODE                 = 6
+    PROCESSING_INSTRUCTION_NODE = 7
+    COMMENT_NODE                = 8
+    DOCUMENT_NODE               = 9
+    DOCUMENT_TYPE_NODE          = 10
+    DOCUMENT_FRAGMENT_NODE      = 11
+    NOTATION_NODE               = 12
+
+
+# ExceptionCode values, used as the 'code' attribute of the DOMException
+# subclasses below.
+INDEX_SIZE_ERR                 = 1
+DOMSTRING_SIZE_ERR             = 2
+HIERARCHY_REQUEST_ERR          = 3
+WRONG_DOCUMENT_ERR             = 4
+INVALID_CHARACTER_ERR          = 5
+NO_DATA_ALLOWED_ERR            = 6
+NO_MODIFICATION_ALLOWED_ERR    = 7
+NOT_FOUND_ERR                  = 8
+NOT_SUPPORTED_ERR              = 9
+INUSE_ATTRIBUTE_ERR            = 10
+INVALID_STATE_ERR              = 11
+SYNTAX_ERR                     = 12
+INVALID_MODIFICATION_ERR       = 13
+NAMESPACE_ERR                  = 14
+INVALID_ACCESS_ERR             = 15
+
+class DOMException(Exception):
+    """Abstract base class for DOM exceptions.
+    Exceptions with specific codes are specializations of this class."""
+
+    def __init__(self, *args, **kw):
+        # Abstract: only concrete subclasses (which supply 'code') may
+        # be instantiated.
+        if self.__class__ is DOMException:
+            raise RuntimeError(
+                "DOMException should not be instantiated directly")
+        apply(Exception.__init__, (self,) + args, kw)
+
+    def _get_code(self):
+        return self.code
+
+
+# One concrete DOMException subclass per ExceptionCode value.
+class IndexSizeErr(DOMException):
+    code = INDEX_SIZE_ERR
+
+class DomstringSizeErr(DOMException):
+    code = DOMSTRING_SIZE_ERR
+
+class HierarchyRequestErr(DOMException):
+    code = HIERARCHY_REQUEST_ERR
+
+class WrongDocumentErr(DOMException):
+    code = WRONG_DOCUMENT_ERR
+
+class InvalidCharacterErr(DOMException):
+    code = INVALID_CHARACTER_ERR
+
+class NoDataAllowedErr(DOMException):
+    code = NO_DATA_ALLOWED_ERR
+
+class NoModificationAllowedErr(DOMException):
+    code = NO_MODIFICATION_ALLOWED_ERR
+
+class NotFoundErr(DOMException):
+    code = NOT_FOUND_ERR
+
+class NotSupportedErr(DOMException):
+    code = NOT_SUPPORTED_ERR
+
+class InuseAttributeErr(DOMException):
+    code = INUSE_ATTRIBUTE_ERR
+
+class InvalidStateErr(DOMException):
+    code = INVALID_STATE_ERR
+
+class SyntaxErr(DOMException):
+    code = SYNTAX_ERR
+
+class InvalidModificationErr(DOMException):
+    code = INVALID_MODIFICATION_ERR
+
+class NamespaceErr(DOMException):
+    code = NAMESPACE_ERR
+
+class InvalidAccessErr(DOMException):
+    code = INVALID_ACCESS_ERR
+
+
+# Well-known namespace URIs.
+XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
+XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
+XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
+EMPTY_NAMESPACE = None
+
+from domreg import getDOMImplementation,registerDOMImplementation
diff --git a/lib-python/2.2/xml/dom/domreg.py b/lib-python/2.2/xml/dom/domreg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/dom/domreg.py
@@ -0,0 +1,76 @@
+"""Registration facilities for DOM. This module should not be used
+directly. Instead, the functions getDOMImplementation and
+registerDOMImplementation should be imported from xml.dom."""
+
+# This is a list of well-known implementations.  Well-known names
+# should be published by posting to xml-sig at python.org, and are
+# subsequently recorded in this file.
+
+well_known_implementations = {
+    'minidom':'xml.dom.minidom',
+    '4DOM': 'xml.dom.DOMImplementation',
+    }
+
+# DOM implementations not officially registered should register
+# themselves with their own names via registerDOMImplementation().
+
+registered = {}
+
+def registerDOMImplementation(name, factory):
+    """registerDOMImplementation(name, factory)
+
+    Register the factory function with the name. The factory function
+    should return an object which implements the DOMImplementation
+    interface. The factory function can either return the same object,
+    or a new one (e.g. if that implementation supports some
+    customization)."""
+
+    registered[name] = factory
+
+def _good_enough(dom, features):
+    "_good_enough(dom, features) -> Return 1 if the dom offers the features"
+    # A DOM qualifies only if it reports every (feature, version) pair.
+    for f,v in features:
+        if not dom.hasFeature(f,v):
+            return 0
+    return 1
+
+def getDOMImplementation(name = None, features = ()):
+    """getDOMImplementation(name = None, features = ()) -> DOM implementation.
+
+    Return a suitable DOM implementation. The name is either
+    well-known, the module name of a DOM implementation, or None. If
+    it is not None, imports the corresponding module and returns
+    DOMImplementation object if the import succeeds.
+
+    If name is not given, consider the available implementations to
+    find one with the required feature set. If no implementation can
+    be found, raise an ImportError. The features list must be a sequence
+    of (feature, version) pairs which are passed to hasFeature."""
+
+    import os
+    creator = None
+    # Resolution order: well-known name, explicitly registered name,
+    # then the PYTHON_DOM environment variable.
+    mod = well_known_implementations.get(name)
+    if mod:
+        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
+        return mod.getDOMImplementation()
+    elif name:
+        return registered[name]()
+    elif os.environ.has_key("PYTHON_DOM"):
+        return getDOMImplementation(name = os.environ["PYTHON_DOM"])
+
+    # User did not specify a name, try implementations in arbitrary
+    # order, returning the one that has the required features
+    for creator in registered.values():
+        dom = creator()
+        if _good_enough(dom, features):
+            return dom
+
+    for creator in well_known_implementations.keys():
+        try:
+            dom = getDOMImplementation(name = creator)
+        except StandardError: # typically ImportError, or AttributeError
+            continue
+        if _good_enough(dom, features):
+            return dom
+
+    raise ImportError,"no suitable DOM implementation found"
diff --git a/lib-python/2.2/xml/dom/minidom.py b/lib-python/2.2/xml/dom/minidom.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/dom/minidom.py
@@ -0,0 +1,970 @@
+"""\
+minidom.py -- a lightweight DOM implementation.
+
+parse("foo.xml")
+
+parseString("<foo><bar/></foo>")
+
+Todo:
+=====
+ * convenience methods for getting elements and text.
+ * more testing
+ * bring some of the writer and linearizer code into conformance with this
+        interface
+ * SAX 2 namespaces
+"""
+
+import string
+_string = string
+del string
+
+from xml.dom import HierarchyRequestErr, EMPTY_NAMESPACE
+
+# localize the types, and allow support for Unicode values if available:
+import types
+_TupleType = types.TupleType
+try:
+    _StringTypes = (types.StringType, types.UnicodeType)
+except AttributeError:
+    _StringTypes = (types.StringType,)
+del types
+
+import xml.dom
+
+
+# Version-compatibility shim: when the builtin name "list" is the list
+# type (Python 2.2+), NodeList is a real list subclass with the DOM
+# "item" accessor and "length" property; on older interpreters it
+# degrades to a factory function returning a plain list.
+if list is type([]):
+    class NodeList(list):
+        def item(self, index):
+            # DOM-style accessor: returns None (implicitly) when the
+            # index is out of range instead of raising.
+            if 0 <= index < len(self):
+                return self[index]
+
+        length = property(lambda self: len(self),
+                          doc="The number of nodes in the NodeList.")
+
+else:
+    def NodeList():
+        return []
+    
+
+class Node(xml.dom.Node):
+    """Base class for every node type in this minidom implementation.
+
+    Subclasses define nodeType/nodeName and childNodeTypes; this class
+    implements the generic child-list and sibling-pointer bookkeeping
+    shared by all of them.
+    """
+    allnodes = {}          # id->repr registry, populated only when _debug is set
+    _debug = 0             # enable create/delete tracing of every node
+    _makeParentNodes = 1   # when true, children get a parentNode back-reference
+    debug = None           # stream receiving the _debug trace (lazily created)
+    childNodeTypes = ()    # node types acceptable as children (per subclass)
+    namespaceURI = None # this is non-null only for elements and attributes
+    parentNode = None
+    ownerDocument = None
+
+    def __init__(self):
+        self.childNodes = NodeList()
+        if Node._debug:
+            index = repr(id(self)) + repr(self.__class__)
+            Node.allnodes[index] = repr(self.__dict__)
+            if Node.debug is None:
+                Node.debug = _get_StringIO()
+                #open("debug4.out", "w")
+            Node.debug.write("create %s\n" % index)
+
+    def __nonzero__(self):
+        # A node is always true, even when it has no children.
+        return 1
+
+    def toxml(self):
+        # Serialize this subtree to a string (no added whitespace).
+        writer = _get_StringIO()
+        self.writexml(writer)
+        return writer.getvalue()
+
+    def toprettyxml(self, indent="\t", newl="\n"):
+        # indent = the indentation string to prepend, per level
+        # newl = the newline string to append
+        writer = _get_StringIO()
+        self.writexml(writer, "", indent, newl)
+        return writer.getvalue()
+
+    def hasChildNodes(self):
+        if self.childNodes:
+            return 1
+        else:
+            return 0
+
+    def _get_firstChild(self):
+        if self.childNodes:
+            return self.childNodes[0]
+
+    def _get_lastChild(self):
+        if self.childNodes:
+            return self.childNodes[-1]
+
+    # On interpreters without the property builtin (pre-2.2), emulate
+    # computed attributes by dispatching __getattr__ to _get_* methods.
+    try:
+        property
+    except NameError:
+        def __getattr__(self, key):
+            if key[0:2] == "__":
+                raise AttributeError, key
+            # getattr should never call getattr!
+            # The inGetAttr flag guards against infinite recursion.
+            if self.__dict__.has_key("inGetAttr"):
+                del self.inGetAttr
+                raise AttributeError, key
+
+            prefix, attrname = key[:5], key[5:]
+            if prefix == "_get_":
+                self.inGetAttr = 1
+                if hasattr(self, attrname):
+                    del self.inGetAttr
+                    return (lambda self=self, attrname=attrname:
+                                    getattr(self, attrname))
+                else:
+                    del self.inGetAttr
+                    raise AttributeError, key
+            else:
+                self.inGetAttr = 1
+                try:
+                    func = getattr(self, "_get_" + key)
+                except AttributeError:
+                    raise AttributeError, key
+                del self.inGetAttr
+                return func()
+    else:
+        firstChild = property(_get_firstChild,
+                              doc="First child node, or None.")
+        lastChild = property(_get_lastChild,
+                             doc="Last child node, or None.")
+
+    def insertBefore(self, newChild, refChild):
+        # Inserting a DocumentFragment inserts its children instead.
+        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+            for c in tuple(newChild.childNodes):
+                self.insertBefore(c, refChild)
+            ### The DOM does not clearly specify what to return in this case
+            return newChild
+        if newChild.nodeType not in self.childNodeTypes:
+            raise HierarchyRequestErr, \
+                  "%s cannot be child of %s" % (repr(newChild), repr(self))
+        if newChild.parentNode is not None:
+            newChild.parentNode.removeChild(newChild)
+        if refChild is None:
+            self.appendChild(newChild)
+        else:
+            index = self.childNodes.index(refChild)
+            self.childNodes.insert(index, newChild)
+            # Re-link the sibling pointers around the insertion point.
+            newChild.nextSibling = refChild
+            refChild.previousSibling = newChild
+            if index:
+                node = self.childNodes[index-1]
+                node.nextSibling = newChild
+                newChild.previousSibling = node
+            else:
+                newChild.previousSibling = None
+            if self._makeParentNodes:
+                newChild.parentNode = self
+        return newChild
+
+    def appendChild(self, node):
+        # Appending a DocumentFragment appends its children instead.
+        if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+            for c in tuple(node.childNodes):
+                self.appendChild(c)
+            ### The DOM does not clearly specify what to return in this case
+            return node
+        if node.nodeType not in self.childNodeTypes:
+            raise HierarchyRequestErr, \
+                  "%s cannot be child of %s" % (repr(node), repr(self))
+        if node.parentNode is not None:
+            node.parentNode.removeChild(node)
+        if self.childNodes:
+            last = self.lastChild
+            node.previousSibling = last
+            last.nextSibling = node
+        else:
+            node.previousSibling = None
+        node.nextSibling = None
+        self.childNodes.append(node)
+        if self._makeParentNodes:
+            node.parentNode = self
+        return node
+
+    def replaceChild(self, newChild, oldChild):
+        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
+            refChild = oldChild.nextSibling
+            self.removeChild(oldChild)
+            return self.insertBefore(newChild, refChild)
+        if newChild.nodeType not in self.childNodeTypes:
+            raise HierarchyRequestErr, \
+                  "%s cannot be child of %s" % (repr(newChild), repr(self))
+        if newChild.parentNode is not None:
+            newChild.parentNode.removeChild(newChild)
+        if newChild is oldChild:
+            return
+        index = self.childNodes.index(oldChild)
+        self.childNodes[index] = newChild
+        if self._makeParentNodes:
+            newChild.parentNode = self
+            oldChild.parentNode = None
+        # Transfer oldChild's sibling links to newChild, then detach
+        # oldChild completely before patching the neighbours.
+        newChild.nextSibling = oldChild.nextSibling
+        newChild.previousSibling = oldChild.previousSibling
+        oldChild.nextSibling = None
+        oldChild.previousSibling = None
+        if newChild.previousSibling:
+            newChild.previousSibling.nextSibling = newChild
+        if newChild.nextSibling:
+            newChild.nextSibling.previousSibling = newChild
+        return oldChild
+
+    def removeChild(self, oldChild):
+        self.childNodes.remove(oldChild)
+        # Splice the removed node out of the sibling chain.
+        if oldChild.nextSibling is not None:
+            oldChild.nextSibling.previousSibling = oldChild.previousSibling
+        if oldChild.previousSibling is not None:
+            oldChild.previousSibling.nextSibling = oldChild.nextSibling
+        oldChild.nextSibling = oldChild.previousSibling = None
+
+        if self._makeParentNodes:
+            oldChild.parentNode = None
+        return oldChild
+
+    def normalize(self):
+        # Merge adjacent Text children and drop empty ones; L accumulates
+        # the surviving children in document order.
+        L = []
+        for child in self.childNodes:
+            if child.nodeType == Node.TEXT_NODE:
+                data = child.data
+                if data and L and L[-1].nodeType == child.nodeType:
+                    # collapse text node
+                    node = L[-1]
+                    node.data = node.nodeValue = node.data + child.data
+                    node.nextSibling = child.nextSibling
+                    child.unlink()
+                elif data:
+                    if L:
+                        L[-1].nextSibling = child
+                        child.previousSibling = L[-1]
+                    else:
+                        child.previousSibling = None
+                    L.append(child)
+                else:
+                    # empty text node; discard
+                    child.unlink()
+            else:
+                if L:
+                    L[-1].nextSibling = child
+                    child.previousSibling = L[-1]
+                else:
+                    child.previousSibling = None
+                L.append(child)
+                if child.nodeType == Node.ELEMENT_NODE:
+                    child.normalize()
+        self.childNodes[:] = L
+
+    def cloneNode(self, deep):
+        import new
+        # Build the clone from a copy of the instance dict instead of
+        # calling __init__, since subclasses (Attr, Element, ...) take
+        # different constructor arguments.
+        clone = new.instance(self.__class__, self.__dict__.copy())
+        if self._makeParentNodes:
+            clone.parentNode = None
+        clone.childNodes = NodeList()
+        if deep:
+            for child in self.childNodes:
+                clone.appendChild(child.cloneNode(1))
+        return clone
+
+    # DOM Level 3 (Working Draft 2001-Jan-26)
+
+    def isSameNode(self, other):
+        return self is other
+
+    # minidom-specific API:
+
+    def unlink(self):
+        # Break the parent/child/sibling reference cycles so the tree
+        # can be reclaimed; the node is unusable afterwards.
+        self.parentNode = self.ownerDocument = None
+        for child in self.childNodes:
+            child.unlink()
+        self.childNodes = None
+        self.previousSibling = None
+        self.nextSibling = None
+        if Node._debug:
+            index = repr(id(self)) + repr(self.__class__)
+            self.debug.write("Deleting: %s\n" % index)
+            del Node.allnodes[index]
+
+def _write_data(writer, data):
+    "Writes datachars to writer."
+    # Escape the XML special characters; "&" must be replaced first so
+    # the entities introduced by the later replacements are not mangled.
+    # _string is the string module localized at the top of this file.
+    replace = _string.replace
+    data = replace(data, "&", "&amp;")
+    data = replace(data, "<", "&lt;")
+    data = replace(data, "\"", "&quot;")
+    data = replace(data, ">", "&gt;")
+    writer.write(data)
+
+def _getElementsByTagNameHelper(parent, name, rc):
+    # Recursive depth-first, document-order search; "*" matches any tag
+    # name.  Matching elements are appended to rc, which is returned.
+    for node in parent.childNodes:
+        if node.nodeType == Node.ELEMENT_NODE and \
+            (name == "*" or node.tagName == name):
+            rc.append(node)
+        _getElementsByTagNameHelper(node, name, rc)
+    return rc
+
+def _getElementsByTagNameNSHelper(parent, nsURI, localName, rc):
+    # Namespace-aware variant: "*" is a wildcard for the local name
+    # and/or the namespace URI.  Unlike _getElementsByTagNameHelper,
+    # only element children are descended into.
+    for node in parent.childNodes:
+        if node.nodeType == Node.ELEMENT_NODE:
+            if ((localName == "*" or node.localName == localName) and
+                (nsURI == "*" or node.namespaceURI == nsURI)):
+                rc.append(node)
+            _getElementsByTagNameNSHelper(node, nsURI, localName, rc)
+    return rc
+
+class DocumentFragment(Node):
+    """Lightweight container node; inserting or appending a fragment
+    into a tree inserts its children instead (see Node.insertBefore
+    and Node.appendChild)."""
+    nodeType = Node.DOCUMENT_FRAGMENT_NODE
+    nodeName = "#document-fragment"
+    nodeValue = None
+    attributes = None
+    parentNode = None
+    childNodeTypes = (Node.ELEMENT_NODE,
+                      Node.TEXT_NODE,
+                      Node.CDATA_SECTION_NODE,
+                      Node.ENTITY_REFERENCE_NODE,
+                      Node.PROCESSING_INSTRUCTION_NODE,
+                      Node.COMMENT_NODE,
+                      Node.NOTATION_NODE)
+
+
+class Attr(Node):
+    """A single attribute node.
+
+    The value/nodeValue and name/nodeName pairs are kept synchronized
+    by __setattr__.
+    """
+    nodeType = Node.ATTRIBUTE_NODE
+    attributes = None
+    ownerElement = None
+    childNodeTypes = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
+
+    def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None, prefix=None):
+        # skip setattr for performance
+        d = self.__dict__
+        d["localName"] = localName or qName
+        d["nodeName"] = d["name"] = qName
+        d["namespaceURI"] = namespaceURI
+        d["prefix"] = prefix
+        Node.__init__(self)
+        # nodeValue and value are set elsewhere
+
+    def __setattr__(self, name, value):
+        # Keep the DOM alias pairs in sync no matter which name is assigned.
+        d = self.__dict__
+        if name in ("value", "nodeValue"):
+            d["value"] = d["nodeValue"] = value
+        elif name in ("name", "nodeName"):
+            d["name"] = d["nodeName"] = value
+        else:
+            d[name] = value
+
+    def cloneNode(self, deep):
+        clone = Node.cloneNode(self, deep)
+        # A cloned attribute starts out unowned; drop the copied link.
+        if clone.__dict__.has_key("ownerElement"):
+            del clone.ownerElement
+        return clone
+
+
+class NamedNodeMap:
+    """The attribute list is a transient interface to the underlying
+    dictionaries.  Mutations here will change the underlying element's
+    dictionary.
+
+    Ordering is imposed artificially and does not reflect the order of
+    attributes as found in an input document.
+    """
+
+    def __init__(self, attrs, attrsNS, ownerElement):
+        # attrs:   name -> Attr;  attrsNS: (namespaceURI, localName) -> Attr.
+        # Both dicts are owned (and shared) by the Element.
+        self._attrs = attrs
+        self._attrsNS = attrsNS
+        self._ownerElement = ownerElement
+
+    # Pre-property interpreters compute .length via __getattr__ instead.
+    try:
+        property
+    except NameError:
+        def __getattr__(self, name):
+            if name == "length":
+                return len(self._attrs)
+            raise AttributeError, name
+    else:
+        length = property(lambda self: len(self._attrs),
+                          doc="Number of nodes in the NamedNodeMap.")
+
+    def item(self, index):
+        # DOM accessor: None for out-of-range indexes.  The order used
+        # here is the dict's arbitrary key order.
+        try:
+            return self[self._attrs.keys()[index]]
+        except IndexError:
+            return None
+
+    def items(self):
+        # List of (name, value) pairs, dict-style.
+        L = []
+        for node in self._attrs.values():
+            L.append((node.nodeName, node.value))
+        return L
+
+    def itemsNS(self):
+        # Like items(), but keyed by (namespaceURI, localName).
+        L = []
+        for node in self._attrs.values():
+            L.append(((node.namespaceURI, node.localName), node.value))
+        return L
+
+    def keys(self):
+        return self._attrs.keys()
+
+    def keysNS(self):
+        return self._attrsNS.keys()
+
+    def values(self):
+        return self._attrs.values()
+
+    def get(self, name, value = None):
+        return self._attrs.get(name, value)
+
+    def __len__(self):
+        return self.length
+
+    def __cmp__(self, other):
+        # Two maps are equal iff they wrap the very same attribute dict.
+        if self._attrs is getattr(other, "_attrs", None):
+            return 0
+        else:
+            return cmp(id(self), id(other))
+
+    #FIXME: is it appropriate to return .value?
+    def __getitem__(self, attname_or_tuple):
+        # Tuple key -> namespace lookup; string key -> plain name lookup.
+        if type(attname_or_tuple) is _TupleType:
+            return self._attrsNS[attname_or_tuple]
+        else:
+            return self._attrs[attname_or_tuple]
+
+    # same as set
+    def __setitem__(self, attname, value):
+        # A string value is wrapped in a fresh Attr; an Attr is stored as-is.
+        if type(value) in _StringTypes:
+            node = Attr(attname)
+            node.value = value
+            node.ownerDocument = self._ownerElement.ownerDocument
+        else:
+            if not isinstance(value, Attr):
+                raise TypeError, "value must be a string or Attr object"
+            node = value
+        self.setNamedItem(node)
+
+    def setNamedItem(self, node):
+        if not isinstance(node, Attr):
+            raise HierarchyRequestErr, \
+                  "%s cannot be child of %s" % (repr(node), repr(self))
+        # Replace (and unlink) any attribute of the same name; the old
+        # node is returned per the DOM contract.
+        old = self._attrs.get(node.name)
+        if old:
+            old.unlink()
+        self._attrs[node.name] = node
+        self._attrsNS[(node.namespaceURI, node.localName)] = node
+        node.ownerElement = self._ownerElement
+        return old
+
+    def setNamedItemNS(self, node):
+        return self.setNamedItem(node)
+
+    def __delitem__(self, attname_or_tuple):
+        node = self[attname_or_tuple]
+        node.unlink()
+        del self._attrs[node.name]
+        del self._attrsNS[(node.namespaceURI, node.localName)]
+        # NOTE(review): this stores a length snapshot on the instance,
+        # shadowing the computed length above; it can go stale if
+        # attributes are later added through the element -- verify.
+        self.length = len(self._attrs)
+
+# Backwards-compatible alias for NamedNodeMap.
+AttributeList = NamedNodeMap
+
+
+class Element(Node):
+    """An XML element: tag name, attributes (double-indexed by name and
+    by (namespaceURI, localName)), and child nodes."""
+    nodeType = Node.ELEMENT_NODE
+    nextSibling = None
+    previousSibling = None
+    childNodeTypes = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
+                      Node.COMMENT_NODE, Node.TEXT_NODE,
+                      Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE)
+
+    def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
+                 localName=None):
+        Node.__init__(self)
+        self.tagName = self.nodeName = tagName
+        self.localName = localName or tagName
+        self.prefix = prefix
+        self.namespaceURI = namespaceURI
+        self.nodeValue = None
+
+        self._attrs = {}   # attributes are double-indexed:
+        self._attrsNS = {} #    tagName -> Attribute
+                           #    URI,localName -> Attribute
+                           # in the future: consider lazy generation
+                           # of attribute objects this is too tricky
+                           # for now because of headaches with
+                           # namespaces.
+
+    def cloneNode(self, deep):
+        clone = Node.cloneNode(self, deep)
+        # Attributes are always deep-copied, regardless of the deep flag.
+        clone._attrs = {}
+        clone._attrsNS = {}
+        for attr in self._attrs.values():
+            node = attr.cloneNode(1)
+            clone._attrs[node.name] = node
+            clone._attrsNS[(node.namespaceURI, node.localName)] = node
+            node.ownerElement = clone
+        return clone
+
+    def unlink(self):
+        # Break the element<->attribute cycle, then the tree links.
+        for attr in self._attrs.values():
+            attr.unlink()
+        self._attrs = None
+        self._attrsNS = None
+        Node.unlink(self)
+
+    def getAttribute(self, attname):
+        # Per the DOM, a missing attribute reads as the empty string.
+        try:
+            return self._attrs[attname].value
+        except KeyError:
+            return ""
+
+    def getAttributeNS(self, namespaceURI, localName):
+        try:
+            return self._attrsNS[(namespaceURI, localName)].value
+        except KeyError:
+            return ""
+
+    def setAttribute(self, attname, value):
+        attr = Attr(attname)
+        # for performance
+        d = attr.__dict__
+        d["value"] = d["nodeValue"] = value
+        d["ownerDocument"] = self.ownerDocument
+        self.setAttributeNode(attr)
+
+    def setAttributeNS(self, namespaceURI, qualifiedName, value):
+        prefix, localname = _nssplit(qualifiedName)
+        # for performance
+        attr = Attr(qualifiedName, namespaceURI, localname, prefix)
+        d = attr.__dict__
+        d["value"] = d["nodeValue"] = value
+        d["ownerDocument"] = self.ownerDocument
+        self.setAttributeNode(attr)
+
+    def getAttributeNode(self, attrname):
+        return self._attrs.get(attrname)
+
+    def getAttributeNodeNS(self, namespaceURI, localName):
+        return self._attrsNS.get((namespaceURI, localName))
+
+    def setAttributeNode(self, attr):
+        if attr.ownerElement not in (None, self):
+            raise xml.dom.InuseAttributeErr("attribute node already owned")
+        old = self._attrs.get(attr.name, None)
+        if old:
+            old.unlink()
+        self._attrs[attr.name] = attr
+        self._attrsNS[(attr.namespaceURI, attr.localName)] = attr
+
+        # This creates a circular reference, but Element.unlink()
+        # breaks the cycle since the references to the attribute
+        # dictionaries are tossed.
+        attr.ownerElement = self
+
+        if old is not attr:
+            # It might have already been part of this node, in which case
+            # it doesn't represent a change, and should not be returned.
+            return old
+
+    setAttributeNodeNS = setAttributeNode
+
+    def removeAttribute(self, name):
+        # KeyError propagates when the attribute is absent.
+        attr = self._attrs[name]
+        self.removeAttributeNode(attr)
+
+    def removeAttributeNS(self, namespaceURI, localName):
+        attr = self._attrsNS[(namespaceURI, localName)]
+        self.removeAttributeNode(attr)
+
+    def removeAttributeNode(self, node):
+        node.unlink()
+        del self._attrs[node.name]
+        del self._attrsNS[(node.namespaceURI, node.localName)]
+
+    removeAttributeNodeNS = removeAttributeNode
+
+    def hasAttribute(self, name):
+        return self._attrs.has_key(name)
+
+    def hasAttributeNS(self, namespaceURI, localName):
+        return self._attrsNS.has_key((namespaceURI, localName))
+
+    def getElementsByTagName(self, name):
+        return _getElementsByTagNameHelper(self, name, NodeList())
+
+    def getElementsByTagNameNS(self, namespaceURI, localName):
+        return _getElementsByTagNameNSHelper(self, namespaceURI, localName,
+                                             NodeList())
+
+    def __repr__(self):
+        return "<DOM Element: %s at %s>" % (self.tagName, id(self))
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        # indent = current indentation
+        # addindent = indentation to add to higher levels
+        # newl = newline string
+        writer.write(indent+"<" + self.tagName)
+
+        # Attributes are written sorted by name for deterministic output.
+        attrs = self._get_attributes()
+        a_names = attrs.keys()
+        a_names.sort()
+
+        for a_name in a_names:
+            writer.write(" %s=\"" % a_name)
+            _write_data(writer, attrs[a_name].value)
+            writer.write("\"")
+        if self.childNodes:
+            writer.write(">%s"%(newl))
+            for node in self.childNodes:
+                node.writexml(writer,indent+addindent,addindent,newl)
+            writer.write("%s</%s>%s" % (indent,self.tagName,newl))
+        else:
+            # Childless elements are emitted in self-closing form.
+            writer.write("/>%s"%(newl))
+
+    def _get_attributes(self):
+        # A fresh transient view over the shared attribute dicts.
+        return NamedNodeMap(self._attrs, self._attrsNS, self)
+
+    try:
+        property
+    except NameError:
+        pass
+    else:
+        attributes = property(_get_attributes,
+                              doc="NamedNodeMap of attributes on the element.")
+
+    def hasAttributes(self):
+        if self._attrs or self._attrsNS:
+            return 1
+        else:
+            return 0
+
+class Comment(Node):
+    """An XML comment node; data holds the text between the comment
+    delimiters.  Note that writexml does not escape the data."""
+    nodeType = Node.COMMENT_NODE
+    nodeName = "#comment"
+    attributes = None
+    childNodeTypes = ()
+
+    def __init__(self, data):
+        Node.__init__(self)
+        self.data = self.nodeValue = data
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        writer.write("%s<!--%s-->%s" % (indent,self.data,newl))
+
+class ProcessingInstruction(Node):
+    """A processing instruction node: target plus data, serialized as
+    <?target data?>.  The data is written unescaped."""
+    nodeType = Node.PROCESSING_INSTRUCTION_NODE
+    attributes = None
+    childNodeTypes = ()
+
+    def __init__(self, target, data):
+        Node.__init__(self)
+        self.target = self.nodeName = target
+        self.data = self.nodeValue = data
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
+
+class CharacterData(Node):
+    """Common base for textual nodes (Text, CDATASection).
+
+    Keeps data, nodeValue and length consistent through every mutator.
+    """
+    def __init__(self, data):
+        if type(data) not in _StringTypes:
+            raise TypeError, "node contents must be a string"
+        Node.__init__(self)
+        self.data = self.nodeValue = data
+        self.length = len(data)
+
+    def __repr__(self):
+        # Show at most the first 10 characters of the data.
+        if len(self.data) > 10:
+            dotdotdot = "..."
+        else:
+            dotdotdot = ""
+        return "<DOM %s node \"%s%s\">" % (
+            self.__class__.__name__, self.data[0:10], dotdotdot)
+
+    def substringData(self, offset, count):
+        if offset < 0:
+            raise xml.dom.IndexSizeErr("offset cannot be negative")
+        if offset >= len(self.data):
+            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
+        if count < 0:
+            raise xml.dom.IndexSizeErr("count cannot be negative")
+        return self.data[offset:offset+count]
+
+    def appendData(self, arg):
+        self.data = self.data + arg
+        self.nodeValue = self.data
+        self.length = len(self.data)
+
+    def insertData(self, offset, arg):
+        if offset < 0:
+            raise xml.dom.IndexSizeErr("offset cannot be negative")
+        if offset >= len(self.data):
+            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
+        if arg:
+            self.data = "%s%s%s" % (
+                self.data[:offset], arg, self.data[offset:])
+            self.nodeValue = self.data
+            self.length = len(self.data)
+
+    def deleteData(self, offset, count):
+        if offset < 0:
+            raise xml.dom.IndexSizeErr("offset cannot be negative")
+        if offset >= len(self.data):
+            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
+        if count < 0:
+            raise xml.dom.IndexSizeErr("count cannot be negative")
+        if count:
+            self.data = self.data[:offset] + self.data[offset+count:]
+            self.nodeValue = self.data
+            self.length = len(self.data)
+
+    def replaceData(self, offset, count, arg):
+        # Equivalent to deleteData(offset, count) followed by
+        # insertData(offset, arg), done in one assignment.
+        if offset < 0:
+            raise xml.dom.IndexSizeErr("offset cannot be negative")
+        if offset >= len(self.data):
+            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
+        if count < 0:
+            raise xml.dom.IndexSizeErr("count cannot be negative")
+        if count:
+            self.data = "%s%s%s" % (
+                self.data[:offset], arg, self.data[offset+count:])
+            self.nodeValue = self.data
+            self.length = len(self.data)
+
+class Text(CharacterData):
+    """A text node; writexml emits the data XML-escaped."""
+    nodeType = Node.TEXT_NODE
+    nodeName = "#text"
+    attributes = None
+    childNodeTypes = ()
+
+    def splitText(self, offset):
+        # Split this node at offset; the tail becomes a new Text node
+        # inserted immediately after this one (when this node is still
+        # attached to a parent), and is returned.
+        if offset < 0 or offset > len(self.data):
+            raise xml.dom.IndexSizeErr("illegal offset value")
+        newText = Text(self.data[offset:])
+        next = self.nextSibling
+        if self.parentNode and self in self.parentNode.childNodes:
+            if next is None:
+                self.parentNode.appendChild(newText)
+            else:
+                self.parentNode.insertBefore(newText, next)
+        self.data = self.data[:offset]
+        self.nodeValue = self.data
+        self.length = len(self.data)
+        return newText
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        _write_data(writer, "%s%s%s"%(indent, self.data, newl))
+
+
+class CDATASection(Text):
+    """A CDATA section; unlike Text.writexml, the data is written
+    verbatim (no escaping) and indent/newl are ignored."""
+    nodeType = Node.CDATA_SECTION_NODE
+    nodeName = "#cdata-section"
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        writer.write("<![CDATA[%s]]>" % self.data)
+
+
+def _nssplit(qualifiedName):
+    """Split a qualified name at the first colon into (prefix,
+    localname); prefix is None when there is no colon.  (The two-field
+    case returns a list rather than a tuple.)"""
+    fields = _string.split(qualifiedName, ':', 1)
+    if len(fields) == 2:
+        return fields
+    elif len(fields) == 1:
+        return (None, fields[0])
+
+
+class DocumentType(Node):
+    """A doctype node; publicId and systemId are filled in by
+    DOMImplementation.createDocumentType (below)."""
+    nodeType = Node.DOCUMENT_TYPE_NODE
+    nodeValue = None
+    attributes = None
+    name = None
+    publicId = None
+    systemId = None
+    internalSubset = None
+    entities = None
+    notations = None
+
+    def __init__(self, qualifiedName):
+        Node.__init__(self)
+        if qualifiedName:
+            # Only the local part of the qualified name is stored.
+            prefix, localname = _nssplit(qualifiedName)
+            self.name = localname
+
+
+class DOMImplementation:
+    """Factory for Document and DocumentType objects; advertises only
+    the "core" feature, versions 1.0 and 2.0."""
+    def hasFeature(self, feature, version):
+        if version not in ("1.0", "2.0"):
+            return 0
+        # Feature names are compared case-insensitively.
+        feature = _string.lower(feature)
+        return feature == "core"
+
+    def createDocument(self, namespaceURI, qualifiedName, doctype):
+        # A doctype already attached to another tree cannot be reused.
+        if doctype and doctype.parentNode is not None:
+            raise xml.dom.WrongDocumentErr(
+                "doctype object owned by another DOM tree")
+        doc = self._createDocument()
+        if doctype is None:
+            doctype = self.createDocumentType(qualifiedName, None, None)
+        if not qualifiedName:
+            # The spec is unclear what to raise here; SyntaxErr
+            # would be the other obvious candidate. Since Xerces raises
+            # InvalidCharacterErr, and since SyntaxErr is not listed
+            # for createDocument, that seems to be the better choice.
+            # XXX: need to check for illegal characters here and in
+            # createElement.
+            raise xml.dom.InvalidCharacterErr("Element with no name")
+        # Validate the prefix/namespace combination before creating
+        # the document element.
+        prefix, localname = _nssplit(qualifiedName)
+        if prefix == "xml" \
+           and namespaceURI != "http://www.w3.org/XML/1998/namespace":
+            raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
+        if prefix and not namespaceURI:
+            raise xml.dom.NamespaceErr(
+                "illegal use of prefix without namespaces")
+        element = doc.createElementNS(namespaceURI, qualifiedName)
+        doc.appendChild(element)
+        doctype.parentNode = doctype.ownerDocument = doc
+        doc.doctype = doctype
+        doc.implementation = self
+        return doc
+
+    def createDocumentType(self, qualifiedName, publicId, systemId):
+        doctype = DocumentType(qualifiedName)
+        doctype.publicId = publicId
+        doctype.systemId = systemId
+        return doctype
+
+    # internal
+    def _createDocument(self):
+        return Document()
+
+class Document(Node):
+    """The document node: root of a DOM tree and factory for all other
+    node types (create* methods)."""
+    nodeType = Node.DOCUMENT_NODE
+    nodeName = "#document"
+    nodeValue = None
+    attributes = None
+    doctype = None
+    parentNode = None
+    previousSibling = nextSibling = None
+
+    # A single shared implementation object serves all documents.
+    implementation = DOMImplementation()
+    childNodeTypes = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
+                      Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
+
+    def appendChild(self, node):
+        if node.nodeType not in self.childNodeTypes:
+            raise HierarchyRequestErr, \
+                  "%s cannot be child of %s" % (repr(node), repr(self))
+        if node.parentNode is not None:
+            node.parentNode.removeChild(node)
+
+        # A document may have at most one element child.
+        if node.nodeType == Node.ELEMENT_NODE \
+           and self._get_documentElement():
+            raise xml.dom.HierarchyRequestErr(
+                "two document elements disallowed")
+        return Node.appendChild(self, node)
+
+    def removeChild(self, oldChild):
+        self.childNodes.remove(oldChild)
+        oldChild.nextSibling = oldChild.previousSibling = None
+        oldChild.parentNode = None
+        if self.documentElement is oldChild:
+            self.documentElement = None
+
+        return oldChild
+
+    def _get_documentElement(self):
+        # The (single) element child, or None.
+        for node in self.childNodes:
+            if node.nodeType == Node.ELEMENT_NODE:
+                return node
+
+    try:
+        property
+    except NameError:
+        pass
+    else:
+        documentElement = property(_get_documentElement,
+                                   doc="Top-level element of this document.")
+
+    def unlink(self):
+        if self.doctype is not None:
+            self.doctype.unlink()
+            self.doctype = None
+        Node.unlink(self)
+
+    def createDocumentFragment(self):
+        d = DocumentFragment()
+        # NOTE(review): sets ownerDoc rather than ownerDocument --
+        # inconsistent with every other create* factory below; verify
+        # against upstream CPython before relying on fragment ownership.
+        d.ownerDoc = self
+        return d
+
+    def createElement(self, tagName):
+        e = Element(tagName)
+        e.ownerDocument = self
+        return e
+
+    def createTextNode(self, data):
+        t = Text(data)
+        t.ownerDocument = self
+        return t
+
+    def createCDATASection(self, data):
+        c = CDATASection(data)
+        c.ownerDocument = self
+        return c
+
+    def createComment(self, data):
+        c = Comment(data)
+        c.ownerDocument = self
+        return c
+
+    def createProcessingInstruction(self, target, data):
+        p = ProcessingInstruction(target, data)
+        p.ownerDocument = self
+        return p
+
+    def createAttribute(self, qName):
+        a = Attr(qName)
+        a.ownerDocument = self
+        a.value = ""
+        return a
+
+    def createElementNS(self, namespaceURI, qualifiedName):
+        prefix, localName = _nssplit(qualifiedName)
+        e = Element(qualifiedName, namespaceURI, prefix, localName)
+        e.ownerDocument = self
+        return e
+
+    def createAttributeNS(self, namespaceURI, qualifiedName):
+        prefix, localName = _nssplit(qualifiedName)
+        a = Attr(qualifiedName, namespaceURI, localName, prefix)
+        a.ownerDocument = self
+        a.value = ""
+        return a
+
+    def getElementsByTagName(self, name):
+        return _getElementsByTagNameHelper(self, name, NodeList())
+
+    def getElementsByTagNameNS(self, namespaceURI, localName):
+        return _getElementsByTagNameNSHelper(self, namespaceURI, localName,
+                                             NodeList())
+
+    def writexml(self, writer, indent="", addindent="", newl=""):
+        # The XML declaration is always emitted with a fixed version.
+        writer.write('<?xml version="1.0" ?>\n')
+        for node in self.childNodes:
+            node.writexml(writer, indent, addindent, newl)
+
+def _get_StringIO():
+    # we can't use cStringIO since it doesn't support Unicode strings
+    from StringIO import StringIO
+    return StringIO()
+
+def _doparse(func, args, kwargs):
+    # Run a pulldom factory, expand its first event (the document node)
+    # into a full tree, release the parser, and return the document.
+    events = apply(func, args, kwargs)
+    toktype, rootNode = events.getEvent()
+    events.expandNode(rootNode)
+    events.clear()
+    return rootNode
+
+def parse(*args, **kwargs):
+    """Parse a file into a DOM by filename or file object."""
+    # pulldom is imported lazily to keep module import cheap.
+    from xml.dom import pulldom
+    return _doparse(pulldom.parse, args, kwargs)
+
+def parseString(*args, **kwargs):
+    """Parse a file into a DOM from a string."""
+    from xml.dom import pulldom
+    return _doparse(pulldom.parseString, args, kwargs)
+
+def getDOMImplementation():
+    """Return the DOMImplementation singleton shared by all Documents."""
+    return Document.implementation
diff --git a/lib-python/2.2/xml/dom/pulldom.py b/lib-python/2.2/xml/dom/pulldom.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/dom/pulldom.py
@@ -0,0 +1,341 @@
+import xml.sax
+import xml.sax.handler
+import types
+
+try:
+    _StringTypes = [types.StringType, types.UnicodeType]  # str + unicode when the build supports unicode
+except AttributeError:
+    _StringTypes = [types.StringType]  # unicode-less builds: plain strings only
+
+START_ELEMENT = "START_ELEMENT"  # event-type tokens returned by DOMEventStream.getEvent()
+END_ELEMENT = "END_ELEMENT"
+COMMENT = "COMMENT"
+START_DOCUMENT = "START_DOCUMENT"
+END_DOCUMENT = "END_DOCUMENT"
+PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
+IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
+CHARACTERS = "CHARACTERS"
+
+class PullDOM(xml.sax.ContentHandler):  # SAX handler that builds DOM nodes and queues them as pull events
+    _locator = None
+    document = None
+
+    def __init__(self, documentFactory=None):  # documentFactory: DOMImplementation used to create the Document
+        self.documentFactory = documentFactory
+        self.firstEvent = [None, None]  # events form a singly linked list of [event, next] cells
+        self.lastEvent = self.firstEvent
+        self.elementStack = []  # open-element stack; top is the current parent
+        self.push = self.elementStack.append
+        try:
+            self.pop = self.elementStack.pop
+        except AttributeError:
+            # use class' pop instead
+            pass
+        self._ns_contexts = [{}] # contains uri -> prefix dicts
+        self._current_context = self._ns_contexts[-1]
+        self.pending_events = []  # events received before the Document node exists
+
+    def pop(self):  # fallback pop for list objects lacking a pop method
+        result = self.elementStack[-1]
+        del self.elementStack[-1]
+        return result
+
+    def setDocumentLocator(self, locator):  # SAX callback; locator is stored but unused here
+        self._locator = locator
+
+    def startPrefixMapping(self, prefix, uri):  # record xmlns declarations for the next startElementNS
+        if not hasattr(self, '_xmlns_attrs'):
+            self._xmlns_attrs = []
+        self._xmlns_attrs.append((prefix or 'xmlns', uri))  # default namespace is declared as plain "xmlns"
+        self._ns_contexts.append(self._current_context.copy())
+        self._current_context[uri] = prefix or None
+
+    def endPrefixMapping(self, prefix):  # restore the enclosing namespace context
+        self._current_context = self._ns_contexts.pop()
+
+    def startElementNS(self, name, tagName , attrs):  # name is a (uri, localname) pair
+        # Retrieve xml namespace declaration attributes.
+        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
+        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
+        if xmlns_attrs is not None:
+            for aname, value in xmlns_attrs:
+                attrs._attrs[(xmlns_uri, aname)] = value  # NOTE(review): pokes into AttributesNSImpl internals
+            self._xmlns_attrs = []
+        uri, localname = name
+        if uri:
+            # When using namespaces, the reader may or may not
+            # provide us with the original name. If not, create
+            # *a* valid tagName from the current context.
+            if tagName is None:
+                prefix = self._current_context[uri]
+                if prefix:
+                    tagName = prefix + ":" + localname
+                else:
+                    tagName = localname
+            if self.document:
+                node = self.document.createElementNS(uri, tagName)
+            else:
+                node = self.buildDocument(uri, tagName)  # first element: create the Document lazily
+        else:
+            # When the tagname is not prefixed, it just appears as
+            # localname
+            if self.document:
+                node = self.document.createElement(localname)
+            else:
+                node = self.buildDocument(None, localname)
+
+        for aname,value in attrs.items():
+            a_uri, a_localname = aname
+            if a_uri == xmlns_uri:  # re-create the xmlns declaration attributes on the DOM node
+                if a_localname == 'xmlns':
+                    qname = a_localname
+                else:
+                    qname = 'xmlns:' + a_localname
+                attr = self.document.createAttributeNS(a_uri, qname)
+                node.setAttributeNodeNS(attr)
+            elif a_uri:
+                prefix = self._current_context[a_uri]
+                if prefix:
+                    qname = prefix + ":" + a_localname
+                else:
+                    qname = a_localname
+                attr = self.document.createAttributeNS(a_uri, qname)
+                node.setAttributeNodeNS(attr)
+            else:
+                attr = self.document.createAttribute(a_localname)
+                node.setAttributeNode(attr)
+            attr.value = value
+
+        self.lastEvent[1] = [(START_ELEMENT, node), None]  # append to the event list
+        self.lastEvent = self.lastEvent[1]
+        self.push(node)
+
+    def endElementNS(self, name, tagName):  # pop the element and emit END_ELEMENT
+        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
+        self.lastEvent = self.lastEvent[1]
+
+    def startElement(self, name, attrs):  # non-namespace variant of startElementNS
+        if self.document:
+            node = self.document.createElement(name)
+        else:
+            node = self.buildDocument(None, name)
+
+        for aname,value in attrs.items():
+            attr = self.document.createAttribute(aname)
+            attr.value = value
+            node.setAttributeNode(attr)
+
+        self.lastEvent[1] = [(START_ELEMENT, node), None]
+        self.lastEvent = self.lastEvent[1]
+        self.push(node)
+
+    def endElement(self, name):
+        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
+        self.lastEvent = self.lastEvent[1]
+
+    def comment(self, s):  # lexical-handler callback; may fire before the Document exists
+        if self.document:
+            node = self.document.createComment(s)
+            self.lastEvent[1] = [(COMMENT, node), None]
+            self.lastEvent = self.lastEvent[1]
+        else:
+            event = [(COMMENT, s), None]  # raw text for now; converted to a node in buildDocument
+            self.pending_events.append(event)
+
+    def processingInstruction(self, target, data):  # PIs may also precede the root element
+        if self.document:
+            node = self.document.createProcessingInstruction(target, data)
+            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
+            self.lastEvent = self.lastEvent[1]
+        else:
+            event = [(PROCESSING_INSTRUCTION, target, data), None]
+            self.pending_events.append(event)
+
+    def ignorableWhitespace(self, chars):  # whitespace is kept as a text node event
+        node = self.document.createTextNode(chars)
+        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
+        self.lastEvent = self.lastEvent[1]
+
+    def characters(self, chars):
+        node = self.document.createTextNode(chars)
+        self.lastEvent[1] = [(CHARACTERS, node), None]
+        self.lastEvent = self.lastEvent[1]
+
+    def startDocument(self):  # only resolves the factory; Document creation waits for the root tag
+        if self.documentFactory is None:
+            import xml.dom.minidom
+            self.documentFactory = xml.dom.minidom.Document.implementation
+
+    def buildDocument(self, uri, tagname):
+        # Can't do that in startDocument, since we need the tagname
+        # XXX: obtain DocumentType
+        node = self.documentFactory.createDocument(uri, tagname, None)
+        self.document = node
+        self.lastEvent[1] = [(START_DOCUMENT, node), None]
+        self.lastEvent = self.lastEvent[1]
+        self.push(node)
+        # Put everything we have seen so far into the document
+        for e in self.pending_events:
+            if e[0][0] == PROCESSING_INSTRUCTION:
+                _,target,data = e[0]
+                n = self.document.createProcessingInstruction(target, data)
+                e[0] = (PROCESSING_INSTRUCTION, n)  # replace raw data with a real DOM node
+            elif e[0][0] == COMMENT:
+                n = self.document.createComment(e[0][1])
+                e[0] = (COMMENT, n)
+            else:
+                raise AssertionError("Unknown pending event ",e[0][0])
+            self.lastEvent[1] = e
+            self.lastEvent = e
+        self.pending_events = None  # no further pre-document events are possible
+        return node.firstChild  # the freshly created root element
+
+    def endDocument(self):
+        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]  # NOTE: lastEvent pointer is not advanced here
+        self.pop()  # pop the Document node itself
+
+    def clear(self):
+        "clear(): Explicitly release parsing structures"
+        self.document = None
+
+class ErrorHandler:  # default SAX error handler: print warnings, raise errors
+    def warning(self, exception):
+        print exception
+    def error(self, exception):
+        raise exception
+    def fatalError(self, exception):
+        raise exception
+
+class DOMEventStream:  # pull-style iterator over (event-type, node) pairs from a SAX parse
+    def __init__(self, stream, parser, bufsize):
+        self.stream = stream
+        self.parser = parser
+        self.bufsize = bufsize  # bytes fed to the parser per getEvent refill
+        if not hasattr(self.parser, 'feed'):
+            self.getEvent = self._slurp  # non-incremental parser: fall back to parsing everything up front
+        self.reset()
+
+    def reset(self):
+        self.pulldom = PullDOM()
+        # This content handler relies on namespace support
+        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
+        self.parser.setContentHandler(self.pulldom)
+
+    def __getitem__(self, pos):  # legacy iteration protocol: raise IndexError to stop a for-loop
+        rc = self.getEvent()
+        if rc:
+            return rc
+        raise IndexError
+
+    def expandNode(self, node):  # consume events until the given node's subtree is fully built
+        event = self.getEvent()
+        parents = [node]
+        while event:
+            token, cur_node = event
+            if cur_node is node:  # reached the END_ELEMENT of the node itself
+                return
+            if token != END_ELEMENT:
+                parents[-1].appendChild(cur_node)
+            if token == START_ELEMENT:
+                parents.append(cur_node)
+            elif token == END_ELEMENT:
+                del parents[-1]
+            event = self.getEvent()
+
+    def getEvent(self):
+        # use IncrementalParser interface, so we get the desired
+        # pull effect
+        if not self.pulldom.firstEvent[1]:
+            self.pulldom.lastEvent = self.pulldom.firstEvent  # queue drained: rewind the tail pointer
+        while not self.pulldom.firstEvent[1]:
+            buf = self.stream.read(self.bufsize)
+            if not buf:
+                self.parser.close()  # EOF: flush the parser, then signal end of stream
+                return None
+            self.parser.feed(buf)
+        rc = self.pulldom.firstEvent[1][0]
+        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]  # unlink the consumed cell
+        return rc
+
+    def _slurp(self):
+        """ Fallback replacement for getEvent() using the
+            standard SAX2 interface, which means we slurp the
+            SAX events into memory (no performance gain, but
+            we are compatible to all SAX parsers).
+        """
+        self.parser.parse(self.stream)
+        self.getEvent = self._emit  # subsequent calls just replay the queued events
+        return self._emit()
+
+    def _emit(self):
+        """ Fallback replacement for getEvent() that emits
+            the events that _slurp() read previously.
+        """
+        rc = self.pulldom.firstEvent[1][0]
+        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
+        return rc
+
+    def clear(self):
+        """clear(): Explicitly release parsing objects"""
+        self.pulldom.clear()
+        del self.pulldom
+        self.parser = None
+        self.stream = None
+
+class SAX2DOM(PullDOM):  # PullDOM variant that also links each new node into its parent immediately
+
+    def startElementNS(self, name, tagName , attrs):
+        PullDOM.startElementNS(self, name, tagName, attrs)
+        curNode = self.elementStack[-1]  # node just pushed by the base class
+        parentNode = self.elementStack[-2]
+        parentNode.appendChild(curNode)
+
+    def startElement(self, name, attrs):
+        PullDOM.startElement(self, name, attrs)
+        curNode = self.elementStack[-1]
+        parentNode = self.elementStack[-2]
+        parentNode.appendChild(curNode)
+
+    def processingInstruction(self, target, data):
+        PullDOM.processingInstruction(self, target, data)
+        node = self.lastEvent[0][1]  # node created by the base class for the last event
+        parentNode = self.elementStack[-1]
+        parentNode.appendChild(node)
+
+    def ignorableWhitespace(self, chars):
+        PullDOM.ignorableWhitespace(self, chars)
+        node = self.lastEvent[0][1]
+        parentNode = self.elementStack[-1]
+        parentNode.appendChild(node)
+
+    def characters(self, chars):
+        PullDOM.characters(self, chars)
+        node = self.lastEvent[0][1]
+        parentNode = self.elementStack[-1]
+        parentNode.appendChild(node)
+
+
+default_bufsize = (2 ** 14) - 20  # per-read chunk size used when the caller gives none
+
+def parse(stream_or_string, parser=None, bufsize=None):  # accept a filename or an open file object
+    if bufsize is None:
+        bufsize = default_bufsize
+    if type(stream_or_string) in _StringTypes:
+        stream = open(stream_or_string)  # treat strings as filenames
+    else:
+        stream = stream_or_string
+    if not parser:
+        parser = xml.sax.make_parser()
+    return DOMEventStream(stream, parser, bufsize)
+
+def parseString(string, parser=None):  # wrap a document held in memory as an event stream
+    try:
+        from cStringIO import StringIO  # fast C implementation when available
+    except ImportError:
+        from StringIO import StringIO
+
+    bufsize = len(string)  # one read delivers the whole document
+    buf = StringIO(string)
+    if not parser:
+        parser = xml.sax.make_parser()
+    return DOMEventStream(buf, parser, bufsize)
diff --git a/lib-python/2.2/xml/parsers/__init__.py b/lib-python/2.2/xml/parsers/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/parsers/__init__.py
@@ -0,0 +1,8 @@
+"""Python interfaces to XML parsers.
+
+This package contains one module:
+
+expat -- Python wrapper for James Clark's Expat parser, with namespace
+         support.
+
+"""
diff --git a/lib-python/2.2/xml/parsers/expat.py b/lib-python/2.2/xml/parsers/expat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/parsers/expat.py
@@ -0,0 +1,13 @@
+"""Interface to the Expat non-validating XML parser."""
+__version__ = '$Revision$'
+
+import sys
+
+try:
+    from pyexpat import *  # re-export the C extension's whole API under xml.parsers.expat
+except ImportError:
+    del sys.modules[__name__]  # drop the half-initialized module so a later import retries cleanly
+    del sys
+    raise
+
+del sys  # keep the module namespace limited to the pyexpat names
diff --git a/lib-python/2.2/xml/sax/__init__.py b/lib-python/2.2/xml/sax/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/__init__.py
@@ -0,0 +1,108 @@
+"""Simple API for XML (SAX) implementation for Python.
+
+This module provides an implementation of the SAX 2 interface;
+information about the Java version of the interface can be found at
+http://www.megginson.com/SAX/.  The Python version of the interface is
+documented at <...>.
+
+This package contains the following modules:
+
+handler -- Base classes and constants which define the SAX 2 API for
+           the 'client-side' of SAX for Python.
+
+saxutils -- Implementation of the convenience classes commonly used to
+            work with SAX.
+
+xmlreader -- Base classes and constants which define the SAX 2 API for
+             the parsers used with SAX for Python.
+
+expatreader -- Driver that allows use of the Expat parser with SAX.
+"""
+
+from xmlreader import InputSource
+from handler import ContentHandler, ErrorHandler
+from _exceptions import SAXException, SAXNotRecognizedException, \
+                        SAXParseException, SAXNotSupportedException, \
+                        SAXReaderNotAvailable
+
+
+def parse(source, handler, errorHandler=ErrorHandler()):  # NOTE(review): default handler instance is shared across calls
+    parser = make_parser()
+    parser.setContentHandler(handler)
+    parser.setErrorHandler(errorHandler)
+    parser.parse(source)
+
+def parseString(string, handler, errorHandler=ErrorHandler()):  # parse an in-memory document with the given handlers
+    try:
+        from cStringIO import StringIO
+    except ImportError:
+        from StringIO import StringIO
+
+    if errorHandler is None:  # explicit None also maps to the default handler
+        errorHandler = ErrorHandler()
+    parser = make_parser()
+    parser.setContentHandler(handler)
+    parser.setErrorHandler(errorHandler)
+
+    inpsrc = InputSource()
+    inpsrc.setByteStream(StringIO(string))  # feed the string through an InputSource byte stream
+    parser.parse(inpsrc)
+
+# this is the parser list used by the make_parser function if no
+# alternatives are given as parameters to the function
+
+default_parser_list = ["xml.sax.expatreader"]
+
+# tell modulefinder that importing sax potentially imports expatreader
+_false = 0
+if _false:
+    import xml.sax.expatreader  # never executed; only seen by static import scanners
+
+import os, string, sys
+if os.environ.has_key("PY_SAX_PARSER"):  # environment variable overrides the driver list
+    default_parser_list = string.split(os.environ["PY_SAX_PARSER"], ",")
+del os
+
+_key = "python.xml.sax.parser"
+if sys.platform[:4] == "java" and sys.registry.containsKey(_key):  # Jython registry property override
+    default_parser_list = string.split(sys.registry.getProperty(_key), ",")
+
+
+def make_parser(parser_list = []):
+    """Creates and returns a SAX parser.
+
+    Creates the first parser it is able to instantiate of the ones
+    given in the list created by doing parser_list +
+    default_parser_list.  The lists must contain the names of Python
+    modules containing both a SAX parser and a create_parser function."""
+
+    for parser_name in parser_list + default_parser_list:  # NOTE: mutable default is safe only while never mutated
+        try:
+            return _create_parser(parser_name)
+        except ImportError,e:
+            import sys
+            if sys.modules.has_key(parser_name):
+                # The parser module was found, but importing it
+                # failed unexpectedly, pass this exception through
+                raise
+        except SAXReaderNotAvailable:
+            # The parser module detected that it won't work properly,
+            # so try the next one
+            pass
+
+    raise SAXReaderNotAvailable("No parsers found", None)
+
+# --- Internal utility methods used by make_parser
+
+if sys.platform[ : 4] == "java":
+    def _create_parser(parser_name):  # Jython: use the runtime's own import machinery
+        from org.python.core import imp
+        drv_module = imp.importName(parser_name, 0, globals())
+        return drv_module.create_parser()
+
+else:
+    def _create_parser(parser_name):  # CPython: non-empty fromlist makes __import__ return the leaf module
+        drv_module = __import__(parser_name,{},{},['create_parser'])
+        return drv_module.create_parser()
+
+del sys  # module-level sys is no longer needed after choosing the implementation
diff --git a/lib-python/2.2/xml/sax/_exceptions.py b/lib-python/2.2/xml/sax/_exceptions.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/_exceptions.py
@@ -0,0 +1,126 @@
+"""Different kinds of SAX Exceptions"""
+import sys
+if sys.platform[:4] == "java":
+    from java.lang import Exception
+del sys
+
+# ===== SAXEXCEPTION =====
+
+class SAXException(Exception):
+    """Encapsulate an XML error or warning. This class can contain
+    basic error or warning information from either the XML parser or
+    the application: you can subclass it to provide additional
+    functionality, or to add localization. Note that although you will
+    receive a SAXException as the argument to the handlers in the
+    ErrorHandler interface, you are not actually required to throw
+    the exception; instead, you can simply read the information in
+    it."""
+
+    def __init__(self, msg, exception=None):
+        """Creates an exception. The message is required, but the exception
+        is optional."""
+        self._msg = msg
+        self._exception = exception  # optionally wraps a lower-level exception
+        Exception.__init__(self, msg)
+
+    def getMessage(self):
+        "Return a message for this exception."
+        return self._msg
+
+    def getException(self):
+        "Return the embedded exception, or None if there was none."
+        return self._exception
+
+    def __str__(self):
+        "Create a string representation of the exception."
+        return self._msg
+
+    def __getitem__(self, ix):
+        """Avoids weird error messages if someone does exception[ix] by
+        mistake, since Exception has __getitem__ defined."""
+        raise AttributeError("__getitem__")
+
+
+# ===== SAXPARSEEXCEPTION =====
+
+class SAXParseException(SAXException):
+    """Encapsulate an XML parse error or warning.
+
+    This exception will include information for locating the error in
+    the original XML document. Note that although the application will
+    receive a SAXParseException as the argument to the handlers in the
+    ErrorHandler interface, the application is not actually required
+    to throw the exception; instead, it can simply read the
+    information in it and take a different action.
+
+    Since this exception is a subclass of SAXException, it inherits
+    the ability to wrap another exception."""
+
+    def __init__(self, msg, exception, locator):
+        "Creates the exception. The exception parameter is allowed to be None."
+        SAXException.__init__(self, msg, exception)
+        self._locator = locator
+
+        # We need to cache this stuff at construction time.
+        # If this exception is thrown, the objects through which we must
+        # traverse to get this information may be deleted by the time
+        # it gets caught.
+        self._systemId = self._locator.getSystemId()
+        self._colnum = self._locator.getColumnNumber()
+        self._linenum = self._locator.getLineNumber()
+
+    def getColumnNumber(self):
+        """The column number of the end of the text where the exception
+        occurred."""
+        return self._colnum
+
+    def getLineNumber(self):
+        "The line number of the end of the text where the exception occurred."
+        return self._linenum
+
+    def getPublicId(self):
+        "Get the public identifier of the entity where the exception occurred."
+        return self._locator.getPublicId()  # not cached, unlike system id / line / column
+
+    def getSystemId(self):
+        "Get the system identifier of the entity where the exception occurred."
+        return self._systemId
+
+    def __str__(self):
+        "Create a string representation of the exception."
+        sysid = self.getSystemId()
+        if sysid is None:
+            sysid = "<unknown>"
+        return "%s:%d:%d: %s" % (sysid, self.getLineNumber(),
+                                 self.getColumnNumber(), self._msg)
+
+
+# ===== SAXNOTRECOGNIZEDEXCEPTION =====
+
+class SAXNotRecognizedException(SAXException):  # marker subclass; behavior comes from SAXException
+    """Exception class for an unrecognized identifier.
+
+    An XMLReader will raise this exception when it is confronted with an
+    unrecognized feature or property. SAX applications and extensions may
+    use this class for similar purposes."""
+
+
+# ===== SAXNOTSUPPORTEDEXCEPTION =====
+
+class SAXNotSupportedException(SAXException):  # marker subclass; behavior comes from SAXException
+    """Exception class for an unsupported operation.
+
+    An XMLReader will raise this exception when a service it cannot
+    perform is requested (specifically setting a state or value). SAX
+    applications and extensions may use this class for similar
+    purposes."""
+
+# ===== SAXNOTSUPPORTEDEXCEPTION =====
+
+class SAXReaderNotAvailable(SAXNotSupportedException):  # raised by driver modules at import time
+    """Exception class for a missing driver.
+
+    An XMLReader module (driver) should raise this exception when it
+    is first imported, e.g. when a support module cannot be imported.
+    It also may be raised during parsing, e.g. if executing an external
+    program is not permitted."""
diff --git a/lib-python/2.2/xml/sax/expatreader.py b/lib-python/2.2/xml/sax/expatreader.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/expatreader.py
@@ -0,0 +1,333 @@
+"""
+SAX driver for the Pyexpat C module.  This driver works with
+pyexpat.__version__ == '2.22'.
+"""
+
+version = "0.20"
+
+from xml.sax._exceptions import *
+
+# xml.parsers.expat does not raise ImportError in Jython
+import sys
+if sys.platform[:4] == "java":
+    raise SAXReaderNotAvailable("expat not available in Java", None)  # make_parser catches this and tries the next driver
+del sys
+
+try:
+    from xml.parsers import expat
+except ImportError:
+    raise SAXReaderNotAvailable("expat not supported", None)
+else:
+    if not hasattr(expat, "ParserCreate"):  # stub module present but without the C extension
+        raise SAXReaderNotAvailable("expat not supported", None)
+from xml.sax import xmlreader, saxutils, handler
+
+AttributesImpl = xmlreader.AttributesImpl
+AttributesNSImpl = xmlreader.AttributesNSImpl
+
+import string
+import weakref
+
+# --- ExpatLocator
+
+class ExpatLocator(xmlreader.Locator):
+    """Locator for use with the ExpatParser class.
+
+    This uses a weak reference to the parser object to avoid creating
+    a circular reference between the parser and the content handler.
+    """
+    def __init__(self, parser):
+        self._ref = weakref.ref(parser)  # weakref: do not keep the parser alive
+
+    def getColumnNumber(self):
+        parser = self._ref()  # None once the parser has been garbage collected
+        if parser is None or parser._parser is None:
+            return None
+        return parser._parser.ErrorColumnNumber
+
+    def getLineNumber(self):
+        parser = self._ref()
+        if parser is None or parser._parser is None:
+            return 1  # SAX convention: line numbers start at 1
+        return parser._parser.ErrorLineNumber
+
+    def getPublicId(self):
+        parser = self._ref()
+        if parser is None:
+            return None
+        return parser._source.getPublicId()
+
+    def getSystemId(self):
+        parser = self._ref()
+        if parser is None:
+            return None
+        return parser._source.getSystemId()
+
+
+# --- ExpatParser
+
+class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
+    "SAX driver for the Pyexpat C module."
+
+    def __init__(self, namespaceHandling=0, bufsize=2**16-20):
+        xmlreader.IncrementalParser.__init__(self, bufsize)
+        self._source = xmlreader.InputSource()
+        self._parser = None  # pyexpat parser; created lazily by reset()
+        self._namespaces = namespaceHandling
+        self._lex_handler_prop = None
+        self._parsing = 0
+        self._entity_stack = []  # saved (parser, source) pairs while inside external entities
+        self._ns_stack = []  # stack of uri -> [prefixes] dicts for qname reconstruction
+
+    # XMLReader methods
+
+    def parse(self, source):
+        "Parse an XML document from a URL or an InputSource."
+        source = saxutils.prepare_input_source(source)
+
+        self._source = source
+        self.reset()
+        self._cont_handler.setDocumentLocator(ExpatLocator(self))
+        xmlreader.IncrementalParser.parse(self, source)  # drives feed()/close() over the stream
+
+    def prepareParser(self, source):
+        if source.getSystemId() != None:
+            self._parser.SetBase(source.getSystemId())  # base URI for resolving relative system ids
+
+    # Redefined setContentHandle to allow changing handlers during parsing
+
+    def setContentHandler(self, handler):
+        xmlreader.IncrementalParser.setContentHandler(self, handler)
+        if self._parsing:
+            self._reset_cont_handler()  # rebind expat callbacks to the new handler mid-parse
+
+    def getFeature(self, name):
+        if name == handler.feature_namespaces:
+            return self._namespaces
+        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
+
+    def setFeature(self, name, state):
+        if self._parsing:
+            raise SAXNotSupportedException("Cannot set features while parsing")
+        if name == handler.feature_namespaces:
+            self._namespaces = state
+        else:
+            raise SAXNotRecognizedException("Feature '%s' not recognized" %
+                                            name)
+
+    def getProperty(self, name):
+        if name == handler.property_lexical_handler:
+            return self._lex_handler_prop
+        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
+
+    def setProperty(self, name, value):
+        if name == handler.property_lexical_handler:
+            self._lex_handler_prop = value
+            if self._parsing:
+                self._reset_lex_handler_prop()
+        else:
+            raise SAXNotRecognizedException("Property '%s' not recognized" % name)
+
+    # IncrementalParser methods
+
+    def feed(self, data, isFinal = 0):
+        if not self._parsing:
+            self.reset()  # first chunk: (re)create the expat parser
+            self._parsing = 1
+            self._cont_handler.startDocument()
+
+        try:
+            # The isFinal parameter is internal to the expat reader.
+            # If it is set to true, expat will check validity of the entire
+            # document. When feeding chunks, they are not normally final -
+            # except when invoked from close.
+            self._parser.Parse(data, isFinal)
+        except expat.error:
+            error_code = self._parser.ErrorCode
+            exc = SAXParseException(expat.ErrorString(error_code), None, self)
+            # FIXME: when to invoke error()?
+            self._err_handler.fatalError(exc)
+
+    def close(self):
+        if self._entity_stack:
+            # If we are completing an external entity, do nothing here
+            return
+        self.feed("", isFinal = 1)  # final empty chunk triggers well-formedness checks
+        self._cont_handler.endDocument()
+        self._parsing = 0
+        # break cycle created by expat handlers pointing to our methods
+        self._parser = None
+
+    def _reset_cont_handler(self):
+        self._parser.ProcessingInstructionHandler = \
+                                    self._cont_handler.processingInstruction
+        self._parser.CharacterDataHandler = self._cont_handler.characters
+
+    def _reset_lex_handler_prop(self):
+        self._parser.CommentHandler = self._lex_handler_prop.comment
+        self._parser.StartCdataSectionHandler = self._lex_handler_prop.startCDATA
+        self._parser.EndCdataSectionHandler = self._lex_handler_prop.endCDATA
+
+    def reset(self):
+        if self._namespaces:
+            self._parser = expat.ParserCreate(None, " ")  # space separator => "uri localname" element names
+            self._parser.StartElementHandler = self.start_element_ns
+            self._parser.EndElementHandler = self.end_element_ns
+        else:
+            self._parser = expat.ParserCreate()
+            self._parser.StartElementHandler = self.start_element
+            self._parser.EndElementHandler = self.end_element
+
+        self._reset_cont_handler()
+        self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+        self._parser.NotationDeclHandler = self.notation_decl
+        self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
+        self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
+
+        self._decl_handler_prop = None
+        if self._lex_handler_prop:
+            self._reset_lex_handler_prop()
+#         self._parser.DefaultHandler =
+#         self._parser.DefaultHandlerExpand =
+#         self._parser.NotStandaloneHandler =
+        self._parser.ExternalEntityRefHandler = self.external_entity_ref
+
+        self._parsing = 0
+        self._entity_stack = []
+
+    # Locator methods
+
+    def getColumnNumber(self):
+        if self._parser is None:
+            return None
+        return self._parser.ErrorColumnNumber
+
+    def getLineNumber(self):
+        if self._parser is None:
+            return 1
+        return self._parser.ErrorLineNumber
+
+    def getPublicId(self):
+        return self._source.getPublicId()
+
+    def getSystemId(self):
+        return self._source.getSystemId()
+
+    # event handlers
+    def start_element(self, name, attrs):  # non-namespace expat callback -> SAX startElement
+        self._cont_handler.startElement(name, AttributesImpl(attrs))
+
+    def end_element(self, name):
+        self._cont_handler.endElement(name)
+
+    def start_element_ns(self, name, attrs):  # expat gives "uri localname"; rebuild (pair, qname)
+        pair = string.split(name)
+        if len(pair) == 1:
+            pair = (None, name)  # no namespace on this element
+            qname = name
+        else:
+            pair = tuple(pair)
+            qname = pair[1]
+            if self._ns_stack:
+                prefix = self._ns_stack[-1][pair[0]][-1]  # most recent prefix bound to this uri
+                if prefix:
+                    qname = "%s:%s" % (prefix, pair[1])
+
+        newattrs = {}
+        qnames = {}
+        for (aname, value) in attrs.items():
+            apair = string.split(aname)
+            if len(apair) == 1:
+                apair = (None, aname)
+                aqname = aname
+            else:
+                apair = tuple(apair)
+                # XXX need to guess the prefix
+                prefix = self._ns_stack[-1][apair[0]][-1]
+                aqname = "%s:%s" % (prefix, apair[1])
+
+            newattrs[apair] = value
+            qnames[apair] = aqname
+
+        self._cont_handler.startElementNS(pair, qname,
+                                          AttributesNSImpl(newattrs, qnames))
+
+    def end_element_ns(self, name):  # mirror of start_element_ns for the closing tag
+        pair = string.split(name)
+        if len(pair) == 1:
+            pair = (None, name)
+            qname = name
+        else:
+            pair = tuple(pair)
+            qname = pair[1]
+            if self._ns_stack:
+                prefix = self._ns_stack[-1][pair[0]][-1]
+                if prefix:
+                    qname = "%s:%s" % (prefix, pair[1])
+
+        self._cont_handler.endElementNS(pair, qname)
+
+    # this is not used (call directly to ContentHandler)
+    def processing_instruction(self, target, data):
+        self._cont_handler.processingInstruction(target, data)
+
+    # this is not used (call directly to ContentHandler)
+    def character_data(self, data):
+        self._cont_handler.characters(data)
+
+    def start_namespace_decl(self, prefix, uri):  # push a copy-on-write namespace context
+        if self._ns_stack:
+            d = self._ns_stack[-1].copy()
+            if d.has_key(uri):
+                L = d[uri][:]  # copy the prefix list before appending
+                d[uri] = L
+                L.append(prefix)
+            else:
+                d[uri] = [prefix]
+        else:
+            d = {uri: [prefix]}
+        self._ns_stack.append(d)
+        self._cont_handler.startPrefixMapping(prefix, uri)
+
+    def end_namespace_decl(self, prefix):
+        del self._ns_stack[-1]
+        self._cont_handler.endPrefixMapping(prefix)
+
+    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+        self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)  # note pubid/sysid order swap
+
+    def notation_decl(self, name, base, sysid, pubid):
+        self._dtd_handler.notationDecl(name, pubid, sysid)
+
+    def external_entity_ref(self, context, base, sysid, pubid):
+        source = self._ent_handler.resolveEntity(pubid, sysid)
+        source = saxutils.prepare_input_source(source,
+                                               self._source.getSystemId() or
+                                               "")
+
+        self._entity_stack.append((self._parser, self._source))  # save state; restored below
+        self._parser = self._parser.ExternalEntityParserCreate(context)
+        self._source = source
+
+        try:
+            xmlreader.IncrementalParser.parse(self, source)
+        except:
+            return 0  # FIXME: save error info here?
+
+        (self._parser, self._source) = self._entity_stack[-1]
+        del self._entity_stack[-1]
+        return 1  # non-zero tells expat the entity was handled
+
+# ---
+
+def create_parser(*args, **kwargs):  # driver entry point used by xml.sax.make_parser
+    return apply(ExpatParser, args, kwargs)  # pre-2.0 spelling of ExpatParser(*args, **kwargs)
+
+# ---
+
+if __name__ == "__main__":  # manual smoke test: echo a sample document back out
+    import xml.sax
+    p = create_parser()
+    p.setContentHandler(xml.sax.XMLGenerator())
+    p.setErrorHandler(xml.sax.ErrorHandler())
+    p.parse("../../../hamlet.xml")  # path relative to a CPython source checkout
diff --git a/lib-python/2.2/xml/sax/handler.py b/lib-python/2.2/xml/sax/handler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/handler.py
@@ -0,0 +1,321 @@
+"""
+This module contains the core classes of version 2.0 of SAX for Python.
+This file provides only default classes with absolutely minimum
+functionality, from which drivers and applications can be subclassed.
+
+Many of these classes are empty and are included only as documentation
+of the interfaces.
+
+$Id$
+"""
+
+version = '2.0beta'
+
+#============================================================================
+#
+# HANDLER INTERFACES
+#
+#============================================================================
+
+# ===== ERRORHANDLER =====
+
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # Default policy: treat recoverable errors as fatal by re-raising.
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # Warnings are merely reported (Python 2 print statement, so this
        # writes to sys.stdout) and parsing continues.
        print exception
+
+
+# ===== CONTENTHANDLER =====
+
class ContentHandler:
    """Interface for receiving logical document content events.

    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""

    def __init__(self):
        # Filled in by setDocumentLocator(); stays None if the parser
        # never supplies a Locator.
        self._locator = None

    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.

        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.

        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.

        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator

    def startDocument(self):
        """Receive notification of the beginning of a document.

        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""

    def endDocument(self):
        """Receive notification of the end of a document.

        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.

        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).

        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.

        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""

    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.

        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""

    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElement event."""

    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.

        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element."""

    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElementNS event."""

    def characters(self, content):
        """Receive notification of character data.

        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""

    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.

        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.

        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information.

        The application must not attempt to read from the array
        outside of the specified range."""

    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.

        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.

        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""

    def skippedEntity(self, name):
        """Receive notification of a skipped entity.

        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
+
+
+# ===== DTDHandler =====
+
class DTDHandler:
    """Handle DTD events.

    This interface specifies only those DTD events required for basic
    parsing (unparsed entities and attributes)."""

    def notationDecl(self, name, publicId, systemId):
        "Handle a notation declaration event."
        # Default: ignore the event.

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        "Handle an unparsed entity declaration event."
        # Default: ignore the event.
+
+
+# ===== ENTITYRESOLVER =====
+
class EntityResolver:
    """Basic interface for resolving entities.

    Register an instance of (a subclass of) this class with your parser
    and it will be consulted for every external entity encountered.
    Note that DefaultHandler implements this same default behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve an entity, returning either a system identifier
        string to read from or an InputSource object.

        The default resolution simply reuses the given system id."""
        return systemId
+
+
+#============================================================================
+#
+# CORE FEATURES
+#
+#============================================================================
+
+feature_namespaces = "http://xml.org/sax/features/namespaces"
+# true: Perform Namespace processing (default).
+# false: Optionally do not perform Namespace processing
+#        (implies namespace-prefixes).
+# access: (parsing) read-only; (not parsing) read/write
+
+feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
+# true: Report the original prefixed names and attributes used for Namespace
+#       declarations.
+# false: Do not report attributes used for Namespace declarations, and
+#        optionally do not report original prefixed names (default).
+# access: (parsing) read-only; (not parsing) read/write
+
+feature_string_interning = "http://xml.org/sax/features/string-interning"
+# true: All element names, prefixes, attribute names, Namespace URIs, and
+#       local names are interned using the built-in intern function.
+# false: Names are not necessarily interned, although they may be (default).
+# access: (parsing) read-only; (not parsing) read/write
+
+feature_validation = "http://xml.org/sax/features/validation"
+# true: Report all validation errors (implies external-general-entities and
+#       external-parameter-entities).
+# false: Do not report validation errors.
+# access: (parsing) read-only; (not parsing) read/write
+
+feature_external_ges = "http://xml.org/sax/features/external-general-entities"
+# true: Include all external general (text) entities.
+# false: Do not include external general entities.
+# access: (parsing) read-only; (not parsing) read/write
+
+feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
+# true: Include all external parameter entities, including the external
+#       DTD subset.
+# false: Do not include any external parameter entities, even the external
+#        DTD subset.
+# access: (parsing) read-only; (not parsing) read/write
+
+all_features = [feature_namespaces,
+                feature_namespace_prefixes,
+                feature_string_interning,
+                feature_validation,
+                feature_external_ges,
+                feature_external_pes]
+
+
+#============================================================================
+#
+# CORE PROPERTIES
+#
+#============================================================================
+
+property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
+# data type: xml.sax.sax2lib.LexicalHandler
+# description: An optional extension handler for lexical events like comments.
+# access: read/write
+
+property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
+# data type: xml.sax.sax2lib.DeclHandler
+# description: An optional extension handler for DTD-related events other
+#              than notations and unparsed entities.
+# access: read/write
+
+property_dom_node = "http://xml.org/sax/properties/dom-node"
+# data type: org.w3c.dom.Node
+# description: When parsing, the current DOM node being visited if this is
+#              a DOM iterator; when not parsing, the root DOM node for
+#              iteration.
+# access: (parsing) read-only; (not parsing) read/write
+
+property_xml_string = "http://xml.org/sax/properties/xml-string"
+# data type: String
+# description: The literal string of characters that was the source for
+#              the current event.
+# access: read-only
+
+all_properties = [property_lexical_handler,
+                  property_dom_node,
+                  property_declaration_handler,
+                  property_xml_string]
diff --git a/lib-python/2.2/xml/sax/saxutils.py b/lib-python/2.2/xml/sax/saxutils.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/saxutils.py
@@ -0,0 +1,260 @@
+"""\
+A library of useful helper classes to the SAX classes, for the
+convenience of application and driver writers.
+"""
+
+import os, urlparse, urllib, types
+import handler
+import xmlreader
+
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    # types.UnicodeType is absent on interpreters built without Unicode
    # support, so fall back to byte strings only.
    _StringTypes = [types.StringType]
+
+
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    Additional replacements may be requested through the optional
    *entities* mapping of strings to strings; each key is substituted
    with its value after the standard XML escaping has been applied.
    """
    # "&" must be rewritten first so the entity references introduced by
    # the later replacements are not themselves re-escaped.
    data = data.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    for target, replacement in entities.items():
        data = data.replace(target, replacement)
    return data
+
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Applies the same escaping as escape() (&, <, > plus the optional
    *entities* mapping), then wraps the result in quotes suitable for
    an XML attribute value.  Double quotes are preferred; a value
    containing a double quote is single-quoted instead, and when both
    quote characters occur the double quotes are written as &quot;.
    """
    # Inlined escape(): ampersand first, then the angle brackets, then
    # any caller-supplied replacements.
    data = data.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    for target, replacement in entities.items():
        data = data.replace(target, replacement)
    if '"' not in data:
        return '"%s"' % data
    if "'" not in data:
        return "'%s'" % data
    return '"%s"' % data.replace('"', "&quot;")
+
+
class XMLGenerator(handler.ContentHandler):
    """ContentHandler that writes the received event stream back out as
    an XML document on the given stream (default: sys.stdout)."""

    def __init__(self, out=None, encoding="iso-8859-1"):
        if out is None:
            import sys
            out = sys.stdout
        handler.ContentHandler.__init__(self)
        self._out = out
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # (prefix, uri) pairs declared since the last start tag; written
        # as xmlns attributes on the next startElementNS.
        self._undeclared_ns_maps = []
        self._encoding = encoding

    # ContentHandler methods

    def startDocument(self):
        self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def startPrefixMapping(self, prefix, uri):
        # Save a copy of the current mapping, then extend it in place.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        # Restore the mapping that was in effect before the matching
        # startPrefixMapping, then pop it off the stack.
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._out.write('<' + name)
        # NOTE: the loop variable deliberately reuses `name`; the element
        # name is not needed after the opening '<name' has been written.
        for (name, value) in attrs.items():
            self._out.write(' %s=%s' % (name, quoteattr(value)))
        self._out.write('>')

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        if name[0] is None:
            # if the name was not namespace-scoped, use the unqualified part
            name = name[1]
        else:
            # else try to restore the original prefix from the namespace
            name = self._current_context[name[0]] + ":" + name[1]
        self._out.write('<' + name)

        for pair in self._undeclared_ns_maps:
            self._out.write(' xmlns:%s="%s"' % pair)
        self._undeclared_ns_maps = []

        # NOTE(review): every attribute name is looked up in the prefix
        # map; an attribute with no namespace (name[0] is None) would
        # raise KeyError here -- confirm attributes are always qualified.
        for (name, value) in attrs.items():
            name = self._current_context[name[0]] + ":" + name[1]
            self._out.write(' %s=%s' % (name, quoteattr(value)))
        self._out.write('>')

    def endElementNS(self, name, qname):
        if name[0] is None:
            name = name[1]
        else:
            name = self._current_context[name[0]] + ":" + name[1]
        self._out.write('</%s>' % name)

    def characters(self, content):
        self._out.write(escape(content))

    def ignorableWhitespace(self, content):
        self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
+
+
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        # `parent` is the upstream XMLReader this filter wraps; it may be
        # supplied later via setParent().
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        # Bug fix: forward to startElementNS with all three arguments;
        # the original delegated to startElement(name, attrs), silently
        # downgrading namespace-mode events (fixed likewise in later
        # CPython versions of saxutils).
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        # Bug fix: propagate the resolver's result; the original dropped
        # the return value, so callers always received None.
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # Interpose this filter as every handler of the upstream reader,
        # then let the upstream reader drive the parse.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        "Return the wrapped upstream XMLReader."
        return self._parent

    def setParent(self, parent):
        "Set the upstream XMLReader this filter wraps."
        self._parent = parent
+
+# --- Utility functions
+
def prepare_input_source(source, base = ""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading.

    *source* may be an InputSource, a system-identifier string, or a
    file-like object with a read() method.
    """
    if type(source) in _StringTypes:
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # A stream: wrap it, recording its name (if any) as system id.
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name"):
            source.setSystemId(f.name)

    if source.getByteStream() is None:
        # No stream yet: open the system identifier, resolved against the
        # directory of the base URL.
        sysid = source.getSystemId()
        basehead = os.path.split(os.path.normpath(base))[0]
        sysidfilename = os.path.join(basehead, sysid)
        if os.path.isfile(sysidfilename):
            # Bug fix: test and open the base-relative filename.  The
            # original checked/opened the raw sysid (relative to the
            # process cwd) while recording the joined path as system id,
            # so the two disagreed.  (Same fix as later CPython.)
            source.setSystemId(sysidfilename)
            f = open(sysidfilename, "rb")
        else:
            source.setSystemId(urlparse.urljoin(base, sysid))
            f = urllib.urlopen(source.getSystemId())

        source.setByteStream(f)

    return source
diff --git a/lib-python/2.2/xml/sax/xmlreader.py b/lib-python/2.2/xml/sax/xmlreader.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xml/sax/xmlreader.py
@@ -0,0 +1,378 @@
+"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
+should be based on this code. """
+
+import handler
+
+from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
+
+
+# ===== XMLREADER =====
+
class XMLReader:
    """Interface for reading an XML document using callbacks.

    An XMLReader is what a SAX2 parser driver must implement.
    Applications register their handler objects with it, query and tune
    parser features and properties through it, and start a parse via it.

    All SAX interfaces are assumed synchronous: parse() does not return
    until the entire document has been processed, and the reader waits
    for each handler callback to return before delivering the next
    event."""

    def __init__(self):
        # Install the do-nothing default handlers so every delegation
        # target is always callable.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        """Parse an XML document from a system identifier or an
        InputSource.  Must be overridden by drivers."""
        raise NotImplementedError("This method must be implemented!")

    def getContentHandler(self):
        """Return the currently registered ContentHandler."""
        return self._cont_handler

    def setContentHandler(self, handler):
        """Register *handler* to receive document content events."""
        self._cont_handler = handler

    def getDTDHandler(self):
        """Return the currently registered DTD handler."""
        return self._dtd_handler

    def setDTDHandler(self, handler):
        """Register *handler* to receive basic DTD-related events."""
        self._dtd_handler = handler

    def getEntityResolver(self):
        """Return the currently registered EntityResolver."""
        return self._ent_handler

    def setEntityResolver(self, resolver):
        """Register *resolver* to resolve external entities."""
        self._ent_handler = resolver

    def getErrorHandler(self):
        """Return the currently registered ErrorHandler."""
        return self._err_handler

    def setErrorHandler(self, handler):
        """Register *handler* to receive error-message events."""
        self._err_handler = handler

    def setLocale(self, locale):
        """Set the locale for errors and warnings.

        Parsers are not required to provide localization; those that
        cannot support the requested locale must raise a SAX exception.
        A locale change may be requested in the middle of a parse."""
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        """Look up and return the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        """Set the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        """Look up and return the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        """Set the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
+
class IncrementalParser(XMLReader):
    """XMLReader extended with incremental (push-mode) parsing.

    Raw document data may be handed to the parser piecemeal through
    feed(); close() signals the end of the document, and reset() must be
    called afterwards before the parser will accept new data.  Support
    for this interface is optional, since not every underlying XML
    parser can provide it.

    None of these methods may be called while a parse is in progress,
    i.e. after parse() has been called and before it returns.

    As a convenience to SAX 2.0 driver writers, parse() is implemented
    here on top of feed/close (prepareParser hooks up the driver)."""

    def __init__(self, bufsize=2**16):
        # Chunk size used by the pull-style parse() convenience below.
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        """Drive an incremental parse from *source* by reading it in
        chunks and feeding them to the driver."""
        import saxutils
        source = saxutils.prepare_input_source(source)

        self.prepareParser(source)
        stream = source.getByteStream()
        chunk = stream.read(self._bufsize)
        while chunk:
            self.feed(chunk)
            chunk = stream.read(self._bufsize)
        self.close()

    def feed(self, data):
        """Give raw XML data to the parser, which parses it and emits
        the corresponding events.  XML constructs may be split across
        several calls to feed.

        feed may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """Called by the parse implementation so the SAX 2.0 driver can
        prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """Notify the parser that the entire document has been fed.

        This lets the parser run its final checks and empty its internal
        buffer.  The parser is not ready for another document until
        reset() has been called.

        close may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """Make the parser ready for a new document after close().

        The result of calling parse or feed after close without an
        intervening reset is undefined."""
        raise NotImplementedError("This method must be implemented!")
+
+# ===== LOCATOR =====
+
class Locator:
    """Associates a SAX event with a document location.

    A locator object returns valid results only while a DocumentHandler
    callback is executing; at any other time the results are
    unpredictable.  Every query defaults to 'unknown'."""

    def getColumnNumber(self):
        """Column number where the current event ends, or -1 if unknown."""
        return -1

    def getLineNumber(self):
        """Line number where the current event ends, or -1 if unknown."""
        return -1

    def getPublicId(self):
        """Public identifier for the current event, or None if unknown."""
        return None

    def getSystemId(self):
        """System identifier for the current event, or None if unknown."""
        return None
+
+# ===== INPUTSOURCE =====
+
class InputSource:
    """Bundles the information an XMLReader needs to read an entity.

    An InputSource may carry a public identifier, a system identifier,
    a byte stream (optionally with a declared character encoding),
    and/or a character stream.  Applications construct these for
    XMLReader.parse and return them from EntityResolver.resolveEntity.

    The object belongs to the application: an XMLReader never modifies
    an InputSource passed to it, though it may copy one and modify the
    copy."""

    def __init__(self, system_id = None):
        self.__system_id = system_id
        self.__public_id = None
        self.__encoding  = None
        self.__bytefile  = None
        self.__charfile  = None

    def setPublicId(self, public_id):
        """Set the public identifier of this InputSource."""
        self.__public_id = public_id

    def getPublicId(self):
        """Return the public identifier of this InputSource."""
        return self.__public_id

    def setSystemId(self, system_id):
        """Set the system identifier of this InputSource."""
        self.__system_id = system_id

    def getSystemId(self):
        """Return the system identifier of this InputSource."""
        return self.__system_id

    def setEncoding(self, encoding):
        """Set the character encoding of this InputSource.

        The value must be acceptable in an XML encoding declaration
        (see section 4.3.3 of the XML recommendation).  It is ignored
        whenever a character stream is also present."""
        self.__encoding = encoding

    def getEncoding(self):
        """Return the character encoding of this InputSource."""
        return self.__encoding

    def setByteStream(self, bytefile):
        """Set the byte stream for this input source.

        The stream is a file-like object that performs no
        byte-to-character conversion.  A parser ignores it when a
        character stream is also present, but prefers it over opening a
        URI connection itself.  If the encoding of the bytes is known,
        declare it with setEncoding."""
        self.__bytefile = bytefile

    def getByteStream(self):
        """Return the byte stream for this input source.

        getEncoding reports the stream's character encoding, or None
        when it is unknown."""
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Set the character stream for this input source.

        The stream must be a Python 2.0 Unicode-wrapped file-like
        object producing Unicode strings.  When present, the parser
        ignores any byte stream and never opens a URI connection to the
        system identifier."""
        self.__charfile = charfile

    def getCharacterStream(self):
        """Return the character stream for this input source."""
        return self.__charfile
+
+# ===== ATTRIBUTESIMPL =====
+
class AttributesImpl:
    """Non-namespace-aware SAX Attributes implementation backed by a
    plain name -> value dict."""

    def __init__(self, attrs):
        """Non-NS-aware implementation.

        attrs should be of the form {name : value}."""
        self._attrs = attrs

    def getLength(self):
        # Number of attributes.
        return len(self._attrs)

    def getType(self, name):
        # Without DTD information every attribute is reported as CDATA.
        return "CDATA"

    def getValue(self, name):
        return self._attrs[name]

    def getValueByQName(self, name):
        # In non-namespace mode the qualified name IS the name.
        return self._attrs[name]

    def getNameByQName(self, name):
        # Identity mapping, but validate that the attribute exists.
        if not self._attrs.has_key(name):
            raise KeyError, name
        return name

    def getQNameByName(self, name):
        # Identity mapping, but validate that the attribute exists.
        if not self._attrs.has_key(name):
            raise KeyError, name
        return name

    def getNames(self):
        return self._attrs.keys()

    def getQNames(self):
        # Same as getNames: names and qnames coincide in this mode.
        return self._attrs.keys()

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return self._attrs.keys()

    def has_key(self, name):
        return self._attrs.has_key(name)

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        # NOTE(review): the new instance shares the same attrs dict (no
        # dict copy is made), so mutations are visible through both
        # objects -- confirm this shallow behaviour is intended.
        return self.__class__(self._attrs)

    def items(self):
        return self._attrs.items()

    def values(self):
        return self._attrs.values()
+
+# ===== ATTRIBUTESNSIMPL =====
+
class AttributesNSImpl(AttributesImpl):
    """Namespace-aware SAX Attributes implementation keyed on
    (ns_uri, localname) tuples, with a parallel qname mapping."""

    def __init__(self, attrs, qnames):
        """NS-aware implementation.

        attrs should be of the form {(ns_uri, lname): value, ...}.
        qnames of the form {(ns_uri, lname): qname, ...}."""
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        # Linear scan: the qname mapping is stored in the forward
        # direction only.
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return self._attrs[nsname]

        raise KeyError, name

    def getNameByQName(self, name):
        # Linear scan, as above.
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return nsname

        raise KeyError, name

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return self._qnames.values()

    def copy(self):
        # NOTE(review): as in AttributesImpl.copy, both underlying dicts
        # are shared with the new instance, not copied.
        return self.__class__(self._attrs, self._qnames)
+
+
def _test():
    # Minimal smoke test: the interface classes must at least instantiate.
    XMLReader()
    IncrementalParser()
    Locator()

if __name__ == "__main__":
    _test()
diff --git a/lib-python/2.2/xmllib.py b/lib-python/2.2/xmllib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xmllib.py
@@ -0,0 +1,929 @@
+"""A parser for XML, using the derived class as static DTD."""
+
+# Author: Sjoerd Mullender.
+
+import re
+import string
+
+
version = '0.3'                         # module version string (informational)
+
class Error(RuntimeError):
    """Exception raised for parsing errors; see XMLParser.syntax_error."""
    pass
+
# Regular expressions used for parsing

_S = '[ \t\r\n]+'                       # white space
_opS = '[ \t\r\n]*'                     # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'    # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")"        # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')

# entity and character reference forms
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')

# tag and attribute machinery
attrfind = re.compile(
    _S + '(?P<name>' + _Name + ')'
    '(' + _opS + '=' + _opS +
    '(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
                      '(?P<attrs>(?:'+attrfind.pattern+')*)'+
                      starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
                        "'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
                 'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
              ')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
                     '(?:'+_S+_ExternalId+')?'+_opS)
# <?xml version=... encoding=... standalone=... ?> declaration
xmldecl = re.compile('<\?xml'+_S+
                     'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
                     '(?:'+_S+'encoding'+_opS+'='+_opS+
                        "(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
                        '"[A-Za-z][-A-Za-z0-9._]*"))?'
                     '(?:'+_S+'standalone'+_opS+'='+_opS+
                        '(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
                     _opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
# maps whitespace characters to plain spaces for attribute normalization
attrtrans = string.maketrans(' \r\n\t', '    ')

# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*'    # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
                   '(?P<local>' + _NCName + ')$')

xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
+
+# XML parser base class -- find tags and call handler functions.
+# Usage: p = XMLParser(); p.feed(data); ...; p.close().
+# The dtd is defined by deriving a class which defines methods with
+# special names to handle tags: start_foo and end_foo to handle <foo>
+# and </foo>, respectively.  The data between tags is passed to the
+# parser by calling self.handle_data() with some data as argument (the
+# data may be split up in arbitrary chunks).
+
class XMLParser:
    """XML parser base class -- find tags and call handler functions.

    Usage: p = XMLParser(); p.feed(data); ...; p.close().
    The dtd is defined by deriving a class which defines methods with
    special names to handle tags: start_foo and end_foo to handle <foo>
    and </foo>, respectively.  The data between tags is passed to the
    parser by calling self.handle_data() with some data as argument (the
    data may be split up in arbitrary chunks)."""

    attributes = {}                     # default, to be overridden
    elements = {}                       # default, to be overridden

    # parsing options, settable using keyword args in __init__
    __accept_unquoted_attributes = 0
    __accept_missing_endtag_name = 0
    __map_case = 0
    __accept_utf8 = 0
    __translate_attribute_references = 1

    # Interface -- initialize and reset this instance
    def __init__(self, **kw):
        self.__fixed = 0
        if kw.has_key('accept_unquoted_attributes'):
            self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
        if kw.has_key('accept_missing_endtag_name'):
            self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
        if kw.has_key('map_case'):
            self.__map_case = kw['map_case']
        if kw.has_key('accept_utf8'):
            self.__accept_utf8 = kw['accept_utf8']
        if kw.has_key('translate_attribute_references'):
            self.__translate_attribute_references = kw['translate_attribute_references']
        self.reset()

    # Internal -- rebuild self.elements from start_*/end_* methods found
    # on the instance and along the class hierarchy.
    def __fixelements(self):
        self.__fixed = 1
        self.elements = {}
        self.__fixdict(self.__dict__)
        self.__fixclass(self.__class__)

    def __fixclass(self, kl):
        self.__fixdict(kl.__dict__)
        for k in kl.__bases__:
            self.__fixclass(k)

    def __fixdict(self, dict):
        for key in dict.keys():
            if key[:6] == 'start_':
                tag = key[6:]
                start, end = self.elements.get(tag, (None, None))
                if start is None:
                    self.elements[tag] = getattr(self, key), end
            elif key[:4] == 'end_':
                tag = key[4:]
                start, end = self.elements.get(tag, (None, None))
                if end is None:
                    self.elements[tag] = start, getattr(self, key)

    # Interface -- reset this instance.  Loses all unprocessed data
    def reset(self):
        self.rawdata = ''
        self.stack = []
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        self.__at_start = 1
        self.__seen_doctype = None
        self.__seen_starttag = 0
        self.__use_namespaces = 0
        self.__namespaces = {'xml':None}   # xml is implicitly declared
        # backward compatibility hack: if elements not overridden,
        # fill it in ourselves
        if self.elements is XMLParser.elements:
            self.__fixelements()

    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1

    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1

    # Interface -- feed some data to the parser.  Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n').  (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data):
        self.rawdata = self.rawdata + data
        self.goahead(0)

    # Interface -- handle the remaining data
    def close(self):
        self.goahead(1)
        if self.__fixed:
            self.__fixed = 0
            # remove self.elements so that we don't leak
            del self.elements

    # Interface -- translate references
    def translate_references(self, data, all = 1):
        if not self.__translate_attribute_references:
            return data
        i = 0
        while 1:
            res = amp.search(data, i)
            if res is None:
                return data
            s = res.start(0)
            res = ref.match(data, s)
            if res is None:
                self.syntax_error("bogus `&'")
                i = s+1
                continue
            i = res.end(0)
            str = res.group(1)
            rescan = 0
            if str[0] == '#':
                if str[1] == 'x':
                    str = chr(int(str[2:], 16))
                else:
                    str = chr(int(str[1:]))
                if data[i - 1] != ';':
                    self.syntax_error("`;' missing after char reference")
                    i = i-1
            elif all:
                if self.entitydefs.has_key(str):
                    str = self.entitydefs[str]
                    rescan = 1
                elif data[i - 1] != ';':
                    self.syntax_error("bogus `&'")
                    i = s + 1 # just past the &
                    continue
                else:
                    self.syntax_error("reference to unknown entity `&%s;'" % str)
                    str = '&' + str + ';'
            elif data[i - 1] != ';':
                self.syntax_error("bogus `&'")
                i = s + 1 # just past the &
                continue

            # when we get here, str contains the translated text and i points
            # to the end of the string that is to be replaced
            data = data[:s] + str + data[i:]
            if rescan:
                i = s
            else:
                i = s + len(str)

    # Interface - return a dictionary of all namespaces currently valid
    def getnamespace(self):
        nsdict = {}
        for t, d, nst in self.stack:
            nsdict.update(d)
        return nsdict

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if i > 0:
                self.__at_start = 0
            if self.nomoretags:
                data = rawdata[i:n]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
                i = n
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                data = rawdata[i:j]
                if self.__at_start and space.match(data) is None:
                    self.syntax_error('illegal data at start of file')
                self.__at_start = 0
                if not self.stack and space.match(data) is None:
                    self.syntax_error('data not in content')
                if not self.__accept_utf8 and illegal.search(data):
                    self.syntax_error('illegal character in content')
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    self.__seen_starttag = 1
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i =  k
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if cdataopen.match(rawdata, i):
                    k = self.parse_cdata(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = xmldecl.match(rawdata, i)
                if res:
                    if not self.__at_start:
                        self.syntax_error("<?xml?> declaration not at start of document")
                    version, encoding, standalone = res.group('version',
                                                              'encoding',
                                                              'standalone')
                    if version[1:-1] != '1.0':
                        raise Error('only XML version 1.0 supported')
                    if encoding: encoding = encoding[1:-1]
                    if standalone: standalone = standalone[1:-1]
                    self.handle_xml(encoding, standalone)
                    i = res.end(0)
                    continue
                res = procopen.match(rawdata, i)
                if res:
                    k = self.parse_proc(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = doctype.match(rawdata, i)
                if res:
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    if self.__seen_doctype:
                        self.syntax_error('multiple DOCTYPE elements')
                    if self.__seen_starttag:
                        self.syntax_error('DOCTYPE not at beginning of document')
                    k = self.parse_doctype(res)
                    if k < 0: break
                    self.__seen_doctype = res.group('name')
                    if self.__map_case:
                        self.__seen_doctype = self.__seen_doctype.lower()
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                res = charref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in charref")
                        i = i-1
                    if not self.stack:
                        self.syntax_error('data not in content')
                    self.handle_charref(res.group('char')[:-1])
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
                res = entityref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in entityref")
                        i = i-1
                    name = res.group('name')
                    if self.__map_case:
                        name = name.lower()
                    if self.entitydefs.has_key(name):
                        # inline the replacement text and rescan from its start
                        self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
                        n = len(rawdata)
                        i = res.start(0)
                    else:
                        self.unknown_entityref(name)
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
            elif rawdata[i] == ']':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                if n-i < 3:
                    break
                if cdataclose.match(rawdata, i):
                    self.syntax_error("bogus `]]>'")
                self.handle_data(rawdata[i])
                i = i+1
                continue
            else:
                raise Error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            break
        # end while
        if i > 0:
            self.__at_start = 0
        if end and i < n:
            data = rawdata[i]
            self.syntax_error("bogus `%s'" % data)
            if not self.__accept_utf8 and illegal.search(data):
                self.syntax_error('illegal character in content')
            self.handle_data(data)
            self.lineno = self.lineno + data.count('\n')
            self.rawdata = rawdata[i+1:]
            return self.goahead(end)
        self.rawdata = rawdata[i:]
        if end:
            if not self.__seen_starttag:
                self.syntax_error('no elements in file')
            if self.stack:
                self.syntax_error('missing end tags')
                while self.stack:
                    self.finish_endtag(self.stack[-1][0])

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            raise Error('unexpected call to handle_comment')
        res = commentclose.search(rawdata, i+4)
        if res is None:
            return -1
        if doubledash.search(rawdata, i+4, res.start(0)):
            self.syntax_error("`--' inside comment")
        if rawdata[res.start(0)-1] == '-':
            self.syntax_error('comment cannot end in three dashes')
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+4, res.start(0)):
            self.syntax_error('illegal character in comment')
        self.handle_comment(rawdata[i+4: res.start(0)])
        return res.end(0)

    # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
    def parse_doctype(self, res):
        rawdata = self.rawdata
        n = len(rawdata)
        name = res.group('name')
        if self.__map_case:
            name = name.lower()
        pubid, syslit = res.group('pubid', 'syslit')
        if pubid is not None:
            pubid = pubid[1:-1]         # remove quotes
            pubid = ' '.join(pubid.split()) # normalize
        if syslit is not None: syslit = syslit[1:-1] # remove quotes
        j = k = res.end(0)
        if k >= n:
            return -1
        if rawdata[k] == '[':
            level = 0
            k = k+1
            # dq/sq track whether we are inside a double-/single-quoted literal
            dq = sq = 0
            while k < n:
                c = rawdata[k]
                if not sq and c == '"':
                    dq = not dq
                elif not dq and c == "'":
                    sq = not sq
                elif sq or dq:
                    pass
                elif level <= 0 and c == ']':
                    res = endbracket.match(rawdata, k+1)
                    if res is None:
                        return -1
                    self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
                    return res.end(0)
                elif c == '<':
                    level = level + 1
                elif c == '>':
                    level = level - 1
                    if level < 0:
                        self.syntax_error("bogus `>' in DOCTYPE")
                k = k+1
        res = endbracketfind.match(rawdata, k)
        if res is None:
            return -1
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in DOCTYPE')
        self.handle_doctype(name, pubid, syslit, None)
        return res.end(0)

    # Internal -- handle CDATA tag, return length or -1 if not terminated
    def parse_cdata(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+9] != '<![CDATA[':
            raise Error('unexpected call to parse_cdata')
        res = cdataclose.search(rawdata, i+9)
        if res is None:
            return -1
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+9, res.start(0)):
            self.syntax_error('illegal character in CDATA')
        if not self.stack:
            self.syntax_error('CDATA not in content')
        self.handle_cdata(rawdata[i+9:res.start(0)])
        return res.end(0)

    # attributes allowed on the old-fashioned <?xml:namespace ...?> PI
    __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
    # Internal -- handle a processing instruction tag
    def parse_proc(self, i):
        rawdata = self.rawdata
        end = procclose.search(rawdata, i)
        if end is None:
            return -1
        j = end.start(0)
        if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
            self.syntax_error('illegal character in processing instruction')
        res = tagfind.match(rawdata, i+2)
        if res is None:
            raise Error('unexpected call to parse_proc')
        k = res.end(0)
        name = res.group(0)
        if self.__map_case:
            name = name.lower()
        if name == 'xml:namespace':
            self.syntax_error('old-fashioned namespace declaration')
            self.__use_namespaces = -1
            # namespace declaration
            # this must come after the <?xml?> declaration (if any)
            # and before the <!DOCTYPE> (if any).
            if self.__seen_doctype or self.__seen_starttag:
                self.syntax_error('xml:namespace declaration too late in document')
            attrdict, namespace, k = self.parse_attributes(name, k, j)
            if namespace:
                self.syntax_error('namespace declaration inside namespace declaration')
            for attrname in attrdict.keys():
                if not self.__xml_namespace_attributes.has_key(attrname):
                    self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
            if not attrdict.has_key('ns') or not attrdict.has_key('prefix'):
                self.syntax_error('xml:namespace without required attributes')
            prefix = attrdict.get('prefix')
            if ncname.match(prefix) is None:
                self.syntax_error('xml:namespace illegal prefix value')
                return end.end(0)
            if self.__namespaces.has_key(prefix):
                self.syntax_error('xml:namespace prefix not unique')
            self.__namespaces[prefix] = attrdict['ns']
        else:
            if name.lower() == 'xml':
                self.syntax_error('illegal processing instruction target name')
            self.handle_proc(name, rawdata[k:j])
        return end.end(0)

    # Internal -- parse attributes between i and j
    def parse_attributes(self, tag, i, j):
        rawdata = self.rawdata
        attrdict = {}
        namespace = {}
        while i < j:
            res = attrfind.match(rawdata, i)
            if res is None:
                break
            attrname, attrvalue = res.group('name', 'value')
            if self.__map_case:
                attrname = attrname.lower()
            i = res.end(0)
            if attrvalue is None:
                self.syntax_error("no value specified for attribute `%s'" % attrname)
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            elif not self.__accept_unquoted_attributes:
                self.syntax_error("attribute `%s' value not quoted" % attrname)
            res = xmlns.match(attrname)
            if res is not None:
                # namespace declaration
                ncname = res.group('ncname')
                namespace[ncname or ''] = attrvalue or None
                if not self.__use_namespaces:
                    self.__use_namespaces = len(self.stack)+1
                continue
            if '<' in attrvalue:
                self.syntax_error("`<' illegal in attribute value")
            if attrdict.has_key(attrname):
                self.syntax_error("attribute `%s' specified twice" % attrname)
            attrvalue = attrvalue.translate(attrtrans)
            attrdict[attrname] = self.translate_references(attrvalue)
        return attrdict, namespace, i

    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        tag = starttagmatch.match(rawdata, i)
        if tag is None or tag.end(0) != end.end(0):
            self.syntax_error('garbage in starttag')
            return end.end(0)
        nstag = tagname = tag.group('tagname')
        if self.__map_case:
            nstag = tagname = nstag.lower()
        if not self.__seen_starttag and self.__seen_doctype and \
           tagname != self.__seen_doctype:
            self.syntax_error('starttag does not match DOCTYPE')
        if self.__seen_starttag and not self.stack:
            self.syntax_error('multiple elements on top level')
        k, j = tag.span('attrs')
        attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
        self.stack.append((tagname, nsdict, nstag))
        if self.__use_namespaces:
            res = qname.match(tagname)
        else:
            res = None
        if res is not None:
            prefix, nstag = res.group('prefix', 'local')
            if prefix is None:
                prefix = ''
            ns = None
            for t, d, nst in self.stack:
                if d.has_key(prefix):
                    ns = d[prefix]
            if ns is None and prefix != '':
                ns = self.__namespaces.get(prefix)
            if ns is not None:
                nstag = ns + ' ' + nstag
            elif prefix != '':
                nstag = prefix + ':' + nstag # undo split
            self.stack[-1] = tagname, nsdict, nstag
        # translate namespace of attributes
        attrnamemap = {} # map from new name to old name (used for error reporting)
        for key in attrdict.keys():
            attrnamemap[key] = key
        if self.__use_namespaces:
            nattrdict = {}
            for key, val in attrdict.items():
                okey = key
                res = qname.match(key)
                if res is not None:
                    aprefix, key = res.group('prefix', 'local')
                    if self.__map_case:
                        key = key.lower()
                    if aprefix is None:
                        aprefix = ''
                    ans = None
                    for t, d, nst in self.stack:
                        if d.has_key(aprefix):
                            ans = d[aprefix]
                    if ans is None and aprefix != '':
                        ans = self.__namespaces.get(aprefix)
                    if ans is not None:
                        key = ans + ' ' + key
                    elif aprefix != '':
                        key = aprefix + ':' + key
                    elif ns is not None:
                        key = ns + ' ' + key
                nattrdict[key] = val
                attrnamemap[key] = okey
            attrdict = nattrdict
        attributes = self.attributes.get(nstag)
        if attributes is not None:
            for key in attrdict.keys():
                if not attributes.has_key(key):
                    self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
            for key, val in attributes.items():
                if val is not None and not attrdict.has_key(key):
                    attrdict[key] = val
        method = self.elements.get(nstag, (None, None))[0]
        self.finish_starttag(nstag, attrdict, method)
        if tag.group('slash') == '/':
            self.finish_endtag(tagname)
        return tag.end(0)

    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        res = tagfind.match(rawdata, i+2)
        if res is None:
            if self.literal:
                self.handle_data(rawdata[i])
                return i+1
            if not self.__accept_missing_endtag_name:
                self.syntax_error('no name specified in end tag')
            tag = self.stack[-1][0]
            k = i+2
        else:
            tag = res.group(0)
            if self.__map_case:
                tag = tag.lower()
            if self.literal:
                if not self.stack or tag != self.stack[-1][0]:
                    self.handle_data(rawdata[i])
                    return i+1
            k = res.end(0)
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in end tag')
        self.finish_endtag(tag)
        return end.end(0)

    # Internal -- finish processing of start tag
    def finish_starttag(self, tagname, attrdict, method):
        if method is not None:
            self.handle_starttag(tagname, method, attrdict)
        else:
            self.unknown_starttag(tagname, attrdict)

    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        self.literal = 0
        if not tag:
            self.syntax_error('name-less end tag')
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            found = -1
            for i in range(len(self.stack)):
                if tag == self.stack[i][0]:
                    found = i
            if found == -1:
                self.syntax_error('unopened end tag')
                return
        # pop all elements above (and including) the matched start tag
        while len(self.stack) > found:
            if found < len(self.stack) - 1:
                self.syntax_error('missing close tag for %s' % self.stack[-1][2])
            nstag = self.stack[-1][2]
            method = self.elements.get(nstag, (None, None))[1]
            if method is not None:
                self.handle_endtag(nstag, method)
            else:
                self.unknown_endtag(nstag)
            if self.__use_namespaces == len(self.stack):
                self.__use_namespaces = 0
            del self.stack[-1]

    # Overridable -- handle xml processing instruction
    def handle_xml(self, encoding, standalone):
        pass

    # Overridable -- handle DOCTYPE
    def handle_doctype(self, tag, pubid, syslit, data):
        pass

    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)

    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()

    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            if name[0] == 'x':
                n = int(name[1:], 16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))

    # Definition of entities -- derived classes may override
    entitydefs = {'lt': '&#60;',        # must use charref
                  'gt': '&#62;',
                  'amp': '&#38;',       # must use charref
                  'quot': '&#34;',
                  'apos': '&#39;',
                  }

    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass

    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass

    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass

    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass

    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, message):
        raise Error('Syntax error at line %d: %s' % (self.lineno, message))

    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, name):
        self.syntax_error("reference to unknown entity `&%s;'" % name)
+
+
+class TestXMLParser(XMLParser):
+
+    def __init__(self, **kw):
+        self.testdata = ""
+        apply(XMLParser.__init__, (self,), kw)
+
+    def handle_xml(self, encoding, standalone):
+        self.flush()
+        print 'xml: encoding =',encoding,'standalone =',standalone
+
+    def handle_doctype(self, tag, pubid, syslit, data):
+        self.flush()
+        print 'DOCTYPE:',tag, `data`
+
+    def handle_data(self, data):
+        self.testdata = self.testdata + data
+        if len(`self.testdata`) >= 70:
+            self.flush()
+
+    def flush(self):
+        data = self.testdata
+        if data:
+            self.testdata = ""
+            print 'data:', `data`
+
+    def handle_cdata(self, data):
+        self.flush()
+        print 'cdata:', `data`
+
+    def handle_proc(self, name, data):
+        self.flush()
+        print 'processing:',name,`data`
+
+    def handle_comment(self, data):
+        self.flush()
+        r = `data`
+        if len(r) > 68:
+            r = r[:32] + '...' + r[-32:]
+        print 'comment:', r
+
+    def syntax_error(self, message):
+        print 'error at line %d:' % self.lineno, message
+
+    def unknown_starttag(self, tag, attrs):
+        self.flush()
+        if not attrs:
+            print 'start tag: <' + tag + '>'
+        else:
+            print 'start tag: <' + tag,
+            for name, value in attrs.items():
+                print name + '=' + '"' + value + '"',
+            print '>'
+
+    def unknown_endtag(self, tag):
+        self.flush()
+        print 'end tag: </' + tag + '>'
+
+    def unknown_entityref(self, ref):
+        self.flush()
+        print '*** unknown entity ref: &' + ref + ';'
+
+    def unknown_charref(self, ref):
+        self.flush()
+        print '*** unknown char ref: &#' + ref + ';'
+
+    def close(self):
+        XMLParser.close(self)
+        self.flush()
+
+def test(args = None):
+    import sys, getopt
+    from time import time
+
+    if not args:
+        args = sys.argv[1:]
+
+    opts, args = getopt.getopt(args, 'st')
+    klass = TestXMLParser
+    do_time = 0
+    for o, a in opts:
+        if o == '-s':
+            klass = XMLParser
+        elif o == '-t':
+            do_time = 1
+
+    if args:
+        file = args[0]
+    else:
+        file = 'test.xml'
+
+    if file == '-':
+        f = sys.stdin
+    else:
+        try:
+            f = open(file, 'r')
+        except IOError, msg:
+            print file, ":", msg
+            sys.exit(1)
+
+    data = f.read()
+    if f is not sys.stdin:
+        f.close()
+
+    x = klass()
+    t0 = time()
+    try:
+        if do_time:
+            x.feed(data)
+            x.close()
+        else:
+            for c in data:
+                x.feed(c)
+            x.close()
+    except Error, msg:
+        t1 = time()
+        print msg
+        if do_time:
+            print 'total time: %g' % (t1-t0)
+        sys.exit(1)
+    t1 = time()
+    if do_time:
+        print 'total time: %g' % (t1-t0)
+
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/xmlrpclib.py b/lib-python/2.2/xmlrpclib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/xmlrpclib.py
@@ -0,0 +1,1019 @@
+#
+# XML-RPC CLIENT LIBRARY
+# $Id$
+#
+# an XML-RPC client interface for Python.
+#
+# the marshalling and response parser code can also be used to
+# implement XML-RPC servers.
+#
+# Notes:
+# this version is designed to work with Python 1.5.2 or newer.
+# unicode encoding support requires at least Python 1.6.
+# experimental HTTPS requires Python 2.0 built with SSL sockets.
+# expat parser support requires Python 2.0 with pyexpat support.
+#
+# History:
+# 1999-01-14 fl  Created
+# 1999-01-15 fl  Changed dateTime to use localtime
+# 1999-01-16 fl  Added Binary/base64 element, default to RPC2 service
+# 1999-01-19 fl  Fixed array data element (from Skip Montanaro)
+# 1999-01-21 fl  Fixed dateTime constructor, etc.
+# 1999-02-02 fl  Added fault handling, handle empty sequences, etc.
+# 1999-02-10 fl  Fixed problem with empty responses (from Skip Montanaro)
+# 1999-06-20 fl  Speed improvements, pluggable parsers/transports (0.9.8)
+# 2000-11-28 fl  Changed boolean to check the truth value of its argument
+# 2001-02-24 fl  Added encoding/Unicode/SafeTransport patches
+# 2001-02-26 fl  Added compare support to wrappers (0.9.9/1.0b1)
+# 2001-03-28 fl  Make sure response tuple is a singleton
+# 2001-03-29 fl  Don't require empty params element (from Nicholas Riley)
+# 2001-06-10 fl  Folded in _xmlrpclib accelerator support (1.0b2)
+# 2001-08-20 fl  Base xmlrpclib.Error on built-in Exception (from Paul Prescod)
+# 2001-09-03 fl  Allow Transport subclass to override getparser
+# 2001-09-10 fl  Lazy import of urllib, cgi, xmllib (20x import speedup)
+# 2001-10-01 fl  Remove containers from memo cache when done with them
+# 2001-10-01 fl  Use faster escape method (80% dumps speedup)
+# 2001-10-10 sm  Allow long ints to be passed as ints if they don't overflow
+# 2001-10-17 sm  test for int and long overflow (allows use on 64-bit systems)
+# 2001-11-12 fl  Use repr() to marshal doubles (from Paul Felix)
+#
+# Copyright (c) 1999-2001 by Secret Labs AB.
+# Copyright (c) 1999-2001 by Fredrik Lundh.
+#
+# info at pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The XML-RPC client interface is
+#
+# Copyright (c) 1999-2001 by Secret Labs AB
+# Copyright (c) 1999-2001 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+#
+# things to look into:
+
+# TODO: support basic authentication (see robin's patch)
+# TODO: fix host tuple handling in the server constructor
+# TODO: let transport verify schemes
+# TODO: update documentation
+# TODO: authentication plugins
+
+"""
+An XML-RPC client interface for Python.
+
+The marshalling and response parser code can also be used to
+implement XML-RPC servers.
+
+Exported exceptions:
+
+  Error          Base class for client errors
+  ProtocolError  Indicates an HTTP protocol error
+  ResponseError  Indicates a broken response package
+  Fault          Indicates an XML-RPC fault package
+
+Exported classes:
+
+  ServerProxy    Represents a logical connection to an XML-RPC server
+
+  Boolean        boolean wrapper to generate a "boolean" XML-RPC value
+  DateTime       dateTime wrapper for an ISO 8601 string or time tuple or
+                 localtime integer value to generate a "dateTime.iso8601"
+                 XML-RPC value
+  Binary         binary data wrapper
+
+  SlowParser     Slow but safe standard parser (based on xmllib)
+  Marshaller     Generate an XML-RPC params chunk from a Python data structure
+  Unmarshaller   Unmarshal an XML-RPC response from incoming XML event message
+  Transport      Handles an HTTP transaction to an XML-RPC server
+  SafeTransport  Handles an HTTPS transaction to an XML-RPC server
+
+Exported constants:
+
+  True
+  False
+
+Exported functions:
+
+  boolean        Convert any Python value to an XML-RPC boolean
+  getparser      Create instance of the fastest available parser & attach
+                 to an unmarshalling object
+  dumps          Convert an argument tuple or a Fault instance to an XML-RPC
+                 request (or response, if the methodresponse option is used).
+  loads          Convert an XML-RPC packet to unmarshalled data plus a method
+                 name (None if not present).
+"""
+
+import re, string, time, operator
+
+from types import *
+
+try:
+    unicode
+except NameError:
+    unicode = None # unicode support not available
+
+def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search):
+    # decode non-ascii string (if possible)
+    if unicode and encoding and is8bit(data):
+        data = unicode(data, encoding)
+    return data
+
+def escape(s, replace=string.replace):
+    s = replace(s, "&", "&amp;")
+    s = replace(s, "<", "&lt;")
+    return replace(s, ">", "&gt;",)
+
+MAXINT =  2L**31-1
+MININT = -2L**31
+
+if unicode:
+    def _stringify(string):
+        # convert to 7-bit ascii if possible
+        try:
+            return str(string)
+        except UnicodeError:
+            return string
+else:
+    def _stringify(string):
+        return string
+
+__version__ = "1.0.0"
+
+# --------------------------------------------------------------------
+# Exceptions
+
+class Error(Exception):
+    """Base class for client errors."""
+    def __str__(self):
+        return repr(self)
+
+class ProtocolError(Error):
+    """Indicates an HTTP protocol error."""
+    def __init__(self, url, errcode, errmsg, headers):
+        Error.__init__(self)
+        self.url = url
+        self.errcode = errcode
+        self.errmsg = errmsg
+        self.headers = headers
+    def __repr__(self):
+        return (
+            "<ProtocolError for %s: %s %s>" %
+            (self.url, self.errcode, self.errmsg)
+            )
+
+class ResponseError(Error):
+    """Indicates a broken response package."""
+    pass
+
+class Fault(Error):
+    """Indicates an XML-RPC fault package."""
+    def __init__(self, faultCode, faultString, **extra):
+        Error.__init__(self)
+        self.faultCode = faultCode
+        self.faultString = faultString
+    def __repr__(self):
+        return (
+            "<Fault %s: %s>" %
+            (self.faultCode, repr(self.faultString))
+            )
+
+# --------------------------------------------------------------------
+# Special values
+
+class Boolean:
+    """Boolean-value wrapper.
+
+    Use True or False to generate a "boolean" XML-RPC value.
+    """
+
+    def __init__(self, value = 0):
+        self.value = operator.truth(value)
+
+    def encode(self, out):
+        out.write("<value><boolean>%d</boolean></value>\n" % self.value)
+
+    def __cmp__(self, other):
+        if isinstance(other, Boolean):
+            other = other.value
+        return cmp(self.value, other)
+
+    def __repr__(self):
+        if self.value:
+            return "<Boolean True at %x>" % id(self)
+        else:
+            return "<Boolean False at %x>" % id(self)
+
+    def __int__(self):
+        return self.value
+
+    def __nonzero__(self):
+        return self.value
+
+True, False = Boolean(1), Boolean(0)
+
+def boolean(value, truefalse=(False, True)):
+    """Convert any Python value to XML-RPC 'boolean'."""
+    return truefalse[operator.truth(value)]
+
+class DateTime:
+    """DateTime wrapper for an ISO 8601 string or time tuple or
+    localtime integer value to generate 'dateTime.iso8601' XML-RPC
+    value.
+    """
+
+    def __init__(self, value=0):
+        if not isinstance(value, StringType):
+            if not isinstance(value, TupleType):
+                if value == 0:
+                    value = time.time()
+                value = time.localtime(value)
+            value = time.strftime("%Y%m%dT%H:%M:%S", value)
+        self.value = value
+
+    def __cmp__(self, other):
+        if isinstance(other, DateTime):
+            other = other.value
+        return cmp(self.value, other)
+
+    def __repr__(self):
+        return "<DateTime %s at %x>" % (self.value, id(self))
+
+    def decode(self, data):
+        self.value = string.strip(data)
+
+    def encode(self, out):
+        out.write("<value><dateTime.iso8601>")
+        out.write(self.value)
+        out.write("</dateTime.iso8601></value>\n")
+
+def datetime(data):
+    value = DateTime()
+    value.decode(data)
+    return value
+
+class Binary:
+    """Wrapper for binary data."""
+
+    def __init__(self, data=None):
+        self.data = data
+
+    def __cmp__(self, other):
+        if isinstance(other, Binary):
+            other = other.data
+        return cmp(self.data, other)
+
+    def decode(self, data):
+        import base64
+        self.data = base64.decodestring(data)
+
+    def encode(self, out):
+        import base64, StringIO
+        out.write("<value><base64>\n")
+        base64.encode(StringIO.StringIO(self.data), out)
+        out.write("</base64></value>\n")
+
+def binary(data):
+    value = Binary()
+    value.decode(data)
+    return value
+
+WRAPPERS = DateTime, Binary, Boolean
+
+# --------------------------------------------------------------------
+# XML parsers
+
+try:
+    # optional xmlrpclib accelerator.  for more information on this
+    # component, contact info at pythonware.com
+    import _xmlrpclib
+    FastParser = _xmlrpclib.Parser
+    FastUnmarshaller = _xmlrpclib.Unmarshaller
+except (AttributeError, ImportError):
+    FastParser = FastUnmarshaller = None
+
+#
+# the SGMLOP parser is about 15x faster than Python's builtin
+# XML parser.  SGMLOP sources can be downloaded from:
+#
+#     http://www.pythonware.com/products/xml/sgmlop.htm
+#
+
+try:
+    import sgmlop
+    if not hasattr(sgmlop, "XMLParser"):
+        raise ImportError
+except ImportError:
+    SgmlopParser = None # sgmlop accelerator not available
+else:
+    class SgmlopParser:
+        def __init__(self, target):
+
+            # setup callbacks
+            self.finish_starttag = target.start
+            self.finish_endtag = target.end
+            self.handle_data = target.data
+            self.handle_xml = target.xml
+
+            # activate parser
+            self.parser = sgmlop.XMLParser()
+            self.parser.register(self)
+            self.feed = self.parser.feed
+            self.entity = {
+                "amp": "&", "gt": ">", "lt": "<",
+                "apos": "'", "quot": '"'
+                }
+
+        def close(self):
+            try:
+                self.parser.close()
+            finally:
+                self.parser = self.feed = None # nuke circular reference
+
+        def handle_proc(self, tag, attr):
+            import re
+            m = re.search("encoding\s*=\s*['\"]([^\"']+)[\"']", attr)
+            if m:
+                self.handle_xml(m.group(1), 1)
+
+        def handle_entityref(self, entity):
+            # <string> entity
+            try:
+                self.handle_data(self.entity[entity])
+            except KeyError:
+                self.handle_data("&%s;" % entity)
+
+try:
+    from xml.parsers import expat
+    if not hasattr(expat, "ParserCreate"):
+        raise ImportError, "ParserCreate"
+except ImportError:
+    ExpatParser = None
+else:
+    class ExpatParser:
+        # fast expat parser for Python 2.0.  this is about 50%
+        # slower than sgmlop, on roundtrip testing
+        def __init__(self, target):
+            self._parser = parser = expat.ParserCreate(None, None)
+            self._target = target
+            parser.StartElementHandler = target.start
+            parser.EndElementHandler = target.end
+            parser.CharacterDataHandler = target.data
+            encoding = None
+            if not parser.returns_unicode:
+                encoding = "utf-8"
+            target.xml(encoding, None)
+
+        def feed(self, data):
+            self._parser.Parse(data, 0)
+
+        def close(self):
+            self._parser.Parse("", 1) # end of data
+            del self._target, self._parser # get rid of circular references
+
+class SlowParser:
+    """Default XML parser (based on xmllib.XMLParser)."""
+    # this is about 10 times slower than sgmlop, on roundtrip
+    # testing.
+    def __init__(self, target):
+        import xmllib # lazy subclassing (!)
+        if xmllib.XMLParser not in SlowParser.__bases__:
+            SlowParser.__bases__ = (xmllib.XMLParser,)
+        self.handle_xml = target.xml
+        self.unknown_starttag = target.start
+        self.handle_data = target.data
+        self.unknown_endtag = target.end
+        try:
+            xmllib.XMLParser.__init__(self, accept_utf8=1)
+        except TypeError:
+            xmllib.XMLParser.__init__(self) # pre-2.0
+
+# --------------------------------------------------------------------
+# XML-RPC marshalling and unmarshalling code
+
+class Marshaller:
+    """Generate an XML-RPC params chunk from a Python data structure.
+
+    Create a Marshaller instance for each set of parameters, and use
+    the "dumps" method to convert your data (represented as a tuple)
+    to an XML-RPC params chunk.  To write a fault response, pass a
+    Fault instance instead.  You may prefer to use the "dumps" module
+    function for this purpose.
+    """
+
+    # by the way, if you don't understand what's going on in here,
+    # that's perfectly ok.
+
+    def __init__(self, encoding=None):
+        self.memo = {}
+        self.data = None
+        self.encoding = encoding
+
+    dispatch = {}
+
+    def dumps(self, values):
+        self.__out = []
+        self.write = write = self.__out.append
+        if isinstance(values, Fault):
+            # fault instance
+            write("<fault>\n")
+            self.__dump({
+                'faultCode': values.faultCode, 
+                'faultString': values.faultString,
+            })
+            write("</fault>\n")
+        else:
+            # parameter block
+            # FIXME: the xml-rpc specification allows us to leave out
+            # the entire <params> block if there are no parameters.
+            # however, changing this may break older code (including
+            # old versions of xmlrpclib.py), so this is better left as
+            # is for now.  See @XMLRPC3 for more information. /F
+            write("<params>\n")
+            for v in values:
+                write("<param>\n")
+                self.__dump(v)
+                write("</param>\n")
+            write("</params>\n")
+        result = string.join(self.__out, "")
+        del self.__out, self.write # don't need this any more
+        return result
+
+    def __dump(self, value):
+        try:
+            f = self.dispatch[type(value)]
+        except KeyError:
+            raise TypeError, "cannot marshal %s objects" % type(value)
+        else:
+            f(self, value)
+
+    def dump_int(self, value):
+        # in case ints are > 32 bits
+        if value > MAXINT or value < MININT:
+            raise OverflowError, "int exceeds XML-RPC limits"
+        self.write("<value><int>%s</int></value>\n" % value)
+    dispatch[IntType] = dump_int
+
+    def dump_long(self, value):
+        # in case ints are > 32 bits
+        if value > MAXINT or value < MININT:
+            raise OverflowError, "long int exceeds XML-RPC limits"
+        self.write("<value><int>%s</int></value>\n" % int(value))
+    dispatch[LongType] = dump_long
+
+    def dump_double(self, value):
+        self.write("<value><double>%s</double></value>\n" % repr(value))
+    dispatch[FloatType] = dump_double
+
+    def dump_string(self, value, escape=escape):
+        self.write("<value><string>%s</string></value>\n" % escape(value))
+    dispatch[StringType] = dump_string
+
+    if unicode:
+        def dump_unicode(self, value, escape=escape):
+            value = value.encode(self.encoding)
+            self.write("<value><string>%s</string></value>\n" % escape(value))
+        dispatch[UnicodeType] = dump_unicode
+
+    def opencontainer(self, value):
+        if value:
+            i = id(value)
+            if self.memo.has_key(i):
+                raise TypeError, "cannot marshal recursive data structures"
+            self.memo[i] = None
+
+    def closecontainer(self, value):
+        if value:
+            del self.memo[id(value)]
+
+    def dump_array(self, value):
+        self.opencontainer(value)
+        write = self.write
+        dump = self.__dump
+        write("<value><array><data>\n")
+        for v in value:
+            dump(v)
+        write("</data></array></value>\n")
+        self.closecontainer(value)
+    dispatch[TupleType] = dump_array
+    dispatch[ListType] = dump_array
+
+    def dump_struct(self, value, escape=escape):
+        self.opencontainer(value)
+        write = self.write
+        dump = self.__dump
+        write("<value><struct>\n")
+        for k, v in value.items():
+            write("<member>\n")
+            if type(k) is not StringType:
+                raise TypeError, "dictionary key must be string"
+            write("<name>%s</name>\n" % escape(k))
+            dump(v)
+            write("</member>\n")
+        write("</struct></value>\n")
+        self.closecontainer(value)
+    dispatch[DictType] = dump_struct
+
+    def dump_instance(self, value):
+        # check for special wrappers
+        if value.__class__ in WRAPPERS:
+            value.encode(self)
+        else:
+            # store instance attributes as a struct (really?)
+            self.dump_struct(value.__dict__)
+    dispatch[InstanceType] = dump_instance
+
+class Unmarshaller:
+    """Unmarshal an XML-RPC response, based on incoming XML event
+    messages (start, data, end).  Call close() to get the resulting
+    data structure.
+
+    Note that this reader is fairly tolerant, and gladly accepts bogus
+    XML-RPC data without complaining (but not bogus XML).
+    """
+
+    # and again, if you don't understand what's going on in here,
+    # that's perfectly ok.
+
+    def __init__(self):
+        self._type = None
+        self._stack = []
+        self._marks = []
+        self._data = []
+        self._methodname = None
+        self._encoding = "utf-8"
+        self.append = self._stack.append
+
+    def close(self):
+        # return response tuple and target method
+        if self._type is None or self._marks:
+            raise ResponseError()
+        if self._type == "fault":
+            raise apply(Fault, (), self._stack[0])
+        return tuple(self._stack)
+
+    def getmethodname(self):
+        return self._methodname
+
+    #
+    # event handlers
+
+    def xml(self, encoding, standalone):
+        self._encoding = encoding
+        # FIXME: assert standalone == 1 ???
+
+    def start(self, tag, attrs):
+        # prepare to handle this element
+        if tag == "array" or tag == "struct":
+            self._marks.append(len(self._stack))
+        self._data = []
+        self._value = (tag == "value")
+
+    def data(self, text):
+        self._data.append(text)
+
+    def end(self, tag, join=string.join):
+        # call the appropriate end tag handler
+        try:
+            f = self.dispatch[tag]
+        except KeyError:
+            pass # unknown tag ?
+        else:
+            return f(self, join(self._data, ""))
+
+    #
+    # accelerator support
+
+    def end_dispatch(self, tag, data):
+        # dispatch data
+        try:
+            f = self.dispatch[tag]
+        except KeyError:
+            pass # unknown tag ?
+        else:
+            return f(self, data)
+
+    #
+    # element decoders
+
+    dispatch = {}
+
+    def end_boolean(self, data):
+        if data == "0":
+            self.append(False)
+        elif data == "1":
+            self.append(True)
+        else:
+            raise TypeError, "bad boolean value"
+        self._value = 0
+    dispatch["boolean"] = end_boolean
+
+    def end_int(self, data):
+        self.append(int(data))
+        self._value = 0
+    dispatch["i4"] = end_int
+    dispatch["int"] = end_int
+
+    def end_double(self, data):
+        self.append(float(data))
+        self._value = 0
+    dispatch["double"] = end_double
+
+    def end_string(self, data):
+        if self._encoding:
+            data = _decode(data, self._encoding)
+        self.append(_stringify(data))
+        self._value = 0
+    dispatch["string"] = end_string
+    dispatch["name"] = end_string # struct keys are always strings
+
+    def end_array(self, data):
+        mark = self._marks[-1]
+        del self._marks[-1]
+        # map arrays to Python lists
+        self._stack[mark:] = [self._stack[mark:]]
+        self._value = 0
+    dispatch["array"] = end_array
+
+    def end_struct(self, data):
+        mark = self._marks[-1]
+        del self._marks[-1]
+        # map structs to Python dictionaries
+        dict = {}
+        items = self._stack[mark:]
+        for i in range(0, len(items), 2):
+            dict[_stringify(items[i])] = items[i+1]
+        self._stack[mark:] = [dict]
+        self._value = 0
+    dispatch["struct"] = end_struct
+
+    def end_base64(self, data):
+        value = Binary()
+        value.decode(data)
+        self.append(value)
+        self._value = 0
+    dispatch["base64"] = end_base64
+
+    def end_dateTime(self, data):
+        value = DateTime()
+        value.decode(data)
+        self.append(value)
+    dispatch["dateTime.iso8601"] = end_dateTime
+
+    def end_value(self, data):
+        # if we stumble upon an value element with no internal
+        # elements, treat it as a string element
+        if self._value:
+            self.end_string(data)
+    dispatch["value"] = end_value
+
+    def end_params(self, data):
+        self._type = "params"
+    dispatch["params"] = end_params
+
+    def end_fault(self, data):
+        self._type = "fault"
+    dispatch["fault"] = end_fault
+
+    def end_methodName(self, data):
+        if self._encoding:
+            data = _decode(data, self._encoding)
+        self._methodname = data
+        self._type = "methodName" # no params
+    dispatch["methodName"] = end_methodName
+
+
+# --------------------------------------------------------------------
+# convenience functions
+
+def getparser():
+    """getparser() -> parser, unmarshaller
+
+    Create an instance of the fastest available parser, and attach it
+    to an unmarshalling object.  Return both objects.
+    """
+    if FastParser and FastUnmarshaller:
+        target = FastUnmarshaller(True, False, binary, datetime)
+        parser = FastParser(target)
+    else:
+        target = Unmarshaller()
+        if FastParser:
+            parser = FastParser(target)
+        elif SgmlopParser:
+            parser = SgmlopParser(target)
+        elif ExpatParser:
+            parser = ExpatParser(target)
+        else:
+            parser = SlowParser(target)
+    return parser, target
+
+def dumps(params, methodname=None, methodresponse=None, encoding=None):
+    """data [,options] -> marshalled data
+
+    Convert an argument tuple or a Fault instance to an XML-RPC
+    request (or response, if the methodresponse option is used).
+
+    In addition to the data object, the following options can be given
+    as keyword arguments:
+
+        methodname: the method name for a methodCall packet
+
+        methodresponse: true to create a methodResponse packet.
+        If this option is used with a tuple, the tuple must be
+        a singleton (i.e. it can contain only one element).
+
+        encoding: the packet encoding (default is UTF-8)
+
+    All 8-bit strings in the data structure are assumed to use the
+    packet encoding.  Unicode strings are automatically converted,
+    where necessary.
+    """
+
+    assert isinstance(params, TupleType) or isinstance(params, Fault),\
+           "argument must be tuple or Fault instance"
+
+    if isinstance(params, Fault):
+        methodresponse = 1
+    elif methodresponse and isinstance(params, TupleType):
+        assert len(params) == 1, "response tuple must be a singleton"
+
+    if not encoding:
+        encoding = "utf-8"
+
+    m = Marshaller(encoding)
+    data = m.dumps(params)
+
+    if encoding != "utf-8":
+        xmlheader = "<?xml version='1.0' encoding=%s?>\n" % repr(encoding)
+    else:
+        xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default
+
+    # standard XML-RPC wrappings
+    if methodname:
+        # a method call
+        if not isinstance(methodname, StringType):
+            methodname = methodname.encode(encoding)
+        data = (
+            xmlheader,
+            "<methodCall>\n"
+            "<methodName>", methodname, "</methodName>\n",
+            data,
+            "</methodCall>\n"
+            )
+    elif methodresponse:
+        # a method response, or a fault structure
+        data = (
+            xmlheader,
+            "<methodResponse>\n",
+            data,
+            "</methodResponse>\n"
+            )
+    else:
+        return data # return as is
+    return string.join(data, "")
+
+def loads(data):
+    """data -> unmarshalled data, method name
+
+    Convert an XML-RPC packet to unmarshalled data plus a method
+    name (None if not present).
+
+    If the XML-RPC packet represents a fault condition, this function
+    raises a Fault exception.
+    """
+    p, u = getparser()
+    p.feed(data)
+    p.close()
+    return u.close(), u.getmethodname()
+
+
+# --------------------------------------------------------------------
+# request dispatcher
+
+class _Method:
+    # some magic to bind an XML-RPC method to an RPC server.
+    # supports "nested" methods (e.g. examples.getStateName)
+    def __init__(self, send, name):
+        self.__send = send
+        self.__name = name
+    def __getattr__(self, name):
+        return _Method(self.__send, "%s.%s" % (self.__name, name))
+    def __call__(self, *args):
+        return self.__send(self.__name, args)
+
+
+class Transport:
+    """Handles an HTTP transaction to an XML-RPC server."""
+
+    # client identifier (may be overridden)
+    user_agent = "xmlrpclib.py/%s (by www.pythonware.com)" % __version__
+
+    def request(self, host, handler, request_body, verbose=0):
+        # issue XML-RPC request
+        # Returns the unmarshalled response data; raises ProtocolError
+        # on any non-200 HTTP status.
+
+        h = self.make_connection(host)
+        if verbose:
+            h.set_debuglevel(1)
+
+        # POST line, standard headers, then the XML payload.
+        self.send_request(h, handler, request_body)
+        self.send_host(h, host)
+        self.send_user_agent(h)
+        self.send_content(h, request_body)
+
+        errcode, errmsg, headers = h.getreply()
+
+        if errcode != 200:
+            raise ProtocolError(
+                host + handler,
+                errcode, errmsg,
+                headers
+                )
+
+        # Remembered on the instance so parse_response can echo the raw
+        # body when verbose mode was requested.
+        self.verbose = verbose
+
+        return self.parse_response(h.getfile())
+
+    def getparser(self):
+        # get parser and unmarshaller
+        return getparser()
+
+    def make_connection(self, host):
+        # create a HTTP connection object from a host descriptor
+        import httplib
+        return httplib.HTTP(host)
+
+    def send_request(self, connection, handler, request_body):
+        # start the request: POST <handler> (e.g. /RPC2)
+        connection.putrequest("POST", handler)
+
+    def send_host(self, connection, host):
+        connection.putheader("Host", host)
+
+    def send_user_agent(self, connection):
+        connection.putheader("User-Agent", self.user_agent)
+
+    def send_content(self, connection, request_body):
+        # emit entity headers, end the header block, then the body
+        connection.putheader("Content-Type", "text/xml")
+        connection.putheader("Content-Length", str(len(request_body)))
+        connection.endheaders()
+        if request_body:
+            connection.send(request_body)
+
+    def parse_response(self, f):
+        # read response from input file, and parse it
+
+        p, u = self.getparser()
+
+        # Stream the body through the parser in 1 KB chunks.
+        while 1:
+            response = f.read(1024)
+            if not response:
+                break
+            if self.verbose:
+                print "body:", repr(response)
+            p.feed(response)
+
+        f.close()
+        p.close()
+
+        return u.close()
+
+class SafeTransport(Transport):
+    """Handles an HTTPS transaction to an XML-RPC server."""
+
+    def make_connection(self, host):
+        # create a HTTPS connection object from a host descriptor
+        # host may be a string, or a (host, x509-dict) tuple
+        import httplib
+        if isinstance(host, TupleType):
+            host, x509 = host
+        else:
+            x509 = {}
+        try:
+            # httplib.HTTPS only exists when the interpreter was built
+            # with SSL socket support.
+            HTTPS = httplib.HTTPS
+        except AttributeError:
+            raise NotImplementedError,\
+                  "your version of httplib doesn't support HTTPS"
+        else:
+            # pass the x509 dict entries as keyword arguments
+            return apply(HTTPS, (host, None), x509)
+
+    def send_host(self, connection, host):
+        # host may still be a (host, x509-dict) tuple here; only the
+        # host string goes into the Host header.
+        if isinstance(host, TupleType):
+            host, x509 = host
+        connection.putheader("Host", host)
+
+class ServerProxy:
+    """uri [,options] -> a logical connection to an XML-RPC server
+
+    uri is the connection point on the server, given as
+    scheme://host/target.
+
+    The standard implementation always supports the "http" scheme.  If
+    SSL socket support is available (Python 2.0), it also supports
+    "https".
+
+    If the target part and the slash preceding it are both omitted,
+    "/RPC2" is assumed.
+
+    The following options can be given as keyword arguments:
+
+        transport: a transport factory
+        encoding: the request encoding (default is UTF-8)
+
+    All 8-bit strings passed to the server proxy are assumed to use
+    the given encoding.
+    """
+
+    def __init__(self, uri, transport=None, encoding=None, verbose=0):
+        # establish a "logical" server connection
+
+        # get the url
+        import urllib
+        type, uri = urllib.splittype(uri)
+        if type not in ("http", "https"):
+            raise IOError, "unsupported XML-RPC protocol"
+        self.__host, self.__handler = urllib.splithost(uri)
+        if not self.__handler:
+            # default handler path per the XML-RPC convention
+            self.__handler = "/RPC2"
+
+        if transport is None:
+            # pick a transport matching the scheme
+            if type == "https":
+                transport = SafeTransport()
+            else:
+                transport = Transport()
+        self.__transport = transport
+
+        self.__encoding = encoding
+        self.__verbose = verbose
+
+    def __request(self, methodname, params):
+        # call a method on the remote server
+
+        request = dumps(params, methodname, encoding=self.__encoding)
+
+        response = self.__transport.request(
+            self.__host,
+            self.__handler,
+            request,
+            verbose=self.__verbose
+            )
+
+        # a single-element response tuple is unwrapped for convenience
+        if len(response) == 1:
+            response = response[0]
+
+        return response
+
+    def __repr__(self):
+        return (
+            "<ServerProxy for %s%s>" %
+            (self.__host, self.__handler)
+            )
+
+    __str__ = __repr__
+
+    def __getattr__(self, name):
+        # magic method dispatcher
+        return _Method(self.__request, name)
+
+    # note: to call a remote object with a non-standard name, use
+    # result = getattr(server, "strange-python-name")(args)
+
+# compatibility: "Server" is the historical name for ServerProxy
+Server = ServerProxy
+
+# --------------------------------------------------------------------
+# test code
+
+if __name__ == "__main__":
+
+    # simple test program (from the XML-RPC specification)
+    # NOTE(review): betty.userland.com was the canonical public XML-RPC
+    # demo server of the era; it may no longer be reachable.
+
+    # server = ServerProxy("http://localhost:8000") # local server
+    server = ServerProxy("http://betty.userland.com")
+
+    print server
+
+    try:
+        print server.examples.getStateName(41)
+    except Error, v:
+        print "ERROR", v
diff --git a/lib-python/2.2/zipfile.py b/lib-python/2.2/zipfile.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/zipfile.py
@@ -0,0 +1,586 @@
+"Read and write ZIP files."
+
+import struct, os, time
+import binascii
+
+try:
+    import zlib # We may need its compression method
+except ImportError:
+    zlib = None
+
+__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
+           "ZipInfo", "ZipFile", "PyZipFile"]
+
+class BadZipfile(Exception):
+    """Raised when a file is not a valid ZIP archive."""
+    pass
+error = BadZipfile      # The exception raised by this module
+
+# constants for Zip file compression methods
+ZIP_STORED = 0
+ZIP_DEFLATED = 8
+# Other ZIP compression methods not supported
+
+# Here are some struct module formats for reading headers.
+# All are little-endian ("<") with signed 32-bit longs, matching the
+# on-disk layout of the ZIP record structures.
+structEndArchive = "<4s4H2lH"     # 9 items, end of archive, 22 bytes
+stringEndArchive = "PK\005\006"   # magic number for end of archive record
+structCentralDir = "<4s4B4H3l5H2l"# 19 items, central directory, 46 bytes
+stringCentralDir = "PK\001\002"   # magic number for central directory
+structFileHeader = "<4s2B4H3l2H"  # 12 items, file header record, 30 bytes
+stringFileHeader = "PK\003\004"   # magic number for file header
+
+# indexes of entries in the central directory structure
+_CD_SIGNATURE = 0
+_CD_CREATE_VERSION = 1
+_CD_CREATE_SYSTEM = 2
+_CD_EXTRACT_VERSION = 3
+_CD_EXTRACT_SYSTEM = 4                  # is this meaningful?
+_CD_FLAG_BITS = 5
+_CD_COMPRESS_TYPE = 6
+_CD_TIME = 7
+_CD_DATE = 8
+_CD_CRC = 9
+_CD_COMPRESSED_SIZE = 10
+_CD_UNCOMPRESSED_SIZE = 11
+_CD_FILENAME_LENGTH = 12
+_CD_EXTRA_FIELD_LENGTH = 13
+_CD_COMMENT_LENGTH = 14
+_CD_DISK_NUMBER_START = 15
+_CD_INTERNAL_FILE_ATTRIBUTES = 16
+_CD_EXTERNAL_FILE_ATTRIBUTES = 17
+_CD_LOCAL_HEADER_OFFSET = 18
+
+# indexes of entries in the local file header structure
+_FH_SIGNATURE = 0
+_FH_EXTRACT_VERSION = 1
+_FH_EXTRACT_SYSTEM = 2                  # is this meaningful?
+_FH_GENERAL_PURPOSE_FLAG_BITS = 3
+_FH_COMPRESSION_METHOD = 4
+_FH_LAST_MOD_TIME = 5
+_FH_LAST_MOD_DATE = 6
+_FH_CRC = 7
+_FH_COMPRESSED_SIZE = 8
+_FH_UNCOMPRESSED_SIZE = 9
+_FH_FILENAME_LENGTH = 10
+_FH_EXTRA_FIELD_LENGTH = 11
+
+# Used to compare file passed to ZipFile
+import types
+_STRING_TYPES = (types.StringType,)
+if hasattr(types, "UnicodeType"):
+    _STRING_TYPES = _STRING_TYPES + (types.UnicodeType,)
+
+
+def is_zipfile(filename):
+    """Quickly see if file is a ZIP file by checking the magic number.
+
+    Will not accept a ZIP archive with an ending comment.
+
+    Returns 1 if the magic number matches; returns None (falsy) on a
+    non-ZIP file or when the file cannot be read.
+    """
+    try:
+        fpin = open(filename, "rb")
+        # The end-of-central-directory record is 22 bytes when there is
+        # no archive comment, so it starts 22 bytes from EOF.
+        fpin.seek(-22, 2)               # Seek to end-of-file record
+        endrec = fpin.read()
+        # Check magic number and that the comment length field is zero.
+        if endrec[0:4] == "PK\005\006" and endrec[-2:] == "\000\000":
+            return 1    # file has correct magic number
+    except IOError:
+        pass
+
+
+class ZipInfo:
+    """Class with attributes describing each file in the ZIP archive."""
+
+    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
+        self.orig_filename = filename   # Original file name in archive
+# Terminate the file name at the first null byte.  Null bytes in file
+# names are used as tricks by viruses in archives.
+        null_byte = filename.find(chr(0))
+        if null_byte >= 0:
+            filename = filename[0:null_byte]
+# This is used to ensure paths in generated ZIP files always use
+# forward slashes as the directory separator, as required by the
+# ZIP format specification.
+        if os.sep != "/":
+            filename = filename.replace(os.sep, "/")
+        self.filename = filename        # Normalized file name
+        self.date_time = date_time      # year, month, day, hour, min, sec
+        # Standard values:
+        self.compress_type = ZIP_STORED # Type of compression for the file
+        self.comment = ""               # Comment for each file
+        self.extra = ""                 # ZIP extra data
+        self.create_system = 0          # System which created ZIP archive
+        self.create_version = 20        # Version which created ZIP archive
+        self.extract_version = 20       # Version needed to extract archive
+        self.reserved = 0               # Must be zero
+        self.flag_bits = 0              # ZIP flag bits
+        self.volume = 0                 # Volume number of file header
+        self.internal_attr = 0          # Internal attributes
+        self.external_attr = 0          # External file attributes
+        # Other attributes are set by class ZipFile:
+        # header_offset         Byte offset to the file header
+        # file_offset           Byte offset to the start of the file data
+        # CRC                   CRC-32 of the uncompressed file
+        # compress_size         Size of the compressed file
+        # file_size             Size of the uncompressed file
+
+    def FileHeader(self):
+        """Return the per-file header as a string.
+
+        Packs the local file header record, encoding the modification
+        timestamp in MS-DOS date/time format (2-second resolution).
+        """
+        dt = self.date_time
+        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+        if self.flag_bits & 0x08:
+            # Set these to zero because we write them after the file data
+            CRC = compress_size = file_size = 0
+        else:
+            CRC = self.CRC
+            compress_size = self.compress_size
+            file_size = self.file_size
+        header = struct.pack(structFileHeader, stringFileHeader,
+                 self.extract_version, self.reserved, self.flag_bits,
+                 self.compress_type, dostime, dosdate, CRC,
+                 compress_size, file_size,
+                 len(self.filename), len(self.extra))
+        return header + self.filename + self.extra
+
+
+class ZipFile:
+    """ Class with methods to open, read, write, close, list zip files.
+
+    z = ZipFile(file, mode="r", compression=ZIP_STORED)
+
+    file: Either the path to the file, or a file-like object.
+          If it is a path, the file will be opened and closed by ZipFile.
+    mode: The mode can be either read "r", write "w" or append "a".
+    compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
+    """
+
+    fp = None                   # Set here since __del__ checks it
+
+    def __init__(self, file, mode="r", compression=ZIP_STORED):
+        """Open the ZIP file with mode read "r", write "w" or append "a"."""
+        if compression == ZIP_STORED:
+            pass
+        elif compression == ZIP_DEFLATED:
+            if not zlib:
+                raise RuntimeError,\
+                      "Compression requires the (missing) zlib module"
+        else:
+            raise RuntimeError, "That compression method is not supported"
+        self.debug = 0  # Level of printing: 0 through 3
+        self.NameToInfo = {}    # Find file info given name
+        self.filelist = []      # List of ZipInfo instances for archive
+        self.compression = compression  # Method of compression
+        # key is the first character of mode ("r", "w" or "a")
+        self.mode = key = mode[0]
+
+        # Check if we were passed a file-like object
+        if type(file) in _STRING_TYPES:
+            self._filePassed = 0
+            self.filename = file
+            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
+            self.fp = open(file, modeDict[mode])
+        else:
+            self._filePassed = 1
+            self.fp = file
+            self.filename = getattr(file, 'name', None)
+
+        if key == 'r':
+            self._GetContents()
+        elif key == 'w':
+            pass
+        elif key == 'a':
+            fp = self.fp
+            fp.seek(-22, 2)             # Seek to end-of-file record
+            endrec = fp.read()
+            if endrec[0:4] == stringEndArchive and \
+                       endrec[-2:] == "\000\000":
+                self._GetContents()     # file is a zip file
+                # seek to start of directory and overwrite
+                fp.seek(self.start_dir, 0)
+            else:               # file is not a zip file, just append
+                fp.seek(0, 2)
+        else:
+            # invalid mode: close any file we opened ourselves, then raise
+            if not self._filePassed:
+                self.fp.close()
+                self.fp = None
+            raise RuntimeError, 'Mode must be "r", "w" or "a"'
+
+    def _GetContents(self):
+        """Read the directory, making sure we close the file if the format
+        is bad."""
+        try:
+            self._RealGetContents()
+        except BadZipfile:
+            if not self._filePassed:
+                self.fp.close()
+                self.fp = None
+            raise
+
+    def _RealGetContents(self):
+        """Read in the table of contents for the ZIP file.
+
+        Populates self.filelist and self.NameToInfo, and sets
+        self.start_dir to the position of the central directory.
+        Raises BadZipfile if the end-of-archive or directory records
+        are malformed.
+        """
+        fp = self.fp
+        fp.seek(-22, 2)         # Start of end-of-archive record
+        filesize = fp.tell() + 22       # Get file size
+        endrec = fp.read(22)    # Archive must not end with a comment!
+        if endrec[0:4] != stringEndArchive or endrec[-2:] != "\000\000":
+            raise BadZipfile, "File is not a zip file, or ends with a comment"
+        endrec = struct.unpack(structEndArchive, endrec)
+        if self.debug > 1:
+            print endrec
+        size_cd = endrec[5]             # bytes in central directory
+        offset_cd = endrec[6]   # offset of central directory
+        x = filesize - 22 - size_cd
+        # "concat" is zero, unless zip was concatenated to another file
+        concat = x - offset_cd
+        if self.debug > 2:
+            print "given, inferred, offset", offset_cd, x, concat
+        # self.start_dir:  Position of start of central directory
+        self.start_dir = offset_cd + concat
+        fp.seek(self.start_dir, 0)
+        total = 0
+        # Walk the central directory, one 46-byte fixed record plus
+        # variable-length fields per archive member.
+        while total < size_cd:
+            centdir = fp.read(46)
+            total = total + 46
+            if centdir[0:4] != stringCentralDir:
+                raise BadZipfile, "Bad magic number for central directory"
+            centdir = struct.unpack(structCentralDir, centdir)
+            if self.debug > 2:
+                print centdir
+            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
+            # Create ZipInfo instance to store file information
+            x = ZipInfo(filename)
+            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
+            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
+            total = (total + centdir[_CD_FILENAME_LENGTH]
+                     + centdir[_CD_EXTRA_FIELD_LENGTH]
+                     + centdir[_CD_COMMENT_LENGTH])
+            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + concat
+            # file_offset must be computed below...
+            (x.create_version, x.create_system, x.extract_version, x.reserved,
+                x.flag_bits, x.compress_type, t, d,
+                x.CRC, x.compress_size, x.file_size) = centdir[1:12]
+            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
+            # Convert date/time code to (year, month, day, hour, min, sec)
+            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
+                                     t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
+            self.filelist.append(x)
+            self.NameToInfo[x.filename] = x
+            if self.debug > 2:
+                print "total", total
+        # Second pass: read each local file header to locate the actual
+        # file data, and sanity-check the name against the directory.
+        for data in self.filelist:
+            fp.seek(data.header_offset, 0)
+            fheader = fp.read(30)
+            if fheader[0:4] != stringFileHeader:
+                raise BadZipfile, "Bad magic number for file header"
+            fheader = struct.unpack(structFileHeader, fheader)
+            # file_offset is computed here, since the extra field for
+            # the central directory and for the local file header
+            # refer to different fields, and they can have different
+            # lengths
+            data.file_offset = (data.header_offset + 30
+                                + fheader[_FH_FILENAME_LENGTH]
+                                + fheader[_FH_EXTRA_FIELD_LENGTH])
+            fname = fp.read(fheader[_FH_FILENAME_LENGTH])
+            if fname != data.orig_filename:
+                raise RuntimeError, \
+                      'File name in directory "%s" and header "%s" differ.' % (
+                          data.orig_filename, fname)
+
+    def namelist(self):
+        """Return a list of file names in the archive."""
+        l = []
+        for data in self.filelist:
+            l.append(data.filename)
+        return l
+
+    def infolist(self):
+        """Return a list of class ZipInfo instances for files in the
+        archive."""
+        return self.filelist
+
+    def printdir(self):
+        """Print a table of contents for the zip file."""
+        print "%-46s %19s %12s" % ("File Name", "Modified    ", "Size")
+        for zinfo in self.filelist:
+            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
+            print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
+
+    def testzip(self):
+        """Read all the files and check the CRC.
+
+        Returns the name of the first bad file, or None if all files
+        check out.
+        """
+        for zinfo in self.filelist:
+            try:
+                self.read(zinfo.filename)       # Check CRC-32
+            except:
+                return zinfo.filename
+
+    def getinfo(self, name):
+        """Return the instance of ZipInfo given 'name'.
+
+        Raises KeyError if the archive has no member by that name.
+        """
+        return self.NameToInfo[name]
+
+    def read(self, name):
+        """Return file bytes (as a string) for name.
+
+        Raises BadZipfile if the stored CRC-32 does not match the data,
+        and RuntimeError on mode/state misuse.
+        """
+        if self.mode not in ("r", "a"):
+            raise RuntimeError, 'read() requires mode "r" or "a"'
+        if not self.fp:
+            raise RuntimeError, \
+                  "Attempt to read ZIP archive that was already closed"
+        zinfo = self.getinfo(name)
+        # Save and restore the file position so read() does not disturb
+        # any write in progress on the same file object.
+        filepos = self.fp.tell()
+        self.fp.seek(zinfo.file_offset, 0)
+        bytes = self.fp.read(zinfo.compress_size)
+        self.fp.seek(filepos, 0)
+        if zinfo.compress_type == ZIP_STORED:
+            pass
+        elif zinfo.compress_type == ZIP_DEFLATED:
+            if not zlib:
+                raise RuntimeError, \
+                      "De-compression requires the (missing) zlib module"
+            # zlib compress/decompress code by Jeremy Hylton of CNRI
+            # -15: raw deflate stream (no zlib header), 32K window
+            dc = zlib.decompressobj(-15)
+            bytes = dc.decompress(bytes)
+            # need to feed in unused pad byte so that zlib won't choke
+            ex = dc.decompress('Z') + dc.flush()
+            if ex:
+                bytes = bytes + ex
+        else:
+            raise BadZipfile, \
+                  "Unsupported compression method %d for file %s" % \
+            (zinfo.compress_type, name)
+        crc = binascii.crc32(bytes)
+        if crc != zinfo.CRC:
+            raise BadZipfile, "Bad CRC-32 for file %s" % name
+        return bytes
+
+    def _writecheck(self, zinfo):
+        """Check for errors before writing a file to the archive."""
+        if self.NameToInfo.has_key(zinfo.filename):
+            if self.debug:      # Warning for duplicate names
+                print "Duplicate name:", zinfo.filename
+        if self.mode not in ("w", "a"):
+            raise RuntimeError, 'write() requires mode "w" or "a"'
+        if not self.fp:
+            raise RuntimeError, \
+                  "Attempt to write ZIP archive that was already closed"
+        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
+            raise RuntimeError, \
+                  "Compression requires the (missing) zlib module"
+        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
+            raise RuntimeError, \
+                  "That compression method is not supported"
+
+    def write(self, filename, arcname=None, compress_type=None):
+        """Put the bytes from filename into the archive under the name
+        arcname.
+
+        compress_type overrides the archive-level compression setting
+        for this member only.
+        """
+        st = os.stat(filename)
+        mtime = time.localtime(st[8])
+        date_time = mtime[0:6]
+        # Create ZipInfo instance to store file information
+        if arcname is None:
+            zinfo = ZipInfo(filename, date_time)
+        else:
+            zinfo = ZipInfo(arcname, date_time)
+        zinfo.external_attr = st[0] << 16       # Unix attributes
+        if compress_type is None:
+            zinfo.compress_type = self.compression
+        else:
+            zinfo.compress_type = compress_type
+        self._writecheck(zinfo)
+        fp = open(filename, "rb")
+        zinfo.flag_bits = 0x00
+        zinfo.header_offset = self.fp.tell()    # Start of header bytes
+        # Must overwrite CRC and sizes with correct data later
+        zinfo.CRC = CRC = 0
+        zinfo.compress_size = compress_size = 0
+        zinfo.file_size = file_size = 0
+        self.fp.write(zinfo.FileHeader())
+        zinfo.file_offset = self.fp.tell()      # Start of file bytes
+        if zinfo.compress_type == ZIP_DEFLATED:
+            # -15: raw deflate stream (no zlib header)
+            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
+                 zlib.DEFLATED, -15)
+        else:
+            cmpr = None
+        # Stream the source file through in 8K chunks, updating the
+        # CRC and sizes as we go.
+        while 1:
+            buf = fp.read(1024 * 8)
+            if not buf:
+                break
+            file_size = file_size + len(buf)
+            CRC = binascii.crc32(buf, CRC)
+            if cmpr:
+                buf = cmpr.compress(buf)
+                compress_size = compress_size + len(buf)
+            self.fp.write(buf)
+        fp.close()
+        if cmpr:
+            buf = cmpr.flush()
+            compress_size = compress_size + len(buf)
+            self.fp.write(buf)
+            zinfo.compress_size = compress_size
+        else:
+            zinfo.compress_size = file_size
+        zinfo.CRC = CRC
+        zinfo.file_size = file_size
+        # Seek backwards and write CRC and file sizes
+        position = self.fp.tell()       # Preserve current position in file
+        # +14 skips to the CRC field inside the local file header
+        self.fp.seek(zinfo.header_offset + 14, 0)
+        self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
+              zinfo.file_size))
+        self.fp.seek(position, 0)
+        self.filelist.append(zinfo)
+        self.NameToInfo[zinfo.filename] = zinfo
+
+    def writestr(self, zinfo, bytes):
+        """Write a file into the archive.  The contents are given by the
+        string 'bytes'."""
+        self._writecheck(zinfo)
+        zinfo.file_size = len(bytes)            # Uncompressed size
+        zinfo.CRC = binascii.crc32(bytes)       # CRC-32 checksum
+        if zinfo.compress_type == ZIP_DEFLATED:
+            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
+                 zlib.DEFLATED, -15)
+            bytes = co.compress(bytes) + co.flush()
+            zinfo.compress_size = len(bytes)    # Compressed size
+        else:
+            zinfo.compress_size = zinfo.file_size
+        zinfo.header_offset = self.fp.tell()    # Start of header bytes
+        self.fp.write(zinfo.FileHeader())
+        zinfo.file_offset = self.fp.tell()      # Start of file bytes
+        self.fp.write(bytes)
+        if zinfo.flag_bits & 0x08:
+            # Write CRC and file sizes after the file data
+            self.fp.write(struct.pack("<lll", zinfo.CRC, zinfo.compress_size,
+                  zinfo.file_size))
+        self.filelist.append(zinfo)
+        self.NameToInfo[zinfo.filename] = zinfo
+
+    def __del__(self):
+        """Call the "close()" method in case the user forgot."""
+        self.close()
+
+    def close(self):
+        """Close the file, and for mode "w" and "a" write the ending
+        records."""
+        if self.fp is None:
+            return
+        if self.mode in ("w", "a"):             # write ending records
+            count = 0
+            pos1 = self.fp.tell()
+            for zinfo in self.filelist:         # write central directory
+                count = count + 1
+                dt = zinfo.date_time
+                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+                centdir = struct.pack(structCentralDir,
+                  stringCentralDir, zinfo.create_version,
+                  zinfo.create_system, zinfo.extract_version, zinfo.reserved,
+                  zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
+                  zinfo.CRC, zinfo.compress_size, zinfo.file_size,
+                  len(zinfo.filename), len(zinfo.extra), len(zinfo.comment),
+                  0, zinfo.internal_attr, zinfo.external_attr,
+                  zinfo.header_offset)
+                self.fp.write(centdir)
+                self.fp.write(zinfo.filename)
+                self.fp.write(zinfo.extra)
+                self.fp.write(zinfo.comment)
+            pos2 = self.fp.tell()
+            # Write end-of-zip-archive record
+            endrec = struct.pack(structEndArchive, stringEndArchive,
+                     0, 0, count, count, pos2 - pos1, pos1, 0)
+            self.fp.write(endrec)
+            self.fp.flush()
+        # Only close file objects we opened ourselves.
+        if not self._filePassed:
+            self.fp.close()
+        self.fp = None
+
+
+class PyZipFile(ZipFile):
+    """Class to create ZIP archives with Python library files and packages."""
+
+    def writepy(self, pathname, basename = ""):
+        """Add all files from "pathname" to the ZIP archive.
+
+        If pathname is a package directory, search the directory and
+        all package subdirectories recursively for all *.py and enter
+        the modules into the archive.  If pathname is a plain
+        directory, listdir *.py and enter all modules.  Else, pathname
+        must be a Python *.py file and the module will be put into the
+        archive.  Added modules are always module.pyo or module.pyc.
+        This method will compile the module.py into module.pyc if
+        necessary.
+        """
+        dir, name = os.path.split(pathname)
+        if os.path.isdir(pathname):
+            initname = os.path.join(pathname, "__init__.py")
+            if os.path.isfile(initname):
+                # This is a package directory, add it
+                if basename:
+                    basename = "%s/%s" % (basename, name)
+                else:
+                    basename = name
+                if self.debug:
+                    print "Adding package in", pathname, "as", basename
+                # [0:-3] strips the ".py" suffix before codename lookup
+                fname, arcname = self._get_codename(initname[0:-3], basename)
+                if self.debug:
+                    print "Adding", arcname
+                self.write(fname, arcname)
+                dirlist = os.listdir(pathname)
+                dirlist.remove("__init__.py")
+                # Add all *.py files and package subdirectories
+                for filename in dirlist:
+                    path = os.path.join(pathname, filename)
+                    root, ext = os.path.splitext(filename)
+                    if os.path.isdir(path):
+                        if os.path.isfile(os.path.join(path, "__init__.py")):
+                            # This is a package directory, add it
+                            self.writepy(path, basename)  # Recursive call
+                    elif ext == ".py":
+                        fname, arcname = self._get_codename(path[0:-3],
+                                         basename)
+                        if self.debug:
+                            print "Adding", arcname
+                        self.write(fname, arcname)
+            else:
+                # This is NOT a package directory, add its files at top level
+                if self.debug:
+                    print "Adding files from directory", pathname
+                for filename in os.listdir(pathname):
+                    path = os.path.join(pathname, filename)
+                    root, ext = os.path.splitext(filename)
+                    if ext == ".py":
+                        fname, arcname = self._get_codename(path[0:-3],
+                                         basename)
+                        if self.debug:
+                            print "Adding", arcname
+                        self.write(fname, arcname)
+        else:
+            if pathname[-3:] != ".py":
+                raise RuntimeError, \
+                      'Files added with writepy() must end with ".py"'
+            fname, arcname = self._get_codename(pathname[0:-3], basename)
+            if self.debug:
+                print "Adding file", arcname
+            self.write(fname, arcname)
+
+    def _get_codename(self, pathname, basename):
+        """Return (filename, archivename) for the path.
+
+        Given a module name path, return the correct file path and
+        archive name, compiling if necessary.  For example, given
+        /python/lib/string, return (/python/lib/string.pyc, string).
+        """
+        file_py  = pathname + ".py"
+        file_pyc = pathname + ".pyc"
+        file_pyo = pathname + ".pyo"
+        # Prefer an up-to-date .pyo; otherwise use (or regenerate) .pyc.
+        # os.stat(...)[8] is the mtime field.
+        if os.path.isfile(file_pyo) and \
+                            os.stat(file_pyo)[8] >= os.stat(file_py)[8]:
+            fname = file_pyo    # Use .pyo file
+        elif not os.path.isfile(file_pyc) or \
+             os.stat(file_pyc)[8] < os.stat(file_py)[8]:
+            import py_compile
+            if self.debug:
+                print "Compiling", file_py
+            py_compile.compile(file_py, file_pyc)
+            fname = file_pyc
+        else:
+            fname = file_pyc
+        archivename = os.path.split(fname)[1]
+        if basename:
+            archivename = "%s/%s" % (basename, archivename)
+        return (fname, archivename)

-- 
Repository URL: http://hg.python.org/jython


More information about the Jython-checkins mailing list