[Jython-checkins] jython (2.2): Switch (back) to copying full CPython Lib until we get to a shared Lib.

frank.wierzbicki jython-checkins at python.org
Mon Mar 19 20:34:44 CET 2012


http://hg.python.org/jython/rev/bed9f9de4ef3
changeset:   6412:bed9f9de4ef3
branch:      2.2
parent:      5374:512dab783493
user:        Frank Wierzbicki <fwierzbicki at gmail.com>
date:        Mon Mar 19 10:45:11 2012 -0700
summary:
  Switch (back) to copying full CPython Lib until we get to a shared Lib.

files:
  .hgignore                                           |    30 +
  .hgsub                                              |     1 -
  .hgsubstate                                         |     1 -
  build.xml                                           |     4 +-
  lib-python/2.2/BaseHTTPServer.py                    |   484 +
  lib-python/2.2/Bastion.py                           |   177 +
  lib-python/2.2/CGIHTTPServer.py                     |   325 +
  lib-python/2.2/ConfigParser.py                      |   472 +
  lib-python/2.2/Cookie.py                            |   742 ++
  lib-python/2.2/FCNTL.py                             |    14 +
  lib-python/2.2/HTMLParser.py                        |   383 +
  lib-python/2.2/MimeWriter.py                        |   181 +
  lib-python/2.2/Queue.py                             |   151 +
  lib-python/2.2/SimpleHTTPServer.py                  |   198 +
  lib-python/2.2/SimpleXMLRPCServer.py                |   274 +
  lib-python/2.2/SocketServer.py                      |   576 +
  lib-python/2.2/StringIO.py                          |   239 +
  lib-python/2.2/TERMIOS.py                           |    14 +
  lib-python/2.2/UserDict.py                          |    60 +
  lib-python/2.2/UserList.py                          |    85 +
  lib-python/2.2/UserString.py                        |   182 +
  lib-python/2.2/__future__.py                        |   104 +
  lib-python/2.2/__phello__.foo.py                    |     1 +
  lib-python/2.2/aifc.py                              |   961 ++
  lib-python/2.2/anydbm.py                            |    86 +
  lib-python/2.2/asynchat.py                          |   293 +
  lib-python/2.2/asyncore.py                          |   556 +
  lib-python/2.2/atexit.py                            |    50 +
  lib-python/2.2/audiodev.py                          |   257 +
  lib-python/2.2/base64.py                            |    77 +
  lib-python/2.2/bdb.py                               |   563 +
  lib-python/2.2/binhex.py                            |   531 +
  lib-python/2.2/bisect.py                            |    78 +
  lib-python/2.2/calendar.py                          |   246 +
  lib-python/2.2/cgi.py                               |  1040 +++
  lib-python/2.2/cgitb.py                             |   205 +
  lib-python/2.2/chunk.py                             |   167 +
  lib-python/2.2/cmd.py                               |   336 +
  lib-python/2.2/code.py                              |   311 +
  lib-python/2.2/codecs.py                            |   636 +
  lib-python/2.2/codeop.py                            |   171 +
  lib-python/2.2/colorsys.py                          |   123 +
  lib-python/2.2/commands.py                          |    84 +
  lib-python/2.2/compileall.py                        |   148 +
  lib-python/2.2/compiler/__init__.py                 |    26 +
  lib-python/2.2/compiler/ast.py                      |  1241 +++
  lib-python/2.2/compiler/consts.py                   |    19 +
  lib-python/2.2/compiler/future.py                   |    72 +
  lib-python/2.2/compiler/misc.py                     |    74 +
  lib-python/2.2/compiler/pyassem.py                  |   824 ++
  lib-python/2.2/compiler/pycodegen.py                |  1388 ++++
  lib-python/2.2/compiler/symbols.py                  |   419 +
  lib-python/2.2/compiler/syntax.py                   |    46 +
  lib-python/2.2/compiler/transformer.py              |  1359 ++++
  lib-python/2.2/compiler/visitor.py                  |   121 +
  lib-python/2.2/copy.py                              |   381 +
  lib-python/2.2/copy_reg.py                          |    78 +
  lib-python/2.2/curses/__init__.py                   |    54 +
  lib-python/2.2/curses/ascii.py                      |   100 +
  lib-python/2.2/curses/has_key.py                    |   189 +
  lib-python/2.2/curses/panel.py                      |     9 +
  lib-python/2.2/curses/textpad.py                    |   167 +
  lib-python/2.2/curses/wrapper.py                    |    63 +
  lib-python/2.2/dbhash.py                            |    16 +
  lib-python/2.2/difflib.py                           |  1088 +++
  lib-python/2.2/dircache.py                          |    44 +
  lib-python/2.2/dis.py                               |   327 +
  lib-python/2.2/distutils/README                     |    18 +
  lib-python/2.2/distutils/__init__.py                |    13 +
  lib-python/2.2/distutils/archive_util.py            |   173 +
  lib-python/2.2/distutils/bcppcompiler.py            |   409 +
  lib-python/2.2/distutils/ccompiler.py               |  1046 +++
  lib-python/2.2/distutils/cmd.py                     |   486 +
  lib-python/2.2/distutils/command/__init__.py        |    24 +
  lib-python/2.2/distutils/command/bdist.py           |   139 +
  lib-python/2.2/distutils/command/bdist_dumb.py      |    96 +
  lib-python/2.2/distutils/command/bdist_rpm.py       |   488 +
  lib-python/2.2/distutils/command/bdist_wininst.py   |   570 +
  lib-python/2.2/distutils/command/build.py           |   131 +
  lib-python/2.2/distutils/command/build_clib.py      |   240 +
  lib-python/2.2/distutils/command/build_ext.py       |   630 +
  lib-python/2.2/distutils/command/build_py.py        |   401 +
  lib-python/2.2/distutils/command/build_scripts.py   |   110 +
  lib-python/2.2/distutils/command/clean.py           |    79 +
  lib-python/2.2/distutils/command/command_template   |    45 +
  lib-python/2.2/distutils/command/config.py          |   366 +
  lib-python/2.2/distutils/command/install.py         |   598 +
  lib-python/2.2/distutils/command/install_data.py    |    83 +
  lib-python/2.2/distutils/command/install_headers.py |    53 +
  lib-python/2.2/distutils/command/install_lib.py     |   213 +
  lib-python/2.2/distutils/command/install_scripts.py |    63 +
  lib-python/2.2/distutils/command/sdist.py           |   475 +
  lib-python/2.2/distutils/core.py                    |   231 +
  lib-python/2.2/distutils/cygwinccompiler.py         |   441 +
  lib-python/2.2/distutils/dep_util.py                |   115 +
  lib-python/2.2/distutils/dir_util.py                |   219 +
  lib-python/2.2/distutils/dist.py                    |  1086 +++
  lib-python/2.2/distutils/errors.py                  |    99 +
  lib-python/2.2/distutils/extension.py               |   231 +
  lib-python/2.2/distutils/fancy_getopt.py            |   504 +
  lib-python/2.2/distutils/file_util.py               |   258 +
  lib-python/2.2/distutils/filelist.py                |   367 +
  lib-python/2.2/distutils/msvccompiler.py            |   515 +
  lib-python/2.2/distutils/mwerkscompiler.py          |   217 +
  lib-python/2.2/distutils/spawn.py                   |   169 +
  lib-python/2.2/distutils/sysconfig.py               |   445 +
  lib-python/2.2/distutils/text_file.py               |   384 +
  lib-python/2.2/distutils/unixccompiler.py           |   308 +
  lib-python/2.2/distutils/util.py                    |   458 +
  lib-python/2.2/distutils/version.py                 |   301 +
  lib-python/2.2/doctest.py                           |  1173 +++
  lib-python/2.2/dospath.py                           |   341 +
  lib-python/2.2/dumbdbm.py                           |   170 +
  lib-python/2.2/email/Charset.py                     |   393 +
  lib-python/2.2/email/Encoders.py                    |    94 +
  lib-python/2.2/email/Errors.py                      |    26 +
  lib-python/2.2/email/Generator.py                   |   378 +
  lib-python/2.2/email/Header.py                      |   515 +
  lib-python/2.2/email/Iterators.py                   |    25 +
  lib-python/2.2/email/MIMEAudio.py                   |    71 +
  lib-python/2.2/email/MIMEBase.py                    |    24 +
  lib-python/2.2/email/MIMEImage.py                   |    45 +
  lib-python/2.2/email/MIMEMessage.py                 |    32 +
  lib-python/2.2/email/MIMEMultipart.py               |    37 +
  lib-python/2.2/email/MIMENonMultipart.py            |    24 +
  lib-python/2.2/email/MIMEText.py                    |    45 +
  lib-python/2.2/email/Message.py                     |   837 ++
  lib-python/2.2/email/Parser.py                      |   292 +
  lib-python/2.2/email/Utils.py                       |   340 +
  lib-python/2.2/email/__init__.py                    |    72 +
  lib-python/2.2/email/_compat21.py                   |    69 +
  lib-python/2.2/email/_compat22.py                   |    70 +
  lib-python/2.2/email/_parseaddr.py                  |   480 +
  lib-python/2.2/email/base64MIME.py                  |   184 +
  lib-python/2.2/email/quopriMIME.py                  |   323 +
  lib-python/2.2/email/test/__init__.py               |     2 +
  lib-python/2.2/email/test/data/PyBanner048.gif      |   Bin 
  lib-python/2.2/email/test/data/audiotest.au         |   Bin 
  lib-python/2.2/email/test/data/msg_01.txt           |    19 +
  lib-python/2.2/email/test/data/msg_02.txt           |   135 +
  lib-python/2.2/email/test/data/msg_03.txt           |    16 +
  lib-python/2.2/email/test/data/msg_04.txt           |    37 +
  lib-python/2.2/email/test/data/msg_05.txt           |    28 +
  lib-python/2.2/email/test/data/msg_06.txt           |    33 +
  lib-python/2.2/email/test/data/msg_07.txt           |    83 +
  lib-python/2.2/email/test/data/msg_08.txt           |    24 +
  lib-python/2.2/email/test/data/msg_09.txt           |    24 +
  lib-python/2.2/email/test/data/msg_10.txt           |    32 +
  lib-python/2.2/email/test/data/msg_11.txt           |     7 +
  lib-python/2.2/email/test/data/msg_12.txt           |    36 +
  lib-python/2.2/email/test/data/msg_13.txt           |    94 +
  lib-python/2.2/email/test/data/msg_14.txt           |    23 +
  lib-python/2.2/email/test/data/msg_15.txt           |    52 +
  lib-python/2.2/email/test/data/msg_16.txt           |   123 +
  lib-python/2.2/email/test/data/msg_17.txt           |    12 +
  lib-python/2.2/email/test/data/msg_18.txt           |     6 +
  lib-python/2.2/email/test/data/msg_19.txt           |    43 +
  lib-python/2.2/email/test/data/msg_20.txt           |    22 +
  lib-python/2.2/email/test/data/msg_21.txt           |    20 +
  lib-python/2.2/email/test/data/msg_22.txt           |    46 +
  lib-python/2.2/email/test/data/msg_23.txt           |     8 +
  lib-python/2.2/email/test/data/msg_24.txt           |    10 +
  lib-python/2.2/email/test/data/msg_25.txt           |   117 +
  lib-python/2.2/email/test/data/msg_26.txt           |    45 +
  lib-python/2.2/email/test/data/msg_27.txt           |    15 +
  lib-python/2.2/email/test/data/msg_28.txt           |    25 +
  lib-python/2.2/email/test/data/msg_29.txt           |    22 +
  lib-python/2.2/email/test/data/msg_30.txt           |    23 +
  lib-python/2.2/email/test/data/msg_31.txt           |    15 +
  lib-python/2.2/email/test/data/msg_32.txt           |    14 +
  lib-python/2.2/email/test/data/msg_33.txt           |    29 +
  lib-python/2.2/email/test/data/msg_34.txt           |    19 +
  lib-python/2.2/email/test/data/msg_35.txt           |     4 +
  lib-python/2.2/email/test/test_email.py             |  2718 ++++++++
  lib-python/2.2/email/test/test_email_codecs.py      |    68 +
  lib-python/2.2/email/test/test_email_torture.py     |   136 +
  lib-python/2.2/encodings/__init__.py                |    97 +
  lib-python/2.2/encodings/aliases.py                 |   115 +
  lib-python/2.2/encodings/ascii.py                   |    35 +
  lib-python/2.2/encodings/base64_codec.py            |    62 +
  lib-python/2.2/encodings/charmap.py                 |    51 +
  lib-python/2.2/encodings/cp037.py                   |   280 +
  lib-python/2.2/encodings/cp1006.py                  |   138 +
  lib-python/2.2/encodings/cp1026.py                  |   280 +
  lib-python/2.2/encodings/cp1140.py                  |    45 +
  lib-python/2.2/encodings/cp1250.py                  |   123 +
  lib-python/2.2/encodings/cp1251.py                  |   157 +
  lib-python/2.2/encodings/cp1252.py                  |    76 +
  lib-python/2.2/encodings/cp1253.py                  |   151 +
  lib-python/2.2/encodings/cp1254.py                  |    82 +
  lib-python/2.2/encodings/cp1255.py                  |   143 +
  lib-python/2.2/encodings/cp1256.py                  |   129 +
  lib-python/2.2/encodings/cp1257.py                  |   131 +
  lib-python/2.2/encodings/cp1258.py                  |    90 +
  lib-python/2.2/encodings/cp424.py                   |   280 +
  lib-python/2.2/encodings/cp437.py                   |   172 +
  lib-python/2.2/encodings/cp500.py                   |   280 +
  lib-python/2.2/encodings/cp737.py                   |   172 +
  lib-python/2.2/encodings/cp775.py                   |   172 +
  lib-python/2.2/encodings/cp850.py                   |   172 +
  lib-python/2.2/encodings/cp852.py                   |   172 +
  lib-python/2.2/encodings/cp855.py                   |   172 +
  lib-python/2.2/encodings/cp856.py                   |   172 +
  lib-python/2.2/encodings/cp857.py                   |   171 +
  lib-python/2.2/encodings/cp860.py                   |   172 +
  lib-python/2.2/encodings/cp861.py                   |   172 +
  lib-python/2.2/encodings/cp862.py                   |   172 +
  lib-python/2.2/encodings/cp863.py                   |   172 +
  lib-python/2.2/encodings/cp864.py                   |   170 +
  lib-python/2.2/encodings/cp865.py                   |   172 +
  lib-python/2.2/encodings/cp866.py                   |   172 +
  lib-python/2.2/encodings/cp869.py                   |   172 +
  lib-python/2.2/encodings/cp874.py                   |   171 +
  lib-python/2.2/encodings/cp875.py                   |   281 +
  lib-python/2.2/encodings/hex_codec.py               |    62 +
  lib-python/2.2/encodings/iso8859_1.py               |    44 +
  lib-python/2.2/encodings/iso8859_10.py              |    90 +
  lib-python/2.2/encodings/iso8859_13.py              |   100 +
  lib-python/2.2/encodings/iso8859_14.py              |    75 +
  lib-python/2.2/encodings/iso8859_15.py              |    52 +
  lib-python/2.2/encodings/iso8859_2.py               |   101 +
  lib-python/2.2/encodings/iso8859_3.py               |    79 +
  lib-python/2.2/encodings/iso8859_4.py               |    94 +
  lib-python/2.2/encodings/iso8859_5.py               |   138 +
  lib-python/2.2/encodings/iso8859_6.py               |   137 +
  lib-python/2.2/encodings/iso8859_7.py               |   124 +
  lib-python/2.2/encodings/iso8859_8.py               |   112 +
  lib-python/2.2/encodings/iso8859_9.py               |    50 +
  lib-python/2.2/encodings/koi8_r.py                  |   172 +
  lib-python/2.2/encodings/koi8_u.py                  |    54 +
  lib-python/2.2/encodings/latin_1.py                 |    35 +
  lib-python/2.2/encodings/mac_cyrillic.py            |   167 +
  lib-python/2.2/encodings/mac_greek.py               |   170 +
  lib-python/2.2/encodings/mac_iceland.py             |   166 +
  lib-python/2.2/encodings/mac_latin2.py              |   170 +
  lib-python/2.2/encodings/mac_roman.py               |   167 +
  lib-python/2.2/encodings/mac_turkish.py             |   167 +
  lib-python/2.2/encodings/mbcs.py                    |    36 +
  lib-python/2.2/encodings/quopri_codec.py            |    58 +
  lib-python/2.2/encodings/raw_unicode_escape.py      |    30 +
  lib-python/2.2/encodings/rot_13.py                  |   105 +
  lib-python/2.2/encodings/undefined.py               |    34 +
  lib-python/2.2/encodings/unicode_escape.py          |    30 +
  lib-python/2.2/encodings/unicode_internal.py        |    30 +
  lib-python/2.2/encodings/utf_16.py                  |    61 +
  lib-python/2.2/encodings/utf_16_be.py               |    31 +
  lib-python/2.2/encodings/utf_16_le.py               |    31 +
  lib-python/2.2/encodings/utf_7.py                   |    27 +
  lib-python/2.2/encodings/utf_8.py                   |    31 +
  lib-python/2.2/encodings/uu_codec.py                |   112 +
  lib-python/2.2/encodings/zlib_codec.py              |    63 +
  lib-python/2.2/filecmp.py                           |   331 +
  lib-python/2.2/fileinput.py                         |   349 +
  lib-python/2.2/fnmatch.py                           |   107 +
  lib-python/2.2/formatter.py                         |   454 +
  lib-python/2.2/fpformat.py                          |   142 +
  lib-python/2.2/ftplib.py                            |   804 ++
  lib-python/2.2/getopt.py                            |   144 +
  lib-python/2.2/getpass.py                           |   123 +
  lib-python/2.2/gettext.py                           |   311 +
  lib-python/2.2/glob.py                              |    56 +
  lib-python/2.2/gopherlib.py                         |   205 +
  lib-python/2.2/gzip.py                              |   390 +
  lib-python/2.2/hmac.py                              |    99 +
  lib-python/2.2/hotshot/__init__.py                  |    41 +
  lib-python/2.2/hotshot/log.py                       |   194 +
  lib-python/2.2/hotshot/stats.py                     |    93 +
  lib-python/2.2/htmlentitydefs.py                    |   257 +
  lib-python/2.2/htmllib.py                           |   475 +
  lib-python/2.2/httplib.py                           |  1238 +++
  lib-python/2.2/ihooks.py                            |   511 +
  lib-python/2.2/imaplib.py                           |  1208 +++
  lib-python/2.2/imghdr.py                            |   154 +
  lib-python/2.2/imputil.py                           |   720 ++
  lib-python/2.2/inspect.py                           |   785 ++
  lib-python/2.2/keyword.py                           |    97 +
  lib-python/2.2/knee.py                              |   126 +
  lib-python/2.2/lib-old/Para.py                      |   343 +
  lib-python/2.2/lib-old/addpack.py                   |    67 +
  lib-python/2.2/lib-old/cmp.py                       |    63 +
  lib-python/2.2/lib-old/cmpcache.py                  |    64 +
  lib-python/2.2/lib-old/codehack.py                  |    81 +
  lib-python/2.2/lib-old/dircmp.py                    |   202 +
  lib-python/2.2/lib-old/dump.py                      |    63 +
  lib-python/2.2/lib-old/find.py                      |    26 +
  lib-python/2.2/lib-old/fmt.py                       |   623 +
  lib-python/2.2/lib-old/grep.py                      |    79 +
  lib-python/2.2/lib-old/lockfile.py                  |    15 +
  lib-python/2.2/lib-old/newdir.py                    |    73 +
  lib-python/2.2/lib-old/ni.py                        |   433 +
  lib-python/2.2/lib-old/packmail.py                  |   111 +
  lib-python/2.2/lib-old/poly.py                      |    52 +
  lib-python/2.2/lib-old/rand.py                      |    13 +
  lib-python/2.2/lib-old/tb.py                        |   177 +
  lib-python/2.2/lib-old/util.py                      |    25 +
  lib-python/2.2/lib-old/whatsound.py                 |     1 +
  lib-python/2.2/lib-old/zmod.py                      |    94 +
  lib-python/2.2/lib-tk/Canvas.py                     |   188 +
  lib-python/2.2/lib-tk/Dialog.py                     |    49 +
  lib-python/2.2/lib-tk/FileDialog.py                 |   273 +
  lib-python/2.2/lib-tk/FixTk.py                      |    37 +
  lib-python/2.2/lib-tk/ScrolledText.py               |    43 +
  lib-python/2.2/lib-tk/SimpleDialog.py               |   111 +
  lib-python/2.2/lib-tk/Tix.py                        |  1626 ++++
  lib-python/2.2/lib-tk/Tkconstants.py                |   103 +
  lib-python/2.2/lib-tk/Tkdnd.py                      |   321 +
  lib-python/2.2/lib-tk/Tkinter.py                    |  3141 +++++++++
  lib-python/2.2/lib-tk/tkColorChooser.py             |    74 +
  lib-python/2.2/lib-tk/tkCommonDialog.py             |    65 +
  lib-python/2.2/lib-tk/tkFileDialog.py               |   129 +
  lib-python/2.2/lib-tk/tkFont.py                     |   191 +
  lib-python/2.2/lib-tk/tkMessageBox.py               |   120 +
  lib-python/2.2/lib-tk/tkSimpleDialog.py             |   313 +
  lib-python/2.2/lib-tk/turtle.py                     |   385 +
  lib-python/2.2/linecache.py                         |   101 +
  lib-python/2.2/locale.py                            |   743 ++
  lib-python/2.2/macpath.py                           |   242 +
  lib-python/2.2/macurl2path.py                       |    95 +
  lib-python/2.2/mailbox.py                           |   313 +
  lib-python/2.2/mailcap.py                           |   255 +
  lib-python/2.2/markupbase.py                        |   317 +
  lib-python/2.2/mhlib.py                             |  1003 +++
  lib-python/2.2/mimetools.py                         |   226 +
  lib-python/2.2/mimetypes.py                         |   435 +
  lib-python/2.2/mimify.py                            |   464 +
  lib-python/2.2/multifile.py                         |   160 +
  lib-python/2.2/mutex.py                             |    51 +
  lib-python/2.2/netrc.py                             |   108 +
  lib-python/2.2/nntplib.py                           |   575 +
  lib-python/2.2/ntpath.py                            |   482 +
  lib-python/2.2/nturl2path.py                        |    66 +
  lib-python/2.2/os.py                                |   613 +
  lib-python/2.2/pdb.doc                              |   192 +
  lib-python/2.2/pdb.py                               |   979 ++
  lib-python/2.2/pickle.py                            |   986 +++
  lib-python/2.2/pipes.py                             |   297 +
  lib-python/2.2/plat-aix3/IN.py                      |   126 +
  lib-python/2.2/plat-aix3/regen                      |     8 +
  lib-python/2.2/plat-aix4/IN.py                      |   165 +
  lib-python/2.2/plat-aix4/regen                      |     8 +
  lib-python/2.2/plat-beos5/IN.py                     |   327 +
  lib-python/2.2/plat-beos5/regen                     |     7 +
  lib-python/2.2/plat-darwin/IN.py                    |   357 +
  lib-python/2.2/plat-darwin/regen                    |     3 +
  lib-python/2.2/plat-freebsd2/IN.py                  |   187 +
  lib-python/2.2/plat-freebsd2/regen                  |     3 +
  lib-python/2.2/plat-freebsd3/IN.py                  |   189 +
  lib-python/2.2/plat-freebsd3/regen                  |     4 +
  lib-python/2.2/plat-freebsd4/IN.py                  |   355 +
  lib-python/2.2/plat-freebsd4/regen                  |     3 +
  lib-python/2.2/plat-freebsd5/IN.py                  |   355 +
  lib-python/2.2/plat-freebsd5/regen                  |     3 +
  lib-python/2.2/plat-generic/regen                   |     3 +
  lib-python/2.2/plat-irix5/AL.py                     |    61 +
  lib-python/2.2/plat-irix5/CD.py                     |    34 +
  lib-python/2.2/plat-irix5/CL.py                     |    24 +
  lib-python/2.2/plat-irix5/CL_old.py                 |   236 +
  lib-python/2.2/plat-irix5/DEVICE.py                 |   400 +
  lib-python/2.2/plat-irix5/ERRNO.py                  |   147 +
  lib-python/2.2/plat-irix5/FILE.py                   |   239 +
  lib-python/2.2/plat-irix5/FL.py                     |   289 +
  lib-python/2.2/plat-irix5/GET.py                    |    59 +
  lib-python/2.2/plat-irix5/GL.py                     |   393 +
  lib-python/2.2/plat-irix5/GLWS.py                   |    12 +
  lib-python/2.2/plat-irix5/IN.py                     |   141 +
  lib-python/2.2/plat-irix5/IOCTL.py                  |   233 +
  lib-python/2.2/plat-irix5/SV.py                     |   120 +
  lib-python/2.2/plat-irix5/WAIT.py                   |    14 +
  lib-python/2.2/plat-irix5/cddb.py                   |   206 +
  lib-python/2.2/plat-irix5/cdplayer.py               |    89 +
  lib-python/2.2/plat-irix5/flp.doc                   |   117 +
  lib-python/2.2/plat-irix5/flp.py                    |   451 +
  lib-python/2.2/plat-irix5/jpeg.py                   |   111 +
  lib-python/2.2/plat-irix5/panel.py                  |   281 +
  lib-python/2.2/plat-irix5/panelparser.py            |   128 +
  lib-python/2.2/plat-irix5/readcd.doc                |   104 +
  lib-python/2.2/plat-irix5/readcd.py                 |   244 +
  lib-python/2.2/plat-irix5/regen                     |    10 +
  lib-python/2.2/plat-irix5/torgb.py                  |    98 +
  lib-python/2.2/plat-irix6/AL.py                     |    61 +
  lib-python/2.2/plat-irix6/CD.py                     |    34 +
  lib-python/2.2/plat-irix6/CL.py                     |    24 +
  lib-python/2.2/plat-irix6/DEVICE.py                 |   400 +
  lib-python/2.2/plat-irix6/ERRNO.py                  |   180 +
  lib-python/2.2/plat-irix6/FILE.py                   |   674 ++
  lib-python/2.2/plat-irix6/FL.py                     |   289 +
  lib-python/2.2/plat-irix6/GET.py                    |    59 +
  lib-python/2.2/plat-irix6/GL.py                     |   393 +
  lib-python/2.2/plat-irix6/GLWS.py                   |    12 +
  lib-python/2.2/plat-irix6/IN.py                     |   385 +
  lib-python/2.2/plat-irix6/IOCTL.py                  |   233 +
  lib-python/2.2/plat-irix6/SV.py                     |   120 +
  lib-python/2.2/plat-irix6/WAIT.py                   |   335 +
  lib-python/2.2/plat-irix6/cddb.py                   |   206 +
  lib-python/2.2/plat-irix6/cdplayer.py               |    89 +
  lib-python/2.2/plat-irix6/flp.doc                   |   117 +
  lib-python/2.2/plat-irix6/flp.py                    |   450 +
  lib-python/2.2/plat-irix6/jpeg.py                   |   111 +
  lib-python/2.2/plat-irix6/panel.py                  |   281 +
  lib-python/2.2/plat-irix6/panelparser.py            |   128 +
  lib-python/2.2/plat-irix6/readcd.doc                |   104 +
  lib-python/2.2/plat-irix6/readcd.py                 |   244 +
  lib-python/2.2/plat-irix6/regen                     |    11 +
  lib-python/2.2/plat-irix6/torgb.py                  |    98 +
  lib-python/2.2/plat-linux1/IN.py                    |   239 +
  lib-python/2.2/plat-linux1/regen                    |     8 +
  lib-python/2.2/plat-linux2/CDROM.py                 |   207 +
  lib-python/2.2/plat-linux2/DLFCN.py                 |    83 +
  lib-python/2.2/plat-linux2/IN.py                    |   603 +
  lib-python/2.2/plat-linux2/TYPES.py                 |   171 +
  lib-python/2.2/plat-linux2/regen                    |     8 +
  lib-python/2.2/plat-netbsd1/IN.py                   |    57 +
  lib-python/2.2/plat-netbsd1/regen                   |     3 +
  lib-python/2.2/plat-next3/regen                     |     6 +
  lib-python/2.2/plat-riscos/riscosenviron.py         |    43 +
  lib-python/2.2/plat-riscos/riscospath.py            |   375 +
  lib-python/2.2/plat-riscos/rourl2path.py            |    69 +
  lib-python/2.2/plat-sunos4/IN.py                    |    59 +
  lib-python/2.2/plat-sunos4/SUNAUDIODEV.py           |    38 +
  lib-python/2.2/plat-sunos4/WAIT.py                  |    13 +
  lib-python/2.2/plat-sunos4/regen                    |     9 +
  lib-python/2.2/plat-sunos5/CDIO.py                  |    73 +
  lib-python/2.2/plat-sunos5/DLFCN.py                 |    27 +
  lib-python/2.2/plat-sunos5/IN.py                    |  1421 ++++
  lib-python/2.2/plat-sunos5/STROPTS.py               |  1813 +++++
  lib-python/2.2/plat-sunos5/SUNAUDIODEV.py           |    40 +
  lib-python/2.2/plat-sunos5/TYPES.py                 |   314 +
  lib-python/2.2/plat-sunos5/regen                    |     9 +
  lib-python/2.2/plat-unixware7/IN.py                 |   836 ++
  lib-python/2.2/plat-unixware7/STROPTS.py            |   328 +
  lib-python/2.2/plat-unixware7/regen                 |     9 +
  lib-python/2.2/popen2.py                            |   199 +
  lib-python/2.2/poplib.py                            |   335 +
  lib-python/2.2/posixfile.py                         |   240 +
  lib-python/2.2/posixpath.py                         |   414 +
  lib-python/2.2/pprint.py                            |   310 +
  lib-python/2.2/pre.py                               |   656 ++
  lib-python/2.2/profile.doc                          |   702 ++
  lib-python/2.2/profile.py                           |   556 +
  lib-python/2.2/pstats.py                            |   641 +
  lib-python/2.2/pty.py                               |   167 +
  lib-python/2.2/py_compile.py                        |    82 +
  lib-python/2.2/pyclbr.py                            |   337 +
  lib-python/2.2/pydoc.py                             |  2112 ++++++
  lib-python/2.2/quopri.py                            |   237 +
  lib-python/2.2/random.py                            |   779 ++
  lib-python/2.2/re.py                                |    33 +
  lib-python/2.2/reconvert.py                         |   192 +
  lib-python/2.2/regex_syntax.py                      |    53 +
  lib-python/2.2/regsub.py                            |   198 +
  lib-python/2.2/repr.py                              |    95 +
  lib-python/2.2/rexec.py                             |   592 +
  lib-python/2.2/rfc822.py                            |  1010 +++
  lib-python/2.2/rlcompleter.py                       |   122 +
  lib-python/2.2/robotparser.py                       |   262 +
  lib-python/2.2/sched.py                             |   106 +
  lib-python/2.2/sgmllib.py                           |   516 +
  lib-python/2.2/shelve.py                            |   158 +
  lib-python/2.2/shlex.py                             |   209 +
  lib-python/2.2/shutil.py                            |   138 +
  lib-python/2.2/site-packages/README                 |     2 +
  lib-python/2.2/site.py                              |   330 +
  lib-python/2.2/smtpd.py                             |   543 +
  lib-python/2.2/smtplib.py                           |   729 ++
  lib-python/2.2/sndhdr.py                            |   228 +
  lib-python/2.2/socket.py                            |   256 +
  lib-python/2.2/sre.py                               |   311 +
  lib-python/2.2/sre_compile.py                       |   455 +
  lib-python/2.2/sre_constants.py                     |   259 +
  lib-python/2.2/sre_parse.py                         |   738 ++
  lib-python/2.2/stat.py                              |    86 +
  lib-python/2.2/statcache.py                         |    77 +
  lib-python/2.2/statvfs.py                           |    15 +
  lib-python/2.2/string.py                            |   387 +
  lib-python/2.2/stringold.py                         |   430 +
  lib-python/2.2/sunau.py                             |   474 +
  lib-python/2.2/sunaudio.py                          |    44 +
  lib-python/2.2/symbol.py                            |    95 +
  lib-python/2.2/symtable.py                          |   255 +
  lib-python/2.2/tabnanny.py                          |   327 +
  lib-python/2.2/telnetlib.py                         |   593 +
  lib-python/2.2/tempfile.py                          |   244 +
  lib-python/2.2/test/README                          |   372 +
  lib-python/2.2/test/__init__.py                     |     1 +
  lib-python/2.2/test/audiotest.au                    |   Bin 
  lib-python/2.2/test/autotest.py                     |     6 +
  lib-python/2.2/test/badsyntax_future3.py            |    10 +
  lib-python/2.2/test/badsyntax_future4.py            |    10 +
  lib-python/2.2/test/badsyntax_future5.py            |    12 +
  lib-python/2.2/test/badsyntax_future6.py            |    10 +
  lib-python/2.2/test/badsyntax_future7.py            |    11 +
  lib-python/2.2/test/badsyntax_nocaret.py            |     2 +
  lib-python/2.2/test/data/PyBanner048.gif            |   Bin 
  lib-python/2.2/test/data/msg_01.txt                 |    19 +
  lib-python/2.2/test/data/msg_02.txt                 |   135 +
  lib-python/2.2/test/data/msg_03.txt                 |    16 +
  lib-python/2.2/test/data/msg_04.txt                 |    37 +
  lib-python/2.2/test/data/msg_05.txt                 |    28 +
  lib-python/2.2/test/data/msg_06.txt                 |    33 +
  lib-python/2.2/test/data/msg_07.txt                 |    83 +
  lib-python/2.2/test/data/msg_08.txt                 |    24 +
  lib-python/2.2/test/data/msg_09.txt                 |    24 +
  lib-python/2.2/test/data/msg_10.txt                 |    32 +
  lib-python/2.2/test/data/msg_11.txt                 |     7 +
  lib-python/2.2/test/data/msg_12.txt                 |    36 +
  lib-python/2.2/test/data/msg_13.txt                 |    94 +
  lib-python/2.2/test/data/msg_14.txt                 |    23 +
  lib-python/2.2/test/data/msg_15.txt                 |    52 +
  lib-python/2.2/test/data/msg_16.txt                 |   123 +
  lib-python/2.2/test/data/msg_17.txt                 |    12 +
  lib-python/2.2/test/data/msg_18.txt                 |     6 +
  lib-python/2.2/test/data/msg_19.txt                 |    43 +
  lib-python/2.2/test/data/msg_20.txt                 |    22 +
  lib-python/2.2/test/data/msg_21.txt                 |    22 +
  lib-python/2.2/test/data/msg_22.txt                 |    46 +
  lib-python/2.2/test/data/msg_23.txt                 |     8 +
  lib-python/2.2/test/double_const.py                 |    30 +
  lib-python/2.2/test/greyrgb.uue                     |  1547 ++++
  lib-python/2.2/test/output/test_MimeWriter          |   110 +
  lib-python/2.2/test/output/test_asynchat            |     3 +
  lib-python/2.2/test/output/test_augassign           |    51 +
  lib-python/2.2/test/output/test_binascii            |    29 +
  lib-python/2.2/test/output/test_builtin             |    53 +
  lib-python/2.2/test/output/test_cfgparser           |     9 +
  lib-python/2.2/test/output/test_cgi                 |    29 +
  lib-python/2.2/test/output/test_charmapcodec        |    16 +
  lib-python/2.2/test/output/test_class               |   101 +
  lib-python/2.2/test/output/test_coercion            |  1054 +++
  lib-python/2.2/test/output/test_compare             |   101 +
  lib-python/2.2/test/output/test_compile             |     7 +
  lib-python/2.2/test/output/test_cookie              |    32 +
  lib-python/2.2/test/output/test_exceptions          |    52 +
  lib-python/2.2/test/output/test_extcall             |   112 +
  lib-python/2.2/test/output/test_frozen              |     4 +
  lib-python/2.2/test/output/test_future              |     9 +
  lib-python/2.2/test/output/test_gettext             |    46 +
  lib-python/2.2/test/output/test_global              |     5 +
  lib-python/2.2/test/output/test_grammar             |    66 +
  lib-python/2.2/test/output/test_httplib             |    10 +
  lib-python/2.2/test/output/test_linuxaudiodev       |     7 +
  lib-python/2.2/test/output/test_longexp             |     2 +
  lib-python/2.2/test/output/test_math                |    26 +
  lib-python/2.2/test/output/test_md5                 |     9 +
  lib-python/2.2/test/output/test_mimetools           |     5 +
  lib-python/2.2/test/output/test_mmap                |    34 +
  lib-python/2.2/test/output/test_new                 |     7 +
  lib-python/2.2/test/output/test_nis                 |     2 +
  lib-python/2.2/test/output/test_opcodes             |     6 +
  lib-python/2.2/test/output/test_openpty             |     2 +
  lib-python/2.2/test/output/test_operations          |     6 +
  lib-python/2.2/test/output/test_pkg                 |    45 +
  lib-python/2.2/test/output/test_poll                |    17 +
  lib-python/2.2/test/output/test_popen2              |     9 +
  lib-python/2.2/test/output/test_posixpath           |     2 +
  lib-python/2.2/test/output/test_pow                 |    25 +
  lib-python/2.2/test/output/test_profile             |    17 +
  lib-python/2.2/test/output/test_pty                 |     3 +
  lib-python/2.2/test/output/test_pwd                 |     7 +
  lib-python/2.2/test/output/test_pyexpat             |   110 +
  lib-python/2.2/test/output/test_re                  |     2 +
  lib-python/2.2/test/output/test_regex               |    29 +
  lib-python/2.2/test/output/test_rgbimg              |     2 +
  lib-python/2.2/test/output/test_richcmp             |   187 +
  lib-python/2.2/test/output/test_rotor               |     5 +
  lib-python/2.2/test/output/test_sax                 |    42 +
  lib-python/2.2/test/output/test_scope               |    24 +
  lib-python/2.2/test/output/test_signal              |     2 +
  lib-python/2.2/test/output/test_socket              |     2 +
  lib-python/2.2/test/output/test_string              |     3 +
  lib-python/2.2/test/output/test_thread              |     6 +
  lib-python/2.2/test/output/test_threadedtempfile    |     5 +
  lib-python/2.2/test/output/test_tokenize            |   648 +
  lib-python/2.2/test/output/test_types               |    16 +
  lib-python/2.2/test/output/test_ucn                 |     7 +
  lib-python/2.2/test/output/test_unicode             |    21 +
  lib-python/2.2/test/output/test_unicode_file        |     2 +
  lib-python/2.2/test/output/test_unicodedata         |     5 +
  lib-python/2.2/test/output/test_urlparse            |    47 +
  lib-python/2.2/test/output/test_winreg              |     3 +
  lib-python/2.2/test/output/test_winsound            |     2 +
  lib-python/2.2/test/output/test_xreadline           |     4 +
  lib-python/2.2/test/output/test_zlib                |    14 +
  lib-python/2.2/test/pickletester.py                 |   285 +
  lib-python/2.2/test/pydocfodder.py                  |   210 +
  lib-python/2.2/test/pystone.py                      |   252 +
  lib-python/2.2/test/re_tests.py                     |   661 ++
  lib-python/2.2/test/regex_tests.py                  |   287 +
  lib-python/2.2/test/regrtest.py                     |   832 ++
  lib-python/2.2/test/reperf.py                       |    23 +
  lib-python/2.2/test/sortperf.py                     |   141 +
  lib-python/2.2/test/string_tests.py                 |   265 +
  lib-python/2.2/test/test.xml                        |   115 +
  lib-python/2.2/test/test.xml.out                    |   115 +
  lib-python/2.2/test/test_MimeWriter.py              |   170 +
  lib-python/2.2/test/test_StringIO.py                |   113 +
  lib-python/2.2/test/test___all__.py                 |   158 +
  lib-python/2.2/test/test___future__.py              |    59 +
  lib-python/2.2/test/test_al.py                      |    23 +
  lib-python/2.2/test/test_array.py                   |   192 +
  lib-python/2.2/test/test_asynchat.py                |    58 +
  lib-python/2.2/test/test_atexit.py                  |    66 +
  lib-python/2.2/test/test_audioop.py                 |   264 +
  lib-python/2.2/test/test_augassign.py               |   261 +
  lib-python/2.2/test/test_b1.py                      |   632 +
  lib-python/2.2/test/test_b2.py                      |   365 +
  lib-python/2.2/test/test_base64.py                  |    53 +
  lib-python/2.2/test/test_bastion.py                 |     3 +
  lib-python/2.2/test/test_binascii.py                |   119 +
  lib-python/2.2/test/test_binhex.py                  |    50 +
  lib-python/2.2/test/test_binop.py                   |   328 +
  lib-python/2.2/test/test_bisect.py                  |   127 +
  lib-python/2.2/test/test_bsddb.py                   |    76 +
  lib-python/2.2/test/test_bufio.py                   |    60 +
  lib-python/2.2/test/test_builtin.py                 |    13 +
  lib-python/2.2/test/test_calendar.py                |    61 +
  lib-python/2.2/test/test_call.py                    |   131 +
  lib-python/2.2/test/test_capi.py                    |    16 +
  lib-python/2.2/test/test_cd.py                      |    26 +
  lib-python/2.2/test/test_cfgparser.py               |   284 +
  lib-python/2.2/test/test_cgi.py                     |   188 +
  lib-python/2.2/test/test_charmapcodec.py            |    43 +
  lib-python/2.2/test/test_cl.py                      |    78 +
  lib-python/2.2/test/test_class.py                   |   317 +
  lib-python/2.2/test/test_cmath.py                   |    35 +
  lib-python/2.2/test/test_codecs.py                  |    31 +
  lib-python/2.2/test/test_codeop.py                  |   190 +
  lib-python/2.2/test/test_coercion.py                |   118 +
  lib-python/2.2/test/test_commands.py                |    52 +
  lib-python/2.2/test/test_compare.py                 |    56 +
  lib-python/2.2/test/test_compile.py                 |   129 +
  lib-python/2.2/test/test_complex.py                 |    68 +
  lib-python/2.2/test/test_contains.py                |   171 +
  lib-python/2.2/test/test_cookie.py                  |    47 +
  lib-python/2.2/test/test_copy_reg.py                |    30 +
  lib-python/2.2/test/test_cpickle.py                 |   100 +
  lib-python/2.2/test/test_crypt.py                   |    11 +
  lib-python/2.2/test/test_curses.py                  |   210 +
  lib-python/2.2/test/test_dbm.py                     |    43 +
  lib-python/2.2/test/test_descr.py                   |  3276 ++++++++++
  lib-python/2.2/test/test_descrtut.py                |   501 +
  lib-python/2.2/test/test_difflib.py                 |     2 +
  lib-python/2.2/test/test_dircache.py                |    74 +
  lib-python/2.2/test/test_dl.py                      |    33 +
  lib-python/2.2/test/test_doctest.py                 |     2 +
  lib-python/2.2/test/test_doctest2.py                |   108 +
  lib-python/2.2/test/test_dospath.py                 |    61 +
  lib-python/2.2/test/test_dumbdbm.py                 |    79 +
  lib-python/2.2/test/test_email.py                   |    13 +
  lib-python/2.2/test/test_email_codecs.py            |    11 +
  lib-python/2.2/test/test_errno.py                   |    49 +
  lib-python/2.2/test/test_exceptions.py              |   206 +
  lib-python/2.2/test/test_extcall.py                 |   268 +
  lib-python/2.2/test/test_fcntl.py                   |    53 +
  lib-python/2.2/test/test_file.py                    |    63 +
  lib-python/2.2/test/test_fileinput.py               |   159 +
  lib-python/2.2/test/test_fnmatch.py                 |    46 +
  lib-python/2.2/test/test_fork1.py                   |    75 +
  lib-python/2.2/test/test_format.py                  |   218 +
  lib-python/2.2/test/test_fpformat.py                |    75 +
  lib-python/2.2/test/test_frozen.py                  |    26 +
  lib-python/2.2/test/test_funcattrs.py               |   379 +
  lib-python/2.2/test/test_future.py                  |    47 +
  lib-python/2.2/test/test_future1.py                 |    11 +
  lib-python/2.2/test/test_future2.py                 |    10 +
  lib-python/2.2/test/test_future3.py                 |    11 +
  lib-python/2.2/test/test_gc.py                      |   346 +
  lib-python/2.2/test/test_gdbm.py                    |    46 +
  lib-python/2.2/test/test_generators.py              |  1386 ++++
  lib-python/2.2/test/test_getargs.py                 |    21 +
  lib-python/2.2/test/test_getopt.py                  |   110 +
  lib-python/2.2/test/test_gettext.py                 |   200 +
  lib-python/2.2/test/test_gl.py                      |   150 +
  lib-python/2.2/test/test_glob.py                    |   115 +
  lib-python/2.2/test/test_global.py                  |    51 +
  lib-python/2.2/test/test_grammar.py                 |   732 ++
  lib-python/2.2/test/test_grp.py                     |    27 +
  lib-python/2.2/test/test_gzip.py                    |    82 +
  lib-python/2.2/test/test_hash.py                    |    36 +
  lib-python/2.2/test/test_hmac.py                    |   108 +
  lib-python/2.2/test/test_hotshot.py                 |   117 +
  lib-python/2.2/test/test_htmllib.py                 |    42 +
  lib-python/2.2/test/test_htmlparser.py              |   294 +
  lib-python/2.2/test/test_httplib.py                 |    58 +
  lib-python/2.2/test/test_imageop.py                 |   171 +
  lib-python/2.2/test/test_imgfile.py                 |   116 +
  lib-python/2.2/test/test_import.py                  |    71 +
  lib-python/2.2/test/test_inspect.py                 |   363 +
  lib-python/2.2/test/test_iter.py                    |   779 ++
  lib-python/2.2/test/test_largefile.py               |   162 +
  lib-python/2.2/test/test_linuxaudiodev.py           |    89 +
  lib-python/2.2/test/test_locale.py                  |    44 +
  lib-python/2.2/test/test_long.py                    |   410 +
  lib-python/2.2/test/test_long_future.py             |    55 +
  lib-python/2.2/test/test_longexp.py                 |    12 +
  lib-python/2.2/test/test_mailbox.py                 |   104 +
  lib-python/2.2/test/test_marshal.py                 |    44 +
  lib-python/2.2/test/test_math.py                    |   195 +
  lib-python/2.2/test/test_md5.py                     |    30 +
  lib-python/2.2/test/test_mhlib.py                   |   340 +
  lib-python/2.2/test/test_mimetools.py               |    18 +
  lib-python/2.2/test/test_mimetypes.py               |    59 +
  lib-python/2.2/test/test_minidom.py                 |   649 +
  lib-python/2.2/test/test_mmap.py                    |   317 +
  lib-python/2.2/test/test_multifile.py               |    66 +
  lib-python/2.2/test/test_mutants.py                 |   285 +
  lib-python/2.2/test/test_netrc.py                   |    42 +
  lib-python/2.2/test/test_new.py                     |   108 +
  lib-python/2.2/test/test_nis.py                     |    32 +
  lib-python/2.2/test/test_ntpath.py                  |   114 +
  lib-python/2.2/test/test_opcodes.py                 |   101 +
  lib-python/2.2/test/test_openpty.py                 |    21 +
  lib-python/2.2/test/test_operations.py              |    52 +
  lib-python/2.2/test/test_operator.py                |   218 +
  lib-python/2.2/test/test_os.py                      |   187 +
  lib-python/2.2/test/test_parser.py                  |   383 +
  lib-python/2.2/test/test_pep247.py                  |    50 +
  lib-python/2.2/test/test_pickle.py                  |    40 +
  lib-python/2.2/test/test_pkg.py                     |   259 +
  lib-python/2.2/test/test_pkgimport.py               |    84 +
  lib-python/2.2/test/test_poll.py                    |   172 +
  lib-python/2.2/test/test_popen2.py                  |    72 +
  lib-python/2.2/test/test_posixpath.py               |    40 +
  lib-python/2.2/test/test_pow.py                     |   125 +
  lib-python/2.2/test/test_pprint.py                  |   104 +
  lib-python/2.2/test/test_profile.py                 |    86 +
  lib-python/2.2/test/test_profilehooks.py            |   360 +
  lib-python/2.2/test/test_pty.py                     |    98 +
  lib-python/2.2/test/test_pwd.py                     |    71 +
  lib-python/2.2/test/test_pyclbr.py                  |   154 +
  lib-python/2.2/test/test_pyexpat.py                 |   202 +
  lib-python/2.2/test/test_queue.py                   |   158 +
  lib-python/2.2/test/test_quopri.py                  |   157 +
  lib-python/2.2/test/test_random.py                  |    19 +
  lib-python/2.2/test/test_re.py                      |   392 +
  lib-python/2.2/test/test_regex.py                   |   113 +
  lib-python/2.2/test/test_repr.py                    |   275 +
  lib-python/2.2/test/test_rfc822.py                  |   211 +
  lib-python/2.2/test/test_rgbimg.py                  |    63 +
  lib-python/2.2/test/test_richcmp.py                 |   261 +
  lib-python/2.2/test/test_rotor.py                   |    28 +
  lib-python/2.2/test/test_sax.py                     |   707 ++
  lib-python/2.2/test/test_scope.py                   |   524 +
  lib-python/2.2/test/test_select.py                  |    62 +
  lib-python/2.2/test/test_sgmllib.py                 |   314 +
  lib-python/2.2/test/test_sha.py                     |    52 +
  lib-python/2.2/test/test_signal.py                  |    66 +
  lib-python/2.2/test/test_socket.py                  |   170 +
  lib-python/2.2/test/test_socket_ssl.py              |    27 +
  lib-python/2.2/test/test_socketserver.py            |   165 +
  lib-python/2.2/test/test_sre.py                     |   434 +
  lib-python/2.2/test/test_strftime.py                |   146 +
  lib-python/2.2/test/test_string.py                  |    83 +
  lib-python/2.2/test/test_strop.py                   |   133 +
  lib-python/2.2/test/test_struct.py                  |   441 +
  lib-python/2.2/test/test_structseq.py               |    28 +
  lib-python/2.2/test/test_sunaudiodev.py             |    28 +
  lib-python/2.2/test/test_sundry.py                  |   102 +
  lib-python/2.2/test/test_support.py                 |   233 +
  lib-python/2.2/test/test_symtable.py                |     8 +
  lib-python/2.2/test/test_tempfile.py                |    10 +
  lib-python/2.2/test/test_thread.py                  |   117 +
  lib-python/2.2/test/test_threaded_import.py         |    56 +
  lib-python/2.2/test/test_threadedtempfile.py        |    86 +
  lib-python/2.2/test/test_threading.py               |    55 +
  lib-python/2.2/test/test_time.py                    |    50 +
  lib-python/2.2/test/test_timing.py                  |    21 +
  lib-python/2.2/test/test_tokenize.py                |     9 +
  lib-python/2.2/test/test_trace.py                   |   219 +
  lib-python/2.2/test/test_traceback.py               |    49 +
  lib-python/2.2/test/test_types.py                   |   428 +
  lib-python/2.2/test/test_ucn.py                     |   113 +
  lib-python/2.2/test/test_unary.py                   |    58 +
  lib-python/2.2/test/test_unicode.py                 |   782 ++
  lib-python/2.2/test/test_unicode_file.py            |    95 +
  lib-python/2.2/test/test_unicodedata.py             |   125 +
  lib-python/2.2/test/test_unpack.py                  |   144 +
  lib-python/2.2/test/test_urllib.py                  |   109 +
  lib-python/2.2/test/test_urllib2.py                 |    31 +
  lib-python/2.2/test/test_urlparse.py                |    94 +
  lib-python/2.2/test/test_userdict.py                |   120 +
  lib-python/2.2/test/test_userlist.py                |   201 +
  lib-python/2.2/test/test_userstring.py              |    43 +
  lib-python/2.2/test/test_uu.py                      |   158 +
  lib-python/2.2/test/test_wave.py                    |    34 +
  lib-python/2.2/test/test_weakref.py                 |   573 +
  lib-python/2.2/test/test_winreg.py                  |   151 +
  lib-python/2.2/test/test_winsound.py                |     6 +
  lib-python/2.2/test/test_xmllib.py                  |    35 +
  lib-python/2.2/test/test_xmlrpc.py                  |    37 +
  lib-python/2.2/test/test_xreadline.py               |    43 +
  lib-python/2.2/test/test_zipfile.py                 |    78 +
  lib-python/2.2/test/test_zlib.py                    |   226 +
  lib-python/2.2/test/testall.py                      |     4 +
  lib-python/2.2/test/testcodec.py                    |    48 +
  lib-python/2.2/test/testimg.uue                     |  1170 +++
  lib-python/2.2/test/testimgr.uue                    |  1170 +++
  lib-python/2.2/test/testrgb.uue                     |   971 ++
  lib-python/2.2/test/tokenize_tests.py               |   175 +
  lib-python/2.2/this.py                              |    28 +
  lib-python/2.2/threading.py                         |   698 ++
  lib-python/2.2/toaiff.py                            |   106 +
  lib-python/2.2/token.py                             |   140 +
  lib-python/2.2/tokenize.py                          |   287 +
  lib-python/2.2/traceback.py                         |   301 +
  lib-python/2.2/tty.py                               |    36 +
  lib-python/2.2/types.py                             |    86 +
  lib-python/2.2/tzparse.py                           |    98 +
  lib-python/2.2/unittest.py                          |   723 ++
  lib-python/2.2/urllib.py                            |  1465 ++++
  lib-python/2.2/urllib2.py                           |  1144 +++
  lib-python/2.2/urlparse.py                          |   276 +
  lib-python/2.2/user.py                              |    45 +
  lib-python/2.2/uu.py                                |   195 +
  lib-python/2.2/warnings.py                          |   258 +
  lib-python/2.2/wave.py                              |   489 +
  lib-python/2.2/weakref.py                           |   280 +
  lib-python/2.2/webbrowser.py                        |   330 +
  lib-python/2.2/whichdb.py                           |    87 +
  lib-python/2.2/whrandom.py                          |   140 +
  lib-python/2.2/xdrlib.py                            |   285 +
  lib-python/2.2/xml/__init__.py                      |    42 +
  lib-python/2.2/xml/dom/__init__.py                  |   125 +
  lib-python/2.2/xml/dom/domreg.py                    |    76 +
  lib-python/2.2/xml/dom/minidom.py                   |   970 ++
  lib-python/2.2/xml/dom/pulldom.py                   |   341 +
  lib-python/2.2/xml/parsers/__init__.py              |     8 +
  lib-python/2.2/xml/parsers/expat.py                 |    13 +
  lib-python/2.2/xml/sax/__init__.py                  |   108 +
  lib-python/2.2/xml/sax/_exceptions.py               |   126 +
  lib-python/2.2/xml/sax/expatreader.py               |   333 +
  lib-python/2.2/xml/sax/handler.py                   |   321 +
  lib-python/2.2/xml/sax/saxutils.py                  |   260 +
  lib-python/2.2/xml/sax/xmlreader.py                 |   378 +
  lib-python/2.2/xmllib.py                            |   929 ++
  lib-python/2.2/xmlrpclib.py                         |  1019 +++
  lib-python/2.2/zipfile.py                           |   586 +
  836 files changed, 175696 insertions(+), 4 deletions(-)


diff --git a/.hgignore b/.hgignore
new file mode 100644
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,30 @@
+syntax: glob
+*.class
+*.pyc
+*.pyd
+*.pyo
+*.orig
+*.rej
+*.swp
+\#*
+*~
+# IntelliJ files
+*.ipr
+*.iml
+*.iws
+.idea/misc.xml
+.idea/workspace.xml
+
+.AppleDouble
+.DS_Store
+.classpath
+.externalToolBuilders
+.project
+.settings
+__pycache__
+ant.properties
+bin
+build
+cachedir
+dist
+profile.txt
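
The glob patterns above cover the usual build and editor artifacts
(class files, compiled Python, IDE metadata, the build and dist
trees).  A quick way to check the ignores in a hypothetical working
copy:

    $ touch Example.class parser.pyc profile.txt
    $ hg status
    # plain "hg status" omits ignored files (they are listed only
    # with -i/--ignored), so the three files above should produce
    # no output here.
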
diff --git a/.hgsub b/.hgsub
--- a/.hgsub
+++ b/.hgsub
@@ -1,1 +0,0 @@
-CPythonLib = [svn] http://svn.python.org/projects/python/branches/release22-maint/Lib/
diff --git a/.hgsubstate b/.hgsubstate
--- a/.hgsubstate
+++ b/.hgsubstate
@@ -1,1 +0,0 @@
-70085 CPythonLib
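
Removing .hgsub and .hgsubstate retires the Subversion subrepository
that previously supplied the standard library: the single .hgsub line
mapped the CPythonLib directory to the release22-maint Lib/ tree on
svn.python.org, and .hgsubstate pinned it to svn revision 70085.
With the files now tracked in-tree, a plain clone should be
self-contained, with no svn client or svn.python.org access needed to
populate the stdlib; a sketch:

    $ hg clone http://hg.python.org/jython
    $ cd jython && hg update 2.2
    $ ls lib-python/2.2    # the copied CPython Lib, no subrepo pull
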
diff --git a/build.xml b/build.xml
--- a/build.xml
+++ b/build.xml
@@ -216,7 +216,7 @@
         <property name="jython.base.dir" value="${basedir}" />
         <property name="source.dir" value="${basedir}/src" />
         <property name="templates.dir" value="${basedir}/src/templates" />
-        <property name="python.lib" value="${basedir}/CPythonLib" />
+        <property name="python.lib" value="${basedir}/lib-python/2.2" />
         <property name="bugtests.dir" value="${basedir}/bugtests" />
         <property name="templates.lazy" value="true" />
     </target>
@@ -236,7 +236,7 @@
         <property name="jython.base.dir" value="${svn.checkout.dir}/jython" />
         <property name="source.dir" value="${jython.base.dir}/src" />
         <property name="has.repositories.connection" value="true" />
-        <property name="python.lib" value="${jython.base.dir}/CPythonLib" />
+        <property name="python.lib" value="${jython.base.dir}/lib-python/2.2" />
         <property name="python.exe" value="${python.home}/python" />
         <condition property="do.checkout" value="true">
             <istrue value="${has.repositories.connection}" />
diff --git a/lib-python/2.2/BaseHTTPServer.py b/lib-python/2.2/BaseHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/BaseHTTPServer.py
@@ -0,0 +1,484 @@
+"""HTTP server base class.
+
+Note: the class in this module doesn't implement any HTTP request; see
+SimpleHTTPServer for simple implementations of GET, HEAD and POST
+(including CGI scripts).
+
+Contents:
+
+- BaseHTTPRequestHandler: HTTP request handler base class
+- test: test function
+
+XXX To do:
+
+- send server version
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+- are request names really case sensitive?
+
+"""
+
+
+# See also:
+#
+# HTTP Working Group                                        T. Berners-Lee
+# INTERNET-DRAFT                                            R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt>                     H. Frystyk Nielsen
+# Expires September 8, 1995                                  March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# |        host: Either the DNS name or the IP number of the remote client
+# |        rfc931: Any information returned by identd for this person,
+# |                - otherwise.
+# |        authuser: If user sent a userid for authentication, the user name,
+# |                  - otherwise.
+# |        DD: Day
+# |        Mon: Month (calendar name)
+# |        YYYY: Year
+# |        hh: hour (24-hour format, the machine's timezone)
+# |        mm: minutes
+# |        ss: seconds
+# |        request: The first line of the HTTP request as sent by the client.
+# |        ddd: the status code returned by the server, - if not available.
+# |        bbbb: the total number of bytes sent,
+# |              *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+
+__version__ = "0.2"
+
+__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
+
+import sys
+import time
+import socket # For gethostbyaddr()
+import mimetools
+import SocketServer
+
+# Default error message
+DEFAULT_ERROR_MESSAGE = """\
+<head>
+<title>Error response</title>
+</head>
+<body>
+<h1>Error response</h1>
+<p>Error code %(code)d.
+<p>Message: %(message)s.
+<p>Error code explanation: %(code)s = %(explain)s.
+</body>
+"""
+
+
+class HTTPServer(SocketServer.TCPServer):
+
+    allow_reuse_address = 1    # Seems to make sense in testing environment
+
+    def server_bind(self):
+        """Override server_bind to store the server name."""
+        SocketServer.TCPServer.server_bind(self)
+        host, port = self.socket.getsockname()
+        self.server_name = socket.getfqdn(host)
+        self.server_port = port
+
+
+class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0".  <path> is encoded
+    using the URL encoding scheme (using %xx to signify the ASCII
+    character with hex code xx).
+
+    The protocol is vague about whether lines are separated by LF
+    characters or by CRLF pairs -- for compatibility with the widest
+    range of clients, both should be accepted.  Similarly, whitespace
+    in the request line should be treated sensibly (allowing multiple
+    spaces between components and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.0 protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version (always "HTTP/1.0"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method do_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+    port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of mimetools.Message (or a derived
+    class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+    start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request should be stored in self.raw_requestline; the results
+        are in self.command, self.path, self.request_version and
+        self.headers.
+
+        Return value is 1 for success, 0 for failure; on failure, an
+        error is sent back.
+
+        """
+        self.request_version = version = "HTTP/0.9" # Default
+        requestline = self.raw_requestline
+        if requestline[-2:] == '\r\n':
+            requestline = requestline[:-2]
+        elif requestline[-1:] == '\n':
+            requestline = requestline[:-1]
+        self.requestline = requestline
+        words = requestline.split()
+        if len(words) == 3:
+            [command, path, version] = words
+            if version[:5] != 'HTTP/':
+                self.send_error(400, "Bad request version (%s)" % `version`)
+                return 0
+        elif len(words) == 2:
+            [command, path] = words
+            if command != 'GET':
+                self.send_error(400,
+                                "Bad HTTP/0.9 request type (%s)" % `command`)
+                return 0
+        else:
+            self.send_error(400, "Bad request syntax (%s)" % `requestline`)
+            return 0
+        self.command, self.path, self.request_version = command, path, version
+        self.headers = self.MessageClass(self.rfile, 0)
+        return 1
+
+    def handle(self):
+        """Handle a single HTTP request.
+
+        You normally don't need to override this method; see the class
+        __doc__ string for information on how to handle specific HTTP
+        commands such as GET and POST.
+
+        """
+
+        self.raw_requestline = self.rfile.readline()
+        if not self.parse_request(): # An error code has been sent, just exit
+            return
+        mname = 'do_' + self.command
+        if not hasattr(self, mname):
+            self.send_error(501, "Unsupported method (%s)" % `self.command`)
+            return
+        method = getattr(self, mname)
+        method()
+
+    def send_error(self, code, message=None):
+        """Send and log an error reply.
+
+        Arguments are the error code, and a detailed message.
+        The detailed message defaults to the short entry matching the
+        response code.
+
+        This sends an error response (so it must be called before any
+        output has been generated), logs the error, and finally sends
+        a piece of HTML explaining the error to the user.
+
+        """
+
+        try:
+            short, long = self.responses[code]
+        except KeyError:
+            short, long = '???', '???'
+        if not message:
+            message = short
+        explain = long
+        self.log_error("code %d, message %s", code, message)
+        self.send_response(code, message)
+        self.send_header("Content-Type", "text/html")
+        self.end_headers()
+        self.wfile.write(self.error_message_format %
+                         {'code': code,
+                          'message': message,
+                          'explain': explain})
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+
+    def send_response(self, code, message=None):
+        """Send the response header and log the response code.
+
+        Also send two standard headers with the server software
+        version and the current date.
+
+        """
+        self.log_request(code)
+        if message is None:
+            if self.responses.has_key(code):
+                message = self.responses[code][0]
+            else:
+                message = ''
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s %s %s\r\n" %
+                             (self.protocol_version, str(code), message))
+        self.send_header('Server', self.version_string())
+        self.send_header('Date', self.date_time_string())
+
+    def send_header(self, keyword, value):
+        """Send a MIME header."""
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s: %s\r\n" % (keyword, value))
+
+    def end_headers(self):
+        """Send the blank line ending the MIME headers."""
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("\r\n")
+
+    def log_request(self, code='-', size='-'):
+        """Log an accepted request.
+
+        This is called by send_response().
+
+        """
+
+        self.log_message('"%s" %s %s',
+                         self.requestline, str(code), str(size))
+
+    def log_error(self, *args):
+        """Log an error.
+
+        This is called when a request cannot be fulfilled.  By
+        default it passes the message on to log_message().
+
+        Arguments are the same as for log_message().
+
+        XXX This should go to the separate error log.
+
+        """
+
+        apply(self.log_message, args)
+
+    def log_message(self, format, *args):
+        """Log an arbitrary message.
+
+        This is used by all other logging functions.  Override
+        it if you have specific logging wishes.
+
+        The first argument, FORMAT, is a format string for the
+        message to be logged.  If the format string contains
+        any % escapes requiring parameters, they should be
+        specified as subsequent arguments (it's just like
+        printf!).
+
+        The client host and current date/time are prefixed to
+        every message.
+
+        """
+
+        sys.stderr.write("%s - - [%s] %s\n" %
+                         (self.address_string(),
+                          self.log_date_time_string(),
+                          format%args))
+
+    def version_string(self):
+        """Return the server software version string."""
+        return self.server_version + ' ' + self.sys_version
+
+    def date_time_string(self):
+        """Return the current date and time formatted for a message header."""
+        now = time.time()
+        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
+        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+                self.weekdayname[wd],
+                day, self.monthname[month], year,
+                hh, mm, ss)
+        return s
+
+    def log_date_time_string(self):
+        """Return the current time formatted for logging."""
+        now = time.time()
+        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+                day, self.monthname[month], year, hh, mm, ss)
+        return s
+
+    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+    monthname = [None,
+                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+    def address_string(self):
+        """Return the client address formatted for logging.
+
+        This version looks up the full hostname using gethostbyaddr(),
+        and tries to find a name that contains at least one dot.
+
+        """
+
+        host, port = self.client_address
+        return socket.getfqdn(host)
+
+    # Essentially static class variables
+
+    # The version of the HTTP protocol we support.
+    # Don't override unless you know what you're doing (hint: incoming
+    # requests are required to have exactly this version string).
+    protocol_version = "HTTP/1.0"
+
+    # The Message-like class used to parse headers
+    MessageClass = mimetools.Message
+
+    # Table mapping response codes to messages; entries have the
+    # form {code: (shortmessage, longmessage)}.
+    # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
+    responses = {
+        200: ('OK', 'Request fulfilled, document follows'),
+        201: ('Created', 'Document created, URL follows'),
+        202: ('Accepted',
+              'Request accepted, processing continues off-line'),
+        203: ('Partial information', 'Request fulfilled from cache'),
+        204: ('No response', 'Request fulfilled, nothing follows'),
+
+        301: ('Moved', 'Object moved permanently -- see URI list'),
+        302: ('Found', 'Object moved temporarily -- see URI list'),
+        303: ('Method', 'Object moved -- see Method and URL list'),
+        304: ('Not modified',
+              'Document has not changed since given time'),
+
+        400: ('Bad request',
+              'Bad request syntax or unsupported method'),
+        401: ('Unauthorized',
+              'No permission -- see authorization schemes'),
+        402: ('Payment required',
+              'No payment -- see charging schemes'),
+        403: ('Forbidden',
+              'Request forbidden -- authorization will not help'),
+        404: ('Not found', 'Nothing matches the given URI'),
+
+        500: ('Internal error', 'Server got itself in trouble'),
+        501: ('Not implemented',
+              'Server does not support this operation'),
+        502: ('Service temporarily overloaded',
+              'The server cannot process the request due to a high load'),
+        503: ('Gateway timeout',
+              'The gateway server did not receive a timely response'),
+
+        }
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+         ServerClass = HTTPServer):
+    """Test the HTTP request handler class.
+
+    This runs an HTTP server on port 8000 (or the first command line
+    argument).
+
+    """
+
+    if sys.argv[1:]:
+        port = int(sys.argv[1])
+    else:
+        port = 8000
+    server_address = ('', port)
+
+    httpd = ServerClass(server_address, HandlerClass)
+
+    sa = httpd.socket.getsockname()
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    httpd.serve_forever()
+
+
+if __name__ == '__main__':
+    test()
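
To illustrate the do_<command> dispatch described in the class
docstring above, here is a minimal handler sketch (illustrative only,
not part of this changeset; the HelloHandler name and port 8000 are
made up):

    import BaseHTTPServer

    class HelloHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        # handle() turns a "GET ..." request line into a do_GET() call
        def do_GET(self):
            body = "hello from %s\n" % self.path
            self.send_response(200)               # status line + Server/Date
            self.send_header("Content-Type", "text/plain")
            self.send_header("Content-Length", str(len(body)))
            self.end_headers()                    # blank line ends the headers
            self.wfile.write(body)

    if __name__ == '__main__':
        # same default port as the module's own test() function
        BaseHTTPServer.HTTPServer(('', 8000), HelloHandler).serve_forever()
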
diff --git a/lib-python/2.2/Bastion.py b/lib-python/2.2/Bastion.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Bastion.py
@@ -0,0 +1,177 @@
+"""Bastionification utility.
+
+A bastion (for another object -- the 'original') is an object that has
+the same methods as the original but does not give access to its
+instance variables.  Bastions have a number of uses, but the most
+obvious one is to provide code executing in restricted mode with a
+safe interface to an object implemented in unrestricted mode.
+
+The bastionification routine has an optional second argument which is
+a filter function.  Only those methods for which the filter method
+(called with the method name as argument) returns true are accessible.
+The default filter method returns true unless the method name begins
+with an underscore.
+
+There are a number of possible implementations of bastions.  We use a
+'lazy' approach where the bastion's __getattr__() discipline does all
+the work for a particular method the first time it is used.  This is
+usually fastest, especially if the user doesn't call all available
+methods.  The retrieved methods are stored as instance variables of
+the bastion, so the overhead is only occurred on the first use of each
+method.
+
+Detail: the bastion class has a __repr__() discipline which includes
+the repr() of the original object.  This is precomputed when the
+bastion is created.
+
+"""
+
+__all__ = ["BastionClass", "Bastion"]
+
+from types import MethodType
+
+
+class BastionClass:
+
+    """Helper class used by the Bastion() function.
+
+    You could subclass this and pass the subclass as the bastionclass
+    argument to the Bastion() function, as long as the constructor has
+    the same signature (a get() function and a name for the object).
+
+    """
+
+    def __init__(self, get, name):
+        """Constructor.
+
+        Arguments:
+
+        get - a function that gets the attribute value (by name)
+        name - a human-readable name for the original object
+               (suggestion: use repr(object))
+
+        """
+        self._get_ = get
+        self._name_ = name
+
+    def __repr__(self):
+        """Return a representation string.
+
+        This includes the name passed in to the constructor, so that
+        if you print the bastion during debugging, at least you have
+        some idea of what it is.
+
+        """
+        return "<Bastion for %s>" % self._name_
+
+    def __getattr__(self, name):
+        """Get an as-yet undefined attribute value.
+
+        This calls the get() function that was passed to the
+        constructor.  The result is stored as an instance variable so
+        that the next time the same attribute is requested,
+        __getattr__() won't be invoked.
+
+        If the get() function raises an exception, this is simply
+        passed on -- exceptions are not cached.
+
+        """
+        attribute = self._get_(name)
+        self.__dict__[name] = attribute
+        return attribute
+
+
+def Bastion(object, filter = lambda name: name[:1] != '_',
+            name=None, bastionclass=BastionClass):
+    """Create a bastion for an object, using an optional filter.
+
+    See the Bastion module's documentation for background.
+
+    Arguments:
+
+    object - the original object
+    filter - a predicate that decides whether a function name is OK;
+             by default all names are OK that don't start with '_'
+    name - the name of the object; default repr(object)
+    bastionclass - class used to create the bastion; default BastionClass
+
+    """
+
+    raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"
+
+    # Note: we define *two* ad-hoc functions here, get1 and get2.
+    # Both are intended to be called in the same way: get(name).
+    # It is clear that the real work (getting the attribute
+    # from the object and calling the filter) is done in get1.
+    # Why can't we pass get1 to the bastion?  Because the user
+    # would be able to override the filter argument!  With get2,
+    # overriding the default argument is no security loophole:
+    # all it does is call it.
+    # Also notice that we can't place the object and filter as
+    # instance variables on the bastion object itself, since
+    # the user has full access to all instance variables!
+
+    def get1(name, object=object, filter=filter):
+        """Internal function for Bastion().  See source comments."""
+        if filter(name):
+            attribute = getattr(object, name)
+            if type(attribute) == MethodType:
+                return attribute
+        raise AttributeError, name
+
+    def get2(name, get1=get1):
+        """Internal function for Bastion().  See source comments."""
+        return get1(name)
+
+    if name is None:
+        name = `object`
+    return bastionclass(get2, name)
+
+
+def _test():
+    """Test the Bastion() function."""
+    class Original:
+        def __init__(self):
+            self.sum = 0
+        def add(self, n):
+            self._add(n)
+        def _add(self, n):
+            self.sum = self.sum + n
+        def total(self):
+            return self.sum
+    o = Original()
+    b = Bastion(o)
+    testcode = """if 1:
+    b.add(81)
+    b.add(18)
+    print "b.total() =", b.total()
+    try:
+        print "b.sum =", b.sum,
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    try:
+        print "b._add =", b._add,
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    try:
+        print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
+    except:
+        print "inaccessible"
+    else:
+        print "accessible"
+    \n"""
+    exec testcode
+    print '='*20, "Using rexec:", '='*20
+    import rexec
+    r = rexec.RExec()
+    m = r.add_module('__main__')
+    m.b = b
+    r.r_exec(testcode)
+
+
+if __name__ == '__main__':
+    _test()
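
Bastion() is deliberately disabled in this copy (it raises
RuntimeError because the approach is not secure on 2.2/2.3), but the
lazy __getattr__ caching scheme the module docstring describes can be
shown standalone.  A sketch, with made-up names and none of Bastion's
filtering or security properties:

    class LazyProxy:
        """Fetch attributes through a getter on first use, then cache."""
        def __init__(self, get):
            self._get_ = get
        def __getattr__(self, name):
            # only invoked while 'name' is missing from __dict__
            value = self._get_(name)
            self.__dict__[name] = value  # cache: later access skips __getattr__
            return value

    class Counter:
        def __init__(self):
            self.n = 0
        def bump(self):
            self.n = self.n + 1

    c = Counter()
    p = LazyProxy(lambda name: getattr(c, name))
    p.bump(); p.bump()
    print c.n   # -> 2
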
diff --git a/lib-python/2.2/CGIHTTPServer.py b/lib-python/2.2/CGIHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/CGIHTTPServer.py
@@ -0,0 +1,325 @@
+"""CGI-savvy HTTP Server.
+
+This module builds on SimpleHTTPServer by implementing GET and POST
+requests to cgi-bin scripts.
+
+If the os.fork() function is not present (e.g. on Windows),
+os.popen2() is used as a fallback, with slightly altered semantics; if
+that function is not present either (e.g. on Macintosh), only Python
+scripts are supported, and they are executed by the current process.
+
+In all cases, the implementation is intentionally naive -- all
+requests are executed synchronously.
+
+SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
+-- it may execute arbitrary Python code or external programs.
+
+"""
+
+
+__version__ = "0.4"
+
+__all__ = ["CGIHTTPRequestHandler"]
+
+import os
+import sys
+import urllib
+import BaseHTTPServer
+import SimpleHTTPServer
+import select
+
+
+class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+    """Complete HTTP server with GET, HEAD and POST commands.
+
+    GET and HEAD also support running CGI scripts.
+
+    The POST command is *only* implemented for CGI scripts.
+
+    """
+
+    # Determine platform specifics
+    have_fork = hasattr(os, 'fork')
+    have_popen2 = hasattr(os, 'popen2')
+    have_popen3 = hasattr(os, 'popen3')
+
+    # Make rfile unbuffered -- we need to read one line and then pass
+    # the rest to a subprocess, so we can't use buffered input.
+    rbufsize = 0
+
+    def do_POST(self):
+        """Serve a POST request.
+
+        This is only implemented for CGI scripts.
+
+        """
+
+        if self.is_cgi():
+            self.run_cgi()
+        else:
+            self.send_error(501, "Can only POST to CGI scripts")
+
+    def send_head(self):
+        """Version of send_head that support CGI scripts"""
+        if self.is_cgi():
+            return self.run_cgi()
+        else:
+            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+
+    def is_cgi(self):
+        """Test whether self.path corresponds to a CGI script.
+
+        Return 1 if self.path requires running a CGI script, 0 if
+        not; when it does, self.cgi_info is set to the tuple (dir,
+        rest), where rest does not include the leading slash.
+
+        The default implementation tests whether the path
+        begins with one of the strings in the list
+        self.cgi_directories (and the next character is a '/'
+        or the end of the string).
+
+        """
+
+        path = self.path
+
+        for x in self.cgi_directories:
+            i = len(x)
+            if path[:i] == x and (not path[i:] or path[i] == '/'):
+                self.cgi_info = path[:i], path[i+1:]
+                return 1
+        return 0
+
+    cgi_directories = ['/cgi-bin', '/htbin']
+
+    def is_executable(self, path):
+        """Test whether argument path is an executable file."""
+        return executable(path)
+
+    def is_python(self, path):
+        """Test whether argument path is a Python script."""
+        head, tail = os.path.splitext(path)
+        return tail.lower() in (".py", ".pyw")
+
+    def run_cgi(self):
+        """Execute a CGI script."""
+        dir, rest = self.cgi_info
+        i = rest.rfind('?')
+        if i >= 0:
+            rest, query = rest[:i], rest[i+1:]
+        else:
+            query = ''
+        i = rest.find('/')
+        if i >= 0:
+            script, rest = rest[:i], rest[i:]
+        else:
+            script, rest = rest, ''
+        scriptname = dir + '/' + script
+        scriptfile = self.translate_path(scriptname)
+        if not os.path.exists(scriptfile):
+            self.send_error(404, "No such CGI script (%s)" % `scriptname`)
+            return
+        if not os.path.isfile(scriptfile):
+            self.send_error(403, "CGI script is not a plain file (%s)" %
+                            `scriptname`)
+            return
+        ispy = self.is_python(scriptname)
+        if not ispy:
+            if not (self.have_fork or self.have_popen2 or self.have_popen3):
+                self.send_error(403, "CGI script is not a Python script (%s)" %
+                                `scriptname`)
+                return
+            if not self.is_executable(scriptfile):
+                self.send_error(403, "CGI script is not executable (%s)" %
+                                `scriptname`)
+                return
+
+        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+        # XXX Much of the following could be prepared ahead of time!
+        env = {}
+        env['SERVER_SOFTWARE'] = self.version_string()
+        env['SERVER_NAME'] = self.server.server_name
+        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+        env['SERVER_PROTOCOL'] = self.protocol_version
+        env['SERVER_PORT'] = str(self.server.server_port)
+        env['REQUEST_METHOD'] = self.command
+        uqrest = urllib.unquote(rest)
+        env['PATH_INFO'] = uqrest
+        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+        env['SCRIPT_NAME'] = scriptname
+        if query:
+            env['QUERY_STRING'] = query
+        host = self.address_string()
+        if host != self.client_address[0]:
+            env['REMOTE_HOST'] = host
+        env['REMOTE_ADDR'] = self.client_address[0]
+        # XXX AUTH_TYPE
+        # XXX REMOTE_USER
+        # XXX REMOTE_IDENT
+        if self.headers.typeheader is None:
+            env['CONTENT_TYPE'] = self.headers.type
+        else:
+            env['CONTENT_TYPE'] = self.headers.typeheader
+        length = self.headers.getheader('content-length')
+        if length:
+            env['CONTENT_LENGTH'] = length
+        accept = []
+        for line in self.headers.getallmatchingheaders('accept'):
+            if line[:1] in "\t\n\r ":
+                accept.append(line.strip())
+            else:
+                accept = accept + line[7:].split(',')
+        env['HTTP_ACCEPT'] = ','.join(accept)
+        ua = self.headers.getheader('user-agent')
+        if ua:
+            env['HTTP_USER_AGENT'] = ua
+        co = filter(None, self.headers.getheaders('cookie'))
+        if co:
+            env['HTTP_COOKIE'] = ', '.join(co)
+        # XXX Other HTTP_* headers
+        if not self.have_fork:
+            # Since we're setting the env in the parent, provide empty
+            # values to override previously set values
+            for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+                      'HTTP_USER_AGENT', 'HTTP_COOKIE'):
+                env.setdefault(k, "")
+        os.environ.update(env)
+
+        self.send_response(200, "Script output follows")
+
+        decoded_query = query.replace('+', ' ')
+
+        if self.have_fork:
+            # Unix -- fork as we should
+            args = [script]
+            if '=' not in decoded_query:
+                args.append(decoded_query)
+            nobody = nobody_uid()
+            self.wfile.flush() # Always flush before forking
+            pid = os.fork()
+            if pid != 0:
+                # Parent
+                pid, sts = os.waitpid(pid, 0)
+                # throw away additional data [see bug #427345]
+                while select.select([self.rfile], [], [], 0)[0]:
+                    waste = self.rfile.read(1)
+                if sts:
+                    self.log_error("CGI script exit status %#x", sts)
+                return
+            # Child
+            try:
+                try:
+                    os.setuid(nobody)
+                except os.error:
+                    pass
+                os.dup2(self.rfile.fileno(), 0)
+                os.dup2(self.wfile.fileno(), 1)
+                os.execve(scriptfile, args, env)
+            except:
+                self.server.handle_error(self.request, self.client_address)
+                os._exit(127)
+
+        elif self.have_popen2 or self.have_popen3:
+            # Windows -- use popen2 or popen3 to create a subprocess
+            import shutil
+            if self.have_popen3:
+                popenx = os.popen3
+            else:
+                popenx = os.popen2
+            cmdline = scriptfile
+            if self.is_python(scriptfile):
+                interp = sys.executable
+                if interp.lower().endswith("w.exe"):
+                    # On Windows, use python.exe, not pythonw.exe
+                    interp = interp[:-5] + interp[-4:]
+                cmdline = "%s -u %s" % (interp, cmdline)
+            if '=' not in query and '"' not in query:
+                cmdline = '%s "%s"' % (cmdline, query)
+            self.log_message("command: %s", cmdline)
+            try:
+                nbytes = int(length)
+            except:
+                nbytes = 0
+            files = popenx(cmdline, 'b')
+            fi = files[0]
+            fo = files[1]
+            if self.have_popen3:
+                fe = files[2]
+            if self.command.lower() == "post" and nbytes > 0:
+                data = self.rfile.read(nbytes)
+                fi.write(data)
+            # throw away additional data [see bug #427345]
+            while select.select([self.rfile._sock], [], [], 0)[0]:
+                waste = self.rfile._sock.recv(1)
+            fi.close()
+            shutil.copyfileobj(fo, self.wfile)
+            if self.have_popen3:
+                errors = fe.read()
+                fe.close()
+                if errors:
+                    self.log_error('%s', errors)
+            sts = fo.close()
+            if sts:
+                self.log_error("CGI script exit status %#x", sts)
+            else:
+                self.log_message("CGI script exited OK")
+
+        else:
+            # Other O.S. -- execute script in this process
+            save_argv = sys.argv
+            save_stdin = sys.stdin
+            save_stdout = sys.stdout
+            save_stderr = sys.stderr
+            try:
+                try:
+                    sys.argv = [scriptfile]
+                    if '=' not in decoded_query:
+                        sys.argv.append(decoded_query)
+                    sys.stdout = self.wfile
+                    sys.stdin = self.rfile
+                    execfile(scriptfile, {"__name__": "__main__"})
+                finally:
+                    sys.argv = save_argv
+                    sys.stdin = save_stdin
+                    sys.stdout = save_stdout
+                    sys.stderr = save_stderr
+            except SystemExit, sts:
+                self.log_error("CGI script exit status %s", str(sts))
+            else:
+                self.log_message("CGI script exited OK")
+
+
+nobody = None
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    global nobody
+    if nobody:
+        return nobody
+    try:
+        import pwd
+    except ImportError:
+        return -1
+    try:
+        nobody = pwd.getpwnam('nobody')[2]
+    except KeyError:
+        nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+    return nobody
+
+
+def executable(path):
+    """Test for executable file."""
+    try:
+        st = os.stat(path)
+    except os.error:
+        return 0
+    return st[0] & 0111 != 0
+
+
+def test(HandlerClass = CGIHTTPRequestHandler,
+         ServerClass = BaseHTTPServer.HTTPServer):
+    SimpleHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    test()
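
A quick way to exercise this handler once the change lands: drop a
script under one of cgi_directories and start the module's test()
server.  Everything below is illustrative (hello.py is a made-up
name):

    # cgi-bin/hello.py -- minimal CGI script (make it executable on Unix)
    print "Content-Type: text/plain"
    print                          # blank line terminates the CGI headers
    print "hello from CGI"

    # Serve it with:  python CGIHTTPServer.py 8000
    # (run from the directory containing cgi-bin/, then request
    #  http://localhost:8000/cgi-bin/hello.py)
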
diff --git a/lib-python/2.2/ConfigParser.py b/lib-python/2.2/ConfigParser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/ConfigParser.py
@@ -0,0 +1,472 @@
+"""Configuration file parser.
+
+A setup file consists of sections, led by a "[section]" header,
+and followed by "name: value" entries, with continuations and such in
+the style of RFC 822.
+
+The option values can contain format strings which refer to other values in
+the same section, or values in a special [DEFAULT] section.
+
+For example:
+
+    something: %(dir)s/whatever
+
+would resolve the "%(dir)s" to the value of dir.  All reference
+expansions are done late, on demand.
+
+Intrinsic defaults can be specified by passing them into the
+ConfigParser constructor as a dictionary.
+
+class:
+
+ConfigParser -- responsible for parsing a list of
+                configuration files, and managing the parsed database.
+
+    methods:
+
+    __init__(defaults=None)
+        create the parser and specify a dictionary of intrinsic defaults.  The
+        keys must be strings, the values must be appropriate for %()s string
+        interpolation.  Note that `__name__' is always an intrinsic default;
+        its value is the section's name.
+
+    sections()
+        return all the configuration section names, sans DEFAULT
+
+    has_section(section)
+        return whether the given section exists
+
+    has_option(section, option)
+        return whether the given option exists in the given section
+
+    options(section)
+        return list of configuration options for the named section
+
+    read(filenames)
+        read and parse the list of named configuration files, given by
+        name.  A single filename is also allowed.  Non-existing files
+        are ignored.
+
+    readfp(fp, filename=None)
+        read and parse one configuration file, given as a file object.
+        The filename defaults to fp.name; it is only used in error
+        messages (if fp has no `name' attribute, the string `<???>' is used).
+
+    get(section, option, raw=0, vars=None)
+        return a string value for the named option.  All % interpolations are
+        expanded in the return values, based on the defaults passed into the
+        constructor and the DEFAULT section.  Additional substitutions may be
+        provided using the `vars' argument, which must be a dictionary whose
+        contents override any pre-existing defaults.
+
+    getint(section, option)
+        like get(), but convert value to an integer
+
+    getfloat(section, option)
+        like get(), but convert value to a float
+
+    getboolean(section, option)
+        like get(), but convert value to a boolean (currently case
+        insensitively defined as 0, false, no, off for 0, and 1, true,
+        yes, on for 1).  Returns 0 or 1.
+
+    remove_section(section)
+        remove the given file section and all its options
+
+    remove_option(section, option)
+        remove the given option from the given section
+
+    set(section, option, value)
+        set the given option
+
+    write(fp)
+        write the configuration state in .ini format
+"""
+
+import re
+import types
+
+__all__ = ["NoSectionError","DuplicateSectionError","NoOptionError",
+           "InterpolationError","InterpolationDepthError","ParsingError",
+           "MissingSectionHeaderError","ConfigParser",
+           "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
+
+DEFAULTSECT = "DEFAULT"
+
+MAX_INTERPOLATION_DEPTH = 10
+
+
+
+# exception classes
+class Error(Exception):
+    def __init__(self, msg=''):
+        self._msg = msg
+        Exception.__init__(self, msg)
+    def __repr__(self):
+        return self._msg
+    __str__ = __repr__
+
+class NoSectionError(Error):
+    def __init__(self, section):
+        Error.__init__(self, 'No section: %s' % section)
+        self.section = section
+
+class DuplicateSectionError(Error):
+    def __init__(self, section):
+        Error.__init__(self, "Section %s already exists" % section)
+        self.section = section
+
+class NoOptionError(Error):
+    def __init__(self, option, section):
+        Error.__init__(self, "No option `%s' in section: %s" %
+                       (option, section))
+        self.option = option
+        self.section = section
+
+class InterpolationError(Error):
+    def __init__(self, reference, option, section, rawval):
+        Error.__init__(self,
+                       "Bad value substitution:\n"
+                       "\tsection: [%s]\n"
+                       "\toption : %s\n"
+                       "\tkey    : %s\n"
+                       "\trawval : %s\n"
+                       % (section, option, reference, rawval))
+        self.reference = reference
+        self.option = option
+        self.section = section
+
+class InterpolationDepthError(Error):
+    def __init__(self, option, section, rawval):
+        Error.__init__(self,
+                       "Value interpolation too deeply recursive:\n"
+                       "\tsection: [%s]\n"
+                       "\toption : %s\n"
+                       "\trawval : %s\n"
+                       % (section, option, rawval))
+        self.option = option
+        self.section = section
+
+class ParsingError(Error):
+    def __init__(self, filename):
+        Error.__init__(self, 'File contains parsing errors: %s' % filename)
+        self.filename = filename
+        self.errors = []
+
+    def append(self, lineno, line):
+        self.errors.append((lineno, line))
+        self._msg = self._msg + '\n\t[line %2d]: %s' % (lineno, line)
+
+class MissingSectionHeaderError(ParsingError):
+    def __init__(self, filename, lineno, line):
+        Error.__init__(
+            self,
+            'File contains no section headers.\nfile: %s, line: %d\n%s' %
+            (filename, lineno, line))
+        self.filename = filename
+        self.lineno = lineno
+        self.line = line
+
+
+
+class ConfigParser:
+    def __init__(self, defaults=None):
+        self.__sections = {}
+        if defaults is None:
+            self.__defaults = {}
+        else:
+            self.__defaults = defaults
+
+    def defaults(self):
+        return self.__defaults
+
+    def sections(self):
+        """Return a list of section names, excluding [DEFAULT]"""
+        # self.__sections will never have [DEFAULT] in it
+        return self.__sections.keys()
+
+    def add_section(self, section):
+        """Create a new section in the configuration.
+
+        Raise DuplicateSectionError if a section by the specified name
+        already exists.
+        """
+        if section in self.__sections:
+            raise DuplicateSectionError(section)
+        self.__sections[section] = {}
+
+    def has_section(self, section):
+        """Indicate whether the named section is present in the configuration.
+
+        The DEFAULT section is not acknowledged.
+        """
+        return section in self.__sections
+
+    def options(self, section):
+        """Return a list of option names for the given section name."""
+        try:
+            opts = self.__sections[section].copy()
+        except KeyError:
+            raise NoSectionError(section)
+        opts.update(self.__defaults)
+        if '__name__' in opts:
+            del opts['__name__']
+        return opts.keys()
+
+    def read(self, filenames):
+        """Read and parse a filename or a list of filenames.
+
+        Files that cannot be opened are silently ignored; this is
+        designed so that you can specify a list of potential
+        configuration file locations (e.g. current directory, user's
+        home directory, systemwide directory), and all existing
+        configuration files in the list will be read.  A single
+        filename may also be given.
+        """
+        if isinstance(filenames, types.StringTypes):
+            filenames = [filenames]
+        for filename in filenames:
+            try:
+                fp = open(filename)
+            except IOError:
+                continue
+            self.__read(fp, filename)
+            fp.close()
+
+    def readfp(self, fp, filename=None):
+        """Like read() but the argument must be a file-like object.
+
+        The `fp' argument must have a `readline' method.  Optional
+        second argument is the `filename', which if not given, is
+        taken from fp.name.  If fp has no `name' attribute, `<???>' is
+        used.
+
+        """
+        if filename is None:
+            try:
+                filename = fp.name
+            except AttributeError:
+                filename = '<???>'
+        self.__read(fp, filename)
+
+    def get(self, section, option, raw=0, vars=None):
+        """Get an option value for a given section.
+
+        All % interpolations are expanded in the return values, based on the
+        defaults passed into the constructor, unless the optional argument
+        `raw' is true.  Additional substitutions may be provided using the
+        `vars' argument, which must be a dictionary whose contents override
+        any pre-existing defaults.
+
+        The section DEFAULT is special.
+        """
+        d = self.__defaults.copy()
+        try:
+            d.update(self.__sections[section])
+        except KeyError:
+            if section != DEFAULTSECT:
+                raise NoSectionError(section)
+        # Update with the entry specific variables
+        if vars is not None:
+            d.update(vars)
+        option = self.optionxform(option)
+        try:
+            value = d[option]
+        except KeyError:
+            raise NoOptionError(option, section)
+
+        if raw:
+            return value
+        return self._interpolate(section, option, value, d)
+
+    def _interpolate(self, section, option, rawval, vars):
+        # do the string interpolation
+        value = rawval
+        depth = MAX_INTERPOLATION_DEPTH
+        while depth:                    # Loop through this until it's done
+            depth -= 1
+            if value.find("%(") != -1:
+                try:
+                    value = value % vars
+                except KeyError, key:
+                    raise InterpolationError(key, option, section, rawval)
+            else:
+                break
+        if value.find("%(") != -1:
+            raise InterpolationDepthError(option, section, rawval)
+        return value
+
+    def __get(self, section, conv, option):
+        return conv(self.get(section, option))
+
+    def getint(self, section, option):
+        return self.__get(section, int, option)
+
+    def getfloat(self, section, option):
+        return self.__get(section, float, option)
+
+    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+                       '0': False, 'no': False, 'false': False, 'off': False}
+
+    def getboolean(self, section, option):
+        v = self.get(section, option)
+        if v.lower() not in self._boolean_states:
+            raise ValueError, 'Not a boolean: %s' % v
+        return self._boolean_states[v.lower()]
+
+    def optionxform(self, optionstr):
+        return optionstr.lower()
+
+    def has_option(self, section, option):
+        """Check for the existence of a given option in a given section."""
+        if not section or section == DEFAULTSECT:
+            option = self.optionxform(option)
+            return option in self.__defaults
+        elif section not in self.__sections:
+            return 0
+        else:
+            option = self.optionxform(option)
+            return (option in self.__sections[section]
+                    or option in self.__defaults)
+
+    def set(self, section, option, value):
+        """Set an option."""
+        if not section or section == DEFAULTSECT:
+            sectdict = self.__defaults
+        else:
+            try:
+                sectdict = self.__sections[section]
+            except KeyError:
+                raise NoSectionError(section)
+        sectdict[self.optionxform(option)] = value
+
+    def write(self, fp):
+        """Write an .ini-format representation of the configuration state."""
+        if self.__defaults:
+            fp.write("[%s]\n" % DEFAULTSECT)
+            for (key, value) in self.__defaults.items():
+                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
+            fp.write("\n")
+        for section in self.__sections:
+            fp.write("[%s]\n" % section)
+            for (key, value) in self.__sections[section].items():
+                if key != "__name__":
+                    fp.write("%s = %s\n" %
+                             (key, str(value).replace('\n', '\n\t')))
+            fp.write("\n")
+
+    def remove_option(self, section, option):
+        """Remove an option."""
+        if not section or section == DEFAULTSECT:
+            sectdict = self.__defaults
+        else:
+            try:
+                sectdict = self.__sections[section]
+            except KeyError:
+                raise NoSectionError(section)
+        option = self.optionxform(option)
+        existed = option in sectdict
+        if existed:
+            del sectdict[option]
+        return existed
+
+    def remove_section(self, section):
+        """Remove a file section."""
+        existed = section in self.__sections
+        if existed:
+            del self.__sections[section]
+        return existed
+
+    #
+    # Regular expressions for parsing section headers and options.
+    #
+    SECTCRE = re.compile(
+        r'\['                                 # [
+        r'(?P<header>[^]]+)'                  # very permissive!
+        r'\]'                                 # ]
+        )
+    OPTCRE = re.compile(
+        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
+        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
+                                              # followed by separator
+                                              # (either : or =), followed
+                                              # by any # space/tab
+        r'(?P<value>.*)$'                     # everything up to eol
+        )
+
+    def __read(self, fp, fpname):
+        """Parse a sectioned setup file.
+
+        The sections in the setup file contain a title line at the top,
+        indicated by a name in square brackets (`[]'), plus key/value
+        option lines, indicated by `name: value' format lines.
+        Continuations are represented by an embedded newline then
+        leading whitespace.  Blank lines, lines beginning with a '#',
+        and just about everything else are ignored.
+        """
+        cursect = None                            # None, or a dictionary
+        optname = None
+        lineno = 0
+        e = None                                  # None, or an exception
+        while 1:
+            line = fp.readline()
+            if not line:
+                break
+            lineno = lineno + 1
+            # comment or blank line?
+            if line.strip() == '' or line[0] in '#;':
+                continue
+            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+                # no leading whitespace
+                continue
+            # continuation line?
+            if line[0].isspace() and cursect is not None and optname:
+                value = line.strip()
+                if value:
+                    cursect[optname] = "%s\n%s" % (cursect[optname], value)
+            # a section header or option header?
+            else:
+                # is it a section header?
+                mo = self.SECTCRE.match(line)
+                if mo:
+                    sectname = mo.group('header')
+                    if sectname in self.__sections:
+                        cursect = self.__sections[sectname]
+                    elif sectname == DEFAULTSECT:
+                        cursect = self.__defaults
+                    else:
+                        cursect = {'__name__': sectname}
+                        self.__sections[sectname] = cursect
+                    # So sections can't start with a continuation line
+                    optname = None
+                # no section header in the file?
+                elif cursect is None:
+                    raise MissingSectionHeaderError(fpname, lineno, `line`)
+                # an option line?
+                else:
+                    mo = self.OPTCRE.match(line)
+                    if mo:
+                        optname, vi, optval = mo.group('option', 'vi', 'value')
+                        if vi in ('=', ':') and ';' in optval:
+                            # ';' is a comment delimiter only if it follows
+                            # a spacing character
+                            pos = optval.find(';')
+                            if pos != -1 and optval[pos-1].isspace():
+                                optval = optval[:pos]
+                        optval = optval.strip()
+                        # allow empty values
+                        if optval == '""':
+                            optval = ''
+                        optname = self.optionxform(optname.rstrip())
+                        cursect[optname] = optval
+                    else:
+                        # a non-fatal parsing error occurred.  set up the
+                        # exception but keep going. the exception will be
+                        # raised at the end of the file and will contain a
+                        # list of all bogus lines
+                        if not e:
+                            e = ParsingError(fpname)
+                        e.append(lineno, `line`)
+        # if any parsing errors occurred, raise an exception
+        if e:
+            raise e
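
A short sketch of the on-demand %(...)s interpolation the module
docstring describes (illustrative; the section and option names are
made up):

    import ConfigParser
    from StringIO import StringIO

    sample = "[DEFAULT]\ndir: /tmp\n\n[paths]\nsomething: %(dir)s/whatever\n"

    cp = ConfigParser.ConfigParser()
    cp.readfp(StringIO(sample))                 # parse a file-like object
    print cp.get('paths', 'something')          # -> /tmp/whatever (expanded)
    print cp.get('paths', 'something', raw=1)   # -> %(dir)s/whatever (raw)
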
diff --git a/lib-python/2.2/Cookie.py b/lib-python/2.2/Cookie.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Cookie.py
@@ -0,0 +1,742 @@
+#!/usr/bin/env python
+#
+
+####
+# Copyright 2000 by Timothy O'Malley <timo at alum.mit.edu>
+#
+#                All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software
+# and its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Timothy O'Malley  not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
+# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+####
+#
+# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
+#   by Timothy O'Malley <timo at alum.mit.edu>
+#
+#  Cookie.py is a Python module for the handling of HTTP
+#  cookies as a Python dictionary.  See RFC 2109 for more
+#  information on cookies.
+#
+#  The original idea to treat Cookies as a dictionary came from
+#  Dave Mitchell (davem at magnet.com) in 1995, when he released the
+#  first version of nscookie.py.
+#
+####
+
+r"""
+Here's a sample session to show how to use this module.
+At the moment, this is the only documentation.
+
+The Basics
+----------
+
+Importing is easy..
+
+   >>> import Cookie
+
+Most of the time you start by creating a cookie.  Cookies come in
+three flavors, each with slightly different encoding semantics, but
+more on that later.
+
+   >>> C = Cookie.SimpleCookie()
+   >>> C = Cookie.SerialCookie()
+   >>> C = Cookie.SmartCookie()
+
+[Note: Long-time users of Cookie.py will remember using
+Cookie.Cookie() to create a Cookie object.  Although deprecated, it
+is still supported by the code.  See the Backward Compatibility notes
+for more information.]
+
+Once you've created your Cookie, you can add values just as if it were
+a dictionary.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["fig"] = "newton"
+   >>> C["sugar"] = "wafer"
+   >>> print C
+   Set-Cookie: fig=newton;
+   Set-Cookie: sugar=wafer;
+
+Notice that the printable representation of a Cookie is the
+appropriate format for a Set-Cookie: header.  This is the
+default behavior.  You can change the header and printed
+attributes by using the .output() function.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["rocky"] = "road"
+   >>> C["rocky"]["path"] = "/cookie"
+   >>> print C.output(header="Cookie:")
+   Cookie: rocky=road; Path=/cookie;
+   >>> print C.output(attrs=[], header="Cookie:")
+   Cookie: rocky=road;
+
+The load() method of a Cookie extracts cookies from a string.  In a
+CGI script, you would use this method to extract the cookies from the
+HTTP_COOKIE environment variable.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C.load("chips=ahoy; vienna=finger")
+   >>> print C
+   Set-Cookie: chips=ahoy;
+   Set-Cookie: vienna=finger;
+
+The load() method is darn-tootin smart about identifying cookies
+within a string.  Escaped quotation marks, nested semicolons, and other
+such trickeries do not confuse it.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
+   >>> print C
+   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
+
+Each element of the Cookie also supports all of the RFC 2109
+Cookie attributes.  Here's an example which sets the Path
+attribute.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["oreo"] = "doublestuff"
+   >>> C["oreo"]["path"] = "/"
+   >>> print C
+   Set-Cookie: oreo=doublestuff; Path=/;
+
+Each dictionary element has a 'value' attribute, which gives you
+back the value associated with the key.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["twix"] = "none for you"
+   >>> C["twix"].value
+   'none for you'
+
+
+A Bit More Advanced
+-------------------
+
+As mentioned before, there are three different flavors of Cookie
+objects, each with different encoding/decoding semantics.  This
+section briefly discusses the differences.
+
+SimpleCookie
+
+The SimpleCookie expects that all values should be standard strings.
+Just to be sure, SimpleCookie invokes the str() builtin to convert
+the value to a string, when the values are set dictionary-style.
+
+   >>> C = Cookie.SimpleCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   '7'
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number=7;
+   Set-Cookie: string=seven;
+
+
+SerialCookie
+
+The SerialCookie expects that all values should be serialized using
+cPickle (or pickle, if cPickle isn't available).  As a result of
+serializing, SerialCookie can save almost any Python object to a
+value, and recover the exact same object when the cookie has been
+returned.  (SerialCookie can yield some strange-looking cookie
+values, however.)
+
+   >>> C = Cookie.SerialCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   7
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number="I7\012.";
+   Set-Cookie: string="S'seven'\012p1\012.";
+
+Be warned, however: if SerialCookie cannot de-serialize a value (because
+it isn't a valid pickled object), IT WILL RAISE AN EXCEPTION.
+
+
+SmartCookie
+
+The SmartCookie combines aspects of each of the other two flavors.
+When setting a value dictionary-style, the SmartCookie will
+serialize (via cPickle) the value *if and only if* it isn't a
+Python string.  String objects are *not* serialized.  Similarly,
+when the load() method parses out values, it attempts to de-serialize
+the value.  If that fails, it falls back to treating the value
+as a string.
+
+   >>> C = Cookie.SmartCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   7
+   >>> C["string"].value
+   'seven'
+   >>> print C
+   Set-Cookie: number="I7\012.";
+   Set-Cookie: string=seven;
+
+
+Backwards Compatibility
+-----------------------
+
+In order to keep compatibility with earlier versions of Cookie.py,
+it is still possible to use Cookie.Cookie() to create a Cookie.  In
+fact, this simply returns a SmartCookie.
+
+   >>> C = Cookie.Cookie()
+   >>> print C.__class__.__name__
+   SmartCookie
+
+
+Finis.
+"""  #"
+#     ^
+#     |----helps out font-lock
+
+#
+# Import our required modules
+#
+import string
+from UserDict import UserDict
+
+try:
+    from cPickle import dumps, loads
+except ImportError:
+    from pickle import dumps, loads
+
+try:
+    import re
+except ImportError:
+    raise ImportError, "Cookie.py requires 're' from Python 1.5 or later"
+
+__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
+           "SmartCookie","Cookie"]
+
+#
+# Define an exception visible to External modules
+#
+class CookieError(Exception):
+    pass
+
+
+# These quoting routines conform to the RFC2109 specification, which in
+# turn references the character definitions from RFC2068.  They provide
+# a two-way quoting algorithm.  Any non-text character is translated
+# into a 4 character sequence: a backslash followed by the
+# three-digit octal equivalent of the character.  Any '\' or '"' is
+# quoted with a preceding '\' backslash.
+#
+# These are taken from RFC2068 and RFC2109.
+#       _LegalChars       is the list of chars which don't require "'s
+#       _Translator       hash-table for fast quoting
+#
+_LegalChars       = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
+_Translator       = {
+    '\000' : '\\000',  '\001' : '\\001',  '\002' : '\\002',
+    '\003' : '\\003',  '\004' : '\\004',  '\005' : '\\005',
+    '\006' : '\\006',  '\007' : '\\007',  '\010' : '\\010',
+    '\011' : '\\011',  '\012' : '\\012',  '\013' : '\\013',
+    '\014' : '\\014',  '\015' : '\\015',  '\016' : '\\016',
+    '\017' : '\\017',  '\020' : '\\020',  '\021' : '\\021',
+    '\022' : '\\022',  '\023' : '\\023',  '\024' : '\\024',
+    '\025' : '\\025',  '\026' : '\\026',  '\027' : '\\027',
+    '\030' : '\\030',  '\031' : '\\031',  '\032' : '\\032',
+    '\033' : '\\033',  '\034' : '\\034',  '\035' : '\\035',
+    '\036' : '\\036',  '\037' : '\\037',
+
+    '"' : '\\"',       '\\' : '\\\\',
+
+    '\177' : '\\177',  '\200' : '\\200',  '\201' : '\\201',
+    '\202' : '\\202',  '\203' : '\\203',  '\204' : '\\204',
+    '\205' : '\\205',  '\206' : '\\206',  '\207' : '\\207',
+    '\210' : '\\210',  '\211' : '\\211',  '\212' : '\\212',
+    '\213' : '\\213',  '\214' : '\\214',  '\215' : '\\215',
+    '\216' : '\\216',  '\217' : '\\217',  '\220' : '\\220',
+    '\221' : '\\221',  '\222' : '\\222',  '\223' : '\\223',
+    '\224' : '\\224',  '\225' : '\\225',  '\226' : '\\226',
+    '\227' : '\\227',  '\230' : '\\230',  '\231' : '\\231',
+    '\232' : '\\232',  '\233' : '\\233',  '\234' : '\\234',
+    '\235' : '\\235',  '\236' : '\\236',  '\237' : '\\237',
+    '\240' : '\\240',  '\241' : '\\241',  '\242' : '\\242',
+    '\243' : '\\243',  '\244' : '\\244',  '\245' : '\\245',
+    '\246' : '\\246',  '\247' : '\\247',  '\250' : '\\250',
+    '\251' : '\\251',  '\252' : '\\252',  '\253' : '\\253',
+    '\254' : '\\254',  '\255' : '\\255',  '\256' : '\\256',
+    '\257' : '\\257',  '\260' : '\\260',  '\261' : '\\261',
+    '\262' : '\\262',  '\263' : '\\263',  '\264' : '\\264',
+    '\265' : '\\265',  '\266' : '\\266',  '\267' : '\\267',
+    '\270' : '\\270',  '\271' : '\\271',  '\272' : '\\272',
+    '\273' : '\\273',  '\274' : '\\274',  '\275' : '\\275',
+    '\276' : '\\276',  '\277' : '\\277',  '\300' : '\\300',
+    '\301' : '\\301',  '\302' : '\\302',  '\303' : '\\303',
+    '\304' : '\\304',  '\305' : '\\305',  '\306' : '\\306',
+    '\307' : '\\307',  '\310' : '\\310',  '\311' : '\\311',
+    '\312' : '\\312',  '\313' : '\\313',  '\314' : '\\314',
+    '\315' : '\\315',  '\316' : '\\316',  '\317' : '\\317',
+    '\320' : '\\320',  '\321' : '\\321',  '\322' : '\\322',
+    '\323' : '\\323',  '\324' : '\\324',  '\325' : '\\325',
+    '\326' : '\\326',  '\327' : '\\327',  '\330' : '\\330',
+    '\331' : '\\331',  '\332' : '\\332',  '\333' : '\\333',
+    '\334' : '\\334',  '\335' : '\\335',  '\336' : '\\336',
+    '\337' : '\\337',  '\340' : '\\340',  '\341' : '\\341',
+    '\342' : '\\342',  '\343' : '\\343',  '\344' : '\\344',
+    '\345' : '\\345',  '\346' : '\\346',  '\347' : '\\347',
+    '\350' : '\\350',  '\351' : '\\351',  '\352' : '\\352',
+    '\353' : '\\353',  '\354' : '\\354',  '\355' : '\\355',
+    '\356' : '\\356',  '\357' : '\\357',  '\360' : '\\360',
+    '\361' : '\\361',  '\362' : '\\362',  '\363' : '\\363',
+    '\364' : '\\364',  '\365' : '\\365',  '\366' : '\\366',
+    '\367' : '\\367',  '\370' : '\\370',  '\371' : '\\371',
+    '\372' : '\\372',  '\373' : '\\373',  '\374' : '\\374',
+    '\375' : '\\375',  '\376' : '\\376',  '\377' : '\\377'
+    }
+
+def _quote(str, LegalChars=_LegalChars,
+    join=string.join, idmap=string._idmap, translate=string.translate):
+    #
+    # If the string does not need to be double-quoted,
+    # then just return the string.  Otherwise, surround
+    # the string in double quotes and escape any special
+    # characters with a preceding backslash.
+    #
+    if "" == translate(str, idmap, LegalChars):
+        return str
+    else:
+        return '"' + join( map(_Translator.get, str, str), "" ) + '"'
+# end _quote
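+
+# A quick illustration of the routine above (the values shown are what the
+# code produces):
+#   _quote("fig")          -> 'fig'              (all chars legal, as-is)
+#   _quote("fig;newton")   -> '"fig;newton"'     (';' forces quoting)
+#   _quote('say "hi"')     -> '"say \\"hi\\""'   (embedded quote escaped)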
+
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
+_QuotePatt = re.compile(r"[\\].")
+
+def _unquote(str, join=string.join, atoi=string.atoi):
+    # If there aren't any doublequotes,
+    # then there can't be any special characters.  See RFC 2109.
+    if  len(str) < 2:
+        return str
+    if str[0] != '"' or str[-1] != '"':
+        return str
+
+    # We have to assume that we must decode this string.
+    # Down to work.
+
+    # Remove the "s
+    str = str[1:-1]
+
+    # Check for special sequences.  Examples:
+    #    \012 --> \n
+    #    \"   --> "
+    #
+    i = 0
+    n = len(str)
+    res = []
+    while 0 <= i < n:
+        Omatch = _OctalPatt.search(str, i)
+        Qmatch = _QuotePatt.search(str, i)
+        if not Omatch and not Qmatch:              # Neither matched
+            res.append(str[i:])
+            break
+        # else:
+        j = k = -1
+        if Omatch: j = Omatch.start(0)
+        if Qmatch: k = Qmatch.start(0)
+        if Qmatch and ( not Omatch or k < j ):     # QuotePatt matched
+            res.append(str[i:k])
+            res.append(str[k+1])
+            i = k+2
+        else:                                      # OctalPatt matched
+            res.append(str[i:j])
+            res.append( chr( atoi(str[j+1:j+4], 8) ) )
+            i = j+4
+    return join(res, "")
+# end _unquote
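+
+# For illustration, _unquote() reverses the encoding performed by _quote():
+#   _unquote('fig')              -> 'fig'        (no quotes, returned as-is)
+#   _unquote('"say \\"hi\\""')   -> 'say "hi"'
+#   _unquote('"fudge=\\012;"')   -> 'fudge=' + chr(10) + ';'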
+
+# The _getdate() routine is used to set the expiration time in
+# the cookie's HTTP header.  By default, _getdate() returns the
+# current time in the appropriate "expires" format for a
+# Set-Cookie header.  The one optional argument is an offset from
+# now, in seconds.  For example, an offset of -3600 means "one hour ago".
+# The offset may be a floating point number.
+#
+
+_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+_monthname = [None,
+              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
+    from time import gmtime, time
+    now = time()
+    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
+    return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
+           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
+
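+# For illustration, a call such as _getdate(300) yields a string of the form
+#   'Sat, 01-Jan-2000 00:05:00 GMT'
+# (the actual date depends, of course, on when it is called).
+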
+
+#
+# A class to hold ONE key,value pair.
+# In a cookie, each such pair may have several attributes,
+#       so this class is used to keep the attributes associated
+#       with the appropriate key,value pair.
+# This class also includes a coded_value attribute, which
+#       is used to hold the network representation of the
+#       value.  This is most useful when Python objects are
+#       pickled for network transit.
+#
+
+class Morsel(UserDict):
+    # RFC 2109 lists these attributes as reserved:
+    #   path      comment    domain
+    #   max-age   secure     version
+    #
+    # For historical reasons, these attributes are also reserved:
+    #   expires
+    #
+    # This dictionary provides a mapping from the lowercase
+    # variant on the left to the appropriate traditional
+    # formatting on the right.
+    _reserved = { "expires" : "expires",
+                  "path"    : "Path",
+                  "comment" : "Comment",
+                  "domain"  : "Domain",
+                  "max-age" : "Max-Age",
+                  "secure"  : "secure",
+                  "version" : "Version",
+                  }
+    _reserved_keys = _reserved.keys()
+
+    def __init__(self):
+        # Set defaults
+        self.key = self.value = self.coded_value = None
+        UserDict.__init__(self)
+
+        # Set default attributes
+        for K in self._reserved_keys:
+            UserDict.__setitem__(self, K, "")
+    # end __init__
+
+    def __setitem__(self, K, V):
+        K = string.lower(K)
+        if not K in self._reserved_keys:
+            raise CookieError("Invalid Attribute %s" % K)
+        UserDict.__setitem__(self, K, V)
+    # end __setitem__
+
+    def isReservedKey(self, K):
+        return string.lower(K) in self._reserved_keys
+    # end isReservedKey
+
+    def set(self, key, val, coded_val,
+            LegalChars=_LegalChars,
+            idmap=string._idmap, translate=string.translate ):
+        # First we verify that the key isn't a reserved word
+        # Second we make sure it only contains legal characters
+        if string.lower(key) in self._reserved_keys:
+            raise CookieError("Attempt to set a reserved key: %s" % key)
+        if "" != translate(key, idmap, LegalChars):
+            raise CookieError("Illegal key value: %s" % key)
+
+        # It's a good key, so save it.
+        self.key                 = key
+        self.value               = val
+        self.coded_value         = coded_val
+    # end set
+
+    def output(self, attrs=None, header = "Set-Cookie:"):
+        return "%s %s" % ( header, self.OutputString(attrs) )
+
+    __str__ = output
+
+    def __repr__(self):
+        return '<%s: %s=%s>' % (self.__class__.__name__,
+                                self.key, repr(self.value) )
+
+    def js_output(self, attrs=None):
+        # Return a JavaScript snippet that sets this cookie via document.cookie
+        return """
+        <SCRIPT LANGUAGE="JavaScript">
+        <!-- begin hiding
+        document.cookie = \"%s\"
+        // end hiding -->
+        </script>
+        """ % ( self.OutputString(attrs), )
+    # end js_output()
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        RA = result.append
+
+        # First, the key=value pair
+        RA("%s=%s;" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved_keys
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            if V == "": continue
+            if K not in attrs: continue
+            if K == "expires" and type(V) == type(1):
+                RA("%s=%s;" % (self._reserved[K], _getdate(V)))
+            elif K == "max-age" and type(V) == type(1):
+                RA("%s=%d;" % (self._reserved[K], V))
+            elif K == "secure":
+                RA("%s;" % self._reserved[K])
+            else:
+                RA("%s=%s;" % (self._reserved[K], V))
+
+        # Return the result
+        return string.join(result, " ")
+    # end OutputString
+# end Morsel class
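+
+# A short sketch of driving a Morsel directly (BaseCookie normally does
+# this for you; the key and values here are illustrative only):
+#
+#   M = Morsel()
+#   M.set("flavor", "vanilla", "vanilla")
+#   M["path"] = "/shop"
+#   print M                   # -> Set-Cookie: flavor=vanilla; Path=/shop;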
+
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications.  I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs.  As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalCharsPatt  = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
+_CookiePattern = re.compile(
+    r"(?x)"                       # This is a Verbose pattern
+    r"(?P<key>"                   # Start of group 'key'
+    ""+ _LegalCharsPatt +"+?"     # Any word of at least one letter, nongreedy
+    r")"                          # End of group 'key'
+    r"\s*=\s*"                    # Equal Sign
+    r"(?P<val>"                   # Start of group 'val'
+    r'"(?:[^\\"]|\\.)*"'            # Any doublequoted string
+    r"|"                            # or
+    ""+ _LegalCharsPatt +"*"        # Any word or empty string
+    r")"                          # End of group 'val'
+    r"\s*;?"                      # Probably ending in a semi-colon
+    )
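+
+# For illustration, run against the string 'chips=ahoy; vienna=finger' the
+# pattern above yields two matches, with ('key', 'val') groups
+# ('chips', 'ahoy') and ('vienna', 'finger').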
+
+
+# At long last, here is the cookie class.
+#   Using this class is almost just like using a dictionary.
+# See this module's docstring for example usage.
+#
+class BaseCookie(UserDict):
+    # A container class for a set of Morsels
+    #
+
+    def value_decode(self, val):
+        """real_value, coded_value = value_decode(STRING)
+        Called prior to setting a cookie's value from the network
+        representation.  The VALUE is the value read from the HTTP
+        header.
+        Override this function to modify the behavior of cookies.
+        """
+        return val, val
+    # end value_decode
+
+    def value_encode(self, val):
+        """real_value, coded_value = value_encode(VALUE)
+        Called prior to setting a cookie's value from the dictionary
+        representation.  The VALUE is the value being assigned.
+        Override this function to modify the behavior of cookies.
+        """
+        strval = str(val)
+        return strval, strval
+    # end value_encode
+
+    def __init__(self, input=None):
+        UserDict.__init__(self)
+        if input: self.load(input)
+    # end __init__
+
+    def __set(self, key, real_value, coded_value):
+        """Private method for setting a cookie's value"""
+        M = self.get(key, Morsel())
+        M.set(key, real_value, coded_value)
+        UserDict.__setitem__(self, key, M)
+    # end __set
+
+    def __setitem__(self, key, value):
+        """Dictionary style assignment."""
+        rval, cval = self.value_encode(value)
+        self.__set(key, rval, cval)
+    # end __setitem__
+
+    def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
+        """Return a string suitable for HTTP."""
+        result = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            result.append( V.output(attrs, header) )
+        return string.join(result, sep)
+    # end output
+
+    __str__ = output
+
+    def __repr__(self):
+        L = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            L.append( '%s=%s' % (K,repr(V.value) ) )
+        return '<%s: %s>' % (self.__class__.__name__, string.join(L))
+
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        result = []
+        items = self.items()
+        items.sort()
+        for K,V in items:
+            result.append( V.js_output(attrs) )
+        return string.join(result, "")
+    # end js_output
+
+    def load(self, rawdata):
+        """Load cookies from a string (presumably HTTP_COOKIE) or
+        from a dictionary.  Loading cookies from a dictionary 'd'
+        is equivalent to calling:
+            map(Cookie.__setitem__, d.keys(), d.values())
+        """
+        if type(rawdata) == type(""):
+            self.__ParseString(rawdata)
+        else:
+            self.update(rawdata)
+        return
+    # end load()
+
+    def __ParseString(self, str, patt=_CookiePattern):
+        i = 0            # Our starting point
+        n = len(str)     # Length of string
+        M = None         # current morsel
+
+        while 0 <= i < n:
+            # Start looking for a cookie
+            match = patt.search(str, i)
+            if not match: break          # No more cookies
+
+            K,V = match.group("key"), match.group("val")
+            i = match.end(0)
+
+            # Parse the key, value in case it's metainfo
+            if K[0] == "$":
+                # We ignore attributes which pertain to the cookie
+                # mechanism as a whole.  See RFC 2109.
+                # (Does anyone care?)
+                if M:
+                    M[ K[1:] ] = V
+            elif string.lower(K) in Morsel._reserved_keys:
+                if M:
+                    M[ K ] = _unquote(V)
+            else:
+                rval, cval = self.value_decode(V)
+                self.__set(K, rval, cval)
+                M = self[K]
+    # end __ParseString
+# end BaseCookie class
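+
+# Defining a new flavor is just a matter of overriding value_decode() and
+# value_encode().  A hypothetical sketch (not part of this module) that
+# base64-encodes every value:
+#
+#   import base64
+#
+#   class Base64Cookie(BaseCookie):
+#       def value_decode(self, val):
+#           return base64.decodestring(_unquote(val)), val
+#       def value_encode(self, val):
+#           strval = str(val)
+#           return strval, _quote(base64.encodestring(strval))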
+
+class SimpleCookie(BaseCookie):
+    """SimpleCookie
+    SimpleCookie supports strings as cookie values.  When setting
+    the value using the dictionary assignment notation, SimpleCookie
+    calls the builtin str() to convert the value to a string.  Values
+    received from HTTP are kept as strings.
+    """
+    def value_decode(self, val):
+        return _unquote( val ), val
+    def value_encode(self, val):
+        strval = str(val)
+        return strval, _quote( strval )
+# end SimpleCookie
+
+class SerialCookie(BaseCookie):
+    """SerialCookie
+    SerialCookie supports arbitrary objects as cookie values. All
+    values are serialized (using cPickle) before being sent to the
+    client.  All incoming values are assumed to be valid Pickle
+    representations.  IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
+    FORMAT, THEN AN EXCEPTION WILL BE RAISED.
+
+    Note: Large cookie values add overhead because they must be
+    retransmitted on every HTTP transaction.
+
+    Note: HTTP has a 2k limit on the size of a cookie.  This class
+    does not check for this limit, so be careful!!!
+    """
+    def value_decode(self, val):
+        # This could raise an exception!
+        return loads( _unquote(val) ), val
+    def value_encode(self, val):
+        return val, _quote( dumps(val) )
+# end SerialCookie
+
+class SmartCookie(BaseCookie):
+    """SmartCookie
+    SmartCookie supports arbitrary objects as cookie values.  If the
+    object is a string, then it is quoted.  If the object is not a
+    string, however, then SmartCookie will use cPickle to serialize
+    the object into a string representation.
+
+    Note: Large cookie values add overhead because they must be
+    retransmitted on every HTTP transaction.
+
+    Note: HTTP has a 2k limit on the size of a cookie.  This class
+    does not check for this limit, so be careful!!!
+    """
+    def value_decode(self, val):
+        strval = _unquote(val)
+        try:
+            return loads(strval), val
+        except:
+            return strval, val
+    def value_encode(self, val):
+        if type(val) == type(""):
+            return val, _quote(val)
+        else:
+            return val, _quote( dumps(val) )
+# end SmartCookie
+
+
+###########################################################
+# Backwards Compatibility:  Don't break any existing code!
+
+# We provide Cookie() as an alias for SmartCookie()
+Cookie = SmartCookie
+
+#
+###########################################################
+
+def _test():
+    import doctest, Cookie
+    return doctest.testmod(Cookie)
+
+if __name__ == "__main__":
+    _test()
+
+
+#Local Variables:
+#tab-width: 4
+#end:
diff --git a/lib-python/2.2/FCNTL.py b/lib-python/2.2/FCNTL.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/FCNTL.py
@@ -0,0 +1,14 @@
+"""Backward-compatibility version of FCNTL; export constants exported by
+fcntl, and issue a deprecation warning.
+"""
+
+import warnings
+warnings.warn("the FCNTL module is deprecated; please use fcntl",
+              DeprecationWarning)
+
+
+# Export the constants known to the fcntl module:
+from fcntl import *
+
+# and *only* the constants:
+__all__ = [s for s in dir() if s[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
diff --git a/lib-python/2.2/HTMLParser.py b/lib-python/2.2/HTMLParser.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/HTMLParser.py
@@ -0,0 +1,383 @@
+"""A parser for HTML and XHTML."""
+
+# This file is based on sgmllib.py, but the API is slightly different.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).
+
+
+import markupbase
+import re
+
+# Regular expressions used for parsing
+
+interesting_normal = re.compile('[&<]')
+interesting_cdata = re.compile(r'<(/|\Z)')
+incomplete = re.compile('&[a-zA-Z#]')
+
+entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
+charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
+
+starttagopen = re.compile('<[a-zA-Z]')
+piclose = re.compile('>')
+commentclose = re.compile(r'--\s*>')
+tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
+attrfind = re.compile(
+    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
+    r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./:;+*%?!&$\(\)_#=~]*))?')
+
+locatestarttagend = re.compile(r"""
+  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
+  (?:\s+                             # whitespace before attribute name
+    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
+      (?:\s*=\s*                     # value indicator
+        (?:'[^']*'                   # LITA-enclosed value
+          |\"[^\"]*\"                # LIT-enclosed value
+          |[^'\">\s]+                # bare value
+         )
+       )?
+     )
+   )*
+  \s*                                # trailing whitespace
+""", re.VERBOSE)
+endendtag = re.compile('>')
+endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
+class HTMLParseError(Exception):
+    """Exception raised for all parse errors."""
+
+    def __init__(self, msg, position=(None, None)):
+        assert msg
+        self.msg = msg
+        self.lineno = position[0]
+        self.offset = position[1]
+
+    def __str__(self):
+        result = self.msg
+        if self.lineno is not None:
+            result = result + ", at line %d" % self.lineno
+        if self.offset is not None:
+            result = result + ", column %d" % (self.offset + 1)
+        return result
+
+
+class HTMLParser(markupbase.ParserBase):
+    """Find tags and other markup and call handler functions.
+
+    Usage:
+        p = HTMLParser()
+        p.feed(data)
+        ...
+        p.close()
+
+    Start tags are handled by calling self.handle_starttag() or
+    self.handle_startendtag(); end tags by self.handle_endtag().  The
+    data between tags is passed from the parser to the derived class
+    by calling self.handle_data() with the data as argument (the data
+    may be split up in arbitrary chunks).  Entity references are
+    passed by calling self.handle_entityref() with the entity
+    reference as the argument.  Numeric character references are
+    passed to self.handle_charref() with the string containing the
+    reference as the argument.
+    """
+
+    CDATA_CONTENT_ELEMENTS = ("script", "style")
+
+
+    def __init__(self):
+        """Initialize and reset this instance."""
+        self.reset()
+
+    def reset(self):
+        """Reset this instance.  Loses all unprocessed data."""
+        self.rawdata = ''
+        self.lasttag = '???'
+        self.interesting = interesting_normal
+        markupbase.ParserBase.reset(self)
+
+    def feed(self, data):
+        """Feed data to the parser.
+
+        Call this as often as you want, with as little or as much text
+        as you want (may include '\n').
+        """
+        self.rawdata = self.rawdata + data
+        self.goahead(0)
+
+    def close(self):
+        """Handle any buffered data."""
+        self.goahead(1)
+
+    def error(self, message):
+        raise HTMLParseError(message, self.getpos())
+
+    __starttag_text = None
+
+    def get_starttag_text(self):
+        """Return full source of start tag: '<...>'."""
+        return self.__starttag_text
+
+    def set_cdata_mode(self):
+        self.interesting = interesting_cdata
+
+    def clear_cdata_mode(self):
+        self.interesting = interesting_normal
+
+    # Internal -- handle data as far as reasonable.  May leave state
+    # and data to be processed by a subsequent call.  If 'end' is
+    # true, force handling all data as if followed by EOF marker.
+    def goahead(self, end):
+        rawdata = self.rawdata
+        i = 0
+        n = len(rawdata)
+        while i < n:
+            match = self.interesting.search(rawdata, i) # < or &
+            if match:
+                j = match.start()
+            else:
+                j = n
+            if i < j: self.handle_data(rawdata[i:j])
+            i = self.updatepos(i, j)
+            if i == n: break
+            startswith = rawdata.startswith
+            if startswith('<', i):
+                if starttagopen.match(rawdata, i): # < + letter
+                    k = self.parse_starttag(i)
+                elif startswith("</", i):
+                    k = self.parse_endtag(i)
+                    if k >= 0:
+                        self.clear_cdata_mode()
+                elif startswith("<!--", i):
+                    k = self.parse_comment(i)
+                elif startswith("<?", i):
+                    k = self.parse_pi(i)
+                elif startswith("<!", i):
+                    k = self.parse_declaration(i)
+                elif (i + 1) < n:
+                    self.handle_data("<")
+                    k = i + 1
+                else:
+                    break
+                if k < 0:
+                    if end:
+                        self.error("EOF in middle of construct")
+                    break
+                i = self.updatepos(i, k)
+            elif startswith("&#", i):
+                match = charref.match(rawdata, i)
+                if match:
+                    name = match.group()[2:-1]
+                    self.handle_charref(name)
+                    k = match.end()
+                    if not startswith(';', k-1):
+                        k = k - 1
+                    i = self.updatepos(i, k)
+                    continue
+                else:
+                    break
+            elif startswith('&', i):
+                match = entityref.match(rawdata, i)
+                if match:
+                    name = match.group(1)
+                    self.handle_entityref(name)
+                    k = match.end()
+                    if not startswith(';', k-1):
+                        k = k - 1
+                    i = self.updatepos(i, k)
+                    continue
+                match = incomplete.match(rawdata, i)
+                if match:
+                    # match.group() will contain at least 2 chars
+                    if end and match.group() == rawdata[i:]:
+                        self.error("EOF in middle of entity or char ref")
+                    # incomplete
+                    break
+                elif (i + 1) < n:
+                    # not the end of the buffer, and can't be confused
+                    # with some other construct
+                    self.handle_data("&")
+                    i = self.updatepos(i, i + 1)
+                else:
+                    break
+            else:
+                assert 0, "interesting.search() lied"
+        # end while
+        if end and i < n:
+            self.handle_data(rawdata[i:n])
+            i = self.updatepos(i, n)
+        self.rawdata = rawdata[i:]
+
+    # Internal -- parse comment, return end or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        assert rawdata[i:i+4] == '<!--', 'unexpected call to parse_comment()'
+        match = commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start()
+            self.handle_comment(rawdata[i+4: j])
+        j = match.end()
+        return j
+
+    # Internal -- parse processing instr, return end or -1 if not terminated
+    def parse_pi(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
+        match = piclose.search(rawdata, i+2) # >
+        if not match:
+            return -1
+        j = match.start()
+        self.handle_pi(rawdata[i+2: j])
+        j = match.end()
+        return j
+
+    # Internal -- handle starttag, return end or -1 if not terminated
+    def parse_starttag(self, i):
+        self.__starttag_text = None
+        endpos = self.check_for_whole_start_tag(i)
+        if endpos < 0:
+            return endpos
+        rawdata = self.rawdata
+        self.__starttag_text = rawdata[i:endpos]
+
+        # Now parse the data between i+1 and j into a tag and attrs
+        attrs = []
+        match = tagfind.match(rawdata, i+1)
+        assert match, 'unexpected call to parse_starttag()'
+        k = match.end()
+        self.lasttag = tag = rawdata[i+1:k].lower()
+
+        while k < endpos:
+            m = attrfind.match(rawdata, k)
+            if not m:
+                break
+            attrname, rest, attrvalue = m.group(1, 2, 3)
+            if not rest:
+                attrvalue = None
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+                attrvalue = self.unescape(attrvalue)
+            attrs.append((attrname.lower(), attrvalue))
+            k = m.end()
+
+        end = rawdata[k:endpos].strip()
+        if end not in (">", "/>"):
+            lineno, offset = self.getpos()
+            if "\n" in self.__starttag_text:
+                lineno = lineno + self.__starttag_text.count("\n")
+                offset = len(self.__starttag_text) \
+                         - self.__starttag_text.rfind("\n")
+            else:
+                offset = offset + len(self.__starttag_text)
+            self.error("junk characters in start tag: %s"
+                       % `rawdata[k:endpos][:20]`)
+        if end.endswith('/>'):
+            # XHTML-style empty tag: <span attr="value" />
+            self.handle_startendtag(tag, attrs)
+        else:
+            self.handle_starttag(tag, attrs)
+            if tag in self.CDATA_CONTENT_ELEMENTS:
+                self.set_cdata_mode()
+        return endpos
+
+    # Internal -- check to see if we have a complete starttag; return end
+    # or -1 if incomplete.
+    def check_for_whole_start_tag(self, i):
+        rawdata = self.rawdata
+        m = locatestarttagend.match(rawdata, i)
+        if m:
+            j = m.end()
+            next = rawdata[j:j+1]
+            if next == ">":
+                return j + 1
+            if next == "/":
+                if rawdata.startswith("/>", j):
+                    return j + 2
+                if rawdata.startswith("/", j):
+                    # buffer boundary
+                    return -1
+                # else bogus input
+                self.updatepos(i, j + 1)
+                self.error("malformed empty start tag")
+            if next == "":
+                # end of input
+                return -1
+            if next in ("abcdefghijklmnopqrstuvwxyz=/"
+                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
+                # end of input in or before attribute value, or we have the
+                # '/' from a '/>' ending
+                return -1
+            self.updatepos(i, j)
+            self.error("malformed start tag")
+        raise AssertionError("we should not get here!")
+
+    # Internal -- parse endtag, return end or -1 if incomplete
+    def parse_endtag(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
+        match = endendtag.search(rawdata, i+1) # >
+        if not match:
+            return -1
+        j = match.end()
+        match = endtagfind.match(rawdata, i) # </ + tag + >
+        if not match:
+            self.error("bad end tag: %s" % `rawdata[i:j]`)
+        tag = match.group(1)
+        self.handle_endtag(tag.lower())
+        return j
+
+    # Overridable -- finish processing of start+end tag: <tag.../>
+    def handle_startendtag(self, tag, attrs):
+        self.handle_starttag(tag, attrs)
+        self.handle_endtag(tag)
+
+    # Overridable -- handle start tag
+    def handle_starttag(self, tag, attrs):
+        pass
+
+    # Overridable -- handle end tag
+    def handle_endtag(self, tag):
+        pass
+
+    # Overridable -- handle character reference
+    def handle_charref(self, name):
+        pass
+
+    # Overridable -- handle entity reference
+    def handle_entityref(self, name):
+        pass
+
+    # Overridable -- handle data
+    def handle_data(self, data):
+        pass
+
+    # Overridable -- handle comment
+    def handle_comment(self, data):
+        pass
+
+    # Overridable -- handle declaration
+    def handle_decl(self, decl):
+        pass
+
+    # Overridable -- handle processing instruction
+    def handle_pi(self, data):
+        pass
+
+    def unknown_decl(self, data):
+        self.error("unknown declaration: " + `data`)
+
+    # Internal -- helper to remove special character quoting
+    def unescape(self, s):
+        if '&' not in s:
+            return s
+        s = s.replace("&lt;", "<")
+        s = s.replace("&gt;", ">")
+        s = s.replace("&apos;", "'")
+        s = s.replace("&quot;", '"')
+        s = s.replace("&amp;", "&") # Must be last
+        return s
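+
+
+# A minimal usage sketch (illustrative only; TitleParser is not part of
+# this module).  It collects the text inside a <title> element:
+#
+#   class TitleParser(HTMLParser):
+#       def __init__(self):
+#           HTMLParser.__init__(self)
+#           self.in_title = 0
+#           self.title = ''
+#       def handle_starttag(self, tag, attrs):
+#           if tag == 'title':
+#               self.in_title = 1
+#       def handle_endtag(self, tag):
+#           if tag == 'title':
+#               self.in_title = 0
+#       def handle_data(self, data):
+#           if self.in_title:
+#               self.title = self.title + data
+#
+#   p = TitleParser()
+#   p.feed('<html><head><title>Hello</title></head></html>')
+#   p.close()
+#   print p.title             # -> Hello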
diff --git a/lib-python/2.2/MimeWriter.py b/lib-python/2.2/MimeWriter.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/MimeWriter.py
@@ -0,0 +1,181 @@
+"""Generic MIME writer.
+
+This module defines the class MimeWriter.  The MimeWriter class implements
+a basic formatter for creating MIME multi-part files.  It doesn't seek around
+the output file nor does it use large amounts of buffer space. You must write
+the parts out in the order that they should occur in the final file.
+MimeWriter does buffer the headers you add, allowing you to rearrange their
+order.
+
+"""
+
+
+import mimetools
+
+__all__ = ["MimeWriter"]
+
+class MimeWriter:
+
+    """Generic MIME writer.
+
+    Methods:
+
+    __init__()
+    addheader()
+    flushheaders()
+    startbody()
+    startmultipartbody()
+    nextpart()
+    lastpart()
+
+    A MIME writer is much more primitive than a MIME parser.  It
+    doesn't seek around on the output file, and it doesn't use large
+    amounts of buffer space, so you have to write the parts in the
+    order they should occur on the output file.  It does buffer the
+    headers you add, allowing you to rearrange their order.
+
+    General usage is:
+
+    f = <open the output file>
+    w = MimeWriter(f)
+    ...call w.addheader(key, value) 0 or more times...
+
+    followed by either:
+
+    f = w.startbody(content_type)
+    ...call f.write(data) for body data...
+
+    or:
+
+    w.startmultipartbody(subtype)
+    for each part:
+        subwriter = w.nextpart()
+        ...use the subwriter's methods to create the subpart...
+    w.lastpart()
+
+    The subwriter is another MimeWriter instance, and should be
+    treated in the same way as the toplevel MimeWriter.  This way,
+    writing recursive body parts is easy.
+
+    Warning: don't forget to call lastpart()!
+
+    XXX There should be more state so calls made in the wrong order
+    are detected.
+
+    Some special cases:
+
+    - startbody() just returns the file passed to the constructor;
+      but don't use this knowledge, as it may be changed.
+
+    - startmultipartbody() actually returns a file as well;
+      this can be used to write the initial 'if you can read this your
+      mailer is not MIME-aware' message.
+
+    - If you call flushheaders(), the headers accumulated so far are
+      written out (and forgotten); this is useful if you don't need a
+      body part at all, e.g. for a subpart of type message/rfc822
+      that's (mis)used to store some header-like information.
+
+    - Passing a keyword argument 'prefix=<flag>' to addheader(),
+      start*body() affects where the header is inserted; 0 means
+      append at the end, 1 means insert at the start; default is
+      append for addheader(), but insert for start*body(), which use
+      it to determine where the Content-Type header goes.
+
+    """
+
+    def __init__(self, fp):
+        self._fp = fp
+        self._headers = []
+
+    def addheader(self, key, value, prefix=0):
+        """Add a header line to the MIME message.
+
+        The key is the name of the header, where the value obviously provides
+        the value of the header. The optional argument prefix determines
+        where the header is inserted; 0 means append at the end, 1 means
+        insert at the start. The default is to append.
+
+        """
+        lines = value.split("\n")
+        while lines and not lines[-1]: del lines[-1]
+        while lines and not lines[0]: del lines[0]
+        for i in range(1, len(lines)):
+            lines[i] = "    " + lines[i].strip()
+        value = "\n".join(lines) + "\n"
+        line = key + ": " + value
+        if prefix:
+            self._headers.insert(0, line)
+        else:
+            self._headers.append(line)
+
+    def flushheaders(self):
+        """Writes out and forgets all headers accumulated so far.
+
+        This is useful if you don't need a body part at all; for example,
+        for a subpart of type message/rfc822 that's (mis)used to store some
+        header-like information.
+
+        """
+        self._fp.writelines(self._headers)
+        self._headers = []
+
+    def startbody(self, ctype, plist=[], prefix=1):
+        """Returns a file-like object for writing the body of the message.
+
+        The content-type is set to the provided ctype, and the optional
+        parameter, plist, provides additional parameters for the
+        content-type declaration.  The optional argument prefix determines
+        where the header is inserted; 0 means append at the end, 1 means
+        insert at the start. The default is to insert at the start.
+
+        """
+        for name, value in plist:
+            ctype = ctype + ';\n %s=\"%s\"' % (name, value)
+        self.addheader("Content-Type", ctype, prefix=prefix)
+        self.flushheaders()
+        self._fp.write("\n")
+        return self._fp
+
+    def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
+        """Returns a file-like object for writing the body of the message.
+
+        Additionally, this method initializes the multi-part code, where the
+        subtype parameter provides the multipart subtype, the boundary
+        parameter may provide a user-defined boundary specification, and the
+        plist parameter provides optional parameters for the subtype.  The
+        optional argument, prefix, determines where the header is inserted;
+        0 means append at the end, 1 means insert at the start. The default
+        is to insert at the start.  Subparts should be created using the
+        nextpart() method.
+
+        """
+        self._boundary = boundary or mimetools.choose_boundary()
+        return self.startbody("multipart/" + subtype,
+                              [("boundary", self._boundary)] + plist,
+                              prefix=prefix)
+
+    def nextpart(self):
+        """Returns a new instance of MimeWriter which represents an
+        individual part in a multipart message.
+
+        This may be used to write the part, as well as for creating
+        recursively complex multipart messages. The message must first be
+        initialized with the startmultipartbody() method before using the
+        nextpart() method.
+
+        """
+        self._fp.write("\n--" + self._boundary + "\n")
+        return self.__class__(self._fp)
+
+    def lastpart(self):
+        """This is used to designate the last part of a multipart message.
+
+        It should always be used when writing multipart messages.
+
+        """
+        self._fp.write("\n--" + self._boundary + "--\n")
+
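+# A concrete sketch of the usage pattern described in the class docstring
+# (the destination and content here are illustrative only):
+#
+#   import sys
+#   w = MimeWriter(sys.stdout)
+#   w.addheader("MIME-Version", "1.0")
+#   w.startmultipartbody("mixed")
+#   part = w.nextpart()
+#   body = part.startbody("text/plain")
+#   body.write("Hello, world\n")
+#   w.lastpart()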
+
+if __name__ == '__main__':
+    import test.test_MimeWriter
diff --git a/lib-python/2.2/Queue.py b/lib-python/2.2/Queue.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/Queue.py
@@ -0,0 +1,151 @@
+"""A multi-producer, multi-consumer queue."""
+
+class Empty(Exception):
+    "Exception raised by Queue.get(block=0)/get_nowait()."
+    pass
+
+class Full(Exception):
+    "Exception raised by Queue.put(block=0)/put_nowait()."
+    pass
+
+class Queue:
+    def __init__(self, maxsize=0):
+        """Initialize a queue object with a given maximum size.
+
+        If maxsize is <= 0, the queue size is infinite.
+        """
+        import thread
+        self._init(maxsize)
+        self.mutex = thread.allocate_lock()
+        self.esema = thread.allocate_lock()
+        self.esema.acquire()
+        self.fsema = thread.allocate_lock()
+
+    def qsize(self):
+        """Return the approximate size of the queue (not reliable!)."""
+        self.mutex.acquire()
+        n = self._qsize()
+        self.mutex.release()
+        return n
+
+    def empty(self):
+        """Return 1 if the queue is empty, 0 otherwise (not reliable!)."""
+        self.mutex.acquire()
+        n = self._empty()
+        self.mutex.release()
+        return n
+
+    def full(self):
+        """Return 1 if the queue is full, 0 otherwise (not reliable!)."""
+        self.mutex.acquire()
+        n = self._full()
+        self.mutex.release()
+        return n
+
+    def put(self, item, block=1):
+        """Put an item into the queue.
+
+        If optional arg 'block' is 1 (the default), block if
+        necessary until a free slot is available.  Otherwise (block
+        is 0), put an item on the queue if a free slot is immediately
+        available, else raise the Full exception.
+        """
+        if block:
+            self.fsema.acquire()
+        elif not self.fsema.acquire(0):
+            raise Full
+        self.mutex.acquire()
+        release_fsema = True
+        try:
+            was_empty = self._empty()
+            self._put(item)
+            # If we fail before here, the empty state has
+            # not changed, so we can skip the release of esema
+            if was_empty:
+                self.esema.release()
+            # If we fail before here, the queue can not be full, so
+            # release_fsema remains True
+            release_fsema = not self._full()
+        finally:
+            # Catching system level exceptions here (RecursionDepth,
+            # OutOfMemory, etc) - so do as little as possible in terms
+            # of Python calls.
+            if release_fsema:
+                self.fsema.release()
+            self.mutex.release()
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        Only enqueue the item if a free slot is immediately available.
+        Otherwise raise the Full exception.
+        """
+        return self.put(item, 0)
+
+    def get(self, block=1):
+        """Remove and return an item from the queue.
+
+        If optional arg 'block' is 1 (the default), block if
+        necessary until an item is available.  Otherwise (block is 0),
+        return an item if one is immediately available, else raise the
+        Empty exception.
+        """
+        if block:
+            self.esema.acquire()
+        elif not self.esema.acquire(0):
+            raise Empty
+        self.mutex.acquire()
+        release_esema = True
+        try:
+            was_full = self._full()
+            item = self._get()
+            # If we fail before here, the full state has
+            # not changed, so we can skip the release of fsema
+            if was_full:
+                self.fsema.release()
+            # Failure means empty state also unchanged - release_esema
+            # remains True.
+            release_esema = not self._empty()
+        finally:
+            if release_esema:
+                self.esema.release()
+            self.mutex.release()
+        return item
+
+    def get_nowait(self):
+        """Remove and return an item from the queue without blocking.
+
+        Only get an item if one is immediately available.  Otherwise
+        raise the Empty exception.
+        """
+        return self.get(0)
+
+    # Override these methods to implement other queue organizations
+    # (e.g. stack or priority queue).
+    # These will only be called with appropriate locks held
+
+    # Initialize the queue representation
+    def _init(self, maxsize):
+        self.maxsize = maxsize
+        self.queue = []
+
+    def _qsize(self):
+        return len(self.queue)
+
+    # Check whether the queue is empty
+    def _empty(self):
+        return not self.queue
+
+    # Check whether the queue is full
+    def _full(self):
+        return self.maxsize > 0 and len(self.queue) == self.maxsize
+
+    # Put a new item in the queue
+    def _put(self, item):
+        self.queue.append(item)
+
+    # Get an item from the queue
+    def _get(self):
+        item = self.queue[0]
+        del self.queue[0]
+        return item
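+
+# A short usage sketch (single-threaded, illustrative only):
+#
+#   q = Queue(2)              # hold at most two items
+#   q.put("a")
+#   q.put("b")
+#   q.put_nowait("c")         # raises Full: the queue is at capacity
+#   q.get()                   # -> 'a' (items come back in FIFO order)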
diff --git a/lib-python/2.2/SimpleHTTPServer.py b/lib-python/2.2/SimpleHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SimpleHTTPServer.py
@@ -0,0 +1,198 @@
+"""Simple HTTP Server.
+
+This module builds on BaseHTTPServer by implementing the standard GET
+and HEAD requests in a fairly straightforward manner.
+
+"""
+
+
+__version__ = "0.6"
+
+__all__ = ["SimpleHTTPRequestHandler"]
+
+import os
+import posixpath
+import BaseHTTPServer
+import urllib
+import cgi
+import shutil
+import mimetypes
+from StringIO import StringIO
+
+
+class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+    """Simple HTTP request handler with GET and HEAD commands.
+
+    This serves files from the current directory and any of its
+    subdirectories.  It assumes that all files are plain text files
+    unless they have the extension ".html" in which case it assumes
+    they are HTML files.
+
+    The GET and HEAD requests are identical except that the HEAD
+    request omits the actual contents of the file.
+
+    """
+
+    server_version = "SimpleHTTP/" + __version__
+
+    def do_GET(self):
+        """Serve a GET request."""
+        f = self.send_head()
+        if f:
+            self.copyfile(f, self.wfile)
+            f.close()
+
+    def do_HEAD(self):
+        """Serve a HEAD request."""
+        f = self.send_head()
+        if f:
+            f.close()
+
+    def send_head(self):
+        """Common code for GET and HEAD commands.
+
+        This sends the response code and MIME headers.
+
+        Return value is either a file object (which has to be copied
+        to the outputfile by the caller unless the command was HEAD,
+        and must be closed by the caller under all circumstances), or
+        None, in which case the caller has nothing further to do.
+
+        """
+        path = self.translate_path(self.path)
+        f = None
+        if os.path.isdir(path):
+            for index in "index.html", "index.htm":
+                index = os.path.join(path, index)
+                if os.path.exists(index):
+                    path = index
+                    break
+            else:
+                return self.list_directory(path)
+        ctype = self.guess_type(path)
+        if ctype.startswith('text/'):
+            mode = 'r'
+        else:
+            mode = 'rb'
+        try:
+            f = open(path, mode)
+        except IOError:
+            self.send_error(404, "File not found")
+            return None
+        self.send_response(200)
+        self.send_header("Content-type", ctype)
+        self.end_headers()
+        return f
+
+    def list_directory(self, path):
+        """Helper to produce a directory listing (absent index.html).
+
+        Return value is either a file object, or None (indicating an
+        error).  In either case, the headers are sent, making the
+        interface the same as for send_head().
+
+        """
+        try:
+            list = os.listdir(path)
+        except os.error:
+            self.send_error(404, "No permission to list directory")
+            return None
+        list.sort(lambda a, b: cmp(a.lower(), b.lower()))
+        f = StringIO()
+        f.write("<title>Directory listing for %s</title>\n" % self.path)
+        f.write("<h2>Directory listing for %s</h2>\n" % self.path)
+        f.write("<hr>\n<ul>\n")
+        for name in list:
+            fullname = os.path.join(path, name)
+            displayname = linkname = name = cgi.escape(name)
+            # Append / for directories or @ for symbolic links
+            if os.path.isdir(fullname):
+                displayname = name + "/"
+                linkname = name + "/"
+            if os.path.islink(fullname):
+                displayname = name + "@"
+                # Note: a link to a directory displays with @ and links with /
+            f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
+        f.write("</ul>\n<hr>\n")
+        f.seek(0)
+        self.send_response(200)
+        self.send_header("Content-type", "text/html")
+        self.end_headers()
+        return f
+
+    def translate_path(self, path):
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        path = posixpath.normpath(urllib.unquote(path))
+        words = path.split('/')
+        words = filter(None, words)
+        path = os.getcwd()
+        for word in words:
+            drive, word = os.path.splitdrive(word)
+            head, word = os.path.split(word)
+            if word in (os.curdir, os.pardir): continue
+            path = os.path.join(path, word)
+        return path
+
+    def copyfile(self, source, outputfile):
+        """Copy all data between two file objects.
+
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
+
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+        to copy binary data as well.
+
+        """
+        shutil.copyfileobj(source, outputfile)
+
+    def guess_type(self, path):
+        """Guess the type of a file.
+
+        Argument is a PATH (a filename).
+
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
+
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using text/plain
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
+
+        """
+
+        base, ext = posixpath.splitext(path)
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        ext = ext.lower()
+        if self.extensions_map.has_key(ext):
+            return self.extensions_map[ext]
+        else:
+            return self.extensions_map['']
+
+    extensions_map = mimetypes.types_map.copy()
+    extensions_map.update({
+        '': 'application/octet-stream', # Default
+        '.py': 'text/plain',
+        '.c': 'text/plain',
+        '.h': 'text/plain',
+        })
+
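+# Serving an extra extension is a matter of copying and extending the map
+# in a subclass (a sketch; '.log' is an arbitrary example):
+#
+#   class MyHandler(SimpleHTTPRequestHandler):
+#       extensions_map = SimpleHTTPRequestHandler.extensions_map.copy()
+#       extensions_map['.log'] = 'text/plain'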
+
+def test(HandlerClass = SimpleHTTPRequestHandler,
+         ServerClass = BaseHTTPServer.HTTPServer):
+    BaseHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/SimpleXMLRPCServer.py b/lib-python/2.2/SimpleXMLRPCServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SimpleXMLRPCServer.py
@@ -0,0 +1,274 @@
+"""Simple XML-RPC Server.
+
+This module can be used to create simple XML-RPC servers
+by creating a server and either installing functions, a
+class instance, or by extending the SimpleXMLRPCRequestHandler
+class.
+
+A list of possible usage patterns follows:
+
+1. Install functions:
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_function(pow)
+server.register_function(lambda x,y: x+y, 'add')
+server.serve_forever()
+
+2. Install an instance:
+
+class MyFuncs:
+    def __init__(self):
+        # make all of the string functions available through
+        # string.func_name
+        import string
+        self.string = string
+    def pow(self, x, y): return pow(x, y)
+    def add(self, x, y) : return x + y
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_instance(MyFuncs())
+server.serve_forever()
+
+3. Install an instance with custom dispatch method:
+
+class Math:
+    def _dispatch(self, method, params):
+        if method == 'pow':
+            return apply(pow, params)
+        elif method == 'add':
+            return params[0] + params[1]
+        else:
+            raise 'bad method'
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_instance(Math())
+server.serve_forever()
+
+4. Subclass SimpleXMLRPCRequestHandler:
+
+class MathHandler(SimpleXMLRPCRequestHandler):
+    def _dispatch(self, method, params):
+        try:
+            # We are forcing the 'export_' prefix on methods that are
+            # callable through XML-RPC to prevent potential security
+            # problems
+            func = getattr(self, 'export_' + method)
+        except AttributeError:
+            raise Exception('method "%s" is not supported' % method)
+        else:
+            return apply(func, params)
+
+    def log_message(self, format, *args):
+        pass # maybe do something fancy like write the messages to a file
+
+    def export_add(self, x, y):
+        return x + y
+
+server = SimpleXMLRPCServer(("localhost", 8000), MathHandler)
+server.serve_forever()
+"""
+
+# Written by Brian Quinlan (brian at sweetapp.com).
+# Based on code written by Fredrik Lundh.
+
+import xmlrpclib
+import SocketServer
+import BaseHTTPServer
+import sys
+
+class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """Simple XML-RPC request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+
+    XML-RPC requests are dispatched to the _dispatch method, which
+    may be overridden by subclasses. The default implementation attempts
+    to dispatch XML-RPC calls to the functions or instance installed
+    in the server.
+    """
+
+    def do_POST(self):
+        """Handles the HTTP POST request.
+
+        Attempts to interpret all HTTP POST requests as XML-RPC calls,
+        which are forwarded to the _dispatch method for handling.
+        """
+
+        try:
+            # get arguments
+            data = self.rfile.read(int(self.headers["content-length"]))
+            params, method = xmlrpclib.loads(data)
+
+            # generate response
+            try:
+                response = self._dispatch(method, params)
+                # wrap response in a singleton tuple
+                response = (response,)
+            except:
+                # report exception back to server
+                response = xmlrpclib.dumps(
+                    xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
+                    )
+            else:
+                response = xmlrpclib.dumps(response, methodresponse=1)
+        except:
+            # internal error, report as HTTP server error
+            self.send_response(500)
+            self.end_headers()
+        else:
+            # got a valid XML RPC response
+            self.send_response(200)
+            self.send_header("Content-type", "text/xml")
+            self.send_header("Content-length", str(len(response)))
+            self.end_headers()
+            self.wfile.write(response)
+
+            # shut down the connection
+            self.wfile.flush()
+            self.connection.shutdown(1)
+
+    def _dispatch(self, method, params):
+        """Dispatches the XML-RPC method.
+
+        XML-RPC calls are forwarded to a registered function that
+        matches the called XML-RPC method name. If no such function
+        exists then the call is forwarded to the registered instance,
+        if available.
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+    its parameters as a tuple,
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called.
+
+        Methods beginning with an '_' are considered private and will
+        not be called by SimpleXMLRPCServer.
+        """
+
+        func = None
+        try:
+            # check to see if a matching function has been registered
+            func = self.server.funcs[method]
+        except KeyError:
+            if self.server.instance is not None:
+                # check for a _dispatch method
+                if hasattr(self.server.instance, '_dispatch'):
+                    return self.server.instance._dispatch(method, params)
+                else:
+                    # call instance method directly
+                    try:
+                        func = _resolve_dotted_attribute(
+                            self.server.instance,
+                            method,
+                            self.allow_dotted_names
+                            )
+                    except AttributeError:
+                        pass
+
+        if func is not None:
+            return apply(func, params)
+        else:
+            raise Exception('method "%s" is not supported' % method)
+
+    def log_request(self, code='-', size='-'):
+        """Selectively log an accepted request."""
+
+        if self.server.logRequests:
+            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
+
+
+def _resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
+    """Resolves a dotted attribute name to an object.  Raises
+    an AttributeError if any attribute in the chain starts with a '_'.
+
+    If the optional allow_dotted_names argument is false, dots are not
+    supported and this function operates similarly to getattr(obj, attr).
+    """
+
+    if allow_dotted_names:
+        attrs = attr.split('.')
+    else:
+        attrs = [attr]
+
+    for i in attrs:
+        if i.startswith('_'):
+            raise AttributeError(
+                'attempt to access private attribute "%s"' % i
+                )
+        else:
+            obj = getattr(obj,i)
+    return obj
+
+
+class SimpleXMLRPCServer(SocketServer.TCPServer):
+    """Simple XML-RPC server.
+
+    Simple XML-RPC server that allows functions and a single instance
+    to be installed to handle requests.
+    """
+
+    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
+                 logRequests=1):
+        self.funcs = {}
+        self.logRequests = logRequests
+        self.instance = None
+        SocketServer.TCPServer.__init__(self, addr, requestHandler)
+
+    def register_instance(self, instance, allow_dotted_names=False):
+        """Registers an instance to respond to XML-RPC requests.
+
+        Only one instance can be installed at a time.
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+        its parameters as a tuple,
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called.
+
+        Methods beginning with an '_' are considered private and will
+        not be called by SimpleXMLRPCServer.
+
+        If a registered function matches an XML-RPC request, then it
+        will be called instead of the registered instance.
+
+        If the optional allow_dotted_names argument is true and the
+        instance does not have a _dispatch method, method names
+        containing dots are supported and resolved, as long as none of
+        the name segments start with an '_'.
+
+            *** SECURITY WARNING: ***
+
+            Enabling the allow_dotted_names option allows intruders
+            to access your module's global variables and may allow
+            intruders to execute arbitrary code on your machine.  Only
+            use this option on a secure, closed network.
+
+        """
+
+        self.instance = instance
+        self.allow_dotted_names = allow_dotted_names
+
+    def register_function(self, function, name = None):
+        """Registers a function to respond to XML-RPC requests.
+
+        The optional name argument can be used to set a Unicode name
+        for the function.
+
+        If an instance is also registered then it will only be called
+        if a matching function is not found.
+        """
+
+        if name is None:
+            name = function.__name__
+        self.funcs[name] = function
+
+if __name__ == '__main__':
+    server = SimpleXMLRPCServer(("localhost", 8000))
+    server.register_function(pow)
+    server.register_function(lambda x,y: x+y, 'add')
+    server.serve_forever()
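(As a quick sanity check of the module above, a minimal client sketch
follows; it assumes the demo server from the __main__ block is already
running, with the same arbitrary host and port used there.)

    import xmlrpclib

    # Talk to the demo server that registered pow and 'add' above.
    proxy = xmlrpclib.ServerProxy("http://localhost:8000")
    print proxy.pow(2, 10)   # -> 1024, via the registered built-in pow
    print proxy.add(3, 4)    # -> 7, via the registered lambda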
diff --git a/lib-python/2.2/SocketServer.py b/lib-python/2.2/SocketServer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/SocketServer.py
@@ -0,0 +1,576 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+        - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+        - AF_UNIX: Unix domain sockets
+        - others, e.g. AF_DECNET, are conceivable (see <socket.h>)
+- socket type:
+        - SOCK_STREAM (reliable stream, e.g. TCP)
+        - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+        (This is actually a hook for any processing that needs to look
+         at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+        - synchronous (one request is handled at a time)
+        - forking (each request is handled by a new process)
+        - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server.  This is bad class design, but
+it saves some typing.  (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+        +------------+
+        | BaseServer |
+        +------------+
+              |
+              v
+        +-----------+        +------------------+
+        | TCPServer |------->| UnixStreamServer |
+        +-----------+        +------------------+
+              |
+              v
+        +-----------+        +--------------------+
+        | UDPServer |------->| UnixDatagramServer |
+        +-----------+        +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingServer and ThreadingServer mix-in classes.  For
+instance, a threading UDP server class is created as follows:
+
+        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer!
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method.  You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services.  This can be hidden by using the mix-in request handler
+classes StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child).  In this case,
+you can use a threading server, but you will probably have to use
+locks to keep two requests that arrive nearly simultaneously from applying
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested.  Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data.  This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use select() to
+decide which request to work on next (or whether to handle a new
+incoming request).  This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+  and encryption schemes
+- Standard framework for select-based multiplexing
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+  Copyright (C) 2000  Luke Kenneth Casson Leighton <lkcl at samba.org>
+
+  example: read entries from a SQL database (requires overriding
+  get_request() to return a table entry from the database).
+  entry is processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+# XXX Warning!
+# There is a test suite for this module, but it cannot be run by the
+# standard regression test.
+# To run it manually, run Lib/test/test_socketserver.py.
+
+__version__ = "0.4"
+
+
+import socket
+import sys
+import os
+
+__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
+           "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
+           "StreamRequestHandler","DatagramRequestHandler",
+           "ThreadingMixIn", "ForkingMixIn"]
+if hasattr(socket, "AF_UNIX"):
+    __all__.extend(["UnixStreamServer","UnixDatagramServer",
+                    "ThreadingUnixStreamServer",
+                    "ThreadingUnixDatagramServer"])
+
+class BaseServer:
+
+    """Base class for server classes.
+
+    Methods for the caller:
+
+    - __init__(server_address, RequestHandlerClass)
+    - serve_forever()
+    - handle_request()  # if you do not use serve_forever()
+    - fileno() -> int   # for select()
+
+    Methods that may be overridden:
+
+    - server_bind()
+    - server_activate()
+    - get_request() -> request, client_address
+    - verify_request(request, client_address)
+    - server_close()
+    - process_request(request, client_address)
+    - close_request(request)
+    - handle_error()
+
+    Methods for derived classes:
+
+    - finish_request(request, client_address)
+
+    Class variables that may be overridden by derived classes or
+    instances:
+
+    - address_family
+    - socket_type
+    - allow_reuse_address
+
+    Instance variables:
+
+    - RequestHandlerClass
+    - socket
+
+    """
+
+    def __init__(self, server_address, RequestHandlerClass):
+        """Constructor.  May be extended, do not override."""
+        self.server_address = server_address
+        self.RequestHandlerClass = RequestHandlerClass
+
+    def server_activate(self):
+        """Called by constructor to activate the server.
+
+        May be overridden.
+
+        """
+        pass
+
+    def serve_forever(self):
+        """Handle one request at a time until doomsday."""
+        while 1:
+            self.handle_request()
+
+    # The distinction between handling, getting, processing and
+    # finishing a request is fairly arbitrary.  Remember:
+    #
+    # - handle_request() is the top-level call.  It calls
+    #   get_request(), verify_request() and process_request()
+    # - get_request() is different for stream or datagram sockets
+    # - process_request() is the place that may fork a new process
+    #   or create a new thread to finish the request
+    # - finish_request() instantiates the request handler class;
+    #   this constructor will handle the request all by itself
+
+    def handle_request(self):
+        """Handle one request, possibly blocking."""
+        try:
+            request, client_address = self.get_request()
+        except socket.error:
+            return
+        if self.verify_request(request, client_address):
+            try:
+                self.process_request(request, client_address)
+            except:
+                self.handle_error(request, client_address)
+                self.close_request(request)
+
+    def verify_request(self, request, client_address):
+        """Verify the request.  May be overridden.
+
+        Return true if we should proceed with this request.
+
+        """
+        return 1
+
+    def process_request(self, request, client_address):
+        """Call finish_request.
+
+        Overridden by ForkingMixIn and ThreadingMixIn.
+
+        """
+        self.finish_request(request, client_address)
+        self.close_request(request)
+
+    def server_close(self):
+        """Called to clean-up the server.
+
+        May be overridden.
+
+        """
+        pass
+
+    def finish_request(self, request, client_address):
+        """Finish one request by instantiating RequestHandlerClass."""
+        self.RequestHandlerClass(request, client_address, self)
+
+    def close_request(self, request):
+        """Called to clean up an individual request."""
+        pass
+
+    def handle_error(self, request, client_address):
+        """Handle an error gracefully.  May be overridden.
+
+        The default is to print a traceback and continue.
+
+        """
+        print '-'*40
+        print 'Exception happened during processing of request from',
+        print client_address
+        import traceback
+        traceback.print_exc() # XXX But this goes to stderr!
+        print '-'*40
+
+
+class TCPServer(BaseServer):
+
+    """Base class for various socket-based server classes.
+
+    Defaults to synchronous IP stream (i.e., TCP).
+
+    Methods for the caller:
+
+    - __init__(server_address, RequestHandlerClass)
+    - serve_forever()
+    - handle_request()  # if you don't use serve_forever()
+    - fileno() -> int   # for select()
+
+    Methods that may be overridden:
+
+    - server_bind()
+    - server_activate()
+    - get_request() -> request, client_address
+    - verify_request(request, client_address)
+    - process_request(request, client_address)
+    - close_request(request)
+    - handle_error()
+
+    Methods for derived classes:
+
+    - finish_request(request, client_address)
+
+    Class variables that may be overridden by derived classes or
+    instances:
+
+    - address_family
+    - socket_type
+    - request_queue_size (only for stream sockets)
+    - allow_reuse_address
+
+    Instance variables:
+
+    - server_address
+    - RequestHandlerClass
+    - socket
+
+    """
+
+    address_family = socket.AF_INET
+
+    socket_type = socket.SOCK_STREAM
+
+    request_queue_size = 5
+
+    allow_reuse_address = 0
+
+    def __init__(self, server_address, RequestHandlerClass):
+        """Constructor.  May be extended, do not override."""
+        BaseServer.__init__(self, server_address, RequestHandlerClass)
+        self.socket = socket.socket(self.address_family,
+                                    self.socket_type)
+        self.server_bind()
+        self.server_activate()
+
+    def server_bind(self):
+        """Called by constructor to bind the socket.
+
+        May be overridden.
+
+        """
+        if self.allow_reuse_address:
+            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.socket.bind(self.server_address)
+
+    def server_activate(self):
+        """Called by constructor to activate the server.
+
+        May be overridden.
+
+        """
+        self.socket.listen(self.request_queue_size)
+
+    def server_close(self):
+        """Called to clean-up the server.
+
+        May be overridden.
+
+        """
+        self.socket.close()
+
+    def fileno(self):
+        """Return socket file number.
+
+        Interface required by select().
+
+        """
+        return self.socket.fileno()
+
+    def get_request(self):
+        """Get the request and client address from the socket.
+
+        May be overridden.
+
+        """
+        return self.socket.accept()
+
+    def close_request(self, request):
+        """Called to clean up an individual request."""
+        request.close()
+
+
+class UDPServer(TCPServer):
+
+    """UDP server class."""
+
+    allow_reuse_address = 0
+
+    socket_type = socket.SOCK_DGRAM
+
+    max_packet_size = 8192
+
+    def get_request(self):
+        data, client_addr = self.socket.recvfrom(self.max_packet_size)
+        return (data, self.socket), client_addr
+
+    def server_activate(self):
+        # No need to call listen() for UDP.
+        pass
+
+    def close_request(self, request):
+        # No need to close anything.
+        pass
+
+class ForkingMixIn:
+
+    """Mix-in class to handle each request in a new process."""
+
+    active_children = None
+    max_children = 40
+
+    def collect_children(self):
+        """Internal routine to wait for died children."""
+        while self.active_children:
+            if len(self.active_children) < self.max_children:
+                options = os.WNOHANG
+            else:
+                # If the maximum number of children are already
+                # running, block while waiting for a child to exit
+                options = 0
+            try:
+                pid, status = os.waitpid(0, options)
+            except os.error:
+                pid = None
+            if not pid: break
+            self.active_children.remove(pid)
+
+    def process_request(self, request, client_address):
+        """Fork a new subprocess to process the request."""
+        self.collect_children()
+        pid = os.fork()
+        if pid:
+            # Parent process
+            if self.active_children is None:
+                self.active_children = []
+            self.active_children.append(pid)
+            self.close_request(request)
+            return
+        else:
+            # Child process.
+            # This must never return, hence os._exit()!
+            try:
+                self.finish_request(request, client_address)
+                os._exit(0)
+            except:
+                try:
+                    self.handle_error(request, client_address)
+                finally:
+                    os._exit(1)
+
+
+class ThreadingMixIn:
+    """Mix-in class to handle each request in a new thread."""
+
+    def process_request_thread(self, request, client_address):
+        """Same as in BaseServer but as a thread.
+
+        In addition, exception handling is done here.
+
+        """
+        try:
+            self.finish_request(request, client_address)
+            self.close_request(request)
+        except:
+            self.handle_error(request, client_address)
+            self.close_request(request)
+
+    def process_request(self, request, client_address):
+        """Start a new thread to process the request."""
+        import threading
+        t = threading.Thread(target = self.process_request_thread,
+                             args = (request, client_address))
+        t.start()
+
+
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+    class UnixStreamServer(TCPServer):
+        address_family = socket.AF_UNIX
+
+    class UnixDatagramServer(UDPServer):
+        address_family = socket.AF_UNIX
+
+    class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+    class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+    """Base class for request handler classes.
+
+    This class is instantiated for each request to be handled.  The
+    constructor sets the instance variables request, client_address
+    and server, and then calls the handle() method.  To implement a
+    specific service, all you need to do is to derive a class which
+    defines a handle() method.
+
+    The handle() method can find the request as self.request, the
+    client address as self.client_address, and the server (in case it
+    needs access to per-server information) as self.server.  Since a
+    separate instance is created for each request, the handle() method
+    can define other arbitrary instance variables.
+
+    """
+
+    def __init__(self, request, client_address, server):
+        self.request = request
+        self.client_address = client_address
+        self.server = server
+        try:
+            self.setup()
+            self.handle()
+            self.finish()
+        finally:
+            sys.exc_traceback = None    # Help garbage collection
+
+    def setup(self):
+        pass
+
+    def handle(self):
+        pass
+
+    def finish(self):
+        pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly.
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+    """Define self.rfile and self.wfile for stream sockets."""
+
+    # Default buffer sizes for rfile, wfile.
+    # We default rfile to buffered because otherwise it could be
+    # really slow for large data (a getc() call per byte); we make
+    # wfile unbuffered because (a) often after a write() we want to
+    # read and we need to flush the line; (b) big writes to unbuffered
+    # files are typically optimized by stdio even when big reads
+    # aren't.
+    rbufsize = -1
+    wbufsize = 0
+
+    def setup(self):
+        self.connection = self.request
+        self.rfile = self.connection.makefile('rb', self.rbufsize)
+        self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+    def finish(self):
+        self.wfile.flush()
+        self.wfile.close()
+        self.rfile.close()
+
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+    # XXX Regrettably, I cannot get this working on Linux;
+    # s.recvfrom() doesn't return a meaningful client address.
+
+    """Define self.rfile and self.wfile for datagram sockets."""
+
+    def setup(self):
+        import StringIO
+        self.packet, self.socket = self.request
+        self.rfile = StringIO.StringIO(self.packet)
+        self.wfile = StringIO.StringIO()
+
+    def finish(self):
+        self.socket.sendto(self.wfile.getvalue(), self.client_address)
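(To illustrate how these pieces combine, here is a minimal echo-server
sketch; the handler class name and the port are ours, not part of the
module.)

    import SocketServer

    class EchoHandler(SocketServer.StreamRequestHandler):
        def handle(self):
            # rfile/wfile were set up by StreamRequestHandler.setup()
            self.wfile.write(self.rfile.readline())

    # ThreadingMixIn comes first in ThreadingTCPServer, so each
    # request is handled in a new thread.
    server = SocketServer.ThreadingTCPServer(("localhost", 9999), EchoHandler)
    server.serve_forever()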
diff --git a/lib-python/2.2/StringIO.py b/lib-python/2.2/StringIO.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/StringIO.py
@@ -0,0 +1,239 @@
+"""File-like objects that read from or write to a string buffer.
+
+This implements (nearly) all stdio methods.
+
+f = StringIO()      # ready for writing
+f = StringIO(buf)   # ready for reading
+f.close()           # explicitly release resources held
+flag = f.isatty()   # always false
+pos = f.tell()      # get current position
+f.seek(pos)         # set current position
+f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read()      # read until EOF
+buf = f.read(n)     # read up to n bytes
+buf = f.readline()  # read until end of line ('\n') or EOF
+list = f.readlines()# list of f.readline() results until EOF
+f.truncate([size])  # truncate file to at most size (default: current pos)
+f.write(buf)        # write at current position
+f.writelines(list)  # for line in list: f.write(line)
+f.getvalue()        # return whole file's contents as a string
+
+Notes:
+- Using a real file is often faster (but less convenient).
+- There's also a much faster implementation in C, called cStringIO, but
+  it's not subclassable.
+- fileno() is left unimplemented so that code which uses it triggers
+  an exception early.
+- Seeking far beyond EOF and then writing will insert real null
+  bytes that occupy space in the buffer.
+- There's a simple test set (see end of this file).
+"""
+import types
+try:
+    from errno import EINVAL
+except ImportError:
+    EINVAL = 22
+
+__all__ = ["StringIO"]
+
+class StringIO:
+    """class StringIO([buffer])
+
+    When a StringIO object is created, it can be initialized to an existing
+    string by passing the string to the constructor. If no string is given,
+    the StringIO will start empty.
+
+    The StringIO object can accept either Unicode or 8-bit strings, but
+    mixing the two may take some care. If both are used, 8-bit strings that
+    cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
+    a UnicodeError to be raised when getvalue() is called.
+    """
+    def __init__(self, buf = ''):
+        # Force self.buf to be a string or unicode
+        if type(buf) not in types.StringTypes:
+            buf = str(buf)
+        self.buf = buf
+        self.len = len(buf)
+        self.buflist = []
+        self.pos = 0
+        self.closed = 0
+        self.softspace = 0
+
+    def __iter__(self):
+        return iter(self.readline, '')
+
+    def close(self):
+        """Free the memory buffer."""
+        if not self.closed:
+            self.closed = 1
+            del self.buf, self.pos
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return 0
+
+    def seek(self, pos, mode = 0):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        if mode == 1:
+            pos += self.pos
+        elif mode == 2:
+            pos += self.len
+        self.pos = max(0, pos)
+
+    def tell(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return self.pos
+
+    def read(self, n = -1):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        if n < 0:
+            newpos = self.len
+        else:
+            newpos = min(self.pos+n, self.len)
+        r = self.buf[self.pos:newpos]
+        self.pos = newpos
+        return r
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        i = self.buf.find('\n', self.pos)
+        if i < 0:
+            newpos = self.len
+        else:
+            newpos = i+1
+        if length is not None:
+            if self.pos + length < newpos:
+                newpos = self.pos + length
+        r = self.buf[self.pos:newpos]
+        self.pos = newpos
+        return r
+
+    def readlines(self, sizehint = 0):
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
+
+    def truncate(self, size=None):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if size is None:
+            size = self.pos
+        elif size < 0:
+            raise IOError(EINVAL, "Negative size not allowed")
+        elif size < self.pos:
+            self.pos = size
+        self.buf = self.getvalue()[:size]
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if not s: return
+        # Force s to be a string or unicode
+        if type(s) not in types.StringTypes:
+            s = str(s)
+        if self.pos > self.len:
+            self.buflist.append('\0'*(self.pos - self.len))
+            self.len = self.pos
+        newpos = self.pos + len(s)
+        if self.pos < self.len:
+            if self.buflist:
+                self.buf += ''.join(self.buflist)
+                self.buflist = []
+            self.buflist = [self.buf[:self.pos], s, self.buf[newpos:]]
+            self.buf = ''
+            if newpos > self.len:
+                self.len = newpos
+        else:
+            self.buflist.append(s)
+            self.len = newpos
+        self.pos = newpos
+
+    def writelines(self, list):
+        self.write(''.join(list))
+
+    def flush(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+
+    def getvalue(self):
+        """
+        Retrieve the entire contents of the "file" at any time before
+        the StringIO object's close() method is called.
+
+        The StringIO object can accept either Unicode or 8-bit strings,
+        but mixing the two may take some care. If both are used, 8-bit
+        strings that cannot be interpreted as 7-bit ASCII (that use the
+        8th bit) will cause a UnicodeError to be raised when getvalue()
+        is called.
+        """
+        if self.buflist:
+            self.buf += ''.join(self.buflist)
+            self.buflist = []
+        return self.buf
+
+
+# A little test suite
+
+def test():
+    import sys
+    if sys.argv[1:]:
+        file = sys.argv[1]
+    else:
+        file = '/etc/passwd'
+    lines = open(file, 'r').readlines()
+    text = open(file, 'r').read()
+    f = StringIO()
+    for line in lines[:-2]:
+        f.write(line)
+    f.writelines(lines[-2:])
+    if f.getvalue() != text:
+        raise RuntimeError, 'write failed'
+    length = f.tell()
+    print 'File length =', length
+    f.seek(len(lines[0]))
+    f.write(lines[1])
+    f.seek(0)
+    print 'First line =', `f.readline()`
+    here = f.tell()
+    line = f.readline()
+    print 'Second line =', `line`
+    f.seek(-len(line), 1)
+    line2 = f.read(len(line))
+    if line != line2:
+        raise RuntimeError, 'bad result after seek back'
+    f.seek(len(line2), 1)
+    list = f.readlines()
+    line = list[-1]
+    f.seek(f.tell() - len(line))
+    line2 = f.read()
+    if line != line2:
+        raise RuntimeError, 'bad result after seek back from EOF'
+    print 'Read', len(list), 'more lines'
+    print 'File length =', f.tell()
+    if f.tell() != length:
+        raise RuntimeError, 'bad length'
+    f.close()
+
+if __name__ == '__main__':
+    test()
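(A short usage sketch of the class above, showing the seek-past-EOF
behavior mentioned in the notes; the values are ours.)

    from StringIO import StringIO

    f = StringIO()
    f.write("hello")
    f.seek(10)                 # seek well past EOF ...
    f.write("!")               # ... and write: real null bytes are inserted
    print len(f.getvalue())    # -> 11: "hello" + five '\0' bytes + "!"
    f.close()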
diff --git a/lib-python/2.2/TERMIOS.py b/lib-python/2.2/TERMIOS.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/TERMIOS.py
@@ -0,0 +1,14 @@
+"""Backward-compatibility version of TERMIOS; export constants exported by
+termios, and issue a deprecation warning.
+"""
+
+import warnings
+warnings.warn("the TERMIOS module is deprecated; please use termios",
+              DeprecationWarning)
+
+
+# Export the constants known to the termios module:
+from termios import *
+
+# and *only* the constants:
+__all__ = [s for s in dir() if s[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
diff --git a/lib-python/2.2/UserDict.py b/lib-python/2.2/UserDict.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/UserDict.py
@@ -0,0 +1,60 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
+class UserDict:
+    def __init__(self, dict=None):
+        self.data = {}
+        if dict is not None: self.update(dict)
+    def __repr__(self): return repr(self.data)
+    def __cmp__(self, dict):
+        if isinstance(dict, UserDict):
+            return cmp(self.data, dict.data)
+        else:
+            return cmp(self.data, dict)
+    def __len__(self): return len(self.data)
+    def __getitem__(self, key): return self.data[key]
+    def __setitem__(self, key, item): self.data[key] = item
+    def __delitem__(self, key): del self.data[key]
+    def clear(self): self.data.clear()
+    def copy(self):
+        if self.__class__ is UserDict:
+            return UserDict(self.data)
+        import copy
+        data = self.data
+        try:
+            self.data = {}
+            c = copy.copy(self)
+        finally:
+            self.data = data
+        c.update(self)
+        return c
+    def keys(self): return self.data.keys()
+    def items(self): return self.data.items()
+    def iteritems(self): return self.data.iteritems()
+    def iterkeys(self): return self.data.iterkeys()
+    def itervalues(self): return self.data.itervalues()
+    def values(self): return self.data.values()
+    def has_key(self, key): return self.data.has_key(key)
+    def update(self, dict):
+        if isinstance(dict, UserDict):
+            self.data.update(dict.data)
+        elif isinstance(dict, type(self.data)):
+            self.data.update(dict)
+        else:
+            for k, v in dict.items():
+                self[k] = v
+    def get(self, key, failobj=None):
+        if not self.has_key(key):
+            return failobj
+        return self[key]
+    def setdefault(self, key, failobj=None):
+        if not self.has_key(key):
+            self[key] = failobj
+        return self[key]
+    def popitem(self):
+        return self.data.popitem()
+    def __contains__(self, key):
+        return key in self.data
+
+class IterableUserDict(UserDict):
+    def __iter__(self):
+        return iter(self.data)
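(A minimal subclassing sketch for the wrapper above; the class name is
ours.  Note that update() with a plain dict bypasses __setitem__, so a
real case-insensitive dict would need to override update() as well.)

    from UserDict import UserDict

    class LowerCaseDict(UserDict):
        # Store and look up every key in lower case.
        def __setitem__(self, key, item):
            self.data[key.lower()] = item
        def __getitem__(self, key):
            return self.data[key.lower()]

    d = LowerCaseDict()
    d["Spam"] = 1
    print d["SPAM"]    # -> 1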
diff --git a/lib-python/2.2/UserList.py b/lib-python/2.2/UserList.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/UserList.py
@@ -0,0 +1,85 @@
+"""A more or less complete user-defined wrapper around list objects."""
+
+class UserList:
+    def __init__(self, initlist=None):
+        self.data = []
+        if initlist is not None:
+            # XXX should this accept an arbitrary sequence?
+            if type(initlist) == type(self.data):
+                self.data[:] = initlist
+            elif isinstance(initlist, UserList):
+                self.data[:] = initlist.data[:]
+            else:
+                self.data = list(initlist)
+    def __repr__(self): return repr(self.data)
+    def __lt__(self, other): return self.data <  self.__cast(other)
+    def __le__(self, other): return self.data <= self.__cast(other)
+    def __eq__(self, other): return self.data == self.__cast(other)
+    def __ne__(self, other): return self.data != self.__cast(other)
+    def __gt__(self, other): return self.data >  self.__cast(other)
+    def __ge__(self, other): return self.data >= self.__cast(other)
+    def __cast(self, other):
+        if isinstance(other, UserList): return other.data
+        else: return other
+    def __cmp__(self, other):
+        return cmp(self.data, self.__cast(other))
+    def __contains__(self, item): return item in self.data
+    def __len__(self): return len(self.data)
+    def __getitem__(self, i): return self.data[i]
+    def __setitem__(self, i, item): self.data[i] = item
+    def __delitem__(self, i): del self.data[i]
+    def __getslice__(self, i, j):
+        i = max(i, 0); j = max(j, 0)
+        return self.__class__(self.data[i:j])
+    def __setslice__(self, i, j, other):
+        i = max(i, 0); j = max(j, 0)
+        if isinstance(other, UserList):
+            self.data[i:j] = other.data
+        elif isinstance(other, type(self.data)):
+            self.data[i:j] = other
+        else:
+            self.data[i:j] = list(other)
+    def __delslice__(self, i, j):
+        i = max(i, 0); j = max(j, 0)
+        del self.data[i:j]
+    def __add__(self, other):
+        if isinstance(other, UserList):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, type(self.data)):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + list(other))
+    def __radd__(self, other):
+        if isinstance(other, UserList):
+            return self.__class__(other.data + self.data)
+        elif isinstance(other, type(self.data)):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(list(other) + self.data)
+    def __iadd__(self, other):
+        if isinstance(other, UserList):
+            self.data += other.data
+        elif isinstance(other, type(self.data)):
+            self.data += other
+        else:
+            self.data += list(other)
+        return self
+    def __mul__(self, n):
+        return self.__class__(self.data*n)
+    __rmul__ = __mul__
+    def __imul__(self, n):
+        self.data *= n
+        return self
+    def append(self, item): self.data.append(item)
+    def insert(self, i, item): self.data.insert(i, item)
+    def pop(self, i=-1): return self.data.pop(i)
+    def remove(self, item): self.data.remove(item)
+    def count(self, item): return self.data.count(item)
+    def index(self, item): return self.data.index(item)
+    def reverse(self): self.data.reverse()
+    def sort(self, *args): apply(self.data.sort, args)
+    def extend(self, other):
+        if isinstance(other, UserList):
+            self.data.extend(other.data)
+        else:
+            self.data.extend(other)
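(A minimal subclassing sketch; the Stack class is ours.  Because the
wrapper keeps its contents in self.data, a subclass only needs to
manipulate that list.)

    from UserList import UserList

    class Stack(UserList):
        # pop() is inherited from UserList; only push is new.
        def push(self, item):
            self.data.append(item)

    s = Stack([1, 2])
    s.push(3)
    print s.pop()    # -> 3
    print s          # -> [1, 2]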
diff --git a/lib-python/2.2/UserString.py b/lib-python/2.2/UserString.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/UserString.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+## vim:ts=4:et:nowrap
+"""A user-defined wrapper around string objects
+
+Note: string objects have grown methods in Python 1.6.
+This module requires Python 1.6 or later.
+"""
+from types import StringType, UnicodeType
+import sys
+
+__all__ = ["UserString","MutableString"]
+
+class UserString:
+    def __init__(self, seq):
+        if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
+            self.data = seq
+        elif isinstance(seq, UserString):
+            self.data = seq.data[:]
+        else:
+            self.data = str(seq)
+    def __str__(self): return str(self.data)
+    def __repr__(self): return repr(self.data)
+    def __int__(self): return int(self.data)
+    def __long__(self): return long(self.data)
+    def __float__(self): return float(self.data)
+    def __complex__(self): return complex(self.data)
+    def __hash__(self): return hash(self.data)
+
+    def __cmp__(self, string):
+        if isinstance(string, UserString):
+            return cmp(self.data, string.data)
+        else:
+            return cmp(self.data, string)
+    def __contains__(self, char):
+        return char in self.data
+
+    def __len__(self): return len(self.data)
+    def __getitem__(self, index): return self.__class__(self.data[index])
+    def __getslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        return self.__class__(self.data[start:end])
+
+    def __add__(self, other):
+        if isinstance(other, UserString):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, StringType) or isinstance(other, UnicodeType):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + str(other))
+    def __radd__(self, other):
+        if isinstance(other, StringType) or isinstance(other, UnicodeType):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(str(other) + self.data)
+    def __iadd__(self, other):
+        if isinstance(other, UserString):
+            self.data += other.data
+        elif isinstance(other, StringType) or isinstance(other, UnicodeType):
+            self.data += other
+        else:
+            self.data += str(other)
+        return self
+    def __mul__(self, n):
+        return self.__class__(self.data*n)
+    __rmul__ = __mul__
+    def __imul__(self, n):
+        self.data *= n
+        return self
+
+    # the following methods are defined in alphabetical order:
+    def capitalize(self): return self.__class__(self.data.capitalize())
+    def center(self, width): return self.__class__(self.data.center(width))
+    def count(self, sub, start=0, end=sys.maxint):
+        return self.data.count(sub, start, end)
+    def decode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.decode(encoding, errors))
+            else:
+                return self.__class__(self.data.decode(encoding))
+        else:
+            return self.__class__(self.data.decode())
+    def encode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.encode(encoding, errors))
+            else:
+                return self.__class__(self.data.encode(encoding))
+        else:
+            return self.__class__(self.data.encode())
+    def endswith(self, suffix, start=0, end=sys.maxint):
+        return self.data.endswith(suffix, start, end)
+    def expandtabs(self, tabsize=8):
+        return self.__class__(self.data.expandtabs(tabsize))
+    def find(self, sub, start=0, end=sys.maxint):
+        return self.data.find(sub, start, end)
+    def index(self, sub, start=0, end=sys.maxint):
+        return self.data.index(sub, start, end)
+    def isalpha(self): return self.data.isalpha()
+    def isalnum(self): return self.data.isalnum()
+    def isdecimal(self): return self.data.isdecimal()
+    def isdigit(self): return self.data.isdigit()
+    def islower(self): return self.data.islower()
+    def isnumeric(self): return self.data.isnumeric()
+    def isspace(self): return self.data.isspace()
+    def istitle(self): return self.data.istitle()
+    def isupper(self): return self.data.isupper()
+    def join(self, seq): return self.data.join(seq)
+    def ljust(self, width): return self.__class__(self.data.ljust(width))
+    def lower(self): return self.__class__(self.data.lower())
+    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+    def replace(self, old, new, maxsplit=-1):
+        return self.__class__(self.data.replace(old, new, maxsplit))
+    def rfind(self, sub, start=0, end=sys.maxint):
+        return self.data.rfind(sub, start, end)
+    def rindex(self, sub, start=0, end=sys.maxint):
+        return self.data.rindex(sub, start, end)
+    def rjust(self, width): return self.__class__(self.data.rjust(width))
+    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+    def split(self, sep=None, maxsplit=-1):
+        return self.data.split(sep, maxsplit)
+    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+    def startswith(self, prefix, start=0, end=sys.maxint):
+        return self.data.startswith(prefix, start, end)
+    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+    def swapcase(self): return self.__class__(self.data.swapcase())
+    def title(self): return self.__class__(self.data.title())
+    def translate(self, *args):
+        return self.__class__(self.data.translate(*args))
+    def upper(self): return self.__class__(self.data.upper())
+    def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+class MutableString(UserString):
+    """mutable string objects
+
+    Python strings are immutable objects.  This has the advantage that
+    strings may be used as dictionary keys.  If this property isn't needed
+    and you insist on changing string values in place instead, you may cheat
+    and use MutableString.
+
+    But the purpose of this class is an educational one: to prevent
+    people from inventing their own mutable string class derived
+    from UserString and then forgetting to remove (override) the
+    __hash__ method inherited from UserString.  This would lead to
+    errors that would be very hard to track down.
+
+    A faster and better solution is to rewrite your program using lists."""
+    def __init__(self, string=""):
+        self.data = string
+    def __hash__(self):
+        raise TypeError, "unhashable type (it is mutable)"
+    def __setitem__(self, index, sub):
+        if index < 0 or index >= len(self.data): raise IndexError
+        self.data = self.data[:index] + sub + self.data[index+1:]
+    def __delitem__(self, index):
+        if index < 0 or index >= len(self.data): raise IndexError
+        self.data = self.data[:index] + self.data[index+1:]
+    def __setslice__(self, start, end, sub):
+        start = max(start, 0); end = max(end, 0)
+        if isinstance(sub, UserString):
+            self.data = self.data[:start]+sub.data+self.data[end:]
+        elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
+            self.data = self.data[:start]+sub+self.data[end:]
+        else:
+            self.data =  self.data[:start]+str(sub)+self.data[end:]
+    def __delslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        self.data = self.data[:start] + self.data[end:]
+    def immutable(self):
+        return UserString(self.data)
+
+if __name__ == "__main__":
+    # execute the regression test to stdout, if called as a script:
+    import os
+    called_in_dir, called_as = os.path.split(sys.argv[0])
+    called_in_dir = os.path.abspath(called_in_dir)
+    called_as, py = os.path.splitext(called_as)
+    sys.path.append(os.path.join(called_in_dir, 'test'))
+    if '-q' in sys.argv:
+        import test_support
+        test_support.verbose = 0
+    __import__('test_' + called_as.lower())
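(A short usage sketch for MutableString; the values are ours.)

    from UserString import MutableString

    s = MutableString("spam")
    s[0] = "S"         # item assignment, unlike a real str
    s[1:4] = "PAM"     # slice assignment via __setslice__
    print s            # -> SPAM
    try:
        hash(s)
    except TypeError:
        print "unhashable, as the class intends"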
diff --git a/lib-python/2.2/__future__.py b/lib-python/2.2/__future__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/__future__.py
@@ -0,0 +1,104 @@
+"""Record of phased-in incompatible language changes.
+
+Each line is of the form:
+
+    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
+                              CompilerFlag ")"
+
+where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
+of the same form as sys.version_info:
+
+    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
+     PY_MINOR_VERSION, # the 1; an int
+     PY_MICRO_VERSION, # the 0; an int
+     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
+     PY_RELEASE_SERIAL # the 3; an int
+    )
+
+OptionalRelease records the first release in which
+
+    from __future__ import FeatureName
+
+was accepted.
+
+In the case of MandatoryReleases that have not yet occurred,
+MandatoryRelease predicts the release in which the feature will become part
+of the language.
+
+Else MandatoryRelease records when the feature became part of the language;
+in releases at or after that, modules no longer need
+
+    from __future__ import FeatureName
+
+to use the feature in question, but may continue to use such imports.
+
+MandatoryRelease may also be None, meaning that a planned feature got
+dropped.
+
+Instances of class _Feature have two corresponding methods,
+.getOptionalRelease() and .getMandatoryRelease().
+
+CompilerFlag is the (bitfield) flag that should be passed in the fourth
+argument to the builtin function compile() to enable the feature in
+dynamically compiled code.  This flag is stored in the .compiler_flag
+attribute on _Feature instances.  These values must match the appropriate
+#defines of CO_xxx flags in Include/compile.h.
+
+No feature line is ever to be deleted from this file.
+"""
+
+all_feature_names = [
+    "nested_scopes",
+    "generators",
+    "division",
+]
+
+__all__ = ["all_feature_names"] + all_feature_names
+
+# The CO_xxx symbols are defined here under the same names used by
+# compile.h, so that an editor search will find them here.  However,
+# they're not exported in __all__, because they don't really belong to
+# this module.
+CO_NESTED            = 0x0010   # nested_scopes
+CO_GENERATOR_ALLOWED = 0x1000   # generators
+CO_FUTURE_DIVISION   = 0x2000   # division
+
+class _Feature:
+    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
+        self.optional = optionalRelease
+        self.mandatory = mandatoryRelease
+        self.compiler_flag = compiler_flag
+
+    def getOptionalRelease(self):
+        """Return first release in which this feature was recognized.
+
+        This is a 5-tuple, of the same form as sys.version_info.
+        """
+
+        return self.optional
+
+    def getMandatoryRelease(self):
+        """Return release in which this feature will become mandatory.
+
+        This is a 5-tuple, of the same form as sys.version_info, or, if
+        the feature was dropped, is None.
+        """
+
+        return self.mandatory
+
+    def __repr__(self):
+        return "_Feature" + repr((self.optional,
+                                  self.mandatory,
+                                  self.compiler_flag))
+
+nested_scopes = _Feature((2, 1, 0, "beta",  1),
+                         (2, 2, 0, "alpha", 0),
+                         CO_NESTED)
+
+generators = _Feature((2, 2, 0, "alpha", 1),
+                      (2, 3, 0, "final", 0),
+                      CO_GENERATOR_ALLOWED)
+
+division = _Feature((2, 2, 0, "alpha", 2),
+                    (3, 0, 0, "alpha", 0),
+                    CO_FUTURE_DIVISION)
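(To show how the compiler_flag attribute is meant to be used as the
fourth argument to compile(), a small sketch follows; the snippet and
filename are ours.)

    import __future__

    # Compile code with true division enabled, as if the source had
    # said "from __future__ import division" itself.
    code = compile("print 1 / 2", "<example>", "exec",
                   __future__.division.compiler_flag)
    exec code    # -> 0.5 rather than 0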
diff --git a/lib-python/2.2/__phello__.foo.py b/lib-python/2.2/__phello__.foo.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/__phello__.foo.py
@@ -0,0 +1,1 @@
+# This file exists as a helper for the test.test_frozen module.
diff --git a/lib-python/2.2/aifc.py b/lib-python/2.2/aifc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/aifc.py
@@ -0,0 +1,961 @@
+"""Stuff to parse AIFF-C and AIFF files.
+
+Unless explicitly stated otherwise, the description below is true
+both for AIFF-C files and AIFF files.
+
+An AIFF-C file has the following structure.
+
+  +-----------------+
+  | FORM            |
+  +-----------------+
+  | <size>          |
+  +----+------------+
+  |    | AIFC       |
+  |    +------------+
+  |    | <chunks>   |
+  |    |    .       |
+  |    |    .       |
+  |    |    .       |
+  +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data.  The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+  FVER
+      <version number of AIFF-C defining document> (AIFF-C only).
+  MARK
+      <# of markers> (2 bytes)
+      list of markers:
+          <marker ID> (2 bytes, must be > 0)
+          <position> (4 bytes)
+          <marker name> ("pstring")
+  COMM
+      <# of channels> (2 bytes)
+      <# of sound frames> (4 bytes)
+      <size of the samples> (2 bytes)
+      <sampling frequency> (10 bytes, IEEE 80-bit extended
+          floating point)
+      in AIFF-C files only:
+      <compression type> (4 bytes)
+      <human-readable version of compression type> ("pstring")
+  SSND
+      <offset> (4 bytes, not used by this program)
+      <blocksize> (4 bytes, not used by this program)
+      <sound data>
+
+A pstring consists of a 1-byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+  f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+
+This returns an instance of a class with the following public methods:
+  getnchannels()  -- returns number of audio channels (1 for
+             mono, 2 for stereo)
+  getsampwidth()  -- returns sample width in bytes
+  getframerate()  -- returns sampling frequency
+  getnframes()    -- returns number of audio frames
+  getcomptype()   -- returns compression type ('NONE' for AIFF files)
+  getcompname()   -- returns human-readable version of
+             compression type ('not compressed' for AIFF files)
+  getparams() -- returns a tuple consisting of all of the
+             above in the above order
+  getmarkers()    -- get the list of marks in the audio file or None
+             if there are no marks
+  getmark(id) -- get mark with the specified id (raises an error
+             if the mark does not exist)
+  readframes(n)   -- returns at most n frames of audio
+  rewind()    -- rewind to the beginning of the audio stream
+  setpos(pos) -- seek to the specified position
+  tell()      -- return the current position
+  close()     -- close the instance (make it unusable)
+The position returned by tell(), the position given to setpos() and
+the position of marks are all compatible and have nothing to do with
+the actual position in the file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing AIFF files:
+  f = aifc.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+  aiff()      -- create an AIFF file (AIFF-C default)
+  aifc()      -- create an AIFF-C file
+  setnchannels(n) -- set the number of channels
+  setsampwidth(n) -- set the sample width
+  setframerate(n) -- set the frame rate
+  setnframes(n)   -- set the number of frames
+  setcomptype(type, name)
+          -- set the compression type and the
+             human-readable compression type
+  setparams(tuple)
+          -- set all parameters at once
+  setmark(id, pos, name)
+          -- add specified mark to the list of marks
+  tell()      -- return current position in output file (useful
+             in combination with setmark())
+  writeframesraw(data)
+          -- write audio frames without patching up the
+             file header
+  writeframes(data)
+          -- write audio frames and patch up the file header
+  close()     -- patch up the file header and close the
+             output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+Marks can be added anytime.  If there are any marks, you must call
+close() after all frames have been written.
+The close() method is called automatically when the class instance
+is destroyed.
+
+When a file is opened with the extension '.aiff', an AIFF file is
+written, otherwise an AIFF-C file is written.  This default can be
+changed by calling aiff() or aifc() before the first writeframes or
+writeframesraw.
+"""
+
+import struct
+import __builtin__
+
+__all__ = ["Error","open","openfp"]
+
+class Error(Exception):
+    pass
+
+_AIFC_version = 0xA2805140      # Version 1 of AIFF-C
+
+_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
+      'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
+
+def _read_long(file):
+    try:
+        return struct.unpack('>l', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_ulong(file):
+    try:
+        return struct.unpack('>L', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_short(file):
+    try:
+        return struct.unpack('>h', file.read(2))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_string(file):
+    length = ord(file.read(1))
+    if length == 0:
+        data = ''
+    else:
+        data = file.read(length)
+    if length & 1 == 0:
+        dummy = file.read(1)
+    return data
+
+_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
+
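+# _read_float() and _write_float() convert between Python floats and
+# the 80-bit IEEE 754 extended-precision format that AIFF uses for the
+# sampling rate: a sign bit, a 15-bit exponent biased by 16383, and a
+# 64-bit mantissa with an explicit integer bit.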
+def _read_float(f): # 10 bytes
+    import math
+    expon = _read_short(f) # 2 bytes
+    sign = 1
+    if expon < 0:
+        sign = -1
+        expon = expon + 0x8000
+    himant = _read_ulong(f) # 4 bytes
+    lomant = _read_ulong(f) # 4 bytes
+    if expon == himant == lomant == 0:
+        f = 0.0
+    elif expon == 0x7FFF:
+        f = _HUGE_VAL
+    else:
+        expon = expon - 16383
+        f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
+    return sign * f
+
+def _write_short(f, x):
+    f.write(struct.pack('>h', x))
+
+def _write_long(f, x):
+    f.write(struct.pack('>L', x))
+
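+# AIFF strings are Pascal-style: a length byte followed by the data,
+# plus a pad byte when needed to keep the total length even.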
+def _write_string(f, s):
+    f.write(chr(len(s)))
+    f.write(s)
+    if len(s) & 1 == 0:
+        f.write(chr(0))
+
+def _write_float(f, x):
+    import math
+    if x < 0:
+        sign = 0x8000
+        x = x * -1
+    else:
+        sign = 0
+    if x == 0:
+        expon = 0
+        himant = 0
+        lomant = 0
+    else:
+        fmant, expon = math.frexp(x)
+        if expon > 16384 or fmant >= 1:     # Infinity or NaN
+            expon = sign|0x7FFF
+            himant = 0
+            lomant = 0
+        else:                   # Finite
+            expon = expon + 16382
+            if expon < 0:           # denormalized
+                fmant = math.ldexp(fmant, expon)
+                expon = 0
+            expon = expon | sign
+            fmant = math.ldexp(fmant, 32)
+            fsmant = math.floor(fmant)
+            himant = long(fsmant)
+            fmant = math.ldexp(fmant - fsmant, 32)
+            fsmant = math.floor(fmant)
+            lomant = long(fsmant)
+    _write_short(f, expon)
+    _write_long(f, himant)
+    _write_long(f, lomant)
+
+from chunk import Chunk
+
+class Aifc_read:
+    # Variables used in this class:
+    #
+    # These variables are available to the user through appropriate
+    # methods of this class:
+    # _file -- the open file with methods read(), close(), and seek()
+    #       set through the __init__() method
+    # _nchannels -- the number of audio channels
+    #       available through the getnchannels() method
+    # _nframes -- the number of audio frames
+    #       available through the getnframes() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       available through the getsampwidth() method
+    # _framerate -- the sampling frequency
+    #       available through the getframerate() method
+    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+    #       available through the getcomptype() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       available through the getcompname() method
+    # _markers -- the marks in the audio file
+    #       available through the getmarkers() and getmark()
+    #       methods
+    # _soundpos -- the position in the audio stream
+    #       available through the tell() method, set through the
+    #       setpos() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _decomp -- the decompressor from builtin module cl
+    # _comm_chunk_read -- 1 iff the COMM chunk has been read
+    # _aifc -- 1 iff reading an AIFF-C file
+    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
+    #       file for readframes()
+    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
+    # _framesize -- size of one frame in the file
+
+    def initfp(self, file):
+        self._version = 0
+        self._decomp = None
+        self._convert = None
+        self._markers = []
+        self._soundpos = 0
+        self._ssnd_chunk = None  # so the SSND-missing check below works
+        self._file = Chunk(file)
+        if self._file.getname() != 'FORM':
+            raise Error, 'file does not start with FORM id'
+        formdata = self._file.read(4)
+        if formdata == 'AIFF':
+            self._aifc = 0
+        elif formdata == 'AIFC':
+            self._aifc = 1
+        else:
+            raise Error, 'not an AIFF or AIFF-C file'
+        self._comm_chunk_read = 0
+        while 1:
+            self._ssnd_seek_needed = 1
+            try:
+                chunk = Chunk(self._file)
+            except EOFError:
+                break
+            chunkname = chunk.getname()
+            if chunkname == 'COMM':
+                self._read_comm_chunk(chunk)
+                self._comm_chunk_read = 1
+            elif chunkname == 'SSND':
+                self._ssnd_chunk = chunk
+                dummy = chunk.read(8)
+                self._ssnd_seek_needed = 0
+            elif chunkname == 'FVER':
+                self._version = _read_long(chunk)
+            elif chunkname == 'MARK':
+                self._readmark(chunk)
+            elif chunkname in _skiplist:
+                pass
+            else:
+                raise Error, 'unrecognized chunk type '+chunk.chunkname
+            chunk.skip()
+        if not self._comm_chunk_read or not self._ssnd_chunk:
+            raise Error, 'COMM chunk and/or SSND chunk missing'
+        if self._aifc and self._decomp:
+            import cl
+            params = [cl.ORIGINAL_FORMAT, 0,
+                  cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+                  cl.FRAME_RATE, self._framerate]
+            if self._nchannels == 1:
+                params[1] = cl.MONO
+            elif self._nchannels == 2:
+                params[1] = cl.STEREO_INTERLEAVED
+            else:
+                raise Error, 'cannot compress more than 2 channels'
+            self._decomp.SetParams(params)
+
+    def __init__(self, f):
+        if type(f) == type(''):
+            f = __builtin__.open(f, 'rb')
+        # else, assume it is an open file object already
+        self.initfp(f)
+
+    #
+    # User visible methods.
+    #
+    def getfp(self):
+        return self._file
+
+    def rewind(self):
+        self._ssnd_seek_needed = 1
+        self._soundpos = 0
+
+    def close(self):
+        if self._decomp:
+            self._decomp.CloseDecompressor()
+            self._decomp = None
+        self._file = None
+
+    def tell(self):
+        return self._soundpos
+
+    def getnchannels(self):
+        return self._nchannels
+
+    def getnframes(self):
+        return self._nframes
+
+    def getsampwidth(self):
+        return self._sampwidth
+
+    def getframerate(self):
+        return self._framerate
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def getversion(self):
+##      return self._version
+
+    def getparams(self):
+        return self.getnchannels(), self.getsampwidth(), \
+              self.getframerate(), self.getnframes(), \
+              self.getcomptype(), self.getcompname()
+
+    def getmarkers(self):
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def getmark(self, id):
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error, 'marker ' + `id` + ' does not exist'
+
+    def setpos(self, pos):
+        if pos < 0 or pos > self._nframes:
+            raise Error, 'position not in range'
+        self._soundpos = pos
+        self._ssnd_seek_needed = 1
+
+    def readframes(self, nframes):
+        if self._ssnd_seek_needed:
+            self._ssnd_chunk.seek(0)
+            dummy = self._ssnd_chunk.read(8)
+            pos = self._soundpos * self._framesize
+            if pos:
+                self._ssnd_chunk.seek(pos + 8)
+            self._ssnd_seek_needed = 0
+        if nframes == 0:
+            return ''
+        data = self._ssnd_chunk.read(nframes * self._framesize)
+        if self._convert and data:
+            data = self._convert(data)
+        self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
+        return data
+
+    #
+    # Internal methods.
+    #
+
+    def _decomp_data(self, data):
+        import cl
+        dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
+                          len(data) * 2)
+        return self._decomp.Decompress(len(data) / self._nchannels,
+                           data)
+
+    def _ulaw2lin(self, data):
+        import audioop
+        return audioop.ulaw2lin(data, 2)
+
+    def _adpcm2lin(self, data):
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            # first time
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.adpcm2lin(data, 2,
+                               self._adpcmstate)
+        return data
+
+    def _read_comm_chunk(self, chunk):
+        self._nchannels = _read_short(chunk)
+        self._nframes = _read_long(chunk)
+        self._sampwidth = (_read_short(chunk) + 7) / 8
+        self._framerate = int(_read_float(chunk))
+        self._framesize = self._nchannels * self._sampwidth
+        if self._aifc:
+            #DEBUG: SGI's soundeditor produces a bad size :-(
+            kludge = 0
+            if chunk.chunksize == 18:
+                kludge = 1
+                print 'Warning: bad COMM chunk size'
+                chunk.chunksize = 23
+            #DEBUG end
+            self._comptype = chunk.read(4)
+            #DEBUG start
+            if kludge:
+                length = ord(chunk.file.read(1))
+                if length & 1 == 0:
+                    length = length + 1
+                chunk.chunksize = chunk.chunksize + length
+                chunk.file.seek(-1, 1)
+            #DEBUG end
+            self._compname = _read_string(chunk)
+            if self._comptype != 'NONE':
+                if self._comptype == 'G722':
+                    try:
+                        import audioop
+                    except ImportError:
+                        pass
+                    else:
+                        self._convert = self._adpcm2lin
+                        self._framesize = self._framesize / 4
+                        return
+                # for ULAW and ALAW try Compression Library
+                try:
+                    import cl
+                except ImportError:
+                    if self._comptype == 'ULAW':
+                        try:
+                            import audioop
+                            self._convert = self._ulaw2lin
+                            self._framesize = self._framesize / 2
+                            return
+                        except ImportError:
+                            pass
+                    raise Error, 'cannot read compressed AIFF-C files'
+                if self._comptype == 'ULAW':
+                    scheme = cl.G711_ULAW
+                    self._framesize = self._framesize / 2
+                elif self._comptype == 'ALAW':
+                    scheme = cl.G711_ALAW
+                    self._framesize = self._framesize / 2
+                else:
+                    raise Error, 'unsupported compression type'
+                self._decomp = cl.OpenDecompressor(scheme)
+                self._convert = self._decomp_data
+        else:
+            self._comptype = 'NONE'
+            self._compname = 'not compressed'
+
+    def _readmark(self, chunk):
+        nmarkers = _read_short(chunk)
+        # Some files appear to contain invalid counts.
+        # Cope with this by testing for EOF.
+        try:
+            for i in range(nmarkers):
+                id = _read_short(chunk)
+                pos = _read_long(chunk)
+                name = _read_string(chunk)
+                if pos or name:
+                    # some files appear to have
+                    # dummy markers consisting of
+                    # a position 0 and name ''
+                    self._markers.append((id, pos, name))
+        except EOFError:
+            print 'Warning: MARK chunk contains only',
+            print len(self._markers),
+            if len(self._markers) == 1: print 'marker',
+            else: print 'markers',
+            print 'instead of', nmarkers
+
+class Aifc_write:
+    # Variables used in this class:
+    #
+    # These variables are user settable through appropriate methods
+    # of this class:
+    # _file -- the open file with methods write(), close(), tell(), seek()
+    #       set through the __init__() method
+    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+    #       set through the setcomptype() or setparams() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       set through the setcomptype() or setparams() method
+    # _nchannels -- the number of audio channels
+    #       set through the setnchannels() or setparams() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       set through the setsampwidth() or setparams() method
+    # _framerate -- the sampling frequency
+    #       set through the setframerate() or setparams() method
+    # _nframes -- the number of audio frames written to the header
+    #       set through the setnframes() or setparams() method
+    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
+    #       set through the aifc() method, reset through the
+    #       aiff() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _comp -- the compressor from builtin module cl
+    # _nframeswritten -- the number of audio frames actually written
+    # _datalength -- the size of the audio samples written to the header
+    # _datawritten -- the size of the audio samples actually written
+
+    def __init__(self, f):
+        if type(f) == type(''):
+            filename = f
+            f = __builtin__.open(f, 'wb')
+        else:
+            # else, assume it is an open file object already
+            filename = '???'
+        self.initfp(f)
+        if filename[-5:] == '.aiff':
+            self._aifc = 0
+        else:
+            self._aifc = 1
+
+    def initfp(self, file):
+        self._file = file
+        self._version = _AIFC_version
+        self._comptype = 'NONE'
+        self._compname = 'not compressed'
+        self._comp = None
+        self._convert = None
+        self._nchannels = 0
+        self._sampwidth = 0
+        self._framerate = 0
+        self._nframes = 0
+        self._nframeswritten = 0
+        self._datawritten = 0
+        self._datalength = 0
+        self._markers = []
+        self._marklength = 0
+        self._aifc = 1      # AIFF-C is default
+
+    def __del__(self):
+        if self._file:
+            self.close()
+
+    #
+    # User visible methods.
+    #
+    def aiff(self):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._aifc = 0
+
+    def aifc(self):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._aifc = 1
+
+    def setnchannels(self, nchannels):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if nchannels < 1:
+            raise Error, 'bad # of channels'
+        self._nchannels = nchannels
+
+    def getnchannels(self):
+        if not self._nchannels:
+            raise Error, 'number of channels not set'
+        return self._nchannels
+
+    def setsampwidth(self, sampwidth):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if sampwidth < 1 or sampwidth > 4:
+            raise Error, 'bad sample width'
+        self._sampwidth = sampwidth
+
+    def getsampwidth(self):
+        if not self._sampwidth:
+            raise Error, 'sample width not set'
+        return self._sampwidth
+
+    def setframerate(self, framerate):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if framerate <= 0:
+            raise Error, 'bad frame rate'
+        self._framerate = framerate
+
+    def getframerate(self):
+        if not self._framerate:
+            raise Error, 'frame rate not set'
+        return self._framerate
+
+    def setnframes(self, nframes):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        self._nframes = nframes
+
+    def getnframes(self):
+        return self._nframeswritten
+
+    def setcomptype(self, comptype, compname):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+            raise Error, 'unsupported compression type'
+        self._comptype = comptype
+        self._compname = compname
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def setversion(self, version):
+##      if self._nframeswritten:
+##          raise Error, 'cannot change parameters after starting to write'
+##      self._version = version
+
+    def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
+        if self._nframeswritten:
+            raise Error, 'cannot change parameters after starting to write'
+        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+            raise Error, 'unsupported compression type'
+        self.setnchannels(nchannels)
+        self.setsampwidth(sampwidth)
+        self.setframerate(framerate)
+        self.setnframes(nframes)
+        self.setcomptype(comptype, compname)
+
+    def getparams(self):
+        if not self._nchannels or not self._sampwidth or not self._framerate:
+            raise Error, 'not all parameters set'
+        return self._nchannels, self._sampwidth, self._framerate, \
+              self._nframes, self._comptype, self._compname
+
+    def setmark(self, id, pos, name):
+        if id <= 0:
+            raise Error, 'marker ID must be > 0'
+        if pos < 0:
+            raise Error, 'marker position must be >= 0'
+        if type(name) != type(''):
+            raise Error, 'marker name must be a string'
+        for i in range(len(self._markers)):
+            if id == self._markers[i][0]:
+                self._markers[i] = id, pos, name
+                return
+        self._markers.append((id, pos, name))
+
+    def getmark(self, id):
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error, 'marker ' + `id` + ' does not exist'
+
+    def getmarkers(self):
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def tell(self):
+        return self._nframeswritten
+
+    def writeframesraw(self, data):
+        self._ensure_header_written(len(data))
+        nframes = len(data) / (self._sampwidth * self._nchannels)
+        if self._convert:
+            data = self._convert(data)
+        self._file.write(data)
+        self._nframeswritten = self._nframeswritten + nframes
+        self._datawritten = self._datawritten + len(data)
+
+    def writeframes(self, data):
+        self.writeframesraw(data)
+        if self._nframeswritten != self._nframes or \
+              self._datalength != self._datawritten:
+            self._patchheader()
+
+    def close(self):
+        self._ensure_header_written(0)
+        if self._datawritten & 1:
+            # quick pad to even size
+            self._file.write(chr(0))
+            self._datawritten = self._datawritten + 1
+        self._writemarkers()
+        if self._nframeswritten != self._nframes or \
+              self._datalength != self._datawritten or \
+              self._marklength:
+            self._patchheader()
+        if self._comp:
+            self._comp.CloseCompressor()
+            self._comp = None
+        self._file.flush()
+        self._file = None
+
+    #
+    # Internal methods.
+    #
+
+    def _comp_data(self, data):
+        import cl
+        dum = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
+        dum = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
+        return self._comp.Compress(self._nframes, data)
+
+    def _lin2ulaw(self, data):
+        import audioop
+        return audioop.lin2ulaw(data, 2)
+
+    def _lin2adpcm(self, data):
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.lin2adpcm(data, 2,
+                               self._adpcmstate)
+        return data
+
+    def _ensure_header_written(self, datasize):
+        if not self._nframeswritten:
+            if self._comptype in ('ULAW', 'ALAW'):
+                if not self._sampwidth:
+                    self._sampwidth = 2
+                if self._sampwidth != 2:
+                    raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
+            if self._comptype == 'G722':
+                if not self._sampwidth:
+                    self._sampwidth = 2
+                if self._sampwidth != 2:
+                    raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
+            if not self._nchannels:
+                raise Error, '# channels not specified'
+            if not self._sampwidth:
+                raise Error, 'sample width not specified'
+            if not self._framerate:
+                raise Error, 'sampling rate not specified'
+            self._write_header(datasize)
+
+    def _init_compression(self):
+        if self._comptype == 'G722':
+            import audioop
+            self._convert = self._lin2adpcm
+            return
+        try:
+            import cl
+        except ImportError:
+            if self._comptype == 'ULAW':
+                try:
+                    import audioop
+                    self._convert = self._lin2ulaw
+                    return
+                except ImportError:
+                    pass
+            raise Error, 'cannot write compressed AIFF-C files'
+        if self._comptype == 'ULAW':
+            scheme = cl.G711_ULAW
+        elif self._comptype == 'ALAW':
+            scheme = cl.G711_ALAW
+        else:
+            raise Error, 'unsupported compression type'
+        self._comp = cl.OpenCompressor(scheme)
+        params = [cl.ORIGINAL_FORMAT, 0,
+              cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+              cl.FRAME_RATE, self._framerate,
+              cl.FRAME_BUFFER_SIZE, 100,
+              cl.COMPRESSED_BUFFER_SIZE, 100]
+        if self._nchannels == 1:
+            params[1] = cl.MONO
+        elif self._nchannels == 2:
+            params[1] = cl.STEREO_INTERLEAVED
+        else:
+            raise Error, 'cannot compress more than 2 channels'
+        self._comp.SetParams(params)
+        # the compressor produces a header which we ignore
+        dummy = self._comp.Compress(0, '')
+        self._convert = self._comp_data
+
+    def _write_header(self, initlength):
+        if self._aifc and self._comptype != 'NONE':
+            self._init_compression()
+        self._file.write('FORM')
+        if not self._nframes:
+            self._nframes = initlength / (self._nchannels * self._sampwidth)
+        self._datalength = self._nframes * self._nchannels * self._sampwidth
+        if self._datalength & 1:
+            self._datalength = self._datalength + 1
+        if self._aifc:
+            if self._comptype in ('ULAW', 'ALAW'):
+                self._datalength = self._datalength / 2
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+            elif self._comptype == 'G722':
+                self._datalength = (self._datalength + 3) / 4
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+        self._form_length_pos = self._file.tell()
+        commlength = self._write_form_length(self._datalength)
+        if self._aifc:
+            self._file.write('AIFC')
+            self._file.write('FVER')
+            _write_long(self._file, 4)
+            _write_long(self._file, self._version)
+        else:
+            self._file.write('AIFF')
+        self._file.write('COMM')
+        _write_long(self._file, commlength)
+        _write_short(self._file, self._nchannels)
+        self._nframes_pos = self._file.tell()
+        _write_long(self._file, self._nframes)
+        _write_short(self._file, self._sampwidth * 8)
+        _write_float(self._file, self._framerate)
+        if self._aifc:
+            self._file.write(self._comptype)
+            _write_string(self._file, self._compname)
+        self._file.write('SSND')
+        self._ssnd_length_pos = self._file.tell()
+        _write_long(self._file, self._datalength + 8)
+        _write_long(self._file, 0)
+        _write_long(self._file, 0)
+
+    def _write_form_length(self, datalength):
+        if self._aifc:
+            commlength = 18 + 5 + len(self._compname)
+            if commlength & 1:
+                commlength = commlength + 1
+            verslength = 12
+        else:
+            commlength = 18
+            verslength = 0
+        _write_long(self._file, 4 + verslength + self._marklength + \
+                    8 + commlength + 16 + datalength)
+        return commlength
+
+    def _patchheader(self):
+        curpos = self._file.tell()
+        if self._datawritten & 1:
+            datalength = self._datawritten + 1
+            self._file.write(chr(0))
+        else:
+            datalength = self._datawritten
+        if datalength == self._datalength and \
+              self._nframes == self._nframeswritten and \
+              self._marklength == 0:
+            self._file.seek(curpos, 0)
+            return
+        self._file.seek(self._form_length_pos, 0)
+        dummy = self._write_form_length(datalength)
+        self._file.seek(self._nframes_pos, 0)
+        _write_long(self._file, self._nframeswritten)
+        self._file.seek(self._ssnd_length_pos, 0)
+        _write_long(self._file, datalength + 8)
+        self._file.seek(curpos, 0)
+        self._nframes = self._nframeswritten
+        self._datalength = datalength
+
+    def _writemarkers(self):
+        if len(self._markers) == 0:
+            return
+        self._file.write('MARK')
+        length = 2
+        for marker in self._markers:
+            id, pos, name = marker
+            length = length + len(name) + 1 + 6
+            if len(name) & 1 == 0:
+                length = length + 1
+        _write_long(self._file, length)
+        self._marklength = length + 8
+        _write_short(self._file, len(self._markers))
+        for marker in self._markers:
+            id, pos, name = marker
+            _write_short(self._file, id)
+            _write_long(self._file, pos)
+            _write_string(self._file, name)
+
+def open(f, mode=None):
+    if mode is None:
+        if hasattr(f, 'mode'):
+            mode = f.mode
+        else:
+            mode = 'rb'
+    if mode in ('r', 'rb'):
+        return Aifc_read(f)
+    elif mode in ('w', 'wb'):
+        return Aifc_write(f)
+    else:
+        raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
+
+openfp = open # backward compatibility
+
+if __name__ == '__main__':
+    import sys
+    if not sys.argv[1:]:
+        sys.argv.append('/usr/demos/data/audio/bach.aiff')
+    fn = sys.argv[1]
+    f = open(fn, 'r')
+    print "Reading", fn
+    print "nchannels =", f.getnchannels()
+    print "nframes   =", f.getnframes()
+    print "sampwidth =", f.getsampwidth()
+    print "framerate =", f.getframerate()
+    print "comptype  =", f.getcomptype()
+    print "compname  =", f.getcompname()
+    if sys.argv[2:]:
+        gn = sys.argv[2]
+        print "Writing", gn
+        g = open(gn, 'w')
+        g.setparams(f.getparams())
+        while 1:
+            data = f.readframes(1024)
+            if not data:
+                break
+            g.writeframes(data)
+        g.close()
+        f.close()
+        print "Done."
diff --git a/lib-python/2.2/anydbm.py b/lib-python/2.2/anydbm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/anydbm.py
@@ -0,0 +1,86 @@
+"""Generic interface to all dbm clones.
+
+Instead of
+
+        import dbm
+        d = dbm.open(file, 'w', 0666)
+
+use
+
+        import anydbm
+        d = anydbm.open(file, 'w')
+
+The returned object is a dbhash, gdbm, dbm or dumbdbm object,
+depending on the type of database being opened (determined by the
+whichdb module) in the case of an existing dbm.  If the dbm does not
+exist and the create or new flag ('c' or 'n') was specified, the dbm
+type will be determined by the availability of the modules (tested
+in the above order).
+
+It has the following interface (key and data are strings):
+
+        d[key] = data   # store data at key (may override data at
+                        # existing key)
+        data = d[key]   # retrieve data at key (raise KeyError if no
+                        # such key)
+        del d[key]      # delete data stored at key (raises KeyError
+                        # if no such key)
+        flag = d.has_key(key)   # true if the key exists
+        list = d.keys() # return a list of all existing keys (slow!)
+
+Future versions may change the order in which implementations are
+tested for existence, and may add interfaces to other dbm-like
+implementations.
+
+The open function has an optional second argument.  This can be 'r',
+for read-only access, 'w', for read-write access of an existing
+database, 'c' for read-write access to a new or existing database, and
+'n' for read-write access to a new database.  The default is 'r'.
+
+Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
+only if it doesn't exist; and 'n' always creates a new database.
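+
+For example (a minimal sketch; 'cache.db' is a hypothetical file
+name):
+
+        import anydbm
+        d = anydbm.open('cache.db', 'c')  # create it if necessary
+        d['spam'] = 'eggs'                # keys and values are strings
+        data = d['spam']
+        d.close()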
+
+"""
+
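+# Prefer a class-based exception; on very old interpreters that cannot
+# subclass Exception this falls back to a string exception.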
+try:
+    class error(Exception):
+        pass
+except (NameError, TypeError):
+    error = "anydbm.error"
+
+_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
+_errors = [error]
+_defaultmod = None
+
+for _name in _names:
+    try:
+        _mod = __import__(_name)
+    except ImportError:
+        continue
+    if not _defaultmod:
+        _defaultmod = _mod
+    _errors.append(_mod.error)
+
+if not _defaultmod:
+    raise ImportError, "no dbm clone found; tried %s" % _names
+
+error = tuple(_errors)
+
+def open(file, flag = 'r', mode = 0666):
+    # guess the type of an existing database
+    from whichdb import whichdb
+    result=whichdb(file)
+    if result is None:
+        # db doesn't exist
+        if 'c' in flag or 'n' in flag:
+            # file doesn't exist and the new
+            # flag was used so use default type
+            mod = _defaultmod
+        else:
+            raise error, "need 'c' or 'n' flag to open new db"
+    elif result == "":
+        # db type cannot be determined
+        raise error, "db type could not be determined"
+    else:
+        mod = __import__(result)
+    return mod.open(file, flag, mode)
diff --git a/lib-python/2.2/asynchat.py b/lib-python/2.2/asynchat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/asynchat.py
@@ -0,0 +1,293 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+#       Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc.).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+For example, say you build an async nntp client using this class.  At
+the start of the connection, you'll have self.terminator set to
+'\r\n', in
+order to process the single-line greeting.  Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
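+
+A minimal subclass sketch (the class name and the line handling shown
+are hypothetical):
+
+    class line_channel (async_chat):
+        def __init__ (self, conn=None):
+            async_chat.__init__ (self, conn)
+            self.buffer = ''
+            self.set_terminator ('\r\n')
+
+        def collect_incoming_data (self, data):
+            self.buffer = self.buffer + data
+
+        def found_terminator (self):
+            line, self.buffer = self.buffer, ''
+            print 'line:', line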
+"""
+
+import socket
+import asyncore
+
+class async_chat (asyncore.dispatcher):
+    """This is an abstract class.  You must derive from this class, and add
+    the two methods collect_incoming_data() and found_terminator()"""
+
+    # these are overridable defaults
+
+    ac_in_buffer_size       = 4096
+    ac_out_buffer_size      = 4096
+
+    def __init__ (self, conn=None):
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        self.producer_fifo = fifo()
+        asyncore.dispatcher.__init__ (self, conn)
+
+    def set_terminator (self, term):
+        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
+        self.terminator = term
+
+    def get_terminator (self):
+        return self.terminator
+
+    # grab some more data from the socket,
+    # throw it to the collector method,
+    # check for the terminator,
+    # if found, transition to the next state.
+
+    def handle_read (self):
+
+        try:
+            data = self.recv (self.ac_in_buffer_size)
+        except socket.error, why:
+            self.handle_error()
+            return
+
+        self.ac_in_buffer = self.ac_in_buffer + data
+
+        # Continue to search for self.terminator in self.ac_in_buffer,
+        # while calling self.collect_incoming_data.  The while loop
+        # is necessary because we might read several data+terminator
+        # combos with a single recv(1024).
+
+        while self.ac_in_buffer:
+            lb = len(self.ac_in_buffer)
+            terminator = self.get_terminator()
+            if terminator is None or terminator == '':
+                # no terminator, collect it all
+                self.collect_incoming_data (self.ac_in_buffer)
+                self.ac_in_buffer = ''
+            elif type(terminator) == type(0):
+                # numeric terminator
+                n = terminator
+                if lb < n:
+                    self.collect_incoming_data (self.ac_in_buffer)
+                    self.ac_in_buffer = ''
+                    self.terminator = self.terminator - lb
+                else:
+                    self.collect_incoming_data (self.ac_in_buffer[:n])
+                    self.ac_in_buffer = self.ac_in_buffer[n:]
+                    self.terminator = 0
+                    self.found_terminator()
+            else:
+                # 3 cases:
+                # 1) end of buffer matches terminator exactly:
+                #    collect data, transition
+                # 2) end of buffer matches some prefix:
+                #    collect data to the prefix
+                # 3) end of buffer does not match any prefix:
+                #    collect data
+                terminator_len = len(terminator)
+                index = self.ac_in_buffer.find(terminator)
+                if index != -1:
+                    # we found the terminator
+                    if index > 0:
+                        # don't bother reporting the empty string (source of subtle bugs)
+                        self.collect_incoming_data (self.ac_in_buffer[:index])
+                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+                    # This does the Right Thing if the terminator is changed here.
+                    self.found_terminator()
+                else:
+                    # check for a prefix of the terminator
+                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
+                    if index:
+                        if index != lb:
+                            # we found a prefix, collect up to the prefix
+                            self.collect_incoming_data (self.ac_in_buffer[:-index])
+                            self.ac_in_buffer = self.ac_in_buffer[-index:]
+                        break
+                    else:
+                        # no prefix, collect it all
+                        self.collect_incoming_data (self.ac_in_buffer)
+                        self.ac_in_buffer = ''
+
+    def handle_write (self):
+        self.initiate_send ()
+
+    def handle_close (self):
+        self.close()
+
+    def push (self, data):
+        self.producer_fifo.push (simple_producer (data))
+        self.initiate_send()
+
+    def push_with_producer (self, producer):
+        self.producer_fifo.push (producer)
+        self.initiate_send()
+
+    def readable (self):
+        "predicate for inclusion in the readable for select()"
+        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+
+    def writable (self):
+        "predicate for inclusion in the writable for select()"
+        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+        # this is about twice as fast, though not as clear.
+        return not (
+                (self.ac_out_buffer == '') and
+                self.producer_fifo.is_empty() and
+                self.connected
+                )
+
+    def close_when_done (self):
+        "automatically close this channel once the outgoing queue is empty"
+        self.producer_fifo.push (None)
+
+    # refill the outgoing buffer by calling the more() method
+    # of the first producer in the queue
+    def refill_buffer (self):
+        _string_type = type('')
+        while 1:
+            if len(self.producer_fifo):
+                p = self.producer_fifo.first()
+                # a 'None' in the producer fifo is a sentinel,
+                # telling us to close the channel.
+                if p is None:
+                    if not self.ac_out_buffer:
+                        self.producer_fifo.pop()
+                        self.close()
+                    return
+                elif type(p) is _string_type:
+                    self.producer_fifo.pop()
+                    self.ac_out_buffer = self.ac_out_buffer + p
+                    return
+                data = p.more()
+                if data:
+                    self.ac_out_buffer = self.ac_out_buffer + data
+                    return
+                else:
+                    self.producer_fifo.pop()
+            else:
+                return
+
+    def initiate_send (self):
+        obs = self.ac_out_buffer_size
+        # try to refill the buffer
+        if (len (self.ac_out_buffer) < obs):
+            self.refill_buffer()
+
+        if self.ac_out_buffer and self.connected:
+            # try to send the buffer
+            try:
+                num_sent = self.send (self.ac_out_buffer[:obs])
+                if num_sent:
+                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+
+            except socket.error, why:
+                self.handle_error()
+                return
+
+    def discard_buffers (self):
+        # Emergencies only!
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        while self.producer_fifo:
+            self.producer_fifo.pop()
+
+
+class simple_producer:
+
+    def __init__ (self, data, buffer_size=512):
+        self.data = data
+        self.buffer_size = buffer_size
+
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+
+class fifo:
+    def __init__ (self, list=None):
+        if not list:
+            self.list = []
+        else:
+            self.list = list
+
+    def __len__ (self):
+        return len(self.list)
+
+    def is_empty (self):
+        return self.list == []
+
+    def first (self):
+        return self.list[0]
+
+    def push (self, data):
+        self.list.append (data)
+
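+    # pop() returns a (found, datum) pair so that an empty fifo can be
+    # told apart from one holding a false value such as the None
+    # sentinel used by close_when_done().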
+    def pop (self):
+        if self.list:
+            result = self.list[0]
+            del self.list[0]
+            return (1, result)
+        else:
+            return (0, None)
+
+# Given 'haystack', see if any prefix of 'needle' is at its end.  This
+# assumes an exact match has already been checked.  Return the number of
+# characters matched.
+# for example:
+# f_p_a_e ("qwerty\r", "\r\n") => 1
+# f_p_a_e ("qwerty\r\n", "\r\n") => 2
+# f_p_a_e ("qwertydkjf", "\r\n") => 0
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# python:    18307/s
+# re:        12820/s
+# regex:     14035/s
+
+def find_prefix_at_end (haystack, needle):
+    nl = len(needle)
+    result = 0
+    for i in range (1,nl):
+        if haystack[-(nl-i):] == needle[:(nl-i)]:
+            result = nl-i
+            break
+    return result
diff --git a/lib-python/2.2/asyncore.py b/lib-python/2.2/asyncore.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/asyncore.py
@@ -0,0 +1,556 @@
+# -*- Mode: Python -*-
+#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+#   Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time".  Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads.  It's really only practical if your program
+is largely I/O bound.  If your program is CPU bound, then pre-emptively
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background."  Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
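+
+A minimal client sketch (the class name, host, and request shown are
+hypothetical):
+
+    import asyncore, socket
+
+    class http_client (asyncore.dispatcher):
+        def __init__ (self, host):
+            asyncore.dispatcher.__init__ (self)
+            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+            self.connect ((host, 80))
+
+        def handle_connect (self):
+            self.send ('GET / HTTP/1.0\r\n\r\n')
+
+        def handle_read (self):
+            print self.recv (8192)
+
+    http_client ('www.example.com')
+    asyncore.loop ()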
+"""
+
+import exceptions
+import select
+import socket
+import sys
+import time
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
+     ENOTCONN, ESHUTDOWN, EINTR, EISCONN
+
+try:
+    socket_map
+except NameError:
+    socket_map = {}
+
+class ExitNow (exceptions.Exception):
+    pass
+
+DEBUG = 0
+
+def poll (timeout=0.0, map=None):
+    if map is None:
+        map = socket_map
+    if map:
+        r = []; w = []; e = []
+        for fd, obj in map.items():
+            if obj.readable():
+                r.append (fd)
+            if obj.writable():
+                w.append (fd)
+        if [] == r == w == e:
+            time.sleep(timeout)
+        else:
+            try:
+                r,w,e = select.select (r,w,e, timeout)
+            except select.error, err:
+                if err[0] != EINTR:
+                    raise
+                else:
+                    return
+
+        if DEBUG:
+            print r,w,e
+
+        for fd in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                obj.handle_read_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+        for fd in w:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def poll2 (timeout=0.0, map=None):
+    import poll
+    if map is None:
+        map=socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    if map:
+        l = []
+        for fd, obj in map.items():
+            flags = 0
+            if obj.readable():
+                flags = poll.POLLIN
+            if obj.writable():
+                flags = flags | poll.POLLOUT
+            if flags:
+                l.append ((fd, flags))
+        r = poll.poll (l, timeout)
+        for fd, flags in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                if (flags  & poll.POLLIN):
+                    obj.handle_read_event()
+                if (flags & poll.POLLOUT):
+                    obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def poll3 (timeout=0.0, map=None):
+    # Use the poll() support added to the select module in Python 2.0
+    if map is None:
+        map=socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    pollster = select.poll()
+    if map:
+        for fd, obj in map.items():
+            flags = 0
+            if obj.readable():
+                flags = select.POLLIN
+            if obj.writable():
+                flags = flags | select.POLLOUT
+            if flags:
+                pollster.register(fd, flags)
+        try:
+            r = pollster.poll (timeout)
+        except select.error, err:
+            if err[0] != EINTR:
+                raise
+            r = []
+        for fd, flags in r:
+            try:
+                obj = map[fd]
+            except KeyError:
+                continue
+
+            try:
+                if (flags  & select.POLLIN):
+                    obj.handle_read_event()
+                if (flags & select.POLLOUT):
+                    obj.handle_write_event()
+            except ExitNow:
+                raise ExitNow
+            except:
+                obj.handle_error()
+
+def loop (timeout=30.0, use_poll=0, map=None):
+
+    if map is None:
+        map=socket_map
+
+    if use_poll:
+        if hasattr (select, 'poll'):
+            poll_fun = poll3
+        else:
+            poll_fun = poll2
+    else:
+        poll_fun = poll
+
+    while map:
+        poll_fun (timeout, map)
+
+class dispatcher:
+    debug = 0
+    connected = 0
+    accepting = 0
+    closing = 0
+    addr = None
+
+    def __init__ (self, sock=None, map=None):
+        if sock:
+            self.set_socket (sock, map)
+            # I think it should inherit this anyway
+            self.socket.setblocking (0)
+            self.connected = 1
+            # XXX Does the constructor require that the socket passed
+            # be connected?
+            try:
+                self.addr = sock.getpeername()
+            except socket.error:
+                # The addr isn't crucial
+                pass
+        else:
+            self.socket = None
+
+    def __repr__ (self):
+        status = [self.__class__.__module__+"."+self.__class__.__name__]
+        if self.accepting and self.addr:
+            status.append ('listening')
+        elif self.connected:
+            status.append ('connected')
+        if self.addr is not None:
+            try:
+                status.append ('%s:%d' % self.addr)
+            except TypeError:
+                status.append (repr(self.addr))
+        return '<%s at %#x>' % (' '.join (status), id (self))
+
+    def add_channel (self, map=None):
+        #self.log_info ('adding channel %s' % self)
+        if map is None:
+            map=socket_map
+        map [self._fileno] = self
+
+    def del_channel (self, map=None):
+        fd = self._fileno
+        if map is None:
+            map=socket_map
+        if map.has_key (fd):
+            #self.log_info ('closing channel %d:%s' % (fd, self))
+            del map [fd]
+
+    def create_socket (self, family, type):
+        self.family_and_type = family, type
+        self.socket = socket.socket (family, type)
+        self.socket.setblocking(0)
+        self._fileno = self.socket.fileno()
+        self.add_channel()
+
+    def set_socket (self, sock, map=None):
+        self.socket = sock
+##        self.__dict__['socket'] = sock
+        self._fileno = sock.fileno()
+        self.add_channel (map)
+
+    def set_reuse_addr (self):
+        # try to re-use a server port if possible
+        try:
+            self.socket.setsockopt (
+                socket.SOL_SOCKET, socket.SO_REUSEADDR,
+                self.socket.getsockopt (socket.SOL_SOCKET,
+                                        socket.SO_REUSEADDR) | 1
+                )
+        except socket.error:
+            pass
+
+    # ==================================================
+    # predicates for select()
+    # these are used as filters for the lists of sockets
+    # to pass to select().
+    # ==================================================
+
+    def readable (self):
+        return 1
+
+    if os.name == 'mac':
+        # The macintosh will select a listening socket for
+        # write if you let it.  What might this mean?
+        def writable (self):
+            return not self.accepting
+    else:
+        def writable (self):
+            return 1
+
+    # ==================================================
+    # socket object methods.
+    # ==================================================
+
+    def listen (self, num):
+        self.accepting = 1
+        if os.name == 'nt' and num > 5:
+            num = 1
+        return self.socket.listen (num)
+
+    def bind (self, addr):
+        self.addr = addr
+        return self.socket.bind (addr)
+
+    def connect (self, address):
+        self.connected = 0
+        err = self.socket.connect_ex(address)
+        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
+            return
+        if err in (0, EISCONN):
+            self.addr = address
+            self.connected = 1
+            self.handle_connect()
+        else:
+            raise socket.error, err
+
+    def accept (self):
+        try:
+            conn, addr = self.socket.accept()
+            return conn, addr
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                pass
+            else:
+                raise socket.error, why
+
+    def send (self, data):
+        try:
+            result = self.socket.send (data)
+            return result
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                return 0
+            else:
+                raise socket.error, why
+            return 0
+
+    def recv (self, buffer_size):
+        try:
+            data = self.socket.recv (buffer_size)
+            if not data:
+                # a closed connection is indicated by signaling
+                # a read condition, and having recv() return 0.
+                self.handle_close()
+                return ''
+            else:
+                return data
+        except socket.error, why:
+            # winsock sometimes throws ENOTCONN
+            if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
+                self.handle_close()
+                return ''
+            else:
+                raise socket.error, why
+
+    def close (self):
+        self.del_channel()
+        self.socket.close()
+
+    # cheap inheritance, used to pass all other attribute
+    # references to the underlying socket object.
+    def __getattr__ (self, attr):
+        return getattr (self.socket, attr)
+
+    # log and log_info may be overridden to provide more sophisticated
+    # logging and warning methods. In general, log is for 'hit' logging
+    # and 'log_info' is for informational, warning and error logging.
+
+    def log (self, message):
+        sys.stderr.write ('log: %s\n' % str(message))
+
+    def log_info (self, message, type='info'):
+        if __debug__ or type != 'info':
+            print '%s: %s' % (type, message)
+
+    def handle_read_event (self):
+        if self.accepting:
+            # for an accepting socket, getting a read implies
+            # that we are connected
+            if not self.connected:
+                self.connected = 1
+            self.handle_accept()
+        elif not self.connected:
+            self.handle_connect()
+            self.connected = 1
+            self.handle_read()
+        else:
+            self.handle_read()
+
+    def handle_write_event (self):
+        # getting a write implies that we are connected
+        if not self.connected:
+            self.handle_connect()
+            self.connected = 1
+        self.handle_write()
+
+    def handle_expt_event (self):
+        self.handle_expt()
+
+    def handle_error (self):
+        nil, t, v, tbinfo = compact_traceback()
+
+        # sometimes a user repr method will crash.
+        try:
+            self_repr = repr (self)
+        except:
+            self_repr = '<__repr__ (self) failed for object at %0x>' % id(self)
+
+        self.log_info (
+            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+                self_repr,
+                t,
+                v,
+                tbinfo
+                ),
+            'error'
+            )
+        self.close()
+
+    def handle_expt (self):
+        self.log_info ('unhandled exception', 'warning')
+
+    def handle_read (self):
+        self.log_info ('unhandled read event', 'warning')
+
+    def handle_write (self):
+        self.log_info ('unhandled write event', 'warning')
+
+    def handle_connect (self):
+        self.log_info ('unhandled connect event', 'warning')
+
+    def handle_accept (self):
+        self.log_info ('unhandled accept event', 'warning')
+
+    def handle_close (self):
+        self.log_info ('unhandled close event', 'warning')
+        self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
+
+class dispatcher_with_send (dispatcher):
+    def __init__ (self, sock=None):
+        dispatcher.__init__ (self, sock)
+        self.out_buffer = ''
+
+    def initiate_send (self):
+        num_sent = dispatcher.send (self, self.out_buffer[:512])
+        self.out_buffer = self.out_buffer[num_sent:]
+
+    def handle_write (self):
+        self.initiate_send()
+
+    def writable (self):
+        return (not self.connected) or len(self.out_buffer)
+
+    def send (self, data):
+        if self.debug:
+            self.log_info ('sending %s' % repr(data))
+        self.out_buffer = self.out_buffer + data
+        self.initiate_send()
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback ():
+    t,v,tb = sys.exc_info()
+    tbinfo = []
+    while 1:
+        tbinfo.append ((
+            tb.tb_frame.f_code.co_filename,
+            tb.tb_frame.f_code.co_name,
+            str(tb.tb_lineno)
+            ))
+        tb = tb.tb_next
+        if not tb:
+            break
+
+    # just to be safe
+    del tb
+
+    file, function, line = tbinfo[-1]
+    info = '[' + '] ['.join(map(lambda x: '|'.join(x), tbinfo)) + ']'
+    return (file, function, line), t, v, info
+
+def close_all (map=None):
+    if map is None:
+        map=socket_map
+    for x in map.values():
+        x.socket.close()
+    map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead.  So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o?  [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
+
+if os.name == 'posix':
+    import fcntl
+
+    class file_wrapper:
+        # here we override just enough to make a file
+        # look like a socket for the purposes of asyncore.
+        def __init__ (self, fd):
+            self.fd = fd
+
+        def recv (self, *args):
+            return apply (os.read, (self.fd,)+args)
+
+        def send (self, *args):
+            return apply (os.write, (self.fd,)+args)
+
+        read = recv
+        write = send
+
+        def close (self):
+            return os.close (self.fd)
+
+        def fileno (self):
+            return self.fd
+
+    class file_dispatcher (dispatcher):
+        def __init__ (self, fd):
+            dispatcher.__init__ (self)
+            self.connected = 1
+            # set it to non-blocking mode
+            flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0)
+            flags = flags | os.O_NONBLOCK
+            fcntl.fcntl (fd, fcntl.F_SETFL, flags)
+            self.set_file (fd)
+
+        def set_file (self, fd):
+            self._fileno = fd
+            self.socket = file_wrapper (fd)
+            self.add_channel()
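For orientation, a minimal sketch of how the dispatcher API above is meant
to be used: subclass dispatcher (or dispatcher_with_send), implement the
handle_* callbacks, and let the select loop drive everything.  The
EchoClient name, host and port are illustrative only, and the sketch
assumes an echo service is listening:

    import asyncore, socket

    class EchoClient (asyncore.dispatcher_with_send):
        def __init__ (self, host, port):
            asyncore.dispatcher_with_send.__init__ (self)
            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            self.connect ((host, port))

        def handle_connect (self):
            self.send ('hello\r\n')   # buffered until the socket is writable

        def handle_read (self):
            print self.recv (512)

        def handle_close (self):
            self.close()

    EchoClient ('localhost', 7)
    asyncore.loop()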
diff --git a/lib-python/2.2/atexit.py b/lib-python/2.2/atexit.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/atexit.py
@@ -0,0 +1,50 @@
+"""
+atexit.py - allow programmer to define multiple exit functions to be executed
+upon normal program termination.
+
+One public function, register, is defined.
+"""
+
+__all__ = ["register"]
+
+_exithandlers = []
+def _run_exitfuncs():
+    """run any registered exit functions
+
+    _exithandlers is traversed in reverse order so functions are executed
+    last in, first out.
+    """
+
+    while _exithandlers:
+        func, targs, kargs = _exithandlers.pop()
+        apply(func, targs, kargs)
+
+def register(func, *targs, **kargs):
+    """register a function to be executed upon normal program termination
+
+    func - function to be called at exit
+    targs - optional arguments to pass to func
+    kargs - optional keyword arguments to pass to func
+    """
+    _exithandlers.append((func, targs, kargs))
+
+import sys
+if hasattr(sys, "exitfunc"):
+    # Assume it's another registered exit function - append it to our list
+    register(sys.exitfunc)
+sys.exitfunc = _run_exitfuncs
+
+del sys
+
+if __name__ == "__main__":
+    def x1():
+        print "running x1"
+    def x2(n):
+        print "running x2(%s)" % `n`
+    def x3(n, kwd=None):
+        print "running x3(%s, kwd=%s)" % (`n`, `kwd`)
+
+    register(x1)
+    register(x2, 12)
+    register(x3, 5, "bar")
+    register(x3, "no kwd args")
diff --git a/lib-python/2.2/audiodev.py b/lib-python/2.2/audiodev.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/audiodev.py
@@ -0,0 +1,257 @@
+"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
+
+__all__ = ["error","AudioDev"]
+
+class error(Exception):
+    pass
+
+class Play_Audio_sgi:
+    # Private instance variables
+##      if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
+##                params, config, inited_outrate, inited_width, \
+##                inited_nchannels, port, converter, classinited: private
+
+    classinited = 0
+    frameratelist = nchannelslist = sampwidthlist = None
+
+    def initclass(self):
+        import AL
+        self.frameratelist = [
+                  (48000, AL.RATE_48000),
+                  (44100, AL.RATE_44100),
+                  (32000, AL.RATE_32000),
+                  (22050, AL.RATE_22050),
+                  (16000, AL.RATE_16000),
+                  (11025, AL.RATE_11025),
+                  ( 8000,  AL.RATE_8000),
+                  ]
+        self.nchannelslist = [
+                  (1, AL.MONO),
+                  (2, AL.STEREO),
+                  (4, AL.QUADRO),
+                  ]
+        self.sampwidthlist = [
+                  (1, AL.SAMPLE_8),
+                  (2, AL.SAMPLE_16),
+                  (3, AL.SAMPLE_24),
+                  ]
+        self.classinited = 1
+
+    def __init__(self):
+        import al, AL
+        if not self.classinited:
+            self.initclass()
+        self.oldparams = []
+        self.params = [AL.OUTPUT_RATE, 0]
+        self.config = al.newconfig()
+        self.inited_outrate = 0
+        self.inited_width = 0
+        self.inited_nchannels = 0
+        self.converter = None
+        self.port = None
+        return
+
+    def __del__(self):
+        if self.port:
+            self.stop()
+        if self.oldparams:
+            import al, AL
+            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+            self.oldparams = []
+
+    def wait(self):
+        if not self.port:
+            return
+        import time
+        while self.port.getfilled() > 0:
+            time.sleep(0.1)
+        self.stop()
+
+    def stop(self):
+        if self.port:
+            self.port.closeport()
+            self.port = None
+        if self.oldparams:
+            import al, AL
+            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+            self.oldparams = []
+
+    def setoutrate(self, rate):
+        for (raw, cooked) in self.frameratelist:
+            if rate == raw:
+                self.params[1] = cooked
+                self.inited_outrate = 1
+                break
+        else:
+            raise error, 'bad output rate'
+
+    def setsampwidth(self, width):
+        for (raw, cooked) in self.sampwidthlist:
+            if width == raw:
+                self.config.setwidth(cooked)
+                self.inited_width = 1
+                break
+        else:
+            if width == 0:
+                import AL
+                self.inited_width = 0
+                self.config.setwidth(AL.SAMPLE_16)
+                self.converter = self.ulaw2lin
+            else:
+                raise error, 'bad sample width'
+
+    def setnchannels(self, nchannels):
+        for (raw, cooked) in self.nchannelslist:
+            if nchannels == raw:
+                self.config.setchannels(cooked)
+                self.inited_nchannels = 1
+                break
+        else:
+            raise error, 'bad # of channels'
+
+    def writeframes(self, data):
+        if not (self.inited_outrate and self.inited_nchannels):
+            raise error, 'params not specified'
+        if not self.port:
+            import al, AL
+            self.port = al.openport('Python', 'w', self.config)
+            self.oldparams = self.params[:]
+            al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
+            al.setparams(AL.DEFAULT_DEVICE, self.params)
+        if self.converter:
+            data = self.converter(data)
+        self.port.writesamps(data)
+
+    def getfilled(self):
+        if self.port:
+            return self.port.getfilled()
+        else:
+            return 0
+
+    def getfillable(self):
+        if self.port:
+            return self.port.getfillable()
+        else:
+            return self.config.getqueuesize()
+
+    # private methods
+##      if 0: access *: private
+
+    def ulaw2lin(self, data):
+        import audioop
+        return audioop.ulaw2lin(data, 2)
+
+class Play_Audio_sun:
+##      if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
+##                inited_nchannels, converter: private
+
+    def __init__(self):
+        self.outrate = 0
+        self.sampwidth = 0
+        self.nchannels = 0
+        self.inited_outrate = 0
+        self.inited_width = 0
+        self.inited_nchannels = 0
+        self.converter = None
+        self.port = None
+        return
+
+    def __del__(self):
+        self.stop()
+
+    def setoutrate(self, rate):
+        self.outrate = rate
+        self.inited_outrate = 1
+
+    def setsampwidth(self, width):
+        self.sampwidth = width
+        self.inited_width = 1
+
+    def setnchannels(self, nchannels):
+        self.nchannels = nchannels
+        self.inited_nchannels = 1
+
+    def writeframes(self, data):
+        if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
+            raise error, 'params not specified'
+        if not self.port:
+            import sunaudiodev, SUNAUDIODEV
+            self.port = sunaudiodev.open('w')
+            info = self.port.getinfo()
+            info.o_sample_rate = self.outrate
+            info.o_channels = self.nchannels
+            if self.sampwidth == 0:
+                info.o_precision = 8
+                self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
+                # XXX Hack, hack -- leave defaults
+            else:
+                info.o_precision = 8 * self.sampwidth
+                info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
+                self.port.setinfo(info)
+        if self.converter:
+            data = self.converter(data)
+        self.port.write(data)
+
+    def wait(self):
+        if not self.port:
+            return
+        self.port.drain()
+        self.stop()
+
+    def stop(self):
+        if self.port:
+            self.port.flush()
+            self.port.close()
+            self.port = None
+
+    def getfilled(self):
+        if self.port:
+            return self.port.obufcount()
+        else:
+            return 0
+
+##    # Nobody remembers what this method does, and it's broken. :-(
+##    def getfillable(self):
+##        return BUFFERSIZE - self.getfilled()
+
+def AudioDev():
+    # Dynamically try to import and use a platform specific module.
+    try:
+        import al
+    except ImportError:
+        try:
+            import sunaudiodev
+            return Play_Audio_sun()
+        except ImportError:
+            try:
+                import Audio_mac
+            except ImportError:
+                raise error, 'no audio device'
+            else:
+                return Audio_mac.Play_Audio_mac()
+    else:
+        return Play_Audio_sgi()
+
+def test(fn = None):
+    import sys
+    if sys.argv[1:]:
+        fn = sys.argv[1]
+    else:
+        fn = 'f:just samples:just.aif'
+    import aifc
+    af = aifc.open(fn, 'r')
+    print fn, af.getparams()
+    p = AudioDev()
+    p.setoutrate(af.getframerate())
+    p.setsampwidth(af.getsampwidth())
+    p.setnchannels(af.getnchannels())
+    BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
+    while 1:
+        data = af.readframes(BUFSIZ)
+        if not data: break
+        print len(data)
+        p.writeframes(data)
+    p.wait()
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/base64.py b/lib-python/2.2/base64.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/base64.py
@@ -0,0 +1,77 @@
+#! /usr/bin/env python
+
+"""Conversions to/from base64 transport encoding as per RFC-1521."""
+
+# Modified 04-Oct-95 by Jack to use binascii module
+
+import binascii
+
+__all__ = ["encode","decode","encodestring","decodestring"]
+
+MAXLINESIZE = 76 # Excluding the CRLF
+MAXBINSIZE = (MAXLINESIZE//4)*3
+
+def encode(input, output):
+    """Encode a file."""
+    while 1:
+        s = input.read(MAXBINSIZE)
+        if not s: break
+        while len(s) < MAXBINSIZE:
+            ns = input.read(MAXBINSIZE-len(s))
+            if not ns: break
+            s = s + ns
+        line = binascii.b2a_base64(s)
+        output.write(line)
+
+def decode(input, output):
+    """Decode a file."""
+    while 1:
+        line = input.readline()
+        if not line: break
+        s = binascii.a2b_base64(line)
+        output.write(s)
+
+def encodestring(s):
+    """Encode a string."""
+    pieces = []
+    for i in range(0, len(s), MAXBINSIZE):
+        chunk = s[i : i + MAXBINSIZE]
+        pieces.append(binascii.b2a_base64(chunk))
+    return "".join(pieces)
+
+def decodestring(s):
+    """Decode a string."""
+    return binascii.a2b_base64(s)
+
+def test():
+    """Small test program"""
+    import sys, getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'deut')
+    except getopt.error, msg:
+        sys.stdout = sys.stderr
+        print msg
+        print """usage: %s [-d|-e|-u|-t] [file|-]
+        -d, -u: decode
+        -e: encode (default)
+        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
+        sys.exit(2)
+    func = encode
+    for o, a in opts:
+        if o == '-e': func = encode
+        if o == '-d': func = decode
+        if o == '-u': func = decode
+        if o == '-t': test1(); return
+    if args and args[0] != '-':
+        func(open(args[0], 'rb'), sys.stdout)
+    else:
+        func(sys.stdin, sys.stdout)
+
+def test1():
+    s0 = "Aladdin:open sesame"
+    s1 = encodestring(s0)
+    s2 = decodestring(s1)
+    print s0, `s1`, s2
+
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/bdb.py b/lib-python/2.2/bdb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/bdb.py
@@ -0,0 +1,563 @@
+"""Debugger basics"""
+
+import sys
+import os
+import types
+
+__all__ = ["BdbQuit","Bdb","Breakpoint"]
+
+BdbQuit = 'bdb.BdbQuit' # Exception to give up completely
+
+
+class Bdb:
+
+    """Generic Python debugger base class.
+
+    This class takes care of details of the trace facility;
+    a derived class should implement user interaction.
+    The standard debugger class (pdb.Pdb) is an example.
+    """
+
+    def __init__(self):
+        self.breaks = {}
+        self.fncache = {}
+
+    def canonic(self, filename):
+        if filename == "<" + filename[1:-1] + ">":
+            return filename
+        canonic = self.fncache.get(filename)
+        if not canonic:
+            canonic = os.path.abspath(filename)
+            canonic = os.path.normcase(canonic)
+            self.fncache[filename] = canonic
+        return canonic
+
+    def reset(self):
+        import linecache
+        linecache.checkcache()
+        self.botframe = None
+        self.stopframe = None
+        self.returnframe = None
+        self.quitting = 0
+
+    def trace_dispatch(self, frame, event, arg):
+        if self.quitting:
+            return # None
+        if event == 'line':
+            return self.dispatch_line(frame)
+        if event == 'call':
+            return self.dispatch_call(frame, arg)
+        if event == 'return':
+            return self.dispatch_return(frame, arg)
+        if event == 'exception':
+            return self.dispatch_exception(frame, arg)
+        print 'bdb.Bdb.dispatch: unknown debugging event:', `event`
+        return self.trace_dispatch
+
+    def dispatch_line(self, frame):
+        if self.stop_here(frame) or self.break_here(frame):
+            self.user_line(frame)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_call(self, frame, arg):
+        # XXX 'arg' is no longer used
+        if self.botframe is None:
+            # First call of dispatch since reset()
+            self.botframe = frame.f_back # (CT) Note that this may also be None!
+            return self.trace_dispatch
+        if not (self.stop_here(frame) or self.break_anywhere(frame)):
+            # No need to trace this function
+            return # None
+        self.user_call(frame, arg)
+        if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_return(self, frame, arg):
+        if self.stop_here(frame) or frame == self.returnframe:
+            self.user_return(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_exception(self, frame, arg):
+        if self.stop_here(frame):
+            self.user_exception(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    # Normally derived classes don't override the following
+    # methods, but they may if they want to redefine the
+    # definition of stopping and breakpoints.
+
+    def stop_here(self, frame):
+        # (CT) stopframe may now also be None, see dispatch_call.
+        # (CT) the former test for None is therefore removed from here.
+        if frame is self.stopframe:
+            return 1
+        while frame is not None and frame is not self.stopframe:
+            if frame is self.botframe:
+                return 1
+            frame = frame.f_back
+        return 0
+
+    def break_here(self, frame):
+        filename = self.canonic(frame.f_code.co_filename)
+        if not self.breaks.has_key(filename):
+            return 0
+        lineno = frame.f_lineno
+        if not lineno in self.breaks[filename]:
+            return 0
+        # flag says ok to delete temp. bp
+        (bp, flag) = effective(filename, lineno, frame)
+        if bp:
+            self.currentbp = bp.number
+            if (flag and bp.temporary):
+                self.do_clear(str(bp.number))
+            return 1
+        else:
+            return 0
+
+    def do_clear(self, arg):
+        raise NotImplementedError, "subclass of bdb must implement do_clear()"
+
+    def break_anywhere(self, frame):
+        return self.breaks.has_key(
+            self.canonic(frame.f_code.co_filename))
+
+    # Derived classes should override the user_* methods
+    # to gain control.
+
+    def user_call(self, frame, argument_list):
+        """This method is called when there is the remote possibility
+        that we ever need to stop in this function."""
+        pass
+
+    def user_line(self, frame):
+        """This method is called when we stop or break at this line."""
+        pass
+
+    def user_return(self, frame, return_value):
+        """This method is called when a return trap is set here."""
+        pass
+
+    def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
+        """This method is called if an exception occurs,
+        but only if we are to stop at or just below this level."""
+        pass
+
+    # Derived classes and clients can call the following methods
+    # to affect the stepping state.
+
+    def set_step(self):
+        """Stop after one line of code."""
+        self.stopframe = None
+        self.returnframe = None
+        self.quitting = 0
+
+    def set_next(self, frame):
+        """Stop on the next line in or below the given frame."""
+        self.stopframe = frame
+        self.returnframe = None
+        self.quitting = 0
+
+    def set_return(self, frame):
+        """Stop when returning from the given frame."""
+        self.stopframe = frame.f_back
+        self.returnframe = frame
+        self.quitting = 0
+
+    def set_trace(self):
+        """Start debugging from here."""
+        frame = sys._getframe().f_back
+        self.reset()
+        while frame:
+            frame.f_trace = self.trace_dispatch
+            self.botframe = frame
+            frame = frame.f_back
+        self.set_step()
+        sys.settrace(self.trace_dispatch)
+
+    def set_continue(self):
+        # Don't stop except at breakpoints or when finished
+        self.stopframe = self.botframe
+        self.returnframe = None
+        self.quitting = 0
+        if not self.breaks:
+            # no breakpoints; run without debugger overhead
+            sys.settrace(None)
+            frame = sys._getframe().f_back
+            while frame and frame is not self.botframe:
+                del frame.f_trace
+                frame = frame.f_back
+
+    def set_quit(self):
+        self.stopframe = self.botframe
+        self.returnframe = None
+        self.quitting = 1
+        sys.settrace(None)
+
+    # Derived classes and clients can call the following methods
+    # to manipulate breakpoints.  These methods return an
+    # error message if something went wrong, None if all is well.
+    # Set_break prints out the breakpoint line and file:lineno.
+    # Call self.get_*break*() to see the breakpoints or better
+    # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
+
+    def set_break(self, filename, lineno, temporary=0, cond = None):
+        filename = self.canonic(filename)
+        import linecache # Import as late as possible
+        line = linecache.getline(filename, lineno)
+        if not line:
+            return 'Line %s:%d does not exist' % (filename,
+                                   lineno)
+        if not self.breaks.has_key(filename):
+            self.breaks[filename] = []
+        list = self.breaks[filename]
+        if not lineno in list:
+            list.append(lineno)
+        bp = Breakpoint(filename, lineno, temporary, cond)
+
+    def clear_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        if not self.breaks.has_key(filename):
+            return 'There are no breakpoints in %s' % filename
+        if lineno not in self.breaks[filename]:
+            return 'There is no breakpoint at %s:%d' % (filename,
+                                    lineno)
+        # If there's only one bp in the list for that file,line
+        # pair, then remove the breaks entry
+        for bp in Breakpoint.bplist[filename, lineno][:]:
+            bp.deleteMe()
+        if not Breakpoint.bplist.has_key((filename, lineno)):
+            self.breaks[filename].remove(lineno)
+        if not self.breaks[filename]:
+            del self.breaks[filename]
+
+    def clear_bpbynumber(self, arg):
+        try:
+            number = int(arg)
+        except:
+            return 'Non-numeric breakpoint number (%s)' % arg
+        try:
+            bp = Breakpoint.bpbynumber[number]
+        except IndexError:
+            return 'Breakpoint number (%d) out of range' % number
+        if not bp:
+            return 'Breakpoint (%d) already deleted' % number
+        self.clear_break(bp.file, bp.line)
+
+    def clear_all_file_breaks(self, filename):
+        filename = self.canonic(filename)
+        if not self.breaks.has_key(filename):
+            return 'There are no breakpoints in %s' % filename
+        for line in self.breaks[filename]:
+            blist = Breakpoint.bplist[filename, line]
+            for bp in blist:
+                bp.deleteMe()
+        del self.breaks[filename]
+
+    def clear_all_breaks(self):
+        if not self.breaks:
+            return 'There are no breakpoints'
+        for bp in Breakpoint.bpbynumber:
+            if bp:
+                bp.deleteMe()
+        self.breaks = {}
+
+    def get_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        return self.breaks.has_key(filename) and \
+            lineno in self.breaks[filename]
+
+    def get_breaks(self, filename, lineno):
+        filename = self.canonic(filename)
+        return self.breaks.has_key(filename) and \
+            lineno in self.breaks[filename] and \
+            Breakpoint.bplist[filename, lineno] or []
+
+    def get_file_breaks(self, filename):
+        filename = self.canonic(filename)
+        if self.breaks.has_key(filename):
+            return self.breaks[filename]
+        else:
+            return []
+
+    def get_all_breaks(self):
+        return self.breaks
+
+    # Derived classes and clients can call the following method
+    # to get a data structure representing a stack trace.
+
+    def get_stack(self, f, t):
+        stack = []
+        if t and t.tb_frame is f:
+            t = t.tb_next
+        while f is not None:
+            stack.append((f, f.f_lineno))
+            if f is self.botframe:
+                break
+            f = f.f_back
+        stack.reverse()
+        i = max(0, len(stack) - 1)
+        while t is not None:
+            stack.append((t.tb_frame, t.tb_lineno))
+            t = t.tb_next
+        return stack, i
+
+    #
+
+    def format_stack_entry(self, frame_lineno, lprefix=': '):
+        import linecache, repr
+        frame, lineno = frame_lineno
+        filename = self.canonic(frame.f_code.co_filename)
+        s = filename + '(' + `lineno` + ')'
+        if frame.f_code.co_name:
+            s = s + frame.f_code.co_name
+        else:
+            s = s + "<lambda>"
+        if frame.f_locals.has_key('__args__'):
+            args = frame.f_locals['__args__']
+        else:
+            args = None
+        if args:
+            s = s + repr.repr(args)
+        else:
+            s = s + '()'
+        if frame.f_locals.has_key('__return__'):
+            rv = frame.f_locals['__return__']
+            s = s + '->'
+            s = s + repr.repr(rv)
+        line = linecache.getline(filename, lineno)
+        if line: s = s + lprefix + line.strip()
+        return s
+
+    # The following two methods can be called by clients to use
+    # a debugger to debug a statement, given as a string.
+
+    def run(self, cmd, globals=None, locals=None):
+        if globals is None:
+            import __main__
+            globals = __main__.__dict__
+        if locals is None:
+            locals = globals
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        if not isinstance(cmd, types.CodeType):
+            cmd = cmd+'\n'
+        try:
+            try:
+                exec cmd in globals, locals
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+
+    def runeval(self, expr, globals=None, locals=None):
+        if globals is None:
+            import __main__
+            globals = __main__.__dict__
+        if locals is None:
+            locals = globals
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        if not isinstance(expr, types.CodeType):
+            expr = expr+'\n'
+        try:
+            try:
+                return eval(expr, globals, locals)
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+
+    def runctx(self, cmd, globals, locals):
+        # B/W compatibility
+        self.run(cmd, globals, locals)
+
+    # This method is more useful to debug a single function call.
+
+    def runcall(self, func, *args):
+        self.reset()
+        sys.settrace(self.trace_dispatch)
+        res = None
+        try:
+            try:
+                res = apply(func, args)
+            except BdbQuit:
+                pass
+        finally:
+            self.quitting = 1
+            sys.settrace(None)
+        return res
+
+
+def set_trace():
+    Bdb().set_trace()
+
+
+class Breakpoint:
+
+    """Breakpoint class
+
+    Implements temporary breakpoints, ignore counts, disabling and
+    (re)-enabling, and conditionals.
+
+    Breakpoints are indexed by number through bpbynumber and by
+    the file,line tuple using bplist.  The former points to a
+    single instance of class Breakpoint.  The latter points to a
+    list of such instances since there may be more than one
+    breakpoint per line.
+
+    """
+
+    # XXX Keeping state in the class is a mistake -- this means
+    # you cannot have more than one active Bdb instance.
+
+    next = 1        # Next bp to be assigned
+    bplist = {}     # indexed by (file, lineno) tuple
+    bpbynumber = [None] # Each entry is None or an instance of Bpt
+                # index 0 is unused, except for marking an
+                # effective break .... see effective()
+
+    def __init__(self, file, line, temporary=0, cond = None):
+        self.file = file    # This better be in canonical form!
+        self.line = line
+        self.temporary = temporary
+        self.cond = cond
+        self.enabled = 1
+        self.ignore = 0
+        self.hits = 0
+        self.number = Breakpoint.next
+        Breakpoint.next = Breakpoint.next + 1
+        # Build the two lists
+        self.bpbynumber.append(self)
+        if self.bplist.has_key((file, line)):
+            self.bplist[file, line].append(self)
+        else:
+            self.bplist[file, line] = [self]
+
+
+    def deleteMe(self):
+        index = (self.file, self.line)
+        self.bpbynumber[self.number] = None   # No longer in list
+        self.bplist[index].remove(self)
+        if not self.bplist[index]:
+            # No more bp for this f:l combo
+            del self.bplist[index]
+
+    def enable(self):
+        self.enabled = 1
+
+    def disable(self):
+        self.enabled = 0
+
+    def bpprint(self):
+        if self.temporary:
+            disp = 'del  '
+        else:
+            disp = 'keep '
+        if self.enabled:
+            disp = disp + 'yes'
+        else:
+            disp = disp + 'no '
+        print '%-4dbreakpoint    %s at %s:%d' % (self.number, disp,
+                             self.file, self.line)
+        if self.cond:
+            print '\tstop only if %s' % (self.cond,)
+        if self.ignore:
+            print '\tignore next %d hits' % (self.ignore)
+        if (self.hits):
+            if (self.hits > 1): ss = 's'
+            else: ss = ''
+            print ('\tbreakpoint already hit %d time%s' %
+                   (self.hits, ss))
+
+# -----------end of Breakpoint class----------
+
+# Determines if there is an effective (active) breakpoint at this
+# line of code.  Returns a (breakpoint, flag) pair, or (None, None).
+def effective(file, line, frame):
+    """Determine which breakpoint for this file:line is to be acted upon.
+
+    Called only if we know there is a bpt at this
+    location.  Returns breakpoint that was triggered and a flag
+    that indicates if it is ok to delete a temporary bp.
+
+    """
+    possibles = Breakpoint.bplist[file,line]
+    for i in range(0, len(possibles)):
+        b = possibles[i]
+        if b.enabled == 0:
+            continue
+        # Count every hit when bp is enabled
+        b.hits = b.hits + 1
+        if not b.cond:
+            # If unconditional, and ignoring,
+            # go on to next, else break
+            if b.ignore > 0:
+                b.ignore = b.ignore -1
+                continue
+            else:
+                # breakpoint and marker that's ok
+                # to delete if temporary
+                return (b,1)
+        else:
+            # Conditional bp.
+            # Ignore count applies only to those bpt hits where the
+            # condition evaluates to true.
+            try:
+                val = eval(b.cond, frame.f_globals,
+                       frame.f_locals)
+                if val:
+                    if b.ignore > 0:
+                        b.ignore = b.ignore -1
+                        # continue
+                    else:
+                        return (b,1)
+                # else:
+                #   continue
+            except:
+                # if eval fails, most conservative
+                # thing is to stop on breakpoint
+                # regardless of ignore count.
+                # Don't delete temporary,
+                # as another hint to user.
+                return (b,0)
+    return (None, None)
+
+# -------------------- testing --------------------
+
+class Tdb(Bdb):
+    def user_call(self, frame, args):
+        name = frame.f_code.co_name
+        if not name: name = '???'
+        print '+++ call', name, args
+    def user_line(self, frame):
+        import linecache
+        name = frame.f_code.co_name
+        if not name: name = '???'
+        fn = self.canonic(frame.f_code.co_filename)
+        line = linecache.getline(fn, frame.f_lineno)
+        print '+++', fn, frame.f_lineno, name, ':', line.strip()
+    def user_return(self, frame, retval):
+        print '+++ return', retval
+    def user_exception(self, frame, exc_stuff):
+        print '+++ exception', exc_stuff
+        self.set_continue()
+
+def foo(n):
+    print 'foo(', n, ')'
+    x = bar(n*10)
+    print 'bar returned', x
+
+def bar(a):
+    print 'bar(', a, ')'
+    return a/2
+
+def test():
+    t = Tdb()
+    t.run('import bdb; bdb.foo(10)')
+
+# end
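Beyond the Tdb line-tracer above, a sketch of how a client would use the
breakpoint machinery: subclass Bdb, override user_line, register a break
with set_break, then run.  The script name and line number here are
hypothetical:

    import bdb

    class BreakDemo (bdb.Bdb):
        # called at each stop; report, then run on to the next breakpoint
        def user_line (self, frame):
            print 'stopped at %s:%d' % (frame.f_code.co_filename,
                                        frame.f_lineno)
            self.set_continue()

    d = BreakDemo()
    d.set_break ('myscript.py', 12)      # hypothetical file and line
    d.run ('execfile("myscript.py")')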
diff --git a/lib-python/2.2/binhex.py b/lib-python/2.2/binhex.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/binhex.py
@@ -0,0 +1,531 @@
+"""Macintosh binhex compression/decompression.
+
+easy interface:
+binhex(inputfilename, outputfilename)
+hexbin(inputfilename, outputfilename)
+"""
+
+#
+# Jack Jansen, CWI, August 1995.
+#
+# The module is supposed to be as compatible as possible. Especially the
+# easy interface should work "as expected" on any platform.
+# XXXX Note: currently, textfiles appear in mac-form on all platforms.
+# We seem to lack a simple character-translate in python.
+# (we should probably use ISO-Latin-1 on all but the mac platform).
+# XXXX The simple routines are too simple: they expect to hold the complete
+# files in-core. Should be fixed.
+# XXXX It would be nice to handle AppleDouble format on unix
+# (for servers serving macs).
+# XXXX I don't understand what happens when you get 0x90 times the same byte on
+# input. The resulting code (xx 90 90) would appear to be interpreted as an
+# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
+#
+import sys
+import os
+import struct
+import binascii
+
+__all__ = ["binhex","hexbin","Error"]
+
+class Error(Exception):
+    pass
+
+# States (what have we written)
+[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
+
+# Various constants
+REASONABLY_LARGE=32768  # Minimal amount we pass the rle-coder
+LINELEN=64
+RUNCHAR=chr(0x90)   # run-length introducer
+
+#
+# This code is no longer byte-order dependent
+
+#
+# Workarounds for non-mac machines.
+if os.name == 'mac':
+    import macfs
+    import MacOS
+    try:
+        openrf = MacOS.openrf
+    except AttributeError:
+        # Backward compatibility
+        openrf = open
+
+    def FInfo():
+        return macfs.FInfo()
+
+    def getfileinfo(name):
+        finfo = macfs.FSSpec(name).GetFInfo()
+        dir, file = os.path.split(name)
+        # XXXX Get resource/data sizes
+        fp = open(name, 'rb')
+        fp.seek(0, 2)
+        dlen = fp.tell()
+        fp = openrf(name, '*rb')
+        fp.seek(0, 2)
+        rlen = fp.tell()
+        return file, finfo, dlen, rlen
+
+    def openrsrc(name, *mode):
+        if not mode:
+            mode = '*rb'
+        else:
+            mode = '*' + mode[0]
+        return openrf(name, mode)
+
+else:
+    #
+    # Glue code for non-macintosh usage
+    #
+
+    class FInfo:
+        def __init__(self):
+            self.Type = '????'
+            self.Creator = '????'
+            self.Flags = 0
+
+    def getfileinfo(name):
+        finfo = FInfo()
+        # Quick check for textfile
+        fp = open(name)
+        data = open(name).read(256)
+        for c in data:
+            if not c.isspace() and (c<' ' or ord(c) > 0x7f):
+                break
+        else:
+            finfo.Type = 'TEXT'
+        fp.seek(0, 2)
+        dsize = fp.tell()
+        fp.close()
+        dir, file = os.path.split(name)
+        file = file.replace(':', '-', 1)
+        return file, finfo, dsize, 0
+
+    class openrsrc:
+        def __init__(self, *args):
+            pass
+
+        def read(self, *args):
+            return ''
+
+        def write(self, *args):
+            pass
+
+        def close(self):
+            pass
+
+class _Hqxcoderengine:
+    """Write data to the coder in 3-byte chunks"""
+
+    def __init__(self, ofp):
+        self.ofp = ofp
+        self.data = ''
+        self.hqxdata = ''
+        self.linelen = LINELEN-1
+
+    def write(self, data):
+        self.data = self.data + data
+        datalen = len(self.data)
+        todo = (datalen//3)*3
+        data = self.data[:todo]
+        self.data = self.data[todo:]
+        if not data:
+            return
+        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
+        self._flush(0)
+
+    def _flush(self, force):
+        first = 0
+        while first <= len(self.hqxdata)-self.linelen:
+            last = first + self.linelen
+            self.ofp.write(self.hqxdata[first:last]+'\n')
+            self.linelen = LINELEN
+            first = last
+        self.hqxdata = self.hqxdata[first:]
+        if force:
+            self.ofp.write(self.hqxdata + ':\n')
+
+    def close(self):
+        if self.data:
+            self.hqxdata = \
+                 self.hqxdata + binascii.b2a_hqx(self.data)
+        self._flush(1)
+        self.ofp.close()
+        del self.ofp
+
+class _Rlecoderengine:
+    """Write data to the RLE-coder in suitably large chunks"""
+
+    def __init__(self, ofp):
+        self.ofp = ofp
+        self.data = ''
+
+    def write(self, data):
+        self.data = self.data + data
+        if len(self.data) < REASONABLY_LARGE:
+            return
+        rledata = binascii.rlecode_hqx(self.data)
+        self.ofp.write(rledata)
+        self.data = ''
+
+    def close(self):
+        if self.data:
+            rledata = binascii.rlecode_hqx(self.data)
+            self.ofp.write(rledata)
+        self.ofp.close()
+        del self.ofp
+
+class BinHex:
+    def __init__(self, (name, finfo, dlen, rlen), ofp):
+        if type(ofp) == type(''):
+            ofname = ofp
+            ofp = open(ofname, 'w')
+            if os.name == 'mac':
+                fss = macfs.FSSpec(ofname)
+                fss.SetCreatorType('BnHq', 'TEXT')
+        ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
+        hqxer = _Hqxcoderengine(ofp)
+        self.ofp = _Rlecoderengine(hqxer)
+        self.crc = 0
+        if finfo is None:
+            finfo = FInfo()
+        self.dlen = dlen
+        self.rlen = rlen
+        self._writeinfo(name, finfo)
+        self.state = _DID_HEADER
+
+    def _writeinfo(self, name, finfo):
+        name = name
+        nl = len(name)
+        if nl > 63:
+            raise Error, 'Filename too long'
+        d = chr(nl) + name + '\0'
+        d2 = finfo.Type + finfo.Creator
+
+        # Force all structs to be packed with big-endian
+        d3 = struct.pack('>h', finfo.Flags)
+        d4 = struct.pack('>ii', self.dlen, self.rlen)
+        info = d + d2 + d3 + d4
+        self._write(info)
+        self._writecrc()
+
+    def _write(self, data):
+        self.crc = binascii.crc_hqx(data, self.crc)
+        self.ofp.write(data)
+
+    def _writecrc(self):
+        # XXXX Should this be here??
+        # self.crc = binascii.crc_hqx('\0\0', self.crc)
+        self.ofp.write(struct.pack('>h', self.crc))
+        self.crc = 0
+
+    def write(self, data):
+        if self.state != _DID_HEADER:
+            raise Error, 'Writing data at the wrong time'
+        self.dlen = self.dlen - len(data)
+        self._write(data)
+
+    def close_data(self):
+        if self.dlen != 0:
+            raise Error, 'Incorrect data size, diff='+`self.dlen`
+        self._writecrc()
+        self.state = _DID_DATA
+
+    def write_rsrc(self, data):
+        if self.state < _DID_DATA:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Writing resource data at the wrong time'
+        self.rlen = self.rlen - len(data)
+        self._write(data)
+
+    def close(self):
+        if self.state < _DID_DATA:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Close at the wrong time'
+        if self.rlen != 0:
+            raise Error, \
+                  "Incorrect resource-datasize, diff="+`self.rlen`
+        self._writecrc()
+        self.ofp.close()
+        self.state = None
+        del self.ofp
+
+def binhex(inp, out):
+    """(infilename, outfilename) - Create binhex-encoded copy of a file"""
+    finfo = getfileinfo(inp)
+    ofp = BinHex(finfo, out)
+
+    ifp = open(inp, 'rb')
+    # XXXX Do textfile translation on non-mac systems
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write(d)
+    ofp.close_data()
+    ifp.close()
+
+    ifp = openrsrc(inp, 'rb')
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write_rsrc(d)
+    ofp.close()
+    ifp.close()
+
+class _Hqxdecoderengine:
+    """Read data via the decoder in 4-byte chunks"""
+
+    def __init__(self, ifp):
+        self.ifp = ifp
+        self.eof = 0
+
+    def read(self, totalwtd):
+        """Read at least wtd bytes (or until EOF)"""
+        decdata = ''
+        wtd = totalwtd
+        #
+        # The loop here is convoluted, since we don't really know how
+        # much to decode: there may be newlines in the incoming data.
+        while wtd > 0:
+            if self.eof: return decdata
+            wtd = ((wtd+2)//3)*4
+            data = self.ifp.read(wtd)
+            #
+            # Next problem: there may not be a complete number of
+            # bytes in what we pass to a2b. Solve by yet another
+            # loop.
+            #
+            while 1:
+                try:
+                    decdatacur, self.eof = \
+                            binascii.a2b_hqx(data)
+                    break
+                except binascii.Incomplete:
+                    pass
+                newdata = self.ifp.read(1)
+                if not newdata:
+                    raise Error, \
+                          'Premature EOF on binhex file'
+                data = data + newdata
+            decdata = decdata + decdatacur
+            wtd = totalwtd - len(decdata)
+            if not decdata and not self.eof:
+                raise Error, 'Premature EOF on binhex file'
+        return decdata
+
+    def close(self):
+        self.ifp.close()
+
+class _Rledecoderengine:
+    """Read data via the RLE-coder"""
+
+    def __init__(self, ifp):
+        self.ifp = ifp
+        self.pre_buffer = ''
+        self.post_buffer = ''
+        self.eof = 0
+
+    def read(self, wtd):
+        if wtd > len(self.post_buffer):
+            self._fill(wtd-len(self.post_buffer))
+        rv = self.post_buffer[:wtd]
+        self.post_buffer = self.post_buffer[wtd:]
+        return rv
+
+    def _fill(self, wtd):
+        self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
+        if self.ifp.eof:
+            self.post_buffer = self.post_buffer + \
+                binascii.rledecode_hqx(self.pre_buffer)
+            self.pre_buffer = ''
+            return
+
+        #
+        # Obfuscated code ahead. We have to take care that we don't
+        # end up with an orphaned RUNCHAR later on. So, we keep a couple
+        # of bytes in the buffer, depending on what the end of
+        # the buffer looks like:
+        # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
+        # '?\220' - Keep 2 bytes: repeated something-else
+        # '\220\0' - Escaped \220: Keep 2 bytes.
+        # '?\220?' - Complete repeat sequence: decode all
+        # otherwise: keep 1 byte.
+        #
+        mark = len(self.pre_buffer)
+        if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
+            mark = mark - 3
+        elif self.pre_buffer[-1] == RUNCHAR:
+            mark = mark - 2
+        elif self.pre_buffer[-2:] == RUNCHAR + '\0':
+            mark = mark - 2
+        elif self.pre_buffer[-2] == RUNCHAR:
+            pass # Decode all
+        else:
+            mark = mark - 1
+
+        self.post_buffer = self.post_buffer + \
+            binascii.rledecode_hqx(self.pre_buffer[:mark])
+        self.pre_buffer = self.pre_buffer[mark:]
+
+    def close(self):
+        self.ifp.close()
+
+class HexBin:
+    def __init__(self, ifp):
+        if type(ifp) == type(''):
+            ifp = open(ifp)
+        #
+        # Find initial colon.
+        #
+        while 1:
+            ch = ifp.read(1)
+            if not ch:
+                raise Error, "No binhex data found"
+            # Cater for \r\n terminated lines (which show up as \n\r, hence
+            # all lines start with \r)
+            if ch == '\r':
+                continue
+            if ch == ':':
+                break
+            if ch != '\n':
+                dummy = ifp.readline()
+
+        hqxifp = _Hqxdecoderengine(ifp)
+        self.ifp = _Rledecoderengine(hqxifp)
+        self.crc = 0
+        self._readheader()
+
+    def _read(self, len):
+        data = self.ifp.read(len)
+        self.crc = binascii.crc_hqx(data, self.crc)
+        return data
+
+    def _checkcrc(self):
+        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
+        #self.crc = binascii.crc_hqx('\0\0', self.crc)
+        # XXXX Is this needed??
+        self.crc = self.crc & 0xffff
+        if filecrc != self.crc:
+            raise Error, 'CRC error, computed %x, read %x' \
+                  %(self.crc, filecrc)
+        self.crc = 0
+
+    def _readheader(self):
+        len = self._read(1)
+        fname = self._read(ord(len))
+        rest = self._read(1+4+4+2+4+4)
+        self._checkcrc()
+
+        type = rest[1:5]
+        creator = rest[5:9]
+        flags = struct.unpack('>h', rest[9:11])[0]
+        self.dlen = struct.unpack('>l', rest[11:15])[0]
+        self.rlen = struct.unpack('>l', rest[15:19])[0]
+
+        self.FName = fname
+        self.FInfo = FInfo()
+        self.FInfo.Creator = creator
+        self.FInfo.Type = type
+        self.FInfo.Flags = flags
+
+        self.state = _DID_HEADER
+
+    def read(self, *n):
+        if self.state != _DID_HEADER:
+            raise Error, 'Read data at wrong time'
+        if n:
+            n = n[0]
+            n = min(n, self.dlen)
+        else:
+            n = self.dlen
+        rv = ''
+        while len(rv) < n:
+            rv = rv + self._read(n-len(rv))
+        self.dlen = self.dlen - n
+        return rv
+
+    def close_data(self):
+        if self.state != _DID_HEADER:
+            raise Error, 'close_data at wrong time'
+        if self.dlen:
+            dummy = self._read(self.dlen)
+        self._checkcrc()
+        self.state = _DID_DATA
+
+    def read_rsrc(self, *n):
+        if self.state == _DID_HEADER:
+            self.close_data()
+        if self.state != _DID_DATA:
+            raise Error, 'Read resource data at wrong time'
+        if n:
+            n = n[0]
+            n = min(n, self.rlen)
+        else:
+            n = self.rlen
+        self.rlen = self.rlen - n
+        return self._read(n)
+
+    def close(self):
+        if self.rlen:
+            dummy = self.read_rsrc(self.rlen)
+        self._checkcrc()
+        self.state = _DID_RSRC
+        self.ifp.close()
+
+def hexbin(inp, out):
+    """(infilename, outfilename) - Decode binhexed file"""
+    ifp = HexBin(inp)
+    finfo = ifp.FInfo
+    if not out:
+        out = ifp.FName
+    if os.name == 'mac':
+        ofss = macfs.FSSpec(out)
+        out = ofss.as_pathname()
+
+    ofp = open(out, 'wb')
+    # XXXX Do translation on non-mac systems
+    while 1:
+        d = ifp.read(128000)
+        if not d: break
+        ofp.write(d)
+    ofp.close()
+    ifp.close_data()
+
+    d = ifp.read_rsrc(128000)
+    if d:
+        ofp = openrsrc(out, 'wb')
+        ofp.write(d)
+        while 1:
+            d = ifp.read_rsrc(128000)
+            if not d: break
+            ofp.write(d)
+        ofp.close()
+
+    if os.name == 'mac':
+        nfinfo = ofss.GetFInfo()
+        nfinfo.Creator = finfo.Creator
+        nfinfo.Type = finfo.Type
+        nfinfo.Flags = finfo.Flags
+        ofss.SetFInfo(nfinfo)
+
+    ifp.close()
+
+def _test():
+    if os.name == 'mac':
+        fss, ok = macfs.PromptGetFile('File to convert:')
+        if not ok:
+            sys.exit(0)
+        fname = fss.as_pathname()
+    else:
+        fname = sys.argv[1]
+    binhex(fname, fname+'.hqx')
+    hexbin(fname+'.hqx', fname+'.viahqx')
+    #hexbin(fname, fname+'.unpacked')
+    sys.exit(1)
+
+if __name__ == '__main__':
+    _test()
diff --git a/lib-python/2.2/bisect.py b/lib-python/2.2/bisect.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/bisect.py
@@ -0,0 +1,78 @@
+"""Bisection algorithms."""
+
+def insort_right(a, x, lo=0, hi=None):
+    """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+    If x is already in a, insert it to the right of the rightmost x.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if x < a[mid]: hi = mid
+        else: lo = mid+1
+    a.insert(lo, x)
+
+insort = insort_right   # backward compatibility
+
+def bisect_right(a, x, lo=0, hi=None):
+    """Return the index where to insert item x in list a, assuming a is sorted.
+
+    The return value i is such that all e in a[:i] have e <= x, and all e in
+    a[i:] have e > x.  So if x already appears in the list, i points just
+    beyond the rightmost x already there.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if x < a[mid]: hi = mid
+        else: lo = mid+1
+    return lo
+
+bisect = bisect_right   # backward compatibility
+
+def insort_left(a, x, lo=0, hi=None):
+    """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+    If x is already in a, insert it to the left of the leftmost x.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if a[mid] < x: lo = mid+1
+        else: hi = mid
+    a.insert(lo, x)
+
+
+def bisect_left(a, x, lo=0, hi=None):
+    """Return the index where to insert item x in list a, assuming a is sorted.
+
+    The return value i is such that all e in a[:i] have e < x, and all e in
+    a[i:] have e >= x.  So if x already appears in the list, i points just
+    before the leftmost x already there.
+
+    Optional args lo (default 0) and hi (default len(a)) bound the
+    slice of a to be searched.
+    """
+
+    if hi is None:
+        hi = len(a)
+    while lo < hi:
+        mid = (lo+hi)//2
+        if a[mid] < x: lo = mid+1
+        else: hi = mid
+    return lo
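bisect ships without a self-test, so a quick usage sketch (the sample data
is illustrative): bisect_left/bisect_right locate an insertion point, and
insort inserts while preserving sort order.

    import bisect

    scores = [10, 20, 20, 30]
    print bisect.bisect_left (scores, 20)    # 1: just before the leftmost 20
    print bisect.bisect_right (scores, 20)   # 3: just past the rightmost 20
    bisect.insort (scores, 25)
    print scores                             # [10, 20, 20, 25, 30]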
diff --git a/lib-python/2.2/calendar.py b/lib-python/2.2/calendar.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/calendar.py
@@ -0,0 +1,246 @@
+"""Calendar printing functions
+
+Note when comparing these calendars to the ones printed by cal(1): By
+default, these calendars have Monday as the first day of the week, and
+Sunday as the last (the European convention). Use setfirstweekday() to
+set the first day of the week (0=Monday, 6=Sunday)."""
+
+# Revision 2: uses functions from built-in time module
+
+# Import functions and variables from time module
+from time import localtime, mktime, strftime
+from types import SliceType
+
+__all__ = ["error","setfirstweekday","firstweekday","isleap",
+           "leapdays","weekday","monthrange","monthcalendar",
+           "prmonth","month","prcal","calendar","timegm",
+           "month_name", "month_abbr", "day_name", "day_abbr"]
+
+# Exception raised for bad input (with string parameter for details)
+error = ValueError
+
+# Constants for months referenced later
+January = 1
+February = 2
+
+# Number of days per month (except for February in leap years)
+mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+# This module used to have hard-coded lists of day and month names, as
+# English strings.  The classes following emulate a read-only version of
+# that, but supply localized names.  Note that the values are computed
+# fresh on each call, in case the user changes locale between calls.
+
+class _indexer:
+    def __getitem__(self, i):
+        if isinstance(i, SliceType):
+            return self.data[i.start : i.stop]
+        else:
+            # May raise an appropriate exception.
+            return self.data[i]
+
+class _localized_month(_indexer):
+    def __init__(self, format):
+        self.format = format
+
+    def __getitem__(self, i):
+        self.data = [strftime(self.format, (2001, j, 1, 12, 0, 0, 1, 1, 0))
+                     for j in range(1, 13)]
+        self.data.insert(0, "")
+        return _indexer.__getitem__(self, i)
+
+    def __len__(self):
+        return 13
+
+class _localized_day(_indexer):
+    def __init__(self, format):
+        self.format = format
+
+    def __getitem__(self, i):
+        # January 1, 2001, was a Monday.
+        self.data = [strftime(self.format, (2001, 1, j+1, 12, 0, 0, j, j+1, 0))
+                     for j in range(7)]
+        return _indexer.__getitem__(self, i)
+
+    def __len__(self):
+        return 7
+
+# Full and abbreviated names of weekdays
+day_name = _localized_day('%A')
+day_abbr = _localized_day('%a')
+
+# Full and abbreviated names of months (1-based arrays!!!)
+month_name = _localized_month('%B')
+month_abbr = _localized_month('%b')
+
+# Constants for weekdays
+(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
+
+_firstweekday = 0                       # 0 = Monday, 6 = Sunday
+
+def firstweekday():
+    return _firstweekday
+
+def setfirstweekday(weekday):
+    """Set weekday (Monday=0, Sunday=6) to start each week."""
+    global _firstweekday
+    if not MONDAY <= weekday <= SUNDAY:
+        raise ValueError, \
+              'bad weekday number; must be 0 (Monday) to 6 (Sunday)'
+    _firstweekday = weekday
+
+def isleap(year):
+    """Return 1 for leap years, 0 for non-leap years."""
+    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
+def leapdays(y1, y2):
+    """Return number of leap years in range [y1, y2).
+       Assume y1 <= y2."""
+    y1 -= 1
+    y2 -= 1
+    return (y2/4 - y1/4) - (y2/100 - y1/100) + (y2/400 - y1/400)
+
+def weekday(year, month, day):
+    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
+       day (1-31)."""
+    secs = mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+    tuple = localtime(secs)
+    return tuple[6]
+
+def monthrange(year, month):
+    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
+       year, month."""
+    if not 1 <= month <= 12:
+        raise ValueError, 'bad month number'
+    day1 = weekday(year, month, 1)
+    ndays = mdays[month] + (month == February and isleap(year))
+    return day1, ndays
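+
+# For example (1 February 2012 was a Wednesday, and 2012 is a leap year):
+#
+#   >>> monthrange(2012, 2)
+#   (2, 29)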
+
+def monthcalendar(year, month):
+    """Return a matrix representing a month's calendar.
+       Each row represents a week; days outside this month are zero."""
+    day1, ndays = monthrange(year, month)
+    rows = []
+    r7 = range(7)
+    day = (_firstweekday - day1 + 6) % 7 - 5   # for leading 0's in first week
+    while day <= ndays:
+        row = [0, 0, 0, 0, 0, 0, 0]
+        for i in r7:
+            if 1 <= day <= ndays: row[i] = day
+            day = day + 1
+        rows.append(row)
+    return rows
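+
+# With the default first weekday (Monday), March 2012 begins on a
+# Thursday, so the first row is padded with three zeros:
+#
+#   >>> monthcalendar(2012, 3)[0]
+#   [0, 0, 0, 1, 2, 3, 4]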
+
+def _center(str, width):
+    """Center a string in a field."""
+    n = width - len(str)
+    if n <= 0:
+        return str
+    return ' '*((n+1)/2) + str + ' '*((n)/2)
+
+def prweek(theweek, width):
+    """Print a single week (no newline)."""
+    print week(theweek, width),
+
+def week(theweek, width):
+    """Returns a single week in a string (no newline)."""
+    days = []
+    for day in theweek:
+        if day == 0:
+            s = ''
+        else:
+            s = '%2i' % day             # right-align single-digit days
+        days.append(_center(s, width))
+    return ' '.join(days)
+
+def weekheader(width):
+    """Return a header for a week."""
+    if width >= 9:
+        names = day_name
+    else:
+        names = day_abbr
+    days = []
+    for i in range(_firstweekday, _firstweekday + 7):
+        days.append(_center(names[i%7][:width], width))
+    return ' '.join(days)
+
+def prmonth(theyear, themonth, w=0, l=0):
+    """Print a month's calendar."""
+    print month(theyear, themonth, w, l),
+
+def month(theyear, themonth, w=0, l=0):
+    """Return a month's calendar string (multi-line)."""
+    w = max(2, w)
+    l = max(1, l)
+    s = (_center(month_name[themonth] + ' ' + `theyear`,
+                 7 * (w + 1) - 1).rstrip() +
+         '\n' * l + weekheader(w).rstrip() + '\n' * l)
+    for aweek in monthcalendar(theyear, themonth):
+        s = s + week(aweek, w).rstrip() + '\n' * l
+    return s[:-l] + '\n'
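+
+# An illustrative rendering (day and month names assume the C locale):
+#
+#   >>> print month(2012, 3)
+#        March 2012
+#   Mo Tu We Th Fr Sa Su
+#             1  2  3  4
+#    5  6  7  8  9 10 11
+#   12 13 14 15 16 17 18
+#   19 20 21 22 23 24 25
+#   26 27 28 29 30 31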
+
+# Spacing of month columns for 3-column year calendar
+_colwidth = 7*3 - 1         # Amount printed by prweek()
+_spacing = 6                # Number of spaces between columns
+
+def format3c(a, b, c, colwidth=_colwidth, spacing=_spacing):
+    """Prints 3-column formatting for year calendars"""
+    print format3cstring(a, b, c, colwidth, spacing)
+
+def format3cstring(a, b, c, colwidth=_colwidth, spacing=_spacing):
+    """Returns a string formatted from 3 strings, centered within 3 columns."""
+    return (_center(a, colwidth) + ' ' * spacing + _center(b, colwidth) +
+            ' ' * spacing + _center(c, colwidth))
+
+def prcal(year, w=0, l=0, c=_spacing):
+    """Print a year's calendar."""
+    print calendar(year, w, l, c),
+
+def calendar(year, w=0, l=0, c=_spacing):
+    """Returns a year's calendar as a multi-line string."""
+    w = max(2, w)
+    l = max(1, l)
+    c = max(2, c)
+    colwidth = (w + 1) * 7 - 1
+    s = _center(`year`, colwidth * 3 + c * 2).rstrip() + '\n' * l
+    header = weekheader(w)
+    header = format3cstring(header, header, header, colwidth, c).rstrip()
+    for q in range(January, January+12, 3):
+        s = (s + '\n' * l +
+             format3cstring(month_name[q], month_name[q+1], month_name[q+2],
+                            colwidth, c).rstrip() +
+             '\n' * l + header + '\n' * l)
+        data = []
+        height = 0
+        for amonth in range(q, q + 3):
+            cal = monthcalendar(year, amonth)
+            if len(cal) > height:
+                height = len(cal)
+            data.append(cal)
+        for i in range(height):
+            weeks = []
+            for cal in data:
+                if i >= len(cal):
+                    weeks.append('')
+                else:
+                    weeks.append(week(cal[i], w))
+            s = s + format3cstring(weeks[0], weeks[1], weeks[2],
+                                   colwidth, c).rstrip() + '\n' * l
+    return s[:-l] + '\n'
+
+EPOCH = 1970
+def timegm(tuple):
+    """Unrelated but handy function to calculate Unix timestamp from GMT."""
+    year, month, day, hour, minute, second = tuple[:6]
+    assert year >= EPOCH
+    assert 1 <= month <= 12
+    days = 365*(year-EPOCH) + leapdays(EPOCH, year)
+    for i in range(1, month):
+        days = days + mdays[i]
+    if month > 2 and isleap(year):
+        days = days + 1
+    days = days + day - 1
+    hours = days*24 + hour
+    minutes = hours*60 + minute
+    seconds = minutes*60 + second
+    return seconds
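+
+# Two illustrative checks against known epoch values:
+#
+#   >>> timegm((1970, 1, 2, 0, 0, 0))
+#   86400
+#   >>> timegm((2012, 3, 19, 0, 0, 0))
+#   1332115200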
diff --git a/lib-python/2.2/cgi.py b/lib-python/2.2/cgi.py
new file mode 100755
--- /dev/null
+++ b/lib-python/2.2/cgi.py
@@ -0,0 +1,1040 @@
+#! /usr/local/bin/python
+
+# NOTE: the above "/usr/local/bin/python" is NOT a mistake.  It is
+# intentionally NOT "/usr/bin/env python".  On many systems
+# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
+# scripts, and /usr/local/bin is the default directory where Python is
+# installed, so /usr/bin/env would be unable to find python.  Granted,
+# binary installations by Linux vendors often install Python in
+# /usr/bin.  So let those vendors patch cgi.py to match their choice
+# of installation.
+
+"""Support module for CGI (Common Gateway Interface) scripts.
+
+This module defines a number of utilities for use by CGI scripts
+written in Python.
+"""
+
+# XXX Perhaps there should be a slimmed version that doesn't contain
+# all those backwards compatible and debugging classes and functions?
+
+# History
+# -------
+#
+# Michael McLay started this module.  Steve Majewski changed the
+# interface to SvFormContentDict and FormContentDict.  The multipart
+# parsing was inspired by code submitted by Andreas Paepcke.  Guido van
+# Rossum rewrote, reformatted and documented the module and is currently
+# responsible for its maintenance.
+#
+
+__version__ = "2.6"
+
+
+# Imports
+# =======
+
+import sys
+import os
+import urllib
+import mimetools
+import rfc822
+import UserDict
+from StringIO import StringIO
+
+__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
+           "SvFormContentDict", "InterpFormContentDict", "FormContent",
+           "parse", "parse_qs", "parse_qsl", "parse_multipart",
+           "parse_header", "print_exception", "print_environ",
+           "print_form", "print_directory", "print_arguments",
+           "print_environ_usage", "escape"]
+
+# Logging support
+# ===============
+
+logfile = ""            # Filename to log to, if not empty
+logfp = None            # File object to log to, if not None
+
+def initlog(*allargs):
+    """Write a log message, if there is a log file.
+
+    Even though this function is called initlog(), you should always
+    use log(); log is a variable that is set either to initlog
+    (initially), to dolog (once the log file has been opened), or to
+    nolog (when logging is disabled).
+
+    The first argument is a format string; the remaining arguments (if
+    any) are arguments to the % operator, so e.g.
+        log("%s: %s", "a", "b")
+    will write "a: b" to the log file, followed by a newline.
+
+    If the global logfp is not None, it should be a file object to
+    which log data is written.
+
+    If the global logfp is None, the global logfile may be a string
+    giving a filename to open, in append mode.  This file should be
+    world writable!!!  If the file can't be opened, logging is
+    silently disabled (since there is no safe place where we could
+    send an error message).
+
+    """
+    global logfp, log
+    if logfile and not logfp:
+        try:
+            logfp = open(logfile, "a")
+        except IOError:
+            pass
+    if not logfp:
+        log = nolog
+    else:
+        log = dolog
+    apply(log, allargs)
+
+def dolog(fmt, *args):
+    """Write a log message to the log file.  See initlog() for docs."""
+    logfp.write(fmt%args + "\n")
+
+def nolog(*allargs):
+    """Dummy function, assigned to log when logging is disabled."""
+    pass
+
+log = initlog           # The current logging function
+
+
+# Parsing functions
+# =================
+
+# Maximum input we will accept when REQUEST_METHOD is POST
+# 0 ==> unlimited input
+maxlen = 0
+
+def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+    """Parse a query in the environment or from a file (default stdin)
+
+        Arguments, all optional:
+
+        fp              : file pointer; default: sys.stdin
+
+        environ         : environment dictionary; default: os.environ
+
+        keep_blank_values: flag indicating whether blank values in
+            URL encoded forms should be treated as blank strings.
+            A true value indicates that blanks should be retained as
+            blank strings.  The default false value indicates that
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+    """
+    if not fp:
+        fp = sys.stdin
+    if not environ.has_key('REQUEST_METHOD'):
+        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
+    if environ['REQUEST_METHOD'] == 'POST':
+        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
+        if ctype == 'multipart/form-data':
+            return parse_multipart(fp, pdict)
+        elif ctype == 'application/x-www-form-urlencoded':
+            clength = int(environ['CONTENT_LENGTH'])
+            if maxlen and clength > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+            qs = fp.read(clength)
+        else:
+            qs = ''                     # Unknown content-type
+        if environ.has_key('QUERY_STRING'):
+            if qs: qs = qs + '&'
+            qs = qs + environ['QUERY_STRING']
+        elif sys.argv[1:]:
+            if qs: qs = qs + '&'
+            qs = qs + sys.argv[1]
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+    elif environ.has_key('QUERY_STRING'):
+        qs = environ['QUERY_STRING']
+    else:
+        if sys.argv[1:]:
+            qs = sys.argv[1]
+        else:
+            qs = ""
+        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
+    return parse_qs(qs, keep_blank_values, strict_parsing)
+
+
+def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
+    """Parse a query given as a string argument.
+
+        Arguments:
+
+        qs: URL-encoded query string to be parsed
+
+        keep_blank_values: flag indicating whether blank values in
+            URL encoded queries should be treated as blank strings.
+            A true value indicates that blanks should be retained as
+            blank strings.  The default false value indicates that
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+    """
+    dict = {}
+    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
+        if dict.has_key(name):
+            dict[name].append(value)
+        else:
+            dict[name] = [value]
+    return dict
+
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
+    """Parse a query given as a string argument.
+
+    Arguments:
+
+    qs: URL-encoded query string to be parsed
+
+    keep_blank_values: flag indicating whether blank values in
+        URL encoded queries should be treated as blank strings.  A
+        true value indicates that blanks should be retained as blank
+        strings.  The default false value indicates that blank values
+        are to be ignored and treated as if they were not included.
+
+    strict_parsing: flag indicating what to do with parsing errors. If
+        false (the default), errors are silently ignored. If true,
+        errors raise a ValueError exception.
+
+    Returns a list, as G-d intended.
+    """
+    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+    r = []
+    for name_value in pairs:
+        nv = name_value.split('=', 1)
+        if len(nv) != 2:
+            if strict_parsing:
+                raise ValueError, "bad query field: %s" % `name_value`
+            continue
+        if len(nv[1]) or keep_blank_values:
+            name = urllib.unquote(nv[0].replace('+', ' '))
+            value = urllib.unquote(nv[1].replace('+', ' '))
+            r.append((name, value))
+
+    return r
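+
+# For example (blank values are dropped unless keep_blank_values is set):
+#
+#   >>> parse_qsl("a=1&a=2&b=")
+#   [('a', '1'), ('a', '2')]
+#   >>> parse_qsl("a=1&a=2&b=", keep_blank_values=1)
+#   [('a', '1'), ('a', '2'), ('b', '')]
+#   >>> parse_qs("a=1&a=2")
+#   {'a': ['1', '2']}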
+
+
+def parse_multipart(fp, pdict):
+    """Parse multipart input.
+
+    Arguments:
+    fp   : input file
+    pdict: dictionary containing other parameters of content-type header
+
+    Returns a dictionary just like parse_qs(): keys are the field names, each
+    value is a list of values for that field.  This is easy to use but not
+    much good if you are expecting megabytes to be uploaded -- in that case,
+    use the FieldStorage class instead which is much more flexible.  Note
+    that content-type is the raw, unparsed contents of the content-type
+    header.
+
+    XXX This does not parse nested multipart parts -- use FieldStorage for
+    that.
+
+    XXX This should really be subsumed by FieldStorage altogether -- no
+    point in having two implementations of the same parsing algorithm.
+
+    """
+    boundary = ""
+    if pdict.has_key('boundary'):
+        boundary = pdict['boundary']
+    if not valid_boundary(boundary):
+        raise ValueError, ('Invalid boundary in multipart form: %s'
+                            % `boundary`)
+
+    nextpart = "--" + boundary
+    lastpart = "--" + boundary + "--"
+    partdict = {}
+    terminator = ""
+
+    while terminator != lastpart:
+        bytes = -1
+        data = None
+        if terminator:
+            # At start of next part.  Read headers first.
+            headers = mimetools.Message(fp)
+            clength = headers.getheader('content-length')
+            if clength:
+                try:
+                    bytes = int(clength)
+                except ValueError:
+                    pass
+            if bytes > 0:
+                if maxlen and bytes > maxlen:
+                    raise ValueError, 'Maximum content length exceeded'
+                data = fp.read(bytes)
+            else:
+                data = ""
+        # Read lines until end of part.
+        lines = []
+        while 1:
+            line = fp.readline()
+            if not line:
+                terminator = lastpart # End outer loop
+                break
+            if line[:2] == "--":
+                terminator = line.strip()
+                if terminator in (nextpart, lastpart):
+                    break
+            lines.append(line)
+        # Done with part.
+        if data is None:
+            continue
+        if bytes < 0:
+            if lines:
+                # Strip final line terminator
+                line = lines[-1]
+                if line[-2:] == "\r\n":
+                    line = line[:-2]
+                elif line[-1:] == "\n":
+                    line = line[:-1]
+                lines[-1] = line
+                data = "".join(lines)
+        line = headers['content-disposition']
+        if not line:
+            continue
+        key, params = parse_header(line)
+        if key != 'form-data':
+            continue
+        if params.has_key('name'):
+            name = params['name']
+        else:
+            continue
+        if partdict.has_key(name):
+            partdict[name].append(data)
+        else:
+            partdict[name] = [data]
+
+    return partdict
+
+
+def parse_header(line):
+    """Parse a Content-type like header.
+
+    Return the main content-type and a dictionary of options.
+
+    """
+    plist = map(lambda x: x.strip(), line.split(';'))
+    key = plist[0].lower()
+    del plist[0]
+    pdict = {}
+    for p in plist:
+        i = p.find('=')
+        if i >= 0:
+            name = p[:i].strip().lower()
+            value = p[i+1:].strip()
+            if len(value) >= 2 and value[0] == value[-1] == '"':
+                value = value[1:-1]
+            pdict[name] = value
+    return key, pdict
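+
+# For example (quoted parameter values lose their surrounding quotes):
+#
+#   >>> parse_header('text/html; charset="iso-8859-1"')
+#   ('text/html', {'charset': 'iso-8859-1'})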
+
+
+# Classes for field storage
+# =========================
+
+class MiniFieldStorage:
+
+    """Like FieldStorage, for use when no file uploads are possible."""
+
+    # Dummy attributes
+    filename = None
+    list = None
+    type = None
+    file = None
+    type_options = {}
+    disposition = None
+    disposition_options = {}
+    headers = {}
+
+    def __init__(self, name, value):
+        """Constructor from field name and value."""
+        self.name = name
+        self.value = value
+        # self.file = StringIO(value)
+
+    def __repr__(self):
+        """Return printable representation."""
+        return "MiniFieldStorage(%s, %s)" % (`self.name`, `self.value`)
+
+
+class FieldStorage:
+
+    """Store a sequence of fields, reading multipart/form-data.
+
+    This class provides naming, typing, files stored on disk, and
+    more.  At the top level, it is accessible like a dictionary, whose
+    keys are the field names.  (Note: None can occur as a field name.)
+    The items are either a Python list (if there are multiple values) or
+    another FieldStorage or MiniFieldStorage object.  If it's a single
+    object, it has the following attributes:
+
+    name: the field name, if specified; otherwise None
+
+    filename: the filename, if specified; otherwise None; this is the
+        client side filename, *not* the file name on which it is
+        stored (that's a temporary file you don't deal with)
+
+    value: the value as a *string*; for file uploads, this
+        transparently reads the file every time you request the value
+
+    file: the file(-like) object from which you can read the data;
+        None if the data is stored as a simple string
+
+    type: the content-type, or None if not specified
+
+    type_options: dictionary of options specified on the content-type
+        line
+
+    disposition: content-disposition, or None if not specified
+
+    disposition_options: dictionary of corresponding options
+
+    headers: a dictionary(-like) object (sometimes rfc822.Message or a
+        subclass thereof) containing *all* headers
+
+    The class is subclassable, mostly for the purpose of overriding
+    the make_file() method, which is called internally to come up with
+    a file open for reading and writing.  This makes it possible to
+    override the default choice of storing all files in a temporary
+    directory and unlinking them as soon as they have been opened.
+
+    """
+
+    def __init__(self, fp=None, headers=None, outerboundary="",
+                 environ=os.environ, keep_blank_values=0, strict_parsing=0):
+        """Constructor.  Read multipart/* until last part.
+
+        Arguments, all optional:
+
+        fp              : file pointer; default: sys.stdin
+            (not used when the request method is GET)
+
+        headers         : header dictionary-like object; default:
+            taken from environ as per CGI spec
+
+        outerboundary   : terminating multipart boundary
+            (for internal use only)
+
+        environ         : environment dictionary; default: os.environ
+
+        keep_blank_values: flag indicating whether blank values in
+            URL encoded forms should be treated as blank strings.
+            A true value indicates that blanks should be retained as
+            blank strings.  The default false value indicates that
+            blank values are to be ignored and treated as if they were
+            not included.
+
+        strict_parsing: flag indicating what to do with parsing errors.
+            If false (the default), errors are silently ignored.
+            If true, errors raise a ValueError exception.
+
+        """
+        method = 'GET'
+        self.keep_blank_values = keep_blank_values
+        self.strict_parsing = strict_parsing
+        if environ.has_key('REQUEST_METHOD'):
+            method = environ['REQUEST_METHOD'].upper()
+        if method == 'GET' or method == 'HEAD':
+            if environ.has_key('QUERY_STRING'):
+                qs = environ['QUERY_STRING']
+            elif sys.argv[1:]:
+                qs = sys.argv[1]
+            else:
+                qs = ""
+            fp = StringIO(qs)
+            if headers is None:
+                headers = {'content-type':
+                           "application/x-www-form-urlencoded"}
+        if headers is None:
+            headers = {}
+            if method == 'POST':
+                # Set default content-type for POST to what's traditional
+                headers['content-type'] = "application/x-www-form-urlencoded"
+            if environ.has_key('CONTENT_TYPE'):
+                headers['content-type'] = environ['CONTENT_TYPE']
+            if environ.has_key('CONTENT_LENGTH'):
+                headers['content-length'] = environ['CONTENT_LENGTH']
+        self.fp = fp or sys.stdin
+        self.headers = headers
+        self.outerboundary = outerboundary
+
+        # Process content-disposition header
+        cdisp, pdict = "", {}
+        if self.headers.has_key('content-disposition'):
+            cdisp, pdict = parse_header(self.headers['content-disposition'])
+        self.disposition = cdisp
+        self.disposition_options = pdict
+        self.name = None
+        if pdict.has_key('name'):
+            self.name = pdict['name']
+        self.filename = None
+        if pdict.has_key('filename'):
+            self.filename = pdict['filename']
+
+        # Process content-type header
+        #
+        # Honor any existing content-type header.  But if there is no
+        # content-type header, use some sensible defaults.  Assume
+        # outerboundary is "" at the outer level, but something non-false
+        # inside a multi-part.  The default for an inner part is text/plain,
+        # but for an outer part it should be urlencoded.  This should catch
+        # bogus clients which erroneously forget to include a content-type
+        # header.
+        #
+        # See below for what we do if there does exist a content-type header,
+        # but it happens to be something we don't understand.
+        if self.headers.has_key('content-type'):
+            ctype, pdict = parse_header(self.headers['content-type'])
+        elif self.outerboundary or method != 'POST':
+            ctype, pdict = "text/plain", {}
+        else:
+            ctype, pdict = 'application/x-www-form-urlencoded', {}
+        self.type = ctype
+        self.type_options = pdict
+        self.innerboundary = ""
+        if pdict.has_key('boundary'):
+            self.innerboundary = pdict['boundary']
+        clen = -1
+        if self.headers.has_key('content-length'):
+            try:
+                clen = int(self.headers['content-length'])
+            except:
+                pass
+            if maxlen and clen > maxlen:
+                raise ValueError, 'Maximum content length exceeded'
+        self.length = clen
+
+        self.list = self.file = None
+        self.done = 0
+        if ctype == 'application/x-www-form-urlencoded':
+            self.read_urlencoded()
+        elif ctype[:10] == 'multipart/':
+            self.read_multi(environ, keep_blank_values, strict_parsing)
+        else:
+            self.read_single()
+
+    def __repr__(self):
+        """Return a printable representation."""
+        return "FieldStorage(%s, %s, %s)" % (
+                `self.name`, `self.filename`, `self.value`)
+
+    def __getattr__(self, name):
+        if name != 'value':
+            raise AttributeError, name
+        if self.file:
+            self.file.seek(0)
+            value = self.file.read()
+            self.file.seek(0)
+        elif self.list is not None:
+            value = self.list
+        else:
+            value = None
+        return value
+
+    def __getitem__(self, key):
+        """Dictionary style indexing."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        found = []
+        for item in self.list:
+            if item.name == key: found.append(item)
+        if not found:
+            raise KeyError, key
+        if len(found) == 1:
+            return found[0]
+        else:
+            return found
+
+    def getvalue(self, key, default=None):
+        """Dictionary style get() method, including 'value' lookup."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return map(lambda v: v.value, value)
+            else:
+                return value.value
+        else:
+            return default
+
+    def getfirst(self, key, default=None):
+        """ Return the first value received."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return value[0].value
+            else:
+                return value.value
+        else:
+            return default
+
+    def getlist(self, key):
+        """ Return list of received values."""
+        if self.has_key(key):
+            value = self[key]
+            if type(value) is type([]):
+                return map(lambda v: v.value, value)
+            else:
+                return [value.value]
+        else:
+            return []
+
+    def keys(self):
+        """Dictionary style keys() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        keys = []
+        for item in self.list:
+            if item.name not in keys: keys.append(item.name)
+        return keys
+
+    def has_key(self, key):
+        """Dictionary style has_key() method."""
+        if self.list is None:
+            raise TypeError, "not indexable"
+        for item in self.list:
+            if item.name == key: return 1
+        return 0
+
+    def __len__(self):
+        """Dictionary style len(x) support."""
+        return len(self.keys())
+
+    def read_urlencoded(self):
+        """Internal: read data in query string format."""
+        qs = self.fp.read(self.length)
+        self.list = list = []
+        for key, value in parse_qsl(qs, self.keep_blank_values,
+                                    self.strict_parsing):
+            list.append(MiniFieldStorage(key, value))
+        self.skip_lines()
+
+    FieldStorageClass = None
+
+    def read_multi(self, environ, keep_blank_values, strict_parsing):
+        """Internal: read a part that is itself multipart."""
+        ib = self.innerboundary
+        if not valid_boundary(ib):
+            raise ValueError, ('Invalid boundary in multipart form: %s'
+                               % `ib`)
+        self.list = []
+        klass = self.FieldStorageClass or self.__class__
+        part = klass(self.fp, {}, ib,
+                     environ, keep_blank_values, strict_parsing)
+        # Throw first part away
+        while not part.done:
+            headers = rfc822.Message(self.fp)
+            part = klass(self.fp, headers, ib,
+                         environ, keep_blank_values, strict_parsing)
+            self.list.append(part)
+        self.skip_lines()
+
+    def read_single(self):
+        """Internal: read an atomic part."""
+        if self.length >= 0:
+            self.read_binary()
+            self.skip_lines()
+        else:
+            self.read_lines()
+        self.file.seek(0)
+
+    bufsize = 8*1024            # I/O buffering size for copy to file
+
+    def read_binary(self):
+        """Internal: read binary data."""
+        self.file = self.make_file('b')
+        todo = self.length
+        if todo >= 0:
+            while todo > 0:
+                data = self.fp.read(min(todo, self.bufsize))
+                if not data:
+                    self.done = -1
+                    break
+                self.file.write(data)
+                todo = todo - len(data)
+
+    def read_lines(self):
+        """Internal: read lines until EOF or outerboundary."""
+        self.file = self.__file = StringIO()
+        if self.outerboundary:
+            self.read_lines_to_outerboundary()
+        else:
+            self.read_lines_to_eof()
+
+    def __write(self, line):
+        if self.__file is not None:
+            if self.__file.tell() + len(line) > 1000:
+                self.file = self.make_file('')
+                self.file.write(self.__file.getvalue())
+                self.__file = None
+        self.file.write(line)
+
+    def read_lines_to_eof(self):
+        """Internal: read lines until EOF."""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            self.__write(line)
+
+    def read_lines_to_outerboundary(self):
+        """Internal: read lines until outerboundary."""
+        next = "--" + self.outerboundary
+        last = next + "--"
+        delim = ""
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            if line[:2] == "--":
+                strippedline = line.strip()
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
+            odelim = delim
+            if line[-2:] == "\r\n":
+                delim = "\r\n"
+                line = line[:-2]
+            elif line[-1] == "\n":
+                delim = "\n"
+                line = line[:-1]
+            else:
+                delim = ""
+            self.__write(odelim + line)
+
+    def skip_lines(self):
+        """Internal: skip lines until outer boundary if defined."""
+        if not self.outerboundary or self.done:
+            return
+        next = "--" + self.outerboundary
+        last = next + "--"
+        while 1:
+            line = self.fp.readline()
+            if not line:
+                self.done = -1
+                break
+            if line[:2] == "--":
+                strippedline = line.strip()
+                if strippedline == next:
+                    break
+                if strippedline == last:
+                    self.done = 1
+                    break
+
+    def make_file(self, binary=None):
+        """Overridable: return a readable & writable file.
+
+        The file will be used as follows:
+        - data is written to it
+        - seek(0)
+        - data is read from it
+
+        The 'binary' argument is unused -- the file is always opened
+        in binary mode.
+
+        This version opens a temporary file for reading and writing,
+        and immediately deletes (unlinks) it.  The trick (on Unix!) is
+        that the file can still be used, but it can't be opened by
+        another process, and it will automatically be deleted when it
+        is closed or when the current process terminates.
+
+        If you want a more permanent file, you derive a class which
+        overrides this method.  If you want a visible temporary file
+        that is nevertheless automatically deleted when the script
+        terminates, try defining a __del__ method in a derived class
+        which unlinks the temporary files you have created.
+
+        """
+        import tempfile
+        return tempfile.TemporaryFile("w+b")
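+
+# Typical use of FieldStorage in a CGI script (an illustrative sketch;
+# the field name "user" is hypothetical):
+#
+#   form = FieldStorage()
+#   if form.has_key("user"):
+#       print "<p>Hello,", escape(form["user"].value)
+#   else:
+#       print "<p>No user field supplied."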
+
+
+
+# Backwards Compatibility Classes
+# ===============================
+
+class FormContentDict(UserDict.UserDict):
+    """Form content as dictionary with a list of values per field.
+
+    form = FormContentDict()
+
+    form[key] -> [value, value, ...]
+    form.has_key(key) -> Boolean
+    form.keys() -> [key, key, ...]
+    form.values() -> [[val, val, ...], [val, val, ...], ...]
+    form.items() ->  [(key, [val, val, ...]), (key, [val, val, ...]), ...]
+    form.dict == {key: [val, val, ...], ...}
+
+    """
+    def __init__(self, environ=os.environ):
+        self.dict = self.data = parse(environ=environ)
+        self.query_string = environ['QUERY_STRING']
+
+
+class SvFormContentDict(FormContentDict):
+    """Form content as dictionary expecting a single value per field.
+
+    If you only expect a single value for each field, then form[key]
+    will return that single value.  It will raise an IndexError if
+    that expectation is not true.  If you expect a field to have
+    possibly multiple values, then you can use form.getlist(key) to
+    get all of the values.  values() and items() are a compromise:
+    they return single strings where there is a single value, and
+    lists of strings otherwise.
+
+    """
+    def __getitem__(self, key):
+        if len(self.dict[key]) > 1:
+            raise IndexError, 'expecting a single value'
+        return self.dict[key][0]
+    def getlist(self, key):
+        return self.dict[key]
+    def values(self):
+        result = []
+        for value in self.dict.values():
+            if len(value) == 1:
+                result.append(value[0])
+            else: result.append(value)
+        return result
+    def items(self):
+        result = []
+        for key, value in self.dict.items():
+            if len(value) == 1:
+                result.append((key, value[0]))
+            else: result.append((key, value))
+        return result
+
+
+class InterpFormContentDict(SvFormContentDict):
+    """This class is present for backwards compatibility only."""
+    def __getitem__(self, key):
+        v = SvFormContentDict.__getitem__(self, key)
+        if v[0] in '0123456789+-.':
+            try: return int(v)
+            except ValueError:
+                try: return float(v)
+                except ValueError: pass
+        return v.strip()
+    def values(self):
+        result = []
+        for key in self.keys():
+            try:
+                result.append(self[key])
+            except IndexError:
+                result.append(self.dict[key])
+        return result
+    def items(self):
+        result = []
+        for key in self.keys():
+            try:
+                result.append((key, self[key]))
+            except IndexError:
+                result.append((key, self.dict[key]))
+        return result
+
+
+class FormContent(FormContentDict):
+    """This class is present for backwards compatibility only."""
+    def values(self, key):
+        if self.dict.has_key(key): return self.dict[key]
+        else: return None
+    def indexed_value(self, key, location):
+        if self.dict.has_key(key):
+            if len(self.dict[key]) > location:
+                return self.dict[key][location]
+            else: return None
+        else: return None
+    def value(self, key):
+        if self.dict.has_key(key): return self.dict[key][0]
+        else: return None
+    def length(self, key):
+        return len(self.dict[key])
+    def stripped(self, key):
+        if self.dict.has_key(key): return self.dict[key][0].strip()
+        else: return None
+    def pars(self):
+        return self.dict
+
+
+# Test/debug code
+# ===============
+
+def test(environ=os.environ):
+    """Robust test CGI script, usable as main program.
+
+    Write minimal HTTP headers and dump all information provided to
+    the script in HTML form.
+
+    """
+    import traceback
+    print "Content-type: text/html"
+    print
+    sys.stderr = sys.stdout
+    try:
+        form = FieldStorage()   # Replace with other classes to test those
+        print_directory()
+        print_arguments()
+        print_form(form)
+        print_environ(environ)
+        print_environ_usage()
+        def f():
+            exec "testing print_exception() -- <I>italics?</I>"
+        def g(f=f):
+            f()
+        print "<H3>What follows is a test, not an actual exception:</H3>"
+        g()
+    except:
+        print_exception()
+
+    print "<H1>Second try with a small maxlen...</H1>"
+
+    global maxlen
+    maxlen = 50
+    try:
+        form = FieldStorage()   # Replace with other classes to test those
+        print_directory()
+        print_arguments()
+        print_form(form)
+        print_environ(environ)
+    except:
+        print_exception()
+
+def print_exception(type=None, value=None, tb=None, limit=None):
+    if type is None:
+        type, value, tb = sys.exc_info()
+    import traceback
+    print
+    print "<H3>Traceback (most recent call last):</H3>"
+    list = traceback.format_tb(tb, limit) + \
+           traceback.format_exception_only(type, value)
+    print "<PRE>%s<B>%s</B></PRE>" % (
+        escape("".join(list[:-1])),
+        escape(list[-1]),
+        )
+    del tb
+
+def print_environ(environ=os.environ):
+    """Dump the shell environment as HTML."""
+    keys = environ.keys()
+    keys.sort()
+    print
+    print "<H3>Shell Environment:</H3>"
+    print "<DL>"
+    for key in keys:
+        print "<DT>", escape(key), "<DD>", escape(environ[key])
+    print "</DL>"
+    print
+
+def print_form(form):
+    """Dump the contents of a form as HTML."""
+    keys = form.keys()
+    keys.sort()
+    print
+    print "<H3>Form Contents:</H3>"
+    if not keys:
+        print "<P>No form fields."
+    print "<DL>"
+    for key in keys:
+        print "<DT>" + escape(key) + ":",
+        value = form[key]
+        print "<i>" + escape(`type(value)`) + "</i>"
+        print "<DD>" + escape(`value`)
+    print "</DL>"
+    print
+
+def print_directory():
+    """Dump the current directory as HTML."""
+    print
+    print "<H3>Current Working Directory:</H3>"
+    try:
+        pwd = os.getcwd()
+    except os.error, msg:
+        print "os.error:", escape(str(msg))
+    else:
+        print escape(pwd)
+    print
+
+def print_arguments():
+    print
+    print "<H3>Command Line Arguments:</H3>"
+    print
+    print sys.argv
+    print
+
+def print_environ_usage():
+    """Dump a list of environment variables used by CGI as HTML."""
+    print """
+<H3>These environment variables could have been set:</H3>
+<UL>
+<LI>AUTH_TYPE
+<LI>CONTENT_LENGTH
+<LI>CONTENT_TYPE
+<LI>DATE_GMT
+<LI>DATE_LOCAL
+<LI>DOCUMENT_NAME
+<LI>DOCUMENT_ROOT
+<LI>DOCUMENT_URI
+<LI>GATEWAY_INTERFACE
+<LI>LAST_MODIFIED
+<LI>PATH
+<LI>PATH_INFO
+<LI>PATH_TRANSLATED
+<LI>QUERY_STRING
+<LI>REMOTE_ADDR
+<LI>REMOTE_HOST
+<LI>REMOTE_IDENT
+<LI>REMOTE_USER
+<LI>REQUEST_METHOD
+<LI>SCRIPT_NAME
+<LI>SERVER_NAME
+<LI>SERVER_PORT
+<LI>SERVER_PROTOCOL
+<LI>SERVER_ROOT
+<LI>SERVER_SOFTWARE
+</UL>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well.  Here are some common variable names:
+<UL>
+<LI>HTTP_ACCEPT
+<LI>HTTP_CONNECTION
+<LI>HTTP_HOST
+<LI>HTTP_PRAGMA
+<LI>HTTP_REFERER
+<LI>HTTP_USER_AGENT
+</UL>
+"""
+
+
+# Utilities
+# =========
+
+def escape(s, quote=None):
+    """Replace special characters '&', '<' and '>' by SGML entities."""
+    s = s.replace("&", "&amp;") # Must be done first!
+    s = s.replace("<", "&lt;")
+    s = s.replace(">", "&gt;")
+    if quote:
+        s = s.replace('"', "&quot;")
+    return s
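+
+# For example (with quote set, embedded double quotes are escaped too):
+#
+#   >>> escape('<a href="x">&</a>', quote=1)
+#   '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'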
+
+def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
+    import re
+    return re.match(_vb_pattern, s)
+
+# Invoke mainline
+# ===============
+
+# Call test() when this file is run as a script (not imported as a module)
+if __name__ == '__main__':
+    test()
diff --git a/lib-python/2.2/cgitb.py b/lib-python/2.2/cgitb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/cgitb.py
@@ -0,0 +1,205 @@
+"""Handle exceptions in CGI scripts by formatting tracebacks into nice HTML.
+
+To enable this module, do:
+
+    import cgitb; cgitb.enable()
+
+at the top of your CGI script.  The optional arguments to enable() are:
+
+    display     - if true, tracebacks are displayed in the web browser
+    logdir      - if set, tracebacks are written to files in this directory
+    context     - number of lines of source code to show for each stack frame
+
+By default, tracebacks are displayed but not saved, and context is 5.
+
+Alternatively, if you have caught an exception and want cgitb to display it
+for you, call cgitb.handler().  The optional argument to handler() is a 3-item
+tuple (etype, evalue, etb) just like the value of sys.exc_info()."""
+
+__author__ = 'Ka-Ping Yee'
+__version__ = '$Revision$'
+
+import sys
+
+def reset():
+    """Return a string that resets the CGI and browser to a known state."""
+    return '''<!--: spam
+Content-Type: text/html
+
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
+</font> </font> </font> </script> </object> </blockquote> </pre>
+</table> </table> </table> </table> </table> </font> </font> </font>'''
+
+__UNDEF__ = []                          # a special sentinel object
+def small(text): return '<small>' + text + '</small>'
+def strong(text): return '<strong>' + text + '</strong>'
+def grey(text): return '<font color="#909090">' + text + '</font>'
+
+def lookup(name, frame, locals):
+    """Find the value for a given name in the given environment."""
+    if name in locals:
+        return 'local', locals[name]
+    if name in frame.f_globals:
+        return 'global', frame.f_globals[name]
+    return None, __UNDEF__
+
+def scanvars(reader, frame, locals):
+    """Scan one logical line of Python and look up values of variables used."""
+    import tokenize, keyword
+    vars, lasttoken, parent, prefix = [], None, None, ''
+    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
+        if ttype == tokenize.NEWLINE: break
+        if ttype == tokenize.NAME and token not in keyword.kwlist:
+            if lasttoken == '.':
+                if parent is not __UNDEF__:
+                    value = getattr(parent, token, __UNDEF__)
+                    vars.append((prefix + token, prefix, value))
+            else:
+                where, value = lookup(token, frame, locals)
+                vars.append((token, where, value))
+        elif token == '.':
+            prefix += lasttoken + '.'
+            parent = value
+        else:
+            parent, prefix = None, ''
+        lasttoken = token
+    return vars
+
+def html((etype, evalue, etb), context=5):
+    """Return a nice HTML document describing a given traceback."""
+    import os, types, time, traceback, linecache, inspect, pydoc
+
+    if type(etype) is types.ClassType:
+        etype = etype.__name__
+    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+    date = time.ctime(time.time())
+    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+        '<big><big><strong>%s</strong></big></big>' % str(etype),
+        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.'''
+
+    indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
+    frames = []
+    records = inspect.getinnerframes(etb, context)
+    for frame, file, lnum, func, lines, index in records:
+        file = file and os.path.abspath(file) or '?'
+        link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        args, varargs, varkw, locals = inspect.getargvalues(frame)
+        call = ''
+        if func != '?':
+            call = 'in ' + strong(func) + \
+                inspect.formatargvalues(args, varargs, varkw, locals,
+                    formatvalue=lambda value: '=' + pydoc.html.repr(value))
+
+        highlight = {}
+        def reader(lnum=[lnum]):
+            highlight[lnum[0]] = 1
+            try: return linecache.getline(file, lnum[0])
+            finally: lnum[0] += 1
+        vars = scanvars(reader, frame, locals)
+
+        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+                ('<big>&nbsp;</big>', link, call)]
+        if index is not None:
+            i = lnum - index
+            for line in lines:
+                num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+                line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
+                if i in highlight:
+                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+                else:
+                    rows.append('<tr><td>%s</td></tr>' % grey(line))
+                i += 1
+
+        done, dump = {}, []
+        for name, where, value in vars:
+            if name in done: continue
+            done[name] = 1
+            if value is not __UNDEF__:
+                if where == 'global': name = '<em>global</em> ' + strong(name)
+                elif where == 'local': name = strong(name)
+                else: name = where + strong(name.split('.')[-1])
+                dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
+            else:
+                dump.append(name + ' <em>undefined</em>')
+
+        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+        frames.append('''<p>
+<table width="100%%" cellspacing=0 cellpadding=0 border=0>
+%s</table>''' % '\n'.join(rows))
+
+    exception = ['<p>%s: %s' % (strong(str(etype)), str(evalue))]
+    if type(evalue) is types.InstanceType:
+        for name in dir(evalue):
+            value = pydoc.html.repr(getattr(evalue, name))
+            exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
+
+    import traceback
+    return head + ''.join(frames) + ''.join(exception) + '''
+
+
+<!-- The above is a description of an error in a Python program, formatted
+     for a Web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a Web browser, here is the original traceback:
+
+%s
+-->
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
+class Hook:
+    """A hook to replace sys.excepthook that shows tracebacks in HTML."""
+
+    def __init__(self, display=1, logdir=None, context=5, file=None):
+        self.display = display          # send tracebacks to browser if true
+        self.logdir = logdir            # log tracebacks to files if not None
+        self.context = context          # number of source code lines per frame
+        self.file = file or sys.stdout  # place to send the output
+
+    def __call__(self, etype, evalue, etb):
+        self.handle((etype, evalue, etb))
+
+    def handle(self, info=None):
+        info = info or sys.exc_info()
+        self.file.write(reset())
+
+        try:
+            text, doc = 0, html(info, self.context)
+        except:                         # just in case something goes wrong
+            import traceback
+            text, doc = 1, ''.join(traceback.format_exception(*info))
+
+        if self.display:
+            if text:
+                doc = doc.replace('&', '&amp;').replace('<', '&lt;')
+                self.file.write('<pre>' + doc + '</pre>\n')
+            else:
+                self.file.write(doc + '\n')
+        else:
+            self.file.write('<p>A problem occurred in a Python script.\n')
+
+        if self.logdir is not None:
+            import os, tempfile
+            name = tempfile.mktemp(['.html', '.txt'][text])
+            path = os.path.join(self.logdir, os.path.basename(name))
+            try:
+                file = open(path, 'w')
+                file.write(doc)
+                file.close()
+                msg = '<p> %s contains the description of this error.' % path
+            except:
+                msg = '<p> Tried to save traceback to %s, but failed.' % path
+            self.file.write(msg + '\n')
+        try:
+            self.file.flush()
+        except: pass
+
+handler = Hook().handle
+def enable(display=1, logdir=None, context=5):
+    """Install an exception handler that formats tracebacks as HTML.
+
+    The optional argument 'display' can be set to 0 to suppress sending the
+    traceback to the browser, and 'logdir' can be set to a directory to cause
+    tracebacks to be written to files there."""
+    sys.excepthook = Hook(display, logdir, context)
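+
+# A typical hardened setup (an illustrative sketch; the log directory is
+# hypothetical and must be writable by the web server):
+#
+#   import cgitb
+#   cgitb.enable(display=0, logdir="/tmp/cgitb")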
diff --git a/lib-python/2.2/chunk.py b/lib-python/2.2/chunk.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/chunk.py
@@ -0,0 +1,167 @@
+"""Simple class to read IFF chunks.
+
+An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
+Format)) has the following structure:
+
++----------------+
+| ID (4 bytes)   |
++----------------+
+| size (4 bytes) |
++----------------+
+| data           |
+| ...            |
++----------------+
+
+The ID is a 4-byte string which identifies the type of chunk.
+
+The size field (a 32-bit value, encoded using big-endian byte order)
+gives the size of the whole chunk, including the 8-byte header.
+
+Usually an IFF-type file consists of one or more chunks.  The proposed
+usage of the Chunk class defined here is to instantiate an instance at
+the start of each chunk and read from the instance until it reaches
+the end, after which a new instance can be instantiated.  At the end
+of the file, creating a new instance will fail with an EOFError
+exception.
+
+Usage:
+while 1:
+    try:
+        chunk = Chunk(file)
+    except EOFError:
+        break
+    chunktype = chunk.getname()
+    while 1:
+        data = chunk.read(nbytes)
+        if not data:
+            break
+        # do something with data
+
+The interface is file-like.  The implemented methods are:
+read, close, seek, tell, isatty.
+Extra methods are: skip() (called by close, skips to the end of the chunk),
+getname() (returns the name (ID) of the chunk)
+
+The __init__ method has one required argument, a file-like object
+(including a chunk instance), and one optional argument, a flag which
+specifies whether or not chunks are aligned on 2-byte boundaries.  The
+default is 1, i.e. aligned.
+"""
+
+class Chunk:
+    def __init__(self, file, align = 1, bigendian = 1, inclheader = 0):
+        import struct
+        self.closed = 0
+        self.align = align      # whether to align to word (2-byte) boundaries
+        if bigendian:
+            strflag = '>'
+        else:
+            strflag = '<'
+        self.file = file
+        self.chunkname = file.read(4)
+        if len(self.chunkname) < 4:
+            raise EOFError
+        try:
+            self.chunksize = struct.unpack(strflag+'l', file.read(4))[0]
+        except struct.error:
+            raise EOFError
+        if inclheader:
+            self.chunksize = self.chunksize - 8 # subtract header
+        self.size_read = 0
+        try:
+            self.offset = self.file.tell()
+        except (AttributeError, IOError):
+            self.seekable = 0
+        else:
+            self.seekable = 1
+
+    def getname(self):
+        """Return the name (ID) of the current chunk."""
+        return self.chunkname
+
+    def getsize(self):
+        """Return the size of the current chunk."""
+        return self.chunksize
+
+    def close(self):
+        if not self.closed:
+            self.skip()
+            self.closed = 1
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return 0
+
+    def seek(self, pos, whence = 0):
+        """Seek to specified position into the chunk.
+        Default position is 0 (start of chunk).
+        If the file is not seekable, this will result in an error.
+        """
+
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if not self.seekable:
+            raise IOError, "cannot seek"
+        if whence == 1:
+            pos = pos + self.size_read
+        elif whence == 2:
+            pos = pos + self.chunksize
+        if pos < 0 or pos > self.chunksize:
+            raise RuntimeError
+        self.file.seek(self.offset + pos, 0)
+        self.size_read = pos
+
+    def tell(self):
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        return self.size_read
+
+    def read(self, size = -1):
+        """Read at most size bytes from the chunk.
+        If size is omitted or negative, read until the end
+        of the chunk.
+        """
+
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.size_read >= self.chunksize:
+            return ''
+        if size < 0:
+            size = self.chunksize - self.size_read
+        if size > self.chunksize - self.size_read:
+            size = self.chunksize - self.size_read
+        data = self.file.read(size)
+        self.size_read = self.size_read + len(data)
+        if self.size_read == self.chunksize and \
+           self.align and \
+           (self.chunksize & 1):
+            dummy = self.file.read(1)
+            self.size_read = self.size_read + len(dummy)
+        return data
+
+    def skip(self):
+        """Skip the rest of the chunk.
+        If you are not interested in the contents of the chunk,
+        this method should be called so that the file points to
+        the start of the next chunk.
+        """
+
+        if self.closed:
+            raise ValueError, "I/O operation on closed file"
+        if self.seekable:
+            try:
+                n = self.chunksize - self.size_read
+                # maybe fix alignment
+                if self.align and (self.chunksize & 1):
+                    n = n + 1
+                self.file.seek(n, 1)
+                self.size_read = self.size_read + n
+                return
+            except IOError:
+                pass
+        while self.size_read < self.chunksize:
+            n = min(8192, self.chunksize - self.size_read)
+            dummy = self.read(n)
+            if not dummy:
+                raise EOFError
diff --git a/lib-python/2.2/cmd.py b/lib-python/2.2/cmd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/cmd.py
@@ -0,0 +1,336 @@
+"""A generic class to build line-oriented command interpreters.
+
+Interpreters constructed with this class obey the following conventions:
+
+1. End of file on input is processed as the command 'EOF'.
+2. A command is parsed out of each line by collecting the prefix composed
+   of characters in the identchars member.
+3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
+   is passed a single argument consisting of the remainder of the line.
+4. Typing an empty line repeats the last command.  (Actually, it calls the
+   method `emptyline', which may be overridden in a subclass.)
+5. There is a predefined `help' method.  Given an argument `topic', it
+   calls the command `help_topic'.  With no arguments, it lists all topics
+   with defined help_ functions, broken into up to three sections: documented
+   commands, miscellaneous help topics, and undocumented commands.
+6. The command '?' is a synonym for `help'.  The command '!' is a synonym
+   for `shell', if a do_shell method exists.
+7. If completion is enabled, completing commands will be done automatically,
+   and completion of command arguments is done by calling complete_foo()
+   with arguments text, line, begidx, endidx.  text is the string we are
+   matching against; all returned matches must begin with it.  line is the
+   current input line (lstripped); begidx and endidx are the beginning and end
+   indexes of the text being matched, which could be used to provide
+   different completion depending upon which position the argument is in.
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup.  This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+
+These interpreters use raw_input; thus, if the readline module is loaded,
+they automatically support Emacs-like command history and editing features.
+"""
+
+import string, sys
+
+__all__ = ["Cmd"]
+
+PROMPT = '(Cmd) '
+IDENTCHARS = string.ascii_letters + string.digits + '_'
+
+class Cmd:
+    """A simple framework for writing line-oriented command interpreters.
+
+    These are often useful for test harnesses, administrative tools, and
+    prototypes that will later be wrapped in a more sophisticated interface.
+
+    A Cmd instance or subclass instance is a line-oriented interpreter
+    framework.  There is no good reason to instantiate Cmd itself; rather,
+    it's useful as a superclass of an interpreter class you define yourself
+    in order to inherit Cmd's methods and encapsulate action methods.
+
+    """
+    prompt = PROMPT
+    identchars = IDENTCHARS
+    ruler = '='
+    lastcmd = ''
+    intro = None
+    doc_leader = ""
+    doc_header = "Documented commands (type help <topic>):"
+    misc_header = "Miscellaneous help topics:"
+    undoc_header = "Undocumented commands:"
+    nohelp = "*** No help on %s"
+    use_rawinput = 1
+
+    def __init__(self, completekey='tab'):
+        """Instantiate a line-oriented interpreter framework.
+
+        The optional argument is the readline name of a completion key;
+        it defaults to the Tab key. If completekey is not None and the
+        readline module is available, command completion is done
+        automatically.
+
+        """
+        self.cmdqueue = []
+        self.completekey = completekey
+
+    def cmdloop(self, intro=None):
+        """Repeatedly issue a prompt, accept input, parse an initial prefix
+        off the received input, and dispatch to action methods, passing them
+        the remainder of the line as argument.
+
+        """
+
+        self.preloop()
+        if intro is not None:
+            self.intro = intro
+        if self.intro:
+            print self.intro
+        stop = None
+        while not stop:
+            if self.cmdqueue:
+                line = self.cmdqueue[0]
+                del self.cmdqueue[0]
+            else:
+                if self.use_rawinput:
+                    try:
+                        line = raw_input(self.prompt)
+                    except EOFError:
+                        line = 'EOF'
+                else:
+                    sys.stdout.write(self.prompt)
+                    sys.stdout.flush()
+                    line = sys.stdin.readline()
+                    if not len(line):
+                        line = 'EOF'
+                    else:
+                        line = line[:-1] # chop \n
+            line = self.precmd(line)
+            stop = self.onecmd(line)
+            stop = self.postcmd(stop, line)
+        self.postloop()
+
+    def precmd(self, line):
+        """Hook method executed just before the command line is
+        interpreted, but after the input prompt is generated and issued.
+
+        """
+        return line
+
+    def postcmd(self, stop, line):
+        """Hook method executed just after a command dispatch is finished."""
+        return stop
+
+    def preloop(self):
+        """Hook method executed once when the cmdloop() method is called."""
+        if self.completekey:
+            try:
+                import readline
+                self.old_completer = readline.get_completer()
+                readline.set_completer(self.complete)
+                readline.parse_and_bind(self.completekey+": complete")
+            except ImportError:
+                pass
+
+    def postloop(self):
+        """Hook method executed once when the cmdloop() method is about to
+        return.
+
+        """
+        if self.completekey:
+            try:
+                import readline
+                readline.set_completer(self.old_completer)
+            except ImportError:
+                pass
+
+    def parseline(self, line):
+        line = line.strip()
+        if not line:
+            return None, None, line
+        elif line[0] == '?':
+            line = 'help ' + line[1:]
+        elif line[0] == '!':
+            if hasattr(self, 'do_shell'):
+                line = 'shell ' + line[1:]
+            else:
+                return None, None, line
+        i, n = 0, len(line)
+        while i < n and line[i] in self.identchars: i = i+1
+        cmd, arg = line[:i], line[i:].strip()
+        return cmd, arg, line
+
+    def onecmd(self, line):
+        """Interpret the argument as though it had been typed in response
+        to the prompt.
+
+        This may be overridden, but should not normally need to be;
+        see the precmd() and postcmd() methods for useful execution hooks.
+        The return value is a flag indicating whether interpretation of
+        commands by the interpreter should stop.
+
+        """
+        cmd, arg, line = self.parseline(line)
+        if not line:
+            return self.emptyline()
+        if cmd is None:
+            return self.default(line)
+        self.lastcmd = line
+        if cmd == '':
+            return self.default(line)
+        else:
+            try:
+                func = getattr(self, 'do_' + cmd)
+            except AttributeError:
+                return self.default(line)
+            return func(arg)
+
+    def emptyline(self):
+        """Called when an empty line is entered in response to the prompt.
+
+        If this method is not overridden, it repeats the last nonempty
+        command entered.
+
+        """
+        if self.lastcmd:
+            return self.onecmd(self.lastcmd)
+
+    def default(self, line):
+        """Called on an input line when the command prefix is not recognized.
+
+        If this method is not overridden, it prints an error message and
+        returns.
+
+        """
+        print '*** Unknown syntax:', line
+
+    def completedefault(self, *ignored):
+        """Method called to complete an input line when no command-specific
+        complete_*() method is available.
+
+        By default, it returns an empty list.
+
+        """
+        return []
+
+    def completenames(self, text, *ignored):
+        dotext = 'do_'+text
+        return [a[3:] for a in self.get_names() if a.startswith(dotext)]
+
+    def complete(self, text, state):
+        """Return the next possible completion for 'text'.
+
+        If a command has not been entered, then complete against the
+        command list.  Otherwise try to call complete_<command> to get a
+        list of completions.
+        """
+        if state == 0:
+            import readline
+            origline = readline.get_line_buffer()
+            line = origline.lstrip()
+            stripped = len(origline) - len(line)
+            begidx = readline.get_begidx() - stripped
+            endidx = readline.get_endidx() - stripped
+            if begidx>0:
+                cmd, args, foo = self.parseline(line)
+                if cmd == '':
+                    compfunc = self.completedefault
+                else:
+                    try:
+                        compfunc = getattr(self, 'complete_' + cmd)
+                    except AttributeError:
+                        compfunc = self.completedefault
+            else:
+                compfunc = self.completenames
+            self.completion_matches = compfunc(text, line, begidx, endidx)
+        try:
+            return self.completion_matches[state]
+        except IndexError:
+            return None
+
+    def get_names(self):
+        # Inheritance says we have to look in class and
+        # base classes; order is not important.
+        names = []
+        classes = [self.__class__]
+        while classes:
+            aclass = classes[0]
+            if aclass.__bases__:
+                classes = classes + list(aclass.__bases__)
+            names = names + dir(aclass)
+            del classes[0]
+        return names
+
+    def complete_help(self, *args):
+        return self.completenames(*args)
+
+    def do_help(self, arg):
+        if arg:
+            # XXX check arg syntax
+            try:
+                func = getattr(self, 'help_' + arg)
+            except:
+                try:
+                    doc=getattr(self, 'do_' + arg).__doc__
+                    if doc:
+                        print doc
+                        return
+                except:
+                    pass
+                print self.nohelp % (arg,)
+                return
+            func()
+        else:
+            names = self.get_names()
+            cmds_doc = []
+            cmds_undoc = []
+            help = {}
+            for name in names:
+                if name[:5] == 'help_':
+                    help[name[5:]]=1
+            names.sort()
+            # There can be duplicates if routines are overridden
+            prevname = ''
+            for name in names:
+                if name[:3] == 'do_':
+                    if name == prevname:
+                        continue
+                    prevname = name
+                    cmd=name[3:]
+                    if help.has_key(cmd):
+                        cmds_doc.append(cmd)
+                        del help[cmd]
+                    elif getattr(self, name).__doc__:
+                        cmds_doc.append(cmd)
+                    else:
+                        cmds_undoc.append(cmd)
+            print self.doc_leader
+            self.print_topics(self.doc_header,   cmds_doc,   15,80)
+            self.print_topics(self.misc_header,  help.keys(),15,80)
+            self.print_topics(self.undoc_header, cmds_undoc, 15,80)
+
+    def print_topics(self, header, cmds, cmdlen, maxcol):
+        if cmds:
+            print header
+            if self.ruler:
+                print self.ruler * len(header)
+            (cmds_per_line,junk)=divmod(maxcol,cmdlen)
+            col=cmds_per_line
+            for cmd in cmds:
+                if col==0: print
+                print (("%-"+`cmdlen`+"s") % cmd),
+                col = (col+1) % cmds_per_line
+            print "\n"
diff --git a/lib-python/2.2/code.py b/lib-python/2.2/code.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/code.py
@@ -0,0 +1,311 @@
+"""Utilities needed to emulate Python's interactive interpreter.
+
+"""
+
+# Inspired by similar code by Jeff Epler and Fredrik Lundh.
+
+
+import sys
+import traceback
+from codeop import CommandCompiler, compile_command
+
+__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
+           "compile_command"]
+
+def softspace(file, newvalue):
+    oldvalue = 0
+    try:
+        oldvalue = file.softspace
+    except AttributeError:
+        pass
+    try:
+        file.softspace = newvalue
+    except (AttributeError, TypeError):
+        # "attribute-less object" or "read-only attributes"
+        pass
+    return oldvalue
+
+class InteractiveInterpreter:
+    """Base class for InteractiveConsole.
+
+    This class deals with parsing and interpreter state (the user's
+    namespace); it doesn't deal with input buffering or prompting or
+    input file naming (the filename is always passed in explicitly).
+
+    """
+
+    def __init__(self, locals=None):
+        """Constructor.
+
+        The optional 'locals' argument specifies the dictionary in
+        which code will be executed; it defaults to a newly created
+        dictionary with key "__name__" set to "__console__" and key
+        "__doc__" set to None.
+
+        """
+        if locals is None:
+            locals = {"__name__": "__console__", "__doc__": None}
+        self.locals = locals
+        self.compile = CommandCompiler()
+
+    def runsource(self, source, filename="<input>", symbol="single"):
+        """Compile and run some source in the interpreter.
+
+        Arguments are as for compile_command().
+
+        One of several things can happen:
+
+        1) The input is incorrect; compile_command() raised an
+        exception (SyntaxError or OverflowError).  A syntax traceback
+        will be printed by calling the showsyntaxerror() method.
+
+        2) The input is incomplete, and more input is required;
+        compile_command() returned None.  Nothing happens.
+
+        3) The input is complete; compile_command() returned a code
+        object.  The code is executed by calling self.runcode() (which
+        also handles run-time exceptions, except for SystemExit).
+
+        The return value is 1 in case 2, 0 in the other cases (unless
+        an exception is raised).  The return value can be used to
+        decide whether to use sys.ps1 or sys.ps2 to prompt the next
+        line.
+
+        """
+        try:
+            code = self.compile(source, filename, symbol)
+        except (OverflowError, SyntaxError, ValueError):
+            # Case 1
+            self.showsyntaxerror(filename)
+            return 0
+
+        if code is None:
+            # Case 2
+            return 1
+
+        # Case 3
+        self.runcode(code)
+        return 0
+
+    def runcode(self, code):
+        """Execute a code object.
+
+        When an exception occurs, self.showtraceback() is called to
+        display a traceback.  All exceptions are caught except
+        SystemExit, which is reraised.
+
+        A note about KeyboardInterrupt: this exception may occur
+        elsewhere in this code, and may not always be caught.  The
+        caller should be prepared to deal with it.
+
+        """
+        try:
+            exec code in self.locals
+        except SystemExit:
+            raise
+        except:
+            self.showtraceback()
+        else:
+            if softspace(sys.stdout, 0):
+                print
+
+    def showsyntaxerror(self, filename=None):
+        """Display the syntax error that just occurred.
+
+        This doesn't display a stack trace because there isn't one.
+
+        If a filename is given, it is stuffed in the exception instead
+        of what was there before (because Python's parser always uses
+        "<string>" when reading from a string).
+
+        The output is written by self.write(), below.
+
+        """
+        type, value, sys.last_traceback = sys.exc_info()
+        sys.last_type = type
+        sys.last_value = value
+        if filename and type is SyntaxError:
+            # Work hard to stuff the correct filename in the exception
+            try:
+                msg, (dummy_filename, lineno, offset, line) = value
+            except:
+                # Not the format we expect; leave it alone
+                pass
+            else:
+                # Stuff in the right filename
+                try:
+                    # Assume SyntaxError is a class exception
+                    value = SyntaxError(msg, (filename, lineno, offset, line))
+                except:
+                    # If that failed, assume SyntaxError is a string
+                    value = msg, (filename, lineno, offset, line)
+                sys.last_value = value
+        list = traceback.format_exception_only(type, value)
+        map(self.write, list)
+
+    def showtraceback(self):
+        """Display the exception that just occurred.
+
+        We remove the first stack item because it is our own code.
+
+        The output is written by self.write(), below.
+
+        """
+        try:
+            type, value, tb = sys.exc_info()
+            sys.last_type = type
+            sys.last_value = value
+            sys.last_traceback = tb
+            tblist = traceback.extract_tb(tb)
+            del tblist[:1]
+            list = traceback.format_list(tblist)
+            if list:
+                list.insert(0, "Traceback (most recent call last):\n")
+            list[len(list):] = traceback.format_exception_only(type, value)
+        finally:
+            tblist = tb = None
+        map(self.write, list)
+
+    def write(self, data):
+        """Write a string.
+
+        The base implementation writes to sys.stderr; a subclass may
+        replace this with a different implementation.
+
+        """
+        sys.stderr.write(data)
+
+
+class InteractiveConsole(InteractiveInterpreter):
+    """Closely emulate the behavior of the interactive Python interpreter.
+
+    This class builds on InteractiveInterpreter and adds prompting
+    using the familiar sys.ps1 and sys.ps2, and input buffering.
+
+    """
+
+    def __init__(self, locals=None, filename="<console>"):
+        """Constructor.
+
+        The optional locals argument will be passed to the
+        InteractiveInterpreter base class.
+
+        The optional filename argument should specify the (file)name
+        of the input stream; it will show up in tracebacks.
+
+        """
+        InteractiveInterpreter.__init__(self, locals)
+        self.filename = filename
+        self.resetbuffer()
+
+    def resetbuffer(self):
+        """Reset the input buffer."""
+        self.buffer = []
+
+    def interact(self, banner=None):
+        """Closely emulate the interactive Python console.
+
+        The optional banner argument specifies the banner to print
+        before the first interaction; by default it prints a banner
+        similar to the one printed by the real Python interpreter,
+        followed by the current class name in parentheses (so as not
+        to confuse this with the real interpreter -- since it's so
+        close!).
+
+        """
+        try:
+            sys.ps1
+        except AttributeError:
+            sys.ps1 = ">>> "
+        try:
+            sys.ps2
+        except AttributeError:
+            sys.ps2 = "... "
+        cprt = 'Type "copyright", "credits" or "license" for more information.'
+        if banner is None:
+            self.write("Python %s on %s\n%s\n(%s)\n" %
+                       (sys.version, sys.platform, cprt,
+                        self.__class__.__name__))
+        else:
+            self.write("%s\n" % str(banner))
+        more = 0
+        while 1:
+            try:
+                if more:
+                    prompt = sys.ps2
+                else:
+                    prompt = sys.ps1
+                try:
+                    line = self.raw_input(prompt)
+                except EOFError:
+                    self.write("\n")
+                    break
+                else:
+                    more = self.push(line)
+            except KeyboardInterrupt:
+                self.write("\nKeyboardInterrupt\n")
+                self.resetbuffer()
+                more = 0
+
+    def push(self, line):
+        """Push a line to the interpreter.
+
+        The line should not have a trailing newline; it may have
+        internal newlines.  The line is appended to a buffer and the
+        interpreter's runsource() method is called with the
+        concatenated contents of the buffer as source.  If this
+        indicates that the command was executed or invalid, the buffer
+        is reset; otherwise, the command is incomplete, and the buffer
+        is left as it was after the line was appended.  The return
+        value is 1 if more input is required, 0 if the line was dealt
+        with in some way (this is the same as runsource()).
+
+        """
+        self.buffer.append(line)
+        source = "\n".join(self.buffer)
+        more = self.runsource(source, self.filename)
+        if not more:
+            self.resetbuffer()
+        return more
+
+    def raw_input(self, prompt=""):
+        """Write a prompt and read a line.
+
+        The returned line does not include the trailing newline.
+        When the user enters the EOF key sequence, EOFError is raised.
+
+        The base implementation uses the built-in function
+        raw_input(); a subclass may replace this with a different
+        implementation.
+
+        """
+        return raw_input(prompt)
+
+
+def interact(banner=None, readfunc=None, local=None):
+    """Closely emulate the interactive Python interpreter.
+
+    This is a backwards compatible interface to the InteractiveConsole
+    class.  When readfunc is not specified, it attempts to import the
+    readline module to enable GNU readline if it is available.
+
+    Arguments (all optional, all default to None):
+
+    banner -- passed to InteractiveConsole.interact()
+    readfunc -- if not None, replaces InteractiveConsole.raw_input()
+    local -- passed to InteractiveInterpreter.__init__()
+
+    """
+    console = InteractiveConsole(local)
+    if readfunc is not None:
+        console.raw_input = readfunc
+    else:
+        try:
+            import readline
+        except:
+            pass
+    console.interact(banner)
+
+
+if __name__ == '__main__':
+    interact()
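
A short sketch of driving the classes above programmatically instead of
through interact(); the source lines are arbitrary examples:

    from code import InteractiveConsole

    console = InteractiveConsole()
    console.push('x = 6 * 7')        # complete statement, returns 0
    more = console.push('if x:')     # incomplete, returns 1; buffer kept
    console.push('    print x')
    console.push('')                 # blank line closes the block: prints 42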
diff --git a/lib-python/2.2/codecs.py b/lib-python/2.2/codecs.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/codecs.py
@@ -0,0 +1,636 @@
+""" codecs -- Python Codec Registry, API and helpers.
+
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import struct, __builtin__
+
+### Registry and builtin stateless codec functions
+
+try:
+    from _codecs import *
+except ImportError, why:
+    raise SystemError,\
+          'Failed to load the builtin codecs: %s' % why
+
+__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
+           "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE"]
+
+### Constants
+
+#
+# Byte Order Mark (BOM) and its possible values (BOM_BE, BOM_LE)
+#
+BOM = struct.pack('=H', 0xFEFF)
+#
+BOM_BE = BOM32_BE = '\376\377'
+#       corresponds to Unicode U+FEFF in UTF-16 on big endian
+#       platforms == ZERO WIDTH NO-BREAK SPACE
+BOM_LE = BOM32_LE = '\377\376'
+#       corresponds to Unicode U+FEFF in UTF-16 on little endian
+#       platforms; byte-swapped it reads as U+FFFE, which is defined
+#       as an illegal Unicode character
+
+#
+# 64-bit Byte Order Marks
+#
+BOM64_BE = '\000\000\376\377'
+#       corresponds to Unicode U+0000FEFF in UCS-4
+BOM64_LE = '\377\376\000\000'
+#       corresponds to Unicode U+0000FEFF in UCS-4 on little endian platforms
+
+
+### Codec base classes (defining the API)
+
+class Codec:
+
+    """ Defines the interface for stateless encoders/decoders.
+
+        The .encode()/.decode() methods may implement different error
+        handling schemes by providing the errors argument. These
+        string values are defined:
+
+         'strict' - raise a ValueError (or a subclass)
+         'ignore' - ignore the character and continue with the next
+         'replace' - replace with a suitable replacement character;
+                    Python will use the official U+FFFD REPLACEMENT
+                    CHARACTER for the builtin Unicode codecs.
+
+    """
+    def encode(self, input, errors='strict'):
+
+        """ Encodes the object input and returns a tuple (output
+            object, length consumed).
+
+            errors defines the error handling to apply. It defaults to
+            'strict' handling.
+
+            The method may not store state in the Codec instance. Use
+            StreamCodec for codecs which have to keep state in order to
+            make encoding/decoding efficient.
+
+            The encoder must be able to handle zero length input and
+            return an empty object of the output object type in this
+            situation.
+
+        """
+        raise NotImplementedError
+
+    def decode(self, input, errors='strict'):
+
+        """ Decodes the object input and returns a tuple (output
+            object, length consumed).
+
+            input must be an object which provides the bf_getreadbuf
+            buffer slot. Python strings, buffer objects and memory
+            mapped files are examples of objects providing this slot.
+
+            errors defines the error handling to apply. It defaults to
+            'strict' handling.
+
+            The method may not store state in the Codec instance. Use
+            StreamCodec for codecs which have to keep state in order to
+            make encoding/decoding efficient.
+
+            The decoder must be able to handle zero length input and
+            return an empty object of the output object type in this
+            situation.
+
+        """
+        raise NotImplementedError
+
+#
+# The StreamWriter and StreamReader class provide generic working
+# interfaces which can be used to implement new encoding submodules
+# very easily. See encodings/utf_8.py for an example on how this is
+# done.
+#
+
+class StreamWriter(Codec):
+
+    def __init__(self, stream, errors='strict'):
+
+        """ Creates a StreamWriter instance.
+
+            stream must be a file-like object open for writing
+            (binary) data.
+
+            The StreamWriter may implement different error handling
+            schemes by providing the errors keyword argument. These
+            parameters are defined:
+
+             'strict' - raise a ValueError (or a subclass)
+             'ignore' - ignore the character and continue with the next
+             'replace' - replace with a suitable replacement character
+
+        """
+        self.stream = stream
+        self.errors = errors
+
+    def write(self, object):
+
+        """ Writes the object's contents encoded to self.stream.
+        """
+        data, consumed = self.encode(object, self.errors)
+        self.stream.write(data)
+
+    def writelines(self, list):
+
+        """ Writes the concatenated list of strings to the stream
+            using .write().
+        """
+        self.write(''.join(list))
+
+    def reset(self):
+
+        """ Flushes and resets the codec buffers used for keeping state.
+
+            Calling this method should ensure that the data on the
+            output is put into a clean state, that allows appending
+            of new fresh data without having to rescan the whole
+            stream to recover state.
+
+        """
+        pass
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamReader(Codec):
+
+    def __init__(self, stream, errors='strict'):
+
+        """ Creates a StreamReader instance.
+
+            stream must be a file-like object open for reading
+            (binary) data.
+
+            The StreamReader may implement different error handling
+            schemes by providing the errors keyword argument. These
+            parameters are defined:
+
+             'strict' - raise a ValueError (or a subclass)
+             'ignore' - ignore the character and continue with the next
+             'replace' - replace with a suitable replacement character
+
+        """
+        self.stream = stream
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        """ Decodes data from the stream self.stream and returns the
+            resulting object.
+
+            size indicates the approximate maximum number of bytes to
+            read from the stream for decoding purposes. The decoder
+            can modify this setting as appropriate. The default value
+            -1 indicates to read and decode as much as possible.  size
+            is intended to prevent having to decode huge files in one
+            step.
+
+            The method should use a greedy read strategy meaning that
+            it should read as much data as is allowed within the
+            definition of the encoding and the given size, e.g.  if
+            optional encoding endings or state markers are available
+            on the stream, these should be read too.
+
+        """
+        # Unsliced reading:
+        if size < 0:
+            return self.decode(self.stream.read(), self.errors)[0]
+
+        # Sliced reading:
+        read = self.stream.read
+        decode = self.decode
+        data = read(size)
+        i = 0
+        while 1:
+            try:
+                object, decodedbytes = decode(data, self.errors)
+            except ValueError, why:
+                # This method is slow but should work under pretty much
+                # all conditions; at most 10 tries are made
+                i = i + 1
+                newdata = read(1)
+                if not newdata or i > 10:
+                    raise
+                data = data + newdata
+            else:
+                return object
+
+    def readline(self, size=None):
+
+        """ Read one line from the input stream and return the
+            decoded data.
+
+            Note: Unlike the .readlines() method, this method inherits
+            the line breaking knowledge from the underlying stream's
+            .readline() method -- there is currently no support for
+            line breaking using the codec decoder due to lack of line
+            buffering. Subclasses should, however, if possible, try to
+            implement this method using their own knowledge of line
+            breaking.
+
+            size, if given, is passed as size argument to the stream's
+            .readline() method.
+
+        """
+        if size is None:
+            line = self.stream.readline()
+        else:
+            line = self.stream.readline(size)
+        return self.decode(line, self.errors)[0]
+
+
+    def readlines(self, sizehint=None):
+
+        """ Read all lines available on the input stream
+            and return them as list of lines.
+
+            Line breaks are implemented using the codec's decoder
+            method and are included in the list entries.
+
+            sizehint, if given, is passed as size argument to the
+            stream's .read() method.
+
+        """
+        if sizehint is None:
+            data = self.stream.read()
+        else:
+            data = self.stream.read(sizehint)
+        return self.decode(data, self.errors)[0].splitlines(1)
+
+    def reset(self):
+
+        """ Resets the codec buffers used for keeping state.
+
+            Note that no stream repositioning should take place.
+            This method is primarily intended to be able to recover
+            from decoding errors.
+
+        """
+        pass
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamReaderWriter:
+
+    """ StreamReaderWriter instances allow wrapping streams which
+        work in both read and write modes.
+
+        The design is such that one can use the factory functions
+        returned by the codecs.lookup() function to construct the
+        instance.
+
+    """
+    # Optional attributes set by the file wrappers below
+    encoding = 'unknown'
+
+    def __init__(self, stream, Reader, Writer, errors='strict'):
+
+        """ Creates a StreamReaderWriter instance.
+
+            stream must be a Stream-like object.
+
+            Reader, Writer must be factory functions or classes
+            providing the StreamReader, StreamWriter interface resp.
+
+            Error handling is done in the same way as defined for the
+            StreamWriter/Readers.
+
+        """
+        self.stream = stream
+        self.reader = Reader(stream, errors)
+        self.writer = Writer(stream, errors)
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        return self.reader.read(size)
+
+    def readline(self, size=None):
+
+        return self.reader.readline(size)
+
+    def readlines(self, sizehint=None):
+
+        return self.reader.readlines(sizehint)
+
+    def write(self, data):
+
+        return self.writer.write(data)
+
+    def writelines(self, list):
+
+        return self.writer.writelines(list)
+
+    def reset(self):
+
+        self.reader.reset()
+        self.writer.reset()
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+###
+
+class StreamRecoder:
+
+    """ StreamRecoder instances provide a frontend - backend
+        view of encoding data.
+
+        They use the complete set of APIs returned by the
+        codecs.lookup() function to implement their task.
+
+        Data written to the stream is first decoded into an
+        intermediate format (which is dependent on the given codec
+        combination) and then written to the stream using an instance
+        of the provided Writer class.
+
+        In the other direction, data is read from the stream using a
+        Reader instance and then returned to the caller as encoded data.
+
+    """
+    # Optional attributes set by the file wrappers below
+    data_encoding = 'unknown'
+    file_encoding = 'unknown'
+
+    def __init__(self, stream, encode, decode, Reader, Writer,
+                 errors='strict'):
+
+        """ Creates a StreamRecoder instance which implements a two-way
+            conversion: encode and decode work on the frontend (the
+            data returned by .read() and passed to .write()) while
+            Reader and Writer work on the backend (reading and
+            writing to the stream).
+
+            You can use these objects to do transparent direct
+            recodings from e.g. latin-1 to utf-8 and back.
+
+            stream must be a file-like object.
+
+            encode, decode must adhere to the Codec interface, Reader,
+            Writer must be factory functions or classes providing the
+            StreamReader, StreamWriter interface resp.
+
+            encode and decode are needed for the frontend translation,
+            Reader and Writer for the backend translation. Unicode is
+            used as intermediate encoding.
+
+            Error handling is done in the same way as defined for the
+            StreamWriter/Readers.
+
+        """
+        self.stream = stream
+        self.encode = encode
+        self.decode = decode
+        self.reader = Reader(stream, errors)
+        self.writer = Writer(stream, errors)
+        self.errors = errors
+
+    def read(self, size=-1):
+
+        data = self.reader.read(size)
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
+
+    def readline(self, size=None):
+
+        if size is None:
+            data = self.reader.readline()
+        else:
+            data = self.reader.readline(size)
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
+
+    def readlines(self, sizehint=None):
+
+        if sizehint is None:
+            data = self.reader.read()
+        else:
+            data = self.reader.read(sizehint)
+        data, bytesencoded = self.encode(data, self.errors)
+        return data.splitlines(1)
+
+    def write(self, data):
+
+        data, bytesdecoded = self.decode(data, self.errors)
+        return self.writer.write(data)
+
+    def writelines(self, list):
+
+        data = ''.join(list)
+        data, bytesdecoded = self.decode(data, self.errors)
+        return self.writer.write(data)
+
+    def reset(self):
+
+        self.reader.reset()
+        self.writer.reset()
+
+    def __getattr__(self, name,
+                    getattr=getattr):
+
+        """ Inherit all other methods from the underlying stream.
+        """
+        return getattr(self.stream, name)
+
+### Shortcuts
+
+def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
+
+    """ Open an encoded file using the given mode and return
+        a wrapped version providing transparent encoding/decoding.
+
+        Note: The wrapped version will only accept the object format
+        defined by the codecs, i.e. Unicode objects for most builtin
+        codecs. Output is also codec dependent and will usually be
+        Unicode as well.
+
+        Files are always opened in binary mode, even if no binary mode
+        was specified. This is done to avoid data loss due to encodings
+        using 8-bit values. The default file mode is 'rb' meaning to
+        open the file in binary read mode.
+
+        encoding specifies the encoding which is to be used for
+        the file.
+
+        errors may be given to define the error handling. It defaults
+        to 'strict' which causes ValueErrors to be raised in case an
+        encoding error occurs.
+
+        buffering has the same meaning as for the builtin open() API.
+        It defaults to line buffered.
+
+        The returned wrapped file object provides an extra attribute
+        .encoding which allows querying the used encoding. This
+        attribute is only available if an encoding was specified as
+        parameter.
+
+    """
+    if encoding is not None and \
+       'b' not in mode:
+        # Force opening of the file in binary mode
+        mode = mode + 'b'
+    file = __builtin__.open(filename, mode, buffering)
+    if encoding is None:
+        return file
+    (e, d, sr, sw) = lookup(encoding)
+    srw = StreamReaderWriter(file, sr, sw, errors)
+    # Add attributes to simplify introspection
+    srw.encoding = encoding
+    return srw
+
+def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
+
+    """ Return a wrapped version of file which provides transparent
+        encoding translation.
+
+        Strings written to the wrapped file are interpreted according
+        to the given data_encoding and then written to the original
+        file as string using file_encoding. The intermediate encoding
+        will usually be Unicode but depends on the specified codecs.
+
+        Strings are read from the file using file_encoding and then
+        passed back to the caller as string using data_encoding.
+
+        If file_encoding is not given, it defaults to data_encoding.
+
+        errors may be given to define the error handling. It defaults
+        to 'strict' which causes ValueErrors to be raised in case an
+        encoding error occurs.
+
+        The returned wrapped file object provides two extra attributes
+        .data_encoding and .file_encoding which reflect the given
+        parameters of the same name. The attributes can be used for
+        introspection by Python programs.
+
+    """
+    if file_encoding is None:
+        file_encoding = data_encoding
+    encode, decode = lookup(data_encoding)[:2]
+    Reader, Writer = lookup(file_encoding)[2:]
+    sr = StreamRecoder(file,
+                       encode, decode, Reader, Writer,
+                       errors)
+    # Add attributes to simplify introspection
+    sr.data_encoding = data_encoding
+    sr.file_encoding = file_encoding
+    return sr
+
+### Helpers for codec lookup
+
+def getencoder(encoding):
+
+    """ Lookup up the codec for the given encoding and return
+        its encoder function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[0]
+
+def getdecoder(encoding):
+
+    """ Lookup up the codec for the given encoding and return
+        its decoder function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[1]
+
+def getreader(encoding):
+
+    """ Lookup up the codec for the given encoding and return
+        its StreamReader class or factory function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[2]
+
+def getwriter(encoding):
+
+    """ Lookup up the codec for the given encoding and return
+        its StreamWriter class or factory function.
+
+        Raises a LookupError in case the encoding cannot be found.
+
+    """
+    return lookup(encoding)[3]
+
+### Helpers for charmap-based codecs
+
+def make_identity_dict(rng):
+
+    """ make_identity_dict(rng) -> dict
+
+        Return a dictionary where elements of the rng sequence are
+        mapped to themselves.
+
+    """
+    res = {}
+    for i in rng:
+        res[i]=i
+    return res
+
+def make_encoding_map(decoding_map):
+
+    """ Creates an encoding map from a decoding map.
+
+        If a target mapping in the decoding map occurs multiple
+        times, then that target is mapped to None (undefined mapping),
+        causing an exception when encountered by the charmap codec
+        during translation.
+
+        One example where this happens is cp875.py which decodes
+        multiple characters to \u001a.
+
+    """
+    m = {}
+    for k,v in decoding_map.items():
+        if not m.has_key(v):
+            m[v] = k
+        else:
+            m[v] = None
+    return m
+
+# Tell modulefinder that using codecs probably needs the encodings
+# package
+_false = 0
+if _false:
+    import encodings
+
+### Tests
+
+if __name__ == '__main__':
+
+    import sys
+
+    # Make stdout translate Latin-1 output into UTF-8 output
+    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
+
+    # Have stdin translate Latin-1 input into UTF-8 input
+    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
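
A sketch of the two shortcut helpers defined above (the file name is an
arbitrary example):

    import codecs

    # Transparent unicode I/O: the wrapper encodes unicode objects on
    # write and decodes bytes on read, using the named codec.
    f = codecs.open('data.txt', 'wb', encoding='utf-8')
    f.write(u'caf\xe9\n')
    f.close()

    f = codecs.open('data.txt', 'rb', encoding='utf-8')
    print repr(f.read())    # u'caf\xe9\n'
    f.close()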
diff --git a/lib-python/2.2/codeop.py b/lib-python/2.2/codeop.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/codeop.py
@@ -0,0 +1,171 @@
+r"""Utilities to compile possibly incomplete Python source code.
+
+This module provides two interfaces, broadly similar to the builtin
+function compile(), that take program text, a filename and a 'mode'
+and:
+
+- Return a code object if the command is complete and valid
+- Return None if the command is incomplete
+- Raise SyntaxError, ValueError or OverflowError if the command is a
+  syntax error (OverflowError and ValueError can be produced by
+  malformed literals).
+
+Approach:
+
+First, check if the source consists entirely of blank lines and
+comments; if so, replace it with 'pass', because the built-in
+parser doesn't always do the right thing for these.
+
+Compile three times: as is, with \n, and with \n\n appended.  If it
+compiles as is, it's complete.  If it compiles with one \n appended,
+we expect more.  If it doesn't compile either way, we compare the
+error we get when compiling with \n or \n\n appended.  If the errors
+are the same, the code is broken.  But if the errors are different, we
+expect more.  Not intuitive; not even guaranteed to hold in future
+releases; but this matches the compiler's behavior from Python 1.4
+through 2.2, at least.
+
+Caveat:
+
+It is possible (but not likely) that the parser stops parsing with a
+successful outcome before reaching the end of the source; in this
+case, trailing symbols may be ignored instead of causing an error.
+For example, a backslash followed by two newlines may be followed by
+arbitrary garbage.  This will be fixed once the API for the parser is
+better.
+
+The two interfaces are:
+
+compile_command(source, filename, symbol):
+
+    Compiles a single command in the manner described above.
+
+CommandCompiler():
+
+    Instances of this class have __call__ methods identical in
+    signature to compile_command; the difference is that if the
+    instance compiles program text containing a __future__ statement,
+    the instance 'remembers' and compiles all subsequent program texts
+    with the statement in force.
+
+The module also provides another class:
+
+Compile():
+
+    Instances of this class act like the built-in function compile,
+    but with 'memory' in the sense described above.
+"""
+
+import __future__
+
+_features = [getattr(__future__, fname)
+             for fname in __future__.all_feature_names]
+
+__all__ = ["compile_command", "Compile", "CommandCompiler"]
+
+def _maybe_compile(compiler, source, filename, symbol):
+    # Check for source consisting of only blank lines and comments
+    for line in source.split("\n"):
+        line = line.strip()
+        if line and line[0] != '#':
+            break               # Leave it alone
+    else:
+        if symbol != "eval":
+            source = "pass"     # Replace it with a 'pass' statement
+
+    err = err1 = err2 = None
+    code = code1 = code2 = None
+
+    try:
+        code = compiler(source, filename, symbol)
+    except SyntaxError, err:
+        pass
+
+    try:
+        code1 = compiler(source + "\n", filename, symbol)
+    except SyntaxError, err1:
+        pass
+
+    try:
+        code2 = compiler(source + "\n\n", filename, symbol)
+    except SyntaxError, err2:
+        pass
+
+    if code:
+        return code
+    try:
+        e1 = err1.__dict__
+    except AttributeError:
+        e1 = err1
+    try:
+        e2 = err2.__dict__
+    except AttributeError:
+        e2 = err2
+    if not code1 and e1 == e2:
+        raise SyntaxError, err1
+
+def compile_command(source, filename="<input>", symbol="single"):
+    r"""Compile a command and determine whether it is incomplete.
+
+    Arguments:
+
+    source -- the source string; may contain \n characters
+    filename -- optional filename from which source was read; default
+                "<input>"
+    symbol -- optional grammar start symbol; "single" (default) or "eval"
+
+    Return value / exceptions raised:
+
+    - Return a code object if the command is complete and valid
+    - Return None if the command is incomplete
+    - Raise SyntaxError, ValueError or OverflowError if the command is a
+      syntax error (OverflowError and ValueError can be produced by
+      malformed literals).
+    """
+    return _maybe_compile(compile, source, filename, symbol)
+
+class Compile:
+    """Instances of this class behave much like the built-in compile
+    function, but if one is used to compile text containing a future
+    statement, it "remembers" and compiles all subsequent program texts
+    with the statement in force."""
+    def __init__(self):
+        self.flags = 0
+
+    def __call__(self, source, filename, symbol):
+        codeob = compile(source, filename, symbol, self.flags, 1)
+        for feature in _features:
+            if codeob.co_flags & feature.compiler_flag:
+                self.flags |= feature.compiler_flag
+        return codeob
+
+class CommandCompiler:
+    """Instances of this class have __call__ methods identical in
+    signature to compile_command; the difference is that if the
+    instance compiles program text containing a __future__ statement,
+    the instance 'remembers' and compiles all subsequent program texts
+    with the statement in force."""
+
+    def __init__(self,):
+        self.compiler = Compile()
+
+    def __call__(self, source, filename="<input>", symbol="single"):
+        r"""Compile a command and determine whether it is incomplete.
+
+        Arguments:
+
+        source -- the source string; may contain \n characters
+        filename -- optional filename from which source was read;
+                    default "<input>"
+        symbol -- optional grammar start symbol; "single" (default) or
+                  "eval"
+
+        Return value / exceptions raised:
+
+        - Return a code object if the command is complete and valid
+        - Return None if the command is incomplete
+        - Raise SyntaxError, ValueError or OverflowError if the command is a
+          syntax error (OverflowError and ValueError can be produced by
+          malformed literals).
+        """
+        return _maybe_compile(self.compiler, source, filename, symbol)
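
The three-way contract described above can be seen directly; the inputs
here are arbitrary examples:

    from codeop import compile_command

    print compile_command('print "hello"')   # a code object: complete
    print compile_command('if 1:')           # None: incomplete
    try:
        compile_command('print )')           # malformed: raises
    except SyntaxError:
        print 'syntax error'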
diff --git a/lib-python/2.2/colorsys.py b/lib-python/2.2/colorsys.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/colorsys.py
@@ -0,0 +1,123 @@
+"""Conversion functions between RGB and other color systems.
+
+This module provides two functions for each color system ABC:
+
+  rgb_to_abc(r, g, b) --> a, b, c
+  abc_to_rgb(a, b, c) --> r, g, b
+
+All inputs and outputs are triples of floats in the range [0.0...1.0].
+Inputs outside this range may cause exceptions or invalid outputs.
+
+Supported color systems:
+RGB: Red, Green, Blue components
+YIQ: used by composite video signals
+HLS: Hue, Luminance, Saturation
+HSV: Hue, Saturation, Value
+"""
+# References:
+# XXX Where's the literature?
+
+__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
+           "rgb_to_hsv","hsv_to_rgb"]
+
+# Some floating point constants
+
+ONE_THIRD = 1.0/3.0
+ONE_SIXTH = 1.0/6.0
+TWO_THIRD = 2.0/3.0
+
+
+# YIQ: used by composite video signals (linear combinations of RGB)
+# Y: perceived grey level (0.0 == black, 1.0 == white)
+# I, Q: color components
+
+def rgb_to_yiq(r, g, b):
+    y = 0.30*r + 0.59*g + 0.11*b
+    i = 0.60*r - 0.28*g - 0.32*b
+    q = 0.21*r - 0.52*g + 0.31*b
+    return (y, i, q)
+
+def yiq_to_rgb(y, i, q):
+    r = y + 0.948262*i + 0.624013*q
+    g = y - 0.276066*i - 0.639810*q
+    b = y - 1.105450*i + 1.729860*q
+    if r < 0.0: r = 0.0
+    if g < 0.0: g = 0.0
+    if b < 0.0: b = 0.0
+    if r > 1.0: r = 1.0
+    if g > 1.0: g = 1.0
+    if b > 1.0: b = 1.0
+    return (r, g, b)
+
+
+# HLS: Hue, Luminance, Saturation
+# H: position in the spectrum
+# L: lightness/luminance (0.0 == black, 1.0 == white)
+# S: saturation (0.0 == grey, 1.0 == fully saturated)
+
+def rgb_to_hls(r, g, b):
+    maxc = max(r, g, b)
+    minc = min(r, g, b)
+    # XXX Can optimize (maxc+minc) and (maxc-minc)
+    l = (minc+maxc)/2.0
+    if minc == maxc: return 0.0, l, 0.0
+    if l <= 0.5: s = (maxc-minc) / (maxc+minc)
+    else: s = (maxc-minc) / (2.0-maxc-minc)
+    rc = (maxc-r) / (maxc-minc)
+    gc = (maxc-g) / (maxc-minc)
+    bc = (maxc-b) / (maxc-minc)
+    if r == maxc: h = bc-gc
+    elif g == maxc: h = 2.0+rc-bc
+    else: h = 4.0+gc-rc
+    h = (h/6.0) % 1.0
+    return h, l, s
+
+def hls_to_rgb(h, l, s):
+    if s == 0.0: return l, l, l
+    if l <= 0.5: m2 = l * (1.0+s)
+    else: m2 = l+s-(l*s)
+    m1 = 2.0*l - m2
+    return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
+
+def _v(m1, m2, hue):
+    hue = hue % 1.0
+    if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
+    if hue < 0.5: return m2
+    if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
+    return m1
+
+
+# HSV: Hue, Saturation, Value
+# H: position in the spectrum
+# S: color saturation ("purity")
+# V: brightness of the color
+
+def rgb_to_hsv(r, g, b):
+    maxc = max(r, g, b)
+    minc = min(r, g, b)
+    v = maxc
+    if minc == maxc: return 0.0, 0.0, v
+    s = (maxc-minc) / maxc
+    rc = (maxc-r) / (maxc-minc)
+    gc = (maxc-g) / (maxc-minc)
+    bc = (maxc-b) / (maxc-minc)
+    if r == maxc: h = bc-gc
+    elif g == maxc: h = 2.0+rc-bc
+    else: h = 4.0+gc-rc
+    h = (h/6.0) % 1.0
+    return h, s, v
+
+def hsv_to_rgb(h, s, v):
+    if s == 0.0: return v, v, v
+    i = int(h*6.0) # XXX assume int() truncates!
+    f = (h*6.0) - i
+    p = v*(1.0 - s)
+    q = v*(1.0 - s*f)
+    t = v*(1.0 - s*(1.0-f))
+    if i%6 == 0: return v, t, p
+    if i == 1: return q, v, p
+    if i == 2: return p, v, t
+    if i == 3: return p, q, v
+    if i == 4: return t, p, v
+    if i == 5: return v, p, q
+    # Cannot get here
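
A quick round-trip check of the conversions above; all values are floats
in the range [0.0, 1.0]:

    import colorsys

    h, s, v = colorsys.rgb_to_hsv(0.2, 0.4, 0.4)
    print h, s, v                        # approximately 0.5 0.5 0.4
    print colorsys.hsv_to_rgb(h, s, v)   # back to (0.2, 0.4, 0.4)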
diff --git a/lib-python/2.2/commands.py b/lib-python/2.2/commands.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/commands.py
@@ -0,0 +1,84 @@
+"""Execute shell commands via os.popen() and return status, output.
+
+Interface summary:
+
+       import commands
+
+       outtext = commands.getoutput(cmd)
+       (exitstatus, outtext) = commands.getstatusoutput(cmd)
+       outtext = commands.getstatus(file)  # returns output of "ls -ld file"
+
+A trailing newline is removed from the output string.
+
+Encapsulates the basic operation:
+
+      pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+      text = pipe.read()
+      sts = pipe.close()
+
+ [Note:  it would be nice to add functions to interpret the exit status.]
+"""
+
+__all__ = ["getstatusoutput","getoutput","getstatus"]
+
+# Module 'commands'
+#
+# Various tools for executing commands and looking at their output and status.
+#
+# NB This only works (and is only relevant) for UNIX.
+
+
+# Get 'ls -l' status for an object into a string
+#
+def getstatus(file):
+    """Return output of "ls -ld <file>" in a string."""
+    return getoutput('ls -ld' + mkarg(file))
+
+
+# Get the output from a shell command into a string.
+# The exit status is ignored; a trailing newline is stripped.
+# Assume the command will work with '{ ... ; } 2>&1' around it.
+#
+def getoutput(cmd):
+    """Return output (stdout or stderr) of executing cmd in a shell."""
+    return getstatusoutput(cmd)[1]
+
+
+# Ditto but preserving the exit status.
+# Returns a pair (sts, output)
+#
+def getstatusoutput(cmd):
+    """Return (status, output) of executing cmd in a shell."""
+    import os
+    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+    text = pipe.read()
+    sts = pipe.close()
+    if sts is None: sts = 0
+    if text[-1:] == '\n': text = text[:-1]
+    return sts, text
+
+
+# Make command argument from directory and pathname (prefix space, add quotes).
+#
+def mk2arg(head, x):
+    import os
+    return mkarg(os.path.join(head, x))
+
+
+# Make a shell command argument from a string.
+# Return a string beginning with a space followed by a shell-quoted
+# version of the argument.
+# Two strategies: enclose in single quotes if it contains none;
+# otherwise, enclose in double quotes and prefix quotable characters
+# with backslash.
+#
+def mkarg(x):
+    if '\'' not in x:
+        return ' \'' + x + '\''
+    s = ' "'
+    for c in x:
+        if c in '\\$"`':
+            s = s + '\\'
+        s = s + c
+    s = s + '"'
+    return s
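
Typical use of the helpers above on a UNIX box (the command is an
arbitrary example):

    import commands

    status, text = commands.getstatusoutput('ls /nonexistent')
    print status    # nonzero wait status from the shell
    print text      # the error message; stderr is merged into stdout
    print commands.mkarg("it's")    # shell-quoted:  "it's"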
diff --git a/lib-python/2.2/compileall.py b/lib-python/2.2/compileall.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compileall.py
@@ -0,0 +1,148 @@
+"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
+
+When called as a script with arguments, this compiles the directories
+given as arguments recursively; the -l option prevents it from
+recursing into directories.
+
+Without arguments, it compiles all modules on sys.path, without
+recursing into subdirectories.  (Even though it should do so for
+packages -- for now, you'll have to deal with packages separately.)
+
+See module py_compile for details of the actual byte-compilation.
+
+"""
+
+import os
+import stat
+import sys
+import py_compile
+
+__all__ = ["compile_dir","compile_path"]
+
+def compile_dir(dir, maxlevels=10, ddir=None, force=0, rx=None):
+    """Byte-compile all modules in the given directory tree.
+
+    Arguments (only dir is required):
+
+    dir:       the directory to byte-compile
+    maxlevels: maximum recursion level (default 10)
+    ddir:      if given, purported directory name (this is the
+               directory name that will show up in error messages)
+    force:     if 1, force compilation, even if timestamps are up-to-date
+    rx:        if given, a compiled regex; files whose full path it
+               matches are skipped
+
+    """
+    print 'Listing', dir, '...'
+    try:
+        names = os.listdir(dir)
+    except os.error:
+        print "Can't list", dir
+        names = []
+    names.sort()
+    success = 1
+    for name in names:
+        fullname = os.path.join(dir, name)
+        if ddir:
+            dfile = os.path.join(ddir, name)
+        else:
+            dfile = None
+        if rx:
+            mo = rx.search(fullname)
+            if mo:
+                continue
+        if os.path.isfile(fullname):
+            head, tail = name[:-3], name[-3:]
+            if tail == '.py':
+                cfile = fullname + (__debug__ and 'c' or 'o')
+                ftime = os.stat(fullname)[stat.ST_MTIME]
+                try: ctime = os.stat(cfile)[stat.ST_MTIME]
+                except os.error: ctime = 0
+                if (ctime > ftime) and not force: continue
+                print 'Compiling', fullname, '...'
+                try:
+                    ok = py_compile.compile(fullname, None, dfile)
+                except KeyboardInterrupt:
+                    raise KeyboardInterrupt
+                except:
+                    # XXX py_compile catches SyntaxErrors
+                    if type(sys.exc_type) == type(''):
+                        exc_type_name = sys.exc_type
+                    else: exc_type_name = sys.exc_type.__name__
+                    print 'Sorry:', exc_type_name + ':',
+                    print sys.exc_value
+                    success = 0
+                else:
+                    if ok == 0:
+                        success = 0
+        elif maxlevels > 0 and \
+             name != os.curdir and name != os.pardir and \
+             os.path.isdir(fullname) and \
+             not os.path.islink(fullname):
+            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx):
+                success = 0
+    return success
+
+def compile_path(skip_curdir=1, maxlevels=0, force=0):
+    """Byte-compile all module on sys.path.
+
+    Arguments (all optional):
+
+    skip_curdir: if true, skip current directory (default true)
+    maxlevels:   max recursion level (default 0)
+    force: as for compile_dir() (default 0)
+
+    """
+    success = 1
+    for dir in sys.path:
+        if (not dir or dir == os.curdir) and skip_curdir:
+            print 'Skipping current directory'
+        else:
+            success = success and compile_dir(dir, maxlevels, None, force)
+    return success
+
+def main():
+    """Script main program."""
+    import getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'lfd:x:')
+    except getopt.error, msg:
+        print msg
+        print "usage: python compileall.py [-l] [-f] [-d destdir] " \
+              "[-s regexp] [directory ...]"
+        print "-l: don't recurse down"
+        print "-f: force rebuild even if timestamps are up-to-date"
+        print "-d destdir: purported directory name for error messages"
+        print "   if no directory arguments, -l sys.path is assumed"
+        print "-x regexp: skip files matching the regular expression regexp"
+        print "   the regexp is search for in the full path of the file"
+        sys.exit(2)
+    maxlevels = 10
+    ddir = None
+    force = 0
+    rx = None
+    for o, a in opts:
+        if o == '-l': maxlevels = 0
+        if o == '-d': ddir = a
+        if o == '-f': force = 1
+        if o == '-x':
+            import re
+            rx = re.compile(a)
+    if ddir:
+        if len(args) != 1:
+            print "-d destdir require exactly one directory argument"
+            sys.exit(2)
+    success = 1
+    try:
+        if args:
+            for dir in args:
+                if not compile_dir(dir, maxlevels, ddir, force, rx):
+                    success = 0
+        else:
+            success = compile_path()
+    except KeyboardInterrupt:
+        print "\n[interrupt]"
+        success = 0
+    return success
+
+if __name__ == '__main__':
+    exit_status = not main()
+    sys.exit(exit_status)
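+
+# Command-line sketch (pattern and directory are examples only):
+#
+#     python compileall.py -l -f -x 'CVS' Lib
+#
+# compiles the .py files directly under Lib, forcing recompilation even
+# when timestamps are current and skipping any path matching "CVS".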
diff --git a/lib-python/2.2/compiler/__init__.py b/lib-python/2.2/compiler/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/__init__.py
@@ -0,0 +1,26 @@
+"""Package for parsing and compiling Python source code
+
+There are several functions defined at the top level that are imported
+from modules contained in the package.
+
+parse(buf, mode="exec") -> AST
+    Converts a string containing Python source code to an abstract
+    syntax tree (AST).  The AST is defined in compiler.ast.
+
+parseFile(path) -> AST
+    The same as parse(open(path).read())
+
+walk(ast, visitor, verbose=None)
+    Does a pre-order walk over the ast using the visitor instance.
+    See compiler.visitor for details.
+
+compile(source, filename, mode, flags=None, dont_inherit=None)
+    Returns a code object.  A replacement for the builtin compile() function.
+
+compileFile(filename)
+    Generates a .pyc file by compiling filename.
+"""
+
+from transformer import parse, parseFile
+from visitor import walk
+from pycodegen import compile, compileFile
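+
+# A short sketch of the top-level API (assuming the package is
+# importable as "compiler"):
+#
+#     from compiler import parse
+#     tree = parse("a = 1 + 2")
+#     print tree        # Module(None, Stmt([Assign(...)]))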
diff --git a/lib-python/2.2/compiler/ast.py b/lib-python/2.2/compiler/ast.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/ast.py
@@ -0,0 +1,1241 @@
+"""Python abstract syntax node definitions
+
+This file is automatically generated.
+"""
+from types import TupleType, ListType
+from consts import CO_VARARGS, CO_VARKEYWORDS
+
+def flatten(list):
+    l = []
+    for elt in list:
+        t = type(elt)
+        if t is TupleType or t is ListType:
+            for elt2 in flatten(elt):
+                l.append(elt2)
+        else:
+            l.append(elt)
+    return l
+
+def flatten_nodes(list):
+    return [n for n in flatten(list) if isinstance(n, Node)]
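+
+# For example, flatten([1, (2, [3, 4]), 5]) yields [1, 2, 3, 4, 5];
+# flatten_nodes() additionally drops anything that is not a Node.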
+
+def asList(nodes):
+    l = []
+    for item in nodes:
+        if hasattr(item, "asList"):
+            l.append(item.asList())
+        else:
+            t = type(item)
+            if t is TupleType or t is ListType:
+                l.append(tuple(asList(item)))
+            else:
+                l.append(item)
+    return l
+
+nodes = {}
+
+class Node: # an abstract base class
+    lineno = None # provide a lineno for nodes that don't have one
+    def getType(self):
+        pass # implemented by subclass
+    def getChildren(self):
+        pass # implemented by subclasses
+    def asList(self):
+        return tuple(asList(self.getChildren()))
+    def getChildNodes(self):
+        pass # implemented by subclasses
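+
+# Every concrete node class below implements the same small protocol:
+# getChildren() returns every constructor field (plain strings and
+# flags included), while getChildNodes() returns only the child Node
+# instances, which is what tree walkers need.  For example:
+#
+#     n = Getattr(Name('sys'), 'path')
+#     n.getChildren()      # (Name('sys'), 'path')
+#     n.getChildNodes()    # (Name('sys'),)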
+
+class EmptyNode(Node):
+    pass
+
+class Slice(Node):
+    nodes["slice"] = "Slice"
+    def __init__(self, expr, flags, lower, upper):
+        self.expr = expr
+        self.flags = flags
+        self.lower = lower
+        self.upper = upper
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.append(self.flags)
+        children.append(self.lower)
+        children.append(self.upper)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        if self.lower is not None:
+            nodes.append(self.lower)
+        if self.upper is not None:
+            nodes.append(self.upper)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
+
+class Const(Node):
+    nodes["const"] = "Const"
+    def __init__(self, value):
+        self.value = value
+
+    def getChildren(self):
+        return self.value,
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Const(%s)" % (repr(self.value),)
+
+class Raise(Node):
+    nodes["raise"] = "Raise"
+    def __init__(self, expr1, expr2, expr3):
+        self.expr1 = expr1
+        self.expr2 = expr2
+        self.expr3 = expr3
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr1)
+        children.append(self.expr2)
+        children.append(self.expr3)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        if self.expr1 is not None:
+            nodes.append(self.expr1)
+        if self.expr2 is not None:
+            nodes.append(self.expr2)
+        if self.expr3 is not None:
+            nodes.append(self.expr3)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
+
+class For(Node):
+    nodes["for"] = "For"
+    def __init__(self, assign, list, body, else_):
+        self.assign = assign
+        self.list = list
+        self.body = body
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.append(self.assign)
+        children.append(self.list)
+        children.append(self.body)
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.assign)
+        nodes.append(self.list)
+        nodes.append(self.body)
+        if self.else_ is not None:
+            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
+
+class AssTuple(Node):
+    nodes["asstuple"] = "AssTuple"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "AssTuple(%s)" % (repr(self.nodes),)
+
+class Mul(Node):
+    nodes["mul"] = "Mul"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Invert(Node):
+    nodes["invert"] = "Invert"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Invert(%s)" % (repr(self.expr),)
+
+class RightShift(Node):
+    nodes["rightshift"] = "RightShift"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
+
+class AssList(Node):
+    nodes["asslist"] = "AssList"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "AssList(%s)" % (repr(self.nodes),)
+
+class From(Node):
+    nodes["from"] = "From"
+    def __init__(self, modname, names):
+        self.modname = modname
+        self.names = names
+
+    def getChildren(self):
+        return self.modname, self.names
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "From(%s, %s)" % (repr(self.modname), repr(self.names))
+
+class Getattr(Node):
+    nodes["getattr"] = "Getattr"
+    def __init__(self, expr, attrname):
+        self.expr = expr
+        self.attrname = attrname
+
+    def getChildren(self):
+        return self.expr, self.attrname
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
+
+class Dict(Node):
+    nodes["dict"] = "Dict"
+    def __init__(self, items):
+        self.items = items
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.items))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.items))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Dict(%s)" % (repr(self.items),)
+
+class Module(Node):
+    nodes["module"] = "Module"
+    def __init__(self, doc, node):
+        self.doc = doc
+        self.node = node
+
+    def getChildren(self):
+        return self.doc, self.node
+
+    def getChildNodes(self):
+        return self.node,
+
+    def __repr__(self):
+        return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
+
+class Expression(Node):
+    # Expression is an artificial node class to support "eval"
+    nodes["expression"] = "Expression"
+    def __init__(self, node):
+        self.node = node
+
+    def getChildren(self):
+        return self.node,
+
+    def getChildNodes(self):
+        return self.node,
+
+    def __repr__(self):
+        return "Expression(%s)" % (repr(self.node))
+
+class UnaryAdd(Node):
+    nodes["unaryadd"] = "UnaryAdd"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "UnaryAdd(%s)" % (repr(self.expr),)
+
+class Ellipsis(Node):
+    nodes["ellipsis"] = "Ellipsis"
+    def __init__(self):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Ellipsis()"
+
+class Print(Node):
+    nodes["print"] = "Print"
+    def __init__(self, nodes, dest):
+        self.nodes = nodes
+        self.dest = dest
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        children.append(self.dest)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        if self.dest is not None:
+            nodes.append(self.dest)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
+class Import(Node):
+    nodes["import"] = "Import"
+    def __init__(self, names):
+        self.names = names
+
+    def getChildren(self):
+        return self.names,
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Import(%s)" % (repr(self.names),)
+
+class Subscript(Node):
+    nodes["subscript"] = "Subscript"
+    def __init__(self, expr, flags, subs):
+        self.expr = expr
+        self.flags = flags
+        self.subs = subs
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.append(self.flags)
+        children.extend(flatten(self.subs))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        nodes.extend(flatten_nodes(self.subs))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
+
+class TryExcept(Node):
+    nodes["tryexcept"] = "TryExcept"
+    def __init__(self, body, handlers, else_):
+        self.body = body
+        self.handlers = handlers
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.append(self.body)
+        children.extend(flatten(self.handlers))
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.body)
+        nodes.extend(flatten_nodes(self.handlers))
+        if self.else_ is not None:
+            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
+
+class Or(Node):
+    nodes["or"] = "Or"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Or(%s)" % (repr(self.nodes),)
+
+class Name(Node):
+    nodes["name"] = "Name"
+    def __init__(self, name):
+        self.name = name
+
+    def getChildren(self):
+        return self.name,
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Name(%s)" % (repr(self.name),)
+
+class Function(Node):
+    nodes["function"] = "Function"
+    def __init__(self, name, argnames, defaults, flags, doc, code):
+        self.name = name
+        self.argnames = argnames
+        self.defaults = defaults
+        self.flags = flags
+        self.doc = doc
+        self.code = code
+        self.varargs = self.kwargs = None
+        if flags & CO_VARARGS:
+            self.varargs = 1
+        if flags & CO_VARKEYWORDS:
+            self.kwargs = 1
+
+    def getChildren(self):
+        children = []
+        children.append(self.name)
+        children.append(self.argnames)
+        children.extend(flatten(self.defaults))
+        children.append(self.flags)
+        children.append(self.doc)
+        children.append(self.code)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.defaults))
+        nodes.append(self.code)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Function(%s, %s, %s, %s, %s, %s)" % (repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
+
+class Assert(Node):
+    nodes["assert"] = "Assert"
+    def __init__(self, test, fail):
+        self.test = test
+        self.fail = fail
+
+    def getChildren(self):
+        children = []
+        children.append(self.test)
+        children.append(self.fail)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.test)
+        if self.fail is not None:
+            nodes.append(self.fail)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
+
+class Return(Node):
+    nodes["return"] = "Return"
+    def __init__(self, value):
+        self.value = value
+
+    def getChildren(self):
+        return self.value,
+
+    def getChildNodes(self):
+        return self.value,
+
+    def __repr__(self):
+        return "Return(%s)" % (repr(self.value),)
+
+class Power(Node):
+    nodes["power"] = "Power"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Power((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Exec(Node):
+    nodes["exec"] = "Exec"
+    def __init__(self, expr, locals, globals):
+        self.expr = expr
+        self.locals = locals
+        self.globals = globals
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.append(self.locals)
+        children.append(self.globals)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        if self.locals is not None:
+            nodes.append(self.locals)
+        if self.globals is not None:
+            nodes.append(self.globals)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
+
+class Stmt(Node):
+    nodes["stmt"] = "Stmt"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Stmt(%s)" % (repr(self.nodes),)
+
+class Sliceobj(Node):
+    nodes["sliceobj"] = "Sliceobj"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Sliceobj(%s)" % (repr(self.nodes),)
+
+class Break(Node):
+    nodes["break"] = "Break"
+    def __init__(self):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Break()"
+
+class Bitand(Node):
+    nodes["bitand"] = "Bitand"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitand(%s)" % (repr(self.nodes),)
+
+class FloorDiv(Node):
+    nodes["floordiv"] = "FloorDiv"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
+
+class TryFinally(Node):
+    nodes["tryfinally"] = "TryFinally"
+    def __init__(self, body, final):
+        self.body = body
+        self.final = final
+
+    def getChildren(self):
+        return self.body, self.final
+
+    def getChildNodes(self):
+        return self.body, self.final
+
+    def __repr__(self):
+        return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
+
+class Not(Node):
+    nodes["not"] = "Not"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Not(%s)" % (repr(self.expr),)
+
+class Class(Node):
+    nodes["class"] = "Class"
+    def __init__(self, name, bases, doc, code):
+        self.name = name
+        self.bases = bases
+        self.doc = doc
+        self.code = code
+
+    def getChildren(self):
+        children = []
+        children.append(self.name)
+        children.extend(flatten(self.bases))
+        children.append(self.doc)
+        children.append(self.code)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.bases))
+        nodes.append(self.code)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Class(%s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code))
+
+class Mod(Node):
+    nodes["mod"] = "Mod"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Printnl(Node):
+    nodes["printnl"] = "Printnl"
+    def __init__(self, nodes, dest):
+        self.nodes = nodes
+        self.dest = dest
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        children.append(self.dest)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        if self.dest is not None:
+            nodes.append(self.dest)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
+class Tuple(Node):
+    nodes["tuple"] = "Tuple"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Tuple(%s)" % (repr(self.nodes),)
+
+class AssAttr(Node):
+    nodes["assattr"] = "AssAttr"
+    def __init__(self, expr, attrname, flags):
+        self.expr = expr
+        self.attrname = attrname
+        self.flags = flags
+
+    def getChildren(self):
+        return self.expr, self.attrname, self.flags
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
+
+class Keyword(Node):
+    nodes["keyword"] = "Keyword"
+    def __init__(self, name, expr):
+        self.name = name
+        self.expr = expr
+
+    def getChildren(self):
+        return self.name, self.expr
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
+
+class AugAssign(Node):
+    nodes["augassign"] = "AugAssign"
+    def __init__(self, node, op, expr):
+        self.node = node
+        self.op = op
+        self.expr = expr
+
+    def getChildren(self):
+        return self.node, self.op, self.expr
+
+    def getChildNodes(self):
+        return self.node, self.expr
+
+    def __repr__(self):
+        return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
+
+class List(Node):
+    nodes["list"] = "List"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "List(%s)" % (repr(self.nodes),)
+
+class Yield(Node):
+    nodes["yield"] = "Yield"
+    def __init__(self, value):
+        self.value = value
+
+    def getChildren(self):
+        return self.value,
+
+    def getChildNodes(self):
+        return self.value,
+
+    def __repr__(self):
+        return "Yield(%s)" % (repr(self.value),)
+
+class LeftShift(Node):
+    nodes["leftshift"] = "LeftShift"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
+
+class AssName(Node):
+    nodes["assname"] = "AssName"
+    def __init__(self, name, flags):
+        self.name = name
+        self.flags = flags
+
+    def getChildren(self):
+        return self.name, self.flags
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
+
+class While(Node):
+    nodes["while"] = "While"
+    def __init__(self, test, body, else_):
+        self.test = test
+        self.body = body
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.append(self.test)
+        children.append(self.body)
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.test)
+        nodes.append(self.body)
+        if self.else_ is not None:
+            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
+
+class Continue(Node):
+    nodes["continue"] = "Continue"
+    def __init__(self):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Continue()"
+
+class Backquote(Node):
+    nodes["backquote"] = "Backquote"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Backquote(%s)" % (repr(self.expr),)
+
+class Discard(Node):
+    nodes["discard"] = "Discard"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "Discard(%s)" % (repr(self.expr),)
+
+class Div(Node):
+    nodes["div"] = "Div"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Div((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Assign(Node):
+    nodes["assign"] = "Assign"
+    def __init__(self, nodes, expr):
+        self.nodes = nodes
+        self.expr = expr
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        children.append(self.expr)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        nodes.append(self.expr)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
+
+class Lambda(Node):
+    nodes["lambda"] = "Lambda"
+    def __init__(self, argnames, defaults, flags, code):
+        self.argnames = argnames
+        self.defaults = defaults
+        self.flags = flags
+        self.code = code
+        self.varargs = self.kwargs = None
+        if flags & CO_VARARGS:
+            self.varargs = 1
+        if flags & CO_VARKEYWORDS:
+            self.kwargs = 1
+
+    def getChildren(self):
+        children = []
+        children.append(self.argnames)
+        children.extend(flatten(self.defaults))
+        children.append(self.flags)
+        children.append(self.code)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.defaults))
+        nodes.append(self.code)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
+
+class And(Node):
+    nodes["and"] = "And"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "And(%s)" % (repr(self.nodes),)
+
+class Compare(Node):
+    nodes["compare"] = "Compare"
+    def __init__(self, expr, ops):
+        self.expr = expr
+        self.ops = ops
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.extend(flatten(self.ops))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        nodes.extend(flatten_nodes(self.ops))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
+
+class Bitor(Node):
+    nodes["bitor"] = "Bitor"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitor(%s)" % (repr(self.nodes),)
+
+class Bitxor(Node):
+    nodes["bitxor"] = "Bitxor"
+    def __init__(self, nodes):
+        self.nodes = nodes
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.nodes))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.nodes))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "Bitxor(%s)" % (repr(self.nodes),)
+
+class CallFunc(Node):
+    nodes["callfunc"] = "CallFunc"
+    def __init__(self, node, args, star_args = None, dstar_args = None):
+        self.node = node
+        self.args = args
+        self.star_args = star_args
+        self.dstar_args = dstar_args
+
+    def getChildren(self):
+        children = []
+        children.append(self.node)
+        children.extend(flatten(self.args))
+        children.append(self.star_args)
+        children.append(self.dstar_args)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.node)
+        nodes.extend(flatten_nodes(self.args))
+        if self.star_args is not None:
+            nodes.append(self.star_args)
+        if self.dstar_args is not None:
+            nodes.append(self.dstar_args)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
+
+class Global(Node):
+    nodes["global"] = "Global"
+    def __init__(self, names):
+        self.names = names
+
+    def getChildren(self):
+        return self.names,
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Global(%s)" % (repr(self.names),)
+
+class Add(Node):
+    nodes["add"] = "Add"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Add((%s, %s))" % (repr(self.left), repr(self.right))
+
+class ListCompIf(Node):
+    nodes["listcompif"] = "ListCompIf"
+    def __init__(self, test):
+        self.test = test
+
+    def getChildren(self):
+        return self.test,
+
+    def getChildNodes(self):
+        return self.test,
+
+    def __repr__(self):
+        return "ListCompIf(%s)" % (repr(self.test),)
+
+class Sub(Node):
+    nodes["sub"] = "Sub"
+    def __init__(self, (left, right)):
+        self.left = left
+        self.right = right
+
+    def getChildren(self):
+        return self.left, self.right
+
+    def getChildNodes(self):
+        return self.left, self.right
+
+    def __repr__(self):
+        return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Pass(Node):
+    nodes["pass"] = "Pass"
+    def __init__(self):
+        pass
+
+    def getChildren(self):
+        return ()
+
+    def getChildNodes(self):
+        return ()
+
+    def __repr__(self):
+        return "Pass()"
+
+class UnarySub(Node):
+    nodes["unarysub"] = "UnarySub"
+    def __init__(self, expr):
+        self.expr = expr
+
+    def getChildren(self):
+        return self.expr,
+
+    def getChildNodes(self):
+        return self.expr,
+
+    def __repr__(self):
+        return "UnarySub(%s)" % (repr(self.expr),)
+
+class If(Node):
+    nodes["if"] = "If"
+    def __init__(self, tests, else_):
+        self.tests = tests
+        self.else_ = else_
+
+    def getChildren(self):
+        children = []
+        children.extend(flatten(self.tests))
+        children.append(self.else_)
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.extend(flatten_nodes(self.tests))
+        if self.else_ is not None:
+            nodes.append(self.else_)
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
+
+class ListComp(Node):
+    nodes["listcomp"] = "ListComp"
+    def __init__(self, expr, quals):
+        self.expr = expr
+        self.quals = quals
+
+    def getChildren(self):
+        children = []
+        children.append(self.expr)
+        children.extend(flatten(self.quals))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.expr)
+        nodes.extend(flatten_nodes(self.quals))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
+
+class ListCompFor(Node):
+    nodes["listcompfor"] = "ListCompFor"
+    def __init__(self, assign, list, ifs):
+        self.assign = assign
+        self.list = list
+        self.ifs = ifs
+
+    def getChildren(self):
+        children = []
+        children.append(self.assign)
+        children.append(self.list)
+        children.extend(flatten(self.ifs))
+        return tuple(children)
+
+    def getChildNodes(self):
+        nodes = []
+        nodes.append(self.assign)
+        nodes.append(self.list)
+        nodes.extend(flatten_nodes(self.ifs))
+        return tuple(nodes)
+
+    def __repr__(self):
+        return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
+
+klasses = globals()
+for k in nodes.keys():
+    nodes[k] = klasses[nodes[k]]
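+
+# After this fixup, nodes maps each lowercase tag to the class itself,
+# e.g. nodes["add"] is Add, so client code can look a node class up by
+# tag:
+#
+#     n = nodes["add"]((Const(1), Const(2)))
+#     print n          # Add((Const(1), Const(2)))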
diff --git a/lib-python/2.2/compiler/consts.py b/lib-python/2.2/compiler/consts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/consts.py
@@ -0,0 +1,19 @@
+# operation flags
+OP_ASSIGN = 'OP_ASSIGN'
+OP_DELETE = 'OP_DELETE'
+OP_APPLY = 'OP_APPLY'
+
+SC_LOCAL = 1
+SC_GLOBAL = 2
+SC_FREE = 3
+SC_CELL = 4
+SC_UNKNOWN = 5
+
+CO_OPTIMIZED = 0x0001
+CO_NEWLOCALS = 0x0002
+CO_VARARGS = 0x0004
+CO_VARKEYWORDS = 0x0008
+CO_NESTED = 0x0010
+CO_GENERATOR = 0x0020
+CO_GENERATOR_ALLOWED = 0x1000
+CO_FUTURE_DIVISION = 0x2000
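+
+# The CO_* constants mirror the code-object flags in CPython; they are
+# bit flags, so a code object can carry several at once, e.g.:
+#
+#     flags = CO_OPTIMIZED | CO_NEWLOCALS | CO_VARARGS
+#     if flags & CO_VARARGS:
+#         print "function takes *args"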
diff --git a/lib-python/2.2/compiler/future.py b/lib-python/2.2/compiler/future.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/future.py
@@ -0,0 +1,72 @@
+"""Parser for future statements
+
+"""
+
+from compiler import ast, walk
+
+def is_future(stmt):
+    """Return true if statement is a well-formed future statement"""
+    if not isinstance(stmt, ast.From):
+        return 0
+    if stmt.modname == "__future__":
+        return 1
+    else:
+        return 0
+
+class FutureParser:
+
+    features = ("nested_scopes", "generators", "division")
+
+    def __init__(self):
+        self.found = {} # set
+
+    def visitModule(self, node):
+        stmt = node.node
+        for s in stmt.nodes:
+            if not self.check_stmt(s):
+                break
+
+    def check_stmt(self, stmt):
+        if is_future(stmt):
+            for name, asname in stmt.names:
+                if name in self.features:
+                    self.found[name] = 1
+                else:
+                    raise SyntaxError, \
+                          "future feature %s is not defined" % name
+            stmt.valid_future = 1
+            return 1
+        return 0
+
+    def get_features(self):
+        """Return list of features enabled by future statements"""
+        return self.found.keys()
+
+class BadFutureParser:
+    """Check for invalid future statements"""
+
+    def visitFrom(self, node):
+        if hasattr(node, 'valid_future'):
+            return
+        if node.modname != "__future__":
+            return
+        raise SyntaxError, "invalid future statement"
+
+def find_futures(node):
+    p1 = FutureParser()
+    p2 = BadFutureParser()
+    walk(node, p1)
+    walk(node, p2)
+    return p1.get_features()
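+
+# Usage sketch (parse comes from the compiler package):
+#
+#     from compiler import parse
+#     tree = parse("from __future__ import generators\nx = 1\n")
+#     print find_futures(tree)        # ['generators']
+#
+# A __future__ import that appears after ordinary statements is left
+# unmarked by FutureParser, so BadFutureParser raises SyntaxError.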
+
+if __name__ == "__main__":
+    import sys
+    from compiler import parseFile, walk
+
+    for file in sys.argv[1:]:
+        print file
+        tree = parseFile(file)
+        v = FutureParser()
+        walk(tree, v)
+        print v.found
+        print
diff --git a/lib-python/2.2/compiler/misc.py b/lib-python/2.2/compiler/misc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/misc.py
@@ -0,0 +1,74 @@
+import types
+
+def flatten(tup):
+    elts = []
+    for elt in tup:
+        if type(elt) == types.TupleType:
+            elts = elts + flatten(elt)
+        else:
+            elts.append(elt)
+    return elts
+
+class Set:
+    def __init__(self):
+        self.elts = {}
+    def __len__(self):
+        return len(self.elts)
+    def __contains__(self, elt):
+        return self.elts.has_key(elt)
+    def add(self, elt):
+        self.elts[elt] = elt
+    def elements(self):
+        return self.elts.keys()
+    def has_elt(self, elt):
+        return self.elts.has_key(elt)
+    def remove(self, elt):
+        del self.elts[elt]
+    def copy(self):
+        c = Set()
+        c.elts.update(self.elts)
+        return c
+
+class Stack:
+    def __init__(self):
+        self.stack = []
+        self.pop = self.stack.pop
+    def __len__(self):
+        return len(self.stack)
+    def push(self, elt):
+        self.stack.append(elt)
+    def top(self):
+        return self.stack[-1]
+    def __getitem__(self, index): # needed by visitContinue()
+        return self.stack[index]
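+
+# Set is a small dict-backed set type (this module predates the sets
+# module and the builtin set); Stack is a thin list wrapper.  A quick
+# sketch:
+#
+#     s = Set()
+#     s.add('x'); s.add('x')
+#     print len(s), s.has_elt('x')    # 1 1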
+
+MANGLE_LEN = 256 # magic constant from compile.c
+
+def mangle(name, klass):
+    if not name.startswith('__'):
+        return name
+    if len(name) + 2 >= MANGLE_LEN:
+        return name
+    if name.endswith('__'):
+        return name
+    try:
+        i = 0
+        while klass[i] == '_':
+            i = i + 1
+    except IndexError:
+        return name
+    klass = klass[i:]
+
+    tlen = len(klass) + len(name)
+    if tlen > MANGLE_LEN:
+        klass = klass[:MANGLE_LEN-tlen]
+
+    return "_%s%s" % (klass, name)
+
+def set_filename(filename, tree):
+    """Set the filename attribute to filename on every node in tree"""
+    worklist = [tree]
+    while worklist:
+        node = worklist.pop(0)
+        node.filename = filename
+        worklist.extend(node.getChildNodes())
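+
+# set_filename() is run over a whole tree before code generation, so
+# that later passes have a filename to record on the code objects they
+# emit.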
diff --git a/lib-python/2.2/compiler/pyassem.py b/lib-python/2.2/compiler/pyassem.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/pyassem.py
@@ -0,0 +1,824 @@
+"""A flow graph representation for Python bytecode"""
+
+import dis
+import new
+import string
+import sys
+import types
+
+from compiler import misc
+from compiler.consts import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, \
+     CO_VARKEYWORDS
+
+def xxx_sort(l):
+    l = l[:]
+    def sorter(a, b):
+        return cmp(a.bid, b.bid)
+    l.sort(sorter)
+    return l
+
+class FlowGraph:
+    def __init__(self):
+        self.current = self.entry = Block()
+        self.exit = Block("exit")
+        self.blocks = misc.Set()
+        self.blocks.add(self.entry)
+        self.blocks.add(self.exit)
+
+    def startBlock(self, block):
+        if self._debug:
+            if self.current:
+                print "end", repr(self.current)
+                print "    next", self.current.next
+                print "   ", self.current.get_children()
+            print repr(block)
+        self.current = block
+
+    def nextBlock(self, block=None):
+        # XXX think we need to specify when there is implicit transfer
+        # from one block to the next.  might be better to represent this
+        # with explicit JUMP_ABSOLUTE instructions that are optimized
+        # out when they are unnecessary.
+        #
+        # I think this strategy works: each block has a child
+        # designated as "next" which is returned as the last of the
+        # children.  because the nodes in a graph are emitted in
+        # reverse post order, the "next" block will always be emitted
+        # immediately after its parent.
+        # Worry: maintaining this invariant could be tricky
+        if block is None:
+            block = self.newBlock()
+
+        # Note: If the current block ends with an unconditional
+        # control transfer, then it is incorrect to add an implicit
+        # transfer to the block graph.  The current code requires
+        # these edges to get the blocks emitted in the right order,
+        # however. :-(  If a client needs to remove these edges, call
+        # pruneEdges().
+
+        self.current.addNext(block)
+        self.startBlock(block)
+
+    def newBlock(self):
+        b = Block()
+        self.blocks.add(b)
+        return b
+
+    def startExitBlock(self):
+        self.startBlock(self.exit)
+
+    _debug = 0
+
+    def _enable_debug(self):
+        self._debug = 1
+
+    def _disable_debug(self):
+        self._debug = 0
+
+    def emit(self, *inst):
+        if self._debug:
+            print "\t", inst
+        if inst[0] == 'RETURN_VALUE':
+            self.current.addOutEdge(self.exit)
+        if len(inst) == 2 and isinstance(inst[1], Block):
+            self.current.addOutEdge(inst[1])
+        self.current.emit(inst)
+
+    def getBlocksInOrder(self):
+        """Return the blocks in reverse postorder
+
+        i.e. each node appears before all of its successors
+        """
+        # XXX make sure every node that doesn't have an explicit next
+        # is set so that next points to exit
+        for b in self.blocks.elements():
+            if b is self.exit:
+                continue
+            if not b.next:
+                b.addNext(self.exit)
+        order = dfs_postorder(self.entry, {})
+        order.reverse()
+        self.fixupOrder(order, self.exit)
+        # hack alert
+        if not self.exit in order:
+            order.append(self.exit)
+
+        return order
+
+    def fixupOrder(self, blocks, default_next):
+        """Fixup bad order introduced by DFS."""
+
+        # XXX This is a total mess.  There must be a better way to get
+        # the code blocks in the right order.
+
+        self.fixupOrderHonorNext(blocks, default_next)
+        self.fixupOrderForward(blocks, default_next)
+
+    def fixupOrderHonorNext(self, blocks, default_next):
+        """Fix one problem with DFS.
+
+        The DFS uses child block, but doesn't know about the special
+        "next" block.  As a result, the DFS can order blocks so that a
+        block isn't next to the right block for implicit control
+        transfers.
+        """
+        index = {}
+        for i in range(len(blocks)):
+            index[blocks[i]] = i
+
+        for i in range(0, len(blocks) - 1):
+            b = blocks[i]
+            n = blocks[i + 1]
+            if not b.next or b.next[0] == default_next or b.next[0] == n:
+                continue
+            # The blocks are in the wrong order.  Find the chain of
+            # blocks to insert where they belong.
+            cur = b
+            chain = []
+            elt = cur
+            while elt.next and elt.next[0] != default_next:
+                chain.append(elt.next[0])
+                elt = elt.next[0]
+            # Now remove the blocks in the chain from the current
+            # block list, so that they can be re-inserted.
+            l = []
+            for b in chain:
+                assert index[b] > i
+                l.append((index[b], b))
+            l.sort()
+            l.reverse()
+            for j, b in l:
+                del blocks[index[b]]
+            # Insert the chain in the proper location
+            blocks[i:i + 1] = [cur] + chain
+            # Finally, re-compute the block indexes
+            for i in range(len(blocks)):
+                index[blocks[i]] = i
+
+    def fixupOrderForward(self, blocks, default_next):
+        """Make sure all JUMP_FORWARDs jump forward"""
+        index = {}
+        chains = []
+        cur = []
+        for b in blocks:
+            index[b] = len(chains)
+            cur.append(b)
+            if b.next and b.next[0] == default_next:
+                chains.append(cur)
+                cur = []
+        chains.append(cur)
+
+        while 1:
+            constraints = []
+
+            for i in range(len(chains)):
+                l = chains[i]
+                for b in l:
+                    for c in b.get_children():
+                        if index[c] < i:
+                            forward_p = 0
+                            for inst in b.insts:
+                                if inst[0] == 'JUMP_FORWARD':
+                                    if inst[1] == c:
+                                        forward_p = 1
+                            if not forward_p:
+                                continue
+                            constraints.append((index[c], i))
+
+            if not constraints:
+                break
+
+            # XXX just do one for now
+            # do swaps to get things in the right order
+            goes_before, a_chain = constraints[0]
+            assert a_chain > goes_before
+            c = chains[a_chain]
+            chains.remove(c)
+            chains.insert(goes_before, c)
+
+        del blocks[:]
+        for c in chains:
+            for b in c:
+                blocks.append(b)
+
+    def getBlocks(self):
+        return self.blocks.elements()
+
+    def getRoot(self):
+        """Return nodes appropriate for use with dominator"""
+        return self.entry
+
+    def getContainedGraphs(self):
+        l = []
+        for b in self.getBlocks():
+            l.extend(b.getContainedGraphs())
+        return l
+
+def dfs_postorder(b, seen):
+    """Depth-first search of tree rooted at b, return in postorder"""
+    order = []
+    seen[b] = b
+    for c in b.get_children():
+        if seen.has_key(c):
+            continue
+        order = order + dfs_postorder(c, seen)
+    order.append(b)
+    return order
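+
+# For a diamond-shaped graph (entry -> a, entry -> b, a -> exit,
+# b -> exit) the postorder puts exit first and entry last; reversing
+# it, as getBlocksInOrder() does, yields each block before all of its
+# successors.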
+
+class Block:
+    _count = 0
+
+    def __init__(self, label=''):
+        self.insts = []
+        self.inEdges = misc.Set()
+        self.outEdges = misc.Set()
+        self.label = label
+        self.bid = Block._count
+        self.next = []
+        Block._count = Block._count + 1
+
+    def __repr__(self):
+        if self.label:
+            return "<block %s id=%d>" % (self.label, self.bid)
+        else:
+            return "<block id=%d>" % (self.bid)
+
+    def __str__(self):
+        insts = map(str, self.insts)
+        return "<block %s %d:\n%s>" % (self.label, self.bid,
+                                       string.join(insts, '\n'))
+
+    def emit(self, inst):
+        op = inst[0]
+        if op[:4] == 'JUMP':
+            self.outEdges.add(inst[1])
+        self.insts.append(inst)
+
+    def getInstructions(self):
+        return self.insts
+
+    def addInEdge(self, block):
+        self.inEdges.add(block)
+
+    def addOutEdge(self, block):
+        self.outEdges.add(block)
+
+    def addNext(self, block):
+        self.next.append(block)
+        assert len(self.next) == 1, map(str, self.next)
+
+    _uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS',
+                        'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
+
+    def pruneNext(self):
+        """Remove bogus edge for unconditional transfers
+
+        Each block has a next edge that accounts for implicit control
+        transfers, e.g. from a JUMP_IF_FALSE to the block that will be
+        executed if the test is true.
+
+        These edges must remain for the current assembler code to
+        work. If they are removed, the dfs_postorder gets things in
+        weird orders.  However, they shouldn't be there for other
+        purposes, e.g. conversion to SSA form.  This method will
+        remove the next edge when it follows an unconditional control
+        transfer.
+        """
+        try:
+            op, arg = self.insts[-1]
+        except (IndexError, ValueError):
+            return
+        if op in self._uncond_transfer:
+            self.next = []
+
+    def get_children(self):
+        if self.next and self.next[0] in self.outEdges:
+            self.outEdges.remove(self.next[0])
+        return self.outEdges.elements() + self.next
+
+    def getContainedGraphs(self):
+        """Return all graphs contained within this block.
+
+        For example, a MAKE_FUNCTION block will contain a reference to
+        the graph for the function body.
+        """
+        contained = []
+        for inst in self.insts:
+            if len(inst) == 1:
+                continue
+            op = inst[1]
+            if hasattr(op, 'graph'):
+                contained.append(op.graph)
+        return contained
+
+# flags for code objects
+
+# the FlowGraph is transformed in place; it exists in one of these states
+RAW = "RAW"
+FLAT = "FLAT"
+CONV = "CONV"
+DONE = "DONE"
+
+class PyFlowGraph(FlowGraph):
+    super_init = FlowGraph.__init__
+
+    def __init__(self, name, filename, args=(), optimized=0, klass=None):
+        self.super_init()
+        self.name = name
+        self.filename = filename
+        self.docstring = None
+        self.args = args # XXX
+        self.argcount = getArgCount(args)
+        self.klass = klass
+        if optimized:
+            self.flags = CO_OPTIMIZED | CO_NEWLOCALS
+        else:
+            self.flags = 0
+        self.consts = []
+        self.names = []
+        # Free variables found by the symbol table scan, including
+        # variables used only in nested scopes, are included here.
+        self.freevars = []
+        self.cellvars = []
+        # The closure list is used to track the order of cell
+        # variables and free variables in the resulting code object.
+        # The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
+        # kinds of variables.
+        self.closure = []
+        self.varnames = list(args) or []
+        for i in range(len(self.varnames)):
+            var = self.varnames[i]
+            if isinstance(var, TupleArg):
+                self.varnames[i] = var.getName()
+        self.stage = RAW
+
+    def setDocstring(self, doc):
+        self.docstring = doc
+
+    def setFlag(self, flag):
+        self.flags = self.flags | flag
+        if flag == CO_VARARGS:
+            self.argcount = self.argcount - 1
+
+    def checkFlag(self, flag):
+        if self.flags & flag:
+            return 1
+
+    def setFreeVars(self, names):
+        self.freevars = list(names)
+
+    def setCellVars(self, names):
+        self.cellvars = names
+
+    def getCode(self):
+        """Get a Python code object"""
+        if self.stage == RAW:
+            self.computeStackDepth()
+            self.flattenGraph()
+        if self.stage == FLAT:
+            self.convertArgs()
+        if self.stage == CONV:
+            self.makeByteCode()
+        if self.stage == DONE:
+            return self.newCodeObject()
+        raise RuntimeError, "inconsistent PyFlowGraph state"
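+
+    # getCode() advances through the stages in order: RAW (basic blocks,
+    # symbolic arguments) -> FLAT (linear instruction list with resolved
+    # jump targets) -> CONV (names and constants turned into indexes) ->
+    # DONE (ready to emit a code object).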
+
+    def dump(self, io=None):
+        if io:
+            save = sys.stdout
+            sys.stdout = io
+        pc = 0
+        for t in self.insts:
+            opname = t[0]
+            if opname == "SET_LINENO":
+                print
+            if len(t) == 1:
+                print "\t", "%3d" % pc, opname
+                pc = pc + 1
+            else:
+                print "\t", "%3d" % pc, opname, t[1]
+                pc = pc + 3
+        if io:
+            sys.stdout = save
+
+    def computeStackDepth(self):
+        """Compute the max stack depth.
+
+        Approach is to compute the stack effect of each basic block.
+        Then find the path through the code with the largest total
+        effect.
+        """
+        depth = {}
+        exit = None
+        for b in self.getBlocks():
+            depth[b] = findDepth(b.getInstructions())
+
+        seen = {}
+
+        def max_depth(b, d):
+            if seen.has_key(b):
+                return d
+            seen[b] = 1
+            d = d + depth[b]
+            children = b.get_children()
+            if children:
+                return max([max_depth(c, d) for c in children])
+            else:
+                if not b.label == "exit":
+                    return max_depth(self.exit, d)
+                else:
+                    return d
+
+        self.stacksize = max_depth(self.entry, 0)
+
+    def flattenGraph(self):
+        """Arrange the blocks in order and resolve jumps"""
+        assert self.stage == RAW
+        self.insts = insts = []
+        pc = 0
+        begin = {}
+        end = {}
+        for b in self.getBlocksInOrder():
+            begin[b] = pc
+            for inst in b.getInstructions():
+                insts.append(inst)
+                if len(inst) == 1:
+                    pc = pc + 1
+                else:
+                    # arg takes 2 bytes
+                    pc = pc + 3
+            end[b] = pc
+        pc = 0
+        for i in range(len(insts)):
+            inst = insts[i]
+            if len(inst) == 1:
+                pc = pc + 1
+            else:
+                pc = pc + 3
+            opname = inst[0]
+            if self.hasjrel.has_elt(opname):
+                oparg = inst[1]
+                offset = begin[oparg] - pc
+                insts[i] = opname, offset
+            elif self.hasjabs.has_elt(opname):
+                insts[i] = opname, begin[inst[1]]
+        self.stage = FLAT
+
+    hasjrel = misc.Set()
+    for i in dis.hasjrel:
+        hasjrel.add(dis.opname[i])
+    hasjabs = misc.Set()
+    for i in dis.hasjabs:
+        hasjabs.add(dis.opname[i])
+
+    def convertArgs(self):
+        """Convert arguments from symbolic to concrete form"""
+        assert self.stage == FLAT
+        self.consts.insert(0, self.docstring)
+        self.sort_cellvars()
+        for i in range(len(self.insts)):
+            t = self.insts[i]
+            if len(t) == 2:
+                opname, oparg = t
+                conv = self._converters.get(opname, None)
+                if conv:
+                    self.insts[i] = opname, conv(self, oparg)
+        self.stage = CONV
+
+    def sort_cellvars(self):
+        """Sort cellvars in the order of varnames and prune from freevars.
+        """
+        cells = {}
+        for name in self.cellvars:
+            cells[name] = 1
+        self.cellvars = [name for name in self.varnames
+                         if cells.has_key(name)]
+        for name in self.cellvars:
+            del cells[name]
+        self.cellvars = self.cellvars + cells.keys()
+        self.closure = self.cellvars + self.freevars
+
+    def _lookupName(self, name, list):
+        """Return index of name in list, appending if necessary
+
+        This routine uses a list instead of a dictionary, because a
+        dictionary can't store two different keys if the keys have the
+        same value but different types, e.g. 2 and 2L.  The compiler
+        must treat these two separately, so it does an explicit type
+        comparison before comparing the values.
+        """
+        t = type(name)
+        for i in range(len(list)):
+            if t == type(list[i]) and list[i] == name:
+                return i
+        end = len(list)
+        list.append(name)
+        return end
+
+    _converters = {}
+    def _convert_LOAD_CONST(self, arg):
+        if hasattr(arg, 'getCode'):
+            arg = arg.getCode()
+        return self._lookupName(arg, self.consts)
+
+    def _convert_LOAD_FAST(self, arg):
+        self._lookupName(arg, self.names)
+        return self._lookupName(arg, self.varnames)
+    _convert_STORE_FAST = _convert_LOAD_FAST
+    _convert_DELETE_FAST = _convert_LOAD_FAST
+
+    def _convert_LOAD_NAME(self, arg):
+        if self.klass is None:
+            self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.names)
+
+    def _convert_NAME(self, arg):
+        if self.klass is None:
+            self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.names)
+    _convert_STORE_NAME = _convert_NAME
+    _convert_DELETE_NAME = _convert_NAME
+    _convert_IMPORT_NAME = _convert_NAME
+    _convert_IMPORT_FROM = _convert_NAME
+    _convert_STORE_ATTR = _convert_NAME
+    _convert_LOAD_ATTR = _convert_NAME
+    _convert_DELETE_ATTR = _convert_NAME
+    _convert_LOAD_GLOBAL = _convert_NAME
+    _convert_STORE_GLOBAL = _convert_NAME
+    _convert_DELETE_GLOBAL = _convert_NAME
+
+    def _convert_DEREF(self, arg):
+        self._lookupName(arg, self.names)
+        self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.closure)
+    _convert_LOAD_DEREF = _convert_DEREF
+    _convert_STORE_DEREF = _convert_DEREF
+
+    def _convert_LOAD_CLOSURE(self, arg):
+        self._lookupName(arg, self.varnames)
+        return self._lookupName(arg, self.closure)
+
+    _cmp = list(dis.cmp_op)
+    def _convert_COMPARE_OP(self, arg):
+        return self._cmp.index(arg)
+
+    # similarly for other opcodes...
+
+    for name, obj in locals().items():
+        if name[:9] == "_convert_":
+            opname = name[9:]
+            _converters[opname] = obj
+    del name, obj, opname
+
+    def makeByteCode(self):
+        assert self.stage == CONV
+        self.lnotab = lnotab = LineAddrTable()
+        for t in self.insts:
+            opname = t[0]
+            if len(t) == 1:
+                lnotab.addCode(self.opnum[opname])
+            else:
+                oparg = t[1]
+                if opname == "SET_LINENO":
+                    lnotab.nextLine(oparg)
+                hi, lo = twobyte(oparg)
+                try:
+                    lnotab.addCode(self.opnum[opname], lo, hi)
+                except ValueError:
+                    print opname, oparg
+                    print self.opnum[opname], lo, hi
+                    raise
+        self.stage = DONE
+
+    opnum = {}
+    for num in range(len(dis.opname)):
+        opnum[dis.opname[num]] = num
+    del num
+
+    def newCodeObject(self):
+        assert self.stage == DONE
+        if (self.flags & CO_NEWLOCALS) == 0:
+            nlocals = 0
+        else:
+            nlocals = len(self.varnames)
+        argcount = self.argcount
+        if self.flags & CO_VARKEYWORDS:
+            argcount = argcount - 1
+        return new.code(argcount, nlocals, self.stacksize, self.flags,
+                        self.lnotab.getCode(), self.getConsts(),
+                        tuple(self.names), tuple(self.varnames),
+                        self.filename, self.name, self.lnotab.firstline,
+                        self.lnotab.getTable(), tuple(self.freevars),
+                        tuple(self.cellvars))
+
+    def getConsts(self):
+        """Return a tuple for the const slot of the code object
+
+        Must convert references to code (MAKE_FUNCTION) to code
+        objects recursively.
+        """
+        l = []
+        for elt in self.consts:
+            if isinstance(elt, PyFlowGraph):
+                elt = elt.getCode()
+            l.append(elt)
+        return tuple(l)
+
+def isJump(opname):
+    if opname[:4] == 'JUMP':
+        return 1
+
+class TupleArg:
+    """Helper for marking func defs with nested tuples in arglist"""
+    def __init__(self, count, names):
+        self.count = count
+        self.names = names
+    def __repr__(self):
+        return "TupleArg(%s, %s)" % (self.count, self.names)
+    def getName(self):
+        return ".%d" % self.count
+
+def getArgCount(args):
+    argcount = len(args)
+    if args:
+        for arg in args:
+            if isinstance(arg, TupleArg):
+                numNames = len(misc.flatten(arg.names))
+                argcount = argcount - numNames
+    return argcount
+
+def twobyte(val):
+    """Convert an int argument into high and low bytes"""
+    assert type(val) == types.IntType
+    return divmod(val, 256)
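+
+# Illustrative: twobyte(300) == (1, 44), i.e. 300 == 1 * 256 + 44; the low
+# byte is emitted first in makeByteCode() above (a worked example, for
+# reference only).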
+
+class LineAddrTable:
+    """lnotab
+
+    This class builds the lnotab, which is documented in compile.c.
+    Here's a brief recap:
+
+    For each SET_LINENO instruction after the first one, two bytes are
+    added to lnotab.  (In some cases, multiple two-byte entries are
+    added.)  The first byte is the distance in bytes between the
+    instruction for the last SET_LINENO and the current SET_LINENO.
+    The second byte is the offset in line numbers.  If either offset is
+    greater than 255, multiple two-byte entries are added -- see
+    compile.c for the delicate details.
+    """
+
+    def __init__(self):
+        self.code = []
+        self.codeOffset = 0
+        self.firstline = 0
+        self.lastline = 0
+        self.lastoff = 0
+        self.lnotab = []
+
+    def addCode(self, *args):
+        for arg in args:
+            self.code.append(chr(arg))
+        self.codeOffset = self.codeOffset + len(args)
+
+    def nextLine(self, lineno):
+        if self.firstline == 0:
+            self.firstline = lineno
+            self.lastline = lineno
+        else:
+            # compute deltas
+            addr = self.codeOffset - self.lastoff
+            line = lineno - self.lastline
+            # Python assumes that lineno always increases with
+            # increasing bytecode address (lnotab is unsigned char).
+            # Depending on when SET_LINENO instructions are emitted
+            # this is not always true.  Consider the code:
+            #     a = (1,
+            #          b)
+            # In the bytecode stream, the assignment to "a" occurs
+            # after the loading of "b".  This works with the C Python
+            # compiler because it only generates a SET_LINENO instruction
+            # for the assignment.
+            if line > 0:
+                push = self.lnotab.append
+                while addr > 255:
+                    push(255); push(0)
+                    addr -= 255
+                while line > 255:
+                    push(addr); push(255)
+                    line -= 255
+                    addr = 0
+                if addr > 0 or line > 0:
+                    push(addr); push(line)
+                self.lastline = lineno
+                self.lastoff = self.codeOffset
+
+    def getCode(self):
+        return string.join(self.code, '')
+
+    def getTable(self):
+        return string.join(map(chr, self.lnotab), '')
+
+class StackDepthTracker:
+    # XXX 1. need to keep track of stack depth on jumps
+    # XXX 2. at least partly as a result, this code is broken
+
+    def findDepth(self, insts, debug=0):
+        depth = 0
+        maxDepth = 0
+        for i in insts:
+            opname = i[0]
+            if debug:
+                print i,
+            delta = self.effect.get(opname, None)
+            if delta is not None:
+                depth = depth + delta
+            else:
+                # now check patterns
+                for pat, pat_delta in self.patterns:
+                    if opname[:len(pat)] == pat:
+                        delta = pat_delta
+                        depth = depth + delta
+                        break
+                # if we still haven't found a match
+                if delta is None:
+                    meth = getattr(self, opname, None)
+                    if meth is not None:
+                        depth = depth + meth(i[1])
+            if depth > maxDepth:
+                maxDepth = depth
+            if debug:
+                print depth, maxDepth
+        return maxDepth
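+
+    # Illustrative trace (an assumed instruction list): for
+    #     [('LOAD_CONST', 1), ('LOAD_CONST', 2), ('BINARY_ADD',),
+    #      ('RETURN_VALUE',)]
+    # the running depth is 1, 2, 1, 0, so findDepth() returns 2.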
+
+    effect = {
+        'POP_TOP': -1,
+        'DUP_TOP': 1,
+        'SLICE+1': -1,
+        'SLICE+2': -1,
+        'SLICE+3': -2,
+        'STORE_SLICE+0': -1,
+        'STORE_SLICE+1': -2,
+        'STORE_SLICE+2': -2,
+        'STORE_SLICE+3': -3,
+        'DELETE_SLICE+0': -1,
+        'DELETE_SLICE+1': -2,
+        'DELETE_SLICE+2': -2,
+        'DELETE_SLICE+3': -3,
+        'STORE_SUBSCR': -3,
+        'DELETE_SUBSCR': -2,
+        # PRINT_EXPR?
+        'PRINT_ITEM': -1,
+        'RETURN_VALUE': -1,
+        'EXEC_STMT': -3,
+        'BUILD_CLASS': -2,
+        'STORE_NAME': -1,
+        'STORE_ATTR': -2,
+        'DELETE_ATTR': -1,
+        'STORE_GLOBAL': -1,
+        'BUILD_MAP': 1,
+        'COMPARE_OP': -1,
+        'STORE_FAST': -1,
+        'IMPORT_STAR': -1,
+        'IMPORT_NAME': 0,
+        'IMPORT_FROM': 1,
+        'LOAD_ATTR': 0, # unlike other loads
+        # close enough...
+        'SETUP_EXCEPT': 3,
+        'SETUP_FINALLY': 3,
+        'FOR_ITER': 1,
+        }
+    # use pattern match
+    patterns = [
+        ('BINARY_', -1),
+        ('LOAD_', 1),
+        ]
+
+    def UNPACK_SEQUENCE(self, count):
+        return count-1
+    def BUILD_TUPLE(self, count):
+        return -count+1
+    def BUILD_LIST(self, count):
+        return -count+1
+    def CALL_FUNCTION(self, argc):
+        hi, lo = divmod(argc, 256)
+        return -(lo + hi * 2)
+    def CALL_FUNCTION_VAR(self, argc):
+        return self.CALL_FUNCTION(argc)-1
+    def CALL_FUNCTION_KW(self, argc):
+        return self.CALL_FUNCTION(argc)-1
+    def CALL_FUNCTION_VAR_KW(self, argc):
+        return self.CALL_FUNCTION(argc)-2
+    def MAKE_FUNCTION(self, argc):
+        return -argc
+    def MAKE_CLOSURE(self, argc):
+        # XXX need to account for free variables too!
+        return -argc
+    def BUILD_SLICE(self, argc):
+        if argc == 2:
+            return -1
+        elif argc == 3:
+            return -2
+    def DUP_TOPX(self, argc):
+        return argc
+
+findDepth = StackDepthTracker().findDepth
diff --git a/lib-python/2.2/compiler/pycodegen.py b/lib-python/2.2/compiler/pycodegen.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/pycodegen.py
@@ -0,0 +1,1388 @@
+import imp
+import os
+import marshal
+import stat
+import string
+import struct
+import sys
+import types
+from cStringIO import StringIO
+
+from compiler import ast, parse, walk, syntax
+from compiler import pyassem, misc, future, symbols
+from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL
+from compiler.consts import CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,\
+     CO_NESTED, CO_GENERATOR, CO_GENERATOR_ALLOWED, CO_FUTURE_DIVISION
+from compiler.pyassem import TupleArg
+
+# Do we have Python 1.x or Python 2.x?
+try:
+    VERSION = sys.version_info[0]
+except AttributeError:
+    VERSION = 1
+
+callfunc_opcode_info = {
+    # (Have *args, Have **args) : opcode
+    (0,0) : "CALL_FUNCTION",
+    (1,0) : "CALL_FUNCTION_VAR",
+    (0,1) : "CALL_FUNCTION_KW",
+    (1,1) : "CALL_FUNCTION_VAR_KW",
+}
+
+LOOP = 1
+EXCEPT = 2
+TRY_FINALLY = 3
+END_FINALLY = 4
+
+# XXX this doesn't seem to be used
+class BlockStack(misc.Stack):
+    __super_init = misc.Stack.__init__
+
+    def __init__(self):
+        self.__super_init()
+        self.loop = None
+
+def compileFile(filename, display=0):
+    f = open(filename)
+    buf = f.read()
+    f.close()
+    mod = Module(buf, filename)
+    mod.compile(display)
+    f = open(filename + "c", "wb")
+    mod.dump(f)
+    f.close()
+
+def compile(source, filename, mode, flags=None, dont_inherit=None):
+    """Replacement for builtin compile() function"""
+    if flags is not None or dont_inherit is not None:
+        raise RuntimeError, "not implemented yet"
+
+    if mode == "single":
+        gen = Interactive(source, filename)
+    elif mode == "exec":
+        gen = Module(source, filename)
+    elif mode == "eval":
+        gen = Expression(source, filename)
+    else:
+        raise ValueError("compile() 3rd arg must be 'exec' or "
+                         "'eval' or 'single'")
+    gen.compile()
+    return gen.code
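+
+# Illustrative usage (a sketch; assumes the package is importable as
+# compiler.pycodegen):
+#     from compiler.pycodegen import compile
+#     code = compile("print 1 + 1\n", "<example>", "exec")
+#     exec code          # prints 2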
+
+class AbstractCompileMode:
+
+    mode = None # defined by subclass
+
+    def __init__(self, source, filename):
+        self.source = source
+        self.filename = filename
+        self.code = None
+
+    def _get_tree(self):
+        tree = parse(self.source, self.mode)
+        misc.set_filename(self.filename, tree)
+        syntax.check(tree)
+        return tree
+
+    def compile(self):
+        pass # implemented by subclass
+
+    def getCode(self):
+        return self.code
+
+class Expression(AbstractCompileMode):
+
+    mode = "eval"
+
+    def compile(self):
+        tree = self._get_tree()
+        gen = ExpressionCodeGenerator(tree)
+        self.code = gen.getCode()
+
+class Interactive(AbstractCompileMode):
+
+    mode = "single"
+
+    def compile(self):
+        tree = self._get_tree()
+        gen = InteractiveCodeGenerator(tree)
+        self.code = gen.getCode()
+
+class Module(AbstractCompileMode):
+
+    mode = "exec"
+
+    def compile(self, display=0):
+        tree = self._get_tree()
+        gen = ModuleCodeGenerator(tree)
+        if display:
+            import pprint
+            pprint.pprint(tree)
+        self.code = gen.getCode()
+
+    def dump(self, f):
+        f.write(self.getPycHeader())
+        marshal.dump(self.code, f)
+
+    MAGIC = imp.get_magic()
+
+    def getPycHeader(self):
+        # compile.c uses marshal to write a long directly, without
+        # calling the interface that would also generate a 1-byte code
+        # to indicate the type of the value.  The simplest way to get
+        # the same effect is to call marshal and then skip the code.
+        mtime = os.stat(self.filename)[stat.ST_MTIME]
+        mtime = struct.pack('<i', mtime)
+        return self.MAGIC + mtime
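+
+    # Illustrative layout (for reference): a .pyc produced by dump() is
+    # MAGIC (4 bytes from imp.get_magic()) + mtime (a 4-byte little-endian
+    # int) followed by the marshalled code object.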
+
+class LocalNameFinder:
+    """Find local names in scope"""
+    def __init__(self, names=()):
+        self.names = misc.Set()
+        self.globals = misc.Set()
+        for name in names:
+            self.names.add(name)
+
+    # XXX list comprehensions and for loops
+
+    def getLocals(self):
+        for elt in self.globals.elements():
+            if self.names.has_elt(elt):
+                self.names.remove(elt)
+        return self.names
+
+    def visitDict(self, node):
+        pass
+
+    def visitGlobal(self, node):
+        for name in node.names:
+            self.globals.add(name)
+
+    def visitFunction(self, node):
+        self.names.add(node.name)
+
+    def visitLambda(self, node):
+        pass
+
+    def visitImport(self, node):
+        for name, alias in node.names:
+            self.names.add(alias or name)
+
+    def visitFrom(self, node):
+        for name, alias in node.names:
+            self.names.add(alias or name)
+
+    def visitClass(self, node):
+        self.names.add(node.name)
+
+    def visitAssName(self, node):
+        self.names.add(node.name)
+
+def is_constant_false(node):
+    if isinstance(node, ast.Const):
+        if not node.value:
+            return 1
+    return 0
+
+class CodeGenerator:
+    """Defines basic code generator for Python bytecode
+
+    This class is an abstract base class.  Concrete subclasses must
+    define an __init__() that defines self.graph and then calls the
+    __init__() defined in this class.
+
+    The concrete class must also define the class attributes
+    NameFinder, FunctionGen, and ClassGen.  These attributes can be
+    defined in the initClass() method, which is a hook for
+    initializing these methods after all the classes have been
+    defined.
+    """
+
+    optimized = 0 # is namespace access optimized?
+    __initialized = None
+    class_name = None # provide default for instance variable
+
+    def __init__(self):
+        if self.__initialized is None:
+            self.initClass()
+            self.__class__.__initialized = 1
+        self.checkClass()
+        self.locals = misc.Stack()
+        self.setups = misc.Stack()
+        self.curStack = 0
+        self.maxStack = 0
+        self.last_lineno = None
+        self._setupGraphDelegation()
+        self._div_op = "BINARY_DIVIDE"
+
+        # XXX set flags based on future features
+        futures = self.get_module().futures
+        for feature in futures:
+            if feature == "division":
+                self.graph.setFlag(CO_FUTURE_DIVISION)
+                self._div_op = "BINARY_TRUE_DIVIDE"
+            elif feature == "generators":
+                self.graph.setFlag(CO_GENERATOR_ALLOWED)
+
+    def initClass(self):
+        """This method is called once for each class"""
+
+    def checkClass(self):
+        """Verify that class is constructed correctly"""
+        try:
+            assert hasattr(self, 'graph')
+            assert getattr(self, 'NameFinder')
+            assert getattr(self, 'FunctionGen')
+            assert getattr(self, 'ClassGen')
+        except AssertionError, msg:
+            intro = "Bad class construction for %s" % self.__class__.__name__
+            raise AssertionError, "%s: %s" % (intro, msg)
+
+    def _setupGraphDelegation(self):
+        self.emit = self.graph.emit
+        self.newBlock = self.graph.newBlock
+        self.startBlock = self.graph.startBlock
+        self.nextBlock = self.graph.nextBlock
+        self.setDocstring = self.graph.setDocstring
+
+    def getCode(self):
+        """Return a code object"""
+        return self.graph.getCode()
+
+    def mangle(self, name):
+        if self.class_name is not None:
+            return misc.mangle(name, self.class_name)
+        else:
+            return name
+
+    def parseSymbols(self, tree):
+        s = symbols.SymbolVisitor()
+        walk(tree, s)
+        return s.scopes
+
+    def get_module(self):
+        raise RuntimeError, "should be implemented by subclasses"
+
+    # Next five methods handle name access
+
+    def isLocalName(self, name):
+        return self.locals.top().has_elt(name)
+
+    def storeName(self, name):
+        self._nameOp('STORE', name)
+
+    def loadName(self, name):
+        self._nameOp('LOAD', name)
+
+    def delName(self, name):
+        self._nameOp('DELETE', name)
+
+    def _nameOp(self, prefix, name):
+        name = self.mangle(name)
+        scope = self.scope.check_name(name)
+        if scope == SC_LOCAL:
+            if not self.optimized:
+                self.emit(prefix + '_NAME', name)
+            else:
+                self.emit(prefix + '_FAST', name)
+        elif scope == SC_GLOBAL:
+            if not self.optimized:
+                self.emit(prefix + '_NAME', name)
+            else:
+                self.emit(prefix + '_GLOBAL', name)
+        elif scope == SC_FREE or scope == SC_CELL:
+            self.emit(prefix + '_DEREF', name)
+        else:
+            raise RuntimeError, "unsupported scope for var %s: %d" % \
+                  (name, scope)
+
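+    # Illustrative summary of the dispatch above: locals in an optimized
+    # (function) scope use *_FAST, unoptimized locals and module-level
+    # names use *_NAME, declared globals in optimized scopes use
+    # *_GLOBAL, and free or cell variables use *_DEREF.
+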
+    def _implicitNameOp(self, prefix, name):
+        """Emit name ops for names generated implicitly by for loops
+
+        The interpreter generates names that start with a period or
+        dollar sign.  The symbol table ignores these names because
+        they aren't present in the program text.
+        """
+        if self.optimized:
+            self.emit(prefix + '_FAST', name)
+        else:
+            self.emit(prefix + '_NAME', name)
+
+    def set_lineno(self, node, force=0):
+        """Emit SET_LINENO if node has lineno attribute and it is
+        different than the last lineno emitted.
+
+        Returns true if SET_LINENO was emitted.
+
+        There are no rules for when an AST node should have a lineno
+        attribute.  The transformer and AST code need to be reviewed
+        and a consistent policy implemented and documented.  Until
+        then, this method works around missing line numbers.
+        """
+        lineno = getattr(node, 'lineno', None)
+        if lineno is not None and (lineno != self.last_lineno
+                                   or force):
+            self.emit('SET_LINENO', lineno)
+            self.last_lineno = lineno
+            return 1
+        return 0
+
+    # The first few visitor methods handle nodes that generate new
+    # code objects.  They use class attributes to determine what
+    # specialized code generators to use.
+
+    NameFinder = LocalNameFinder
+    FunctionGen = None
+    ClassGen = None
+
+    def visitModule(self, node):
+        self.scopes = self.parseSymbols(node)
+        self.scope = self.scopes[node]
+        self.emit('SET_LINENO', 0)
+        if node.doc:
+            self.emit('LOAD_CONST', node.doc)
+            self.storeName('__doc__')
+        lnf = walk(node.node, self.NameFinder(), verbose=0)
+        self.locals.push(lnf.getLocals())
+        self.visit(node.node)
+        self.emit('LOAD_CONST', None)
+        self.emit('RETURN_VALUE')
+
+    def visitExpression(self, node):
+        self.set_lineno(node)
+        self.scopes = self.parseSymbols(node)
+        self.scope = self.scopes[node]
+        self.visit(node.node)
+        self.emit('RETURN_VALUE')
+
+    def visitFunction(self, node):
+        self._visitFuncOrLambda(node, isLambda=0)
+        if node.doc:
+            self.setDocstring(node.doc)
+        self.storeName(node.name)
+
+    def visitLambda(self, node):
+        self._visitFuncOrLambda(node, isLambda=1)
+
+    def _visitFuncOrLambda(self, node, isLambda=0):
+        gen = self.FunctionGen(node, self.scopes, isLambda,
+                               self.class_name, self.get_module())
+        walk(node.code, gen)
+        gen.finish()
+        self.set_lineno(node)
+        for default in node.defaults:
+            self.visit(default)
+        frees = gen.scope.get_free_vars()
+        if frees:
+            for name in frees:
+                self.emit('LOAD_CLOSURE', name)
+            self.emit('LOAD_CONST', gen)
+            self.emit('MAKE_CLOSURE', len(node.defaults))
+        else:
+            self.emit('LOAD_CONST', gen)
+            self.emit('MAKE_FUNCTION', len(node.defaults))
+
+    def visitClass(self, node):
+        gen = self.ClassGen(node, self.scopes,
+                            self.get_module())
+        walk(node.code, gen)
+        gen.finish()
+        self.set_lineno(node)
+        self.emit('LOAD_CONST', node.name)
+        for base in node.bases:
+            self.visit(base)
+        self.emit('BUILD_TUPLE', len(node.bases))
+        frees = gen.scope.get_free_vars()
+        for name in frees:
+            self.emit('LOAD_CLOSURE', name)
+        self.emit('LOAD_CONST', gen)
+        if frees:
+            self.emit('MAKE_CLOSURE', 0)
+        else:
+            self.emit('MAKE_FUNCTION', 0)
+        self.emit('CALL_FUNCTION', 0)
+        self.emit('BUILD_CLASS')
+        self.storeName(node.name)
+
+    # The rest are standard visitor methods
+
+    # The next few implement control-flow statements
+
+    def visitIf(self, node):
+        end = self.newBlock()
+        numtests = len(node.tests)
+        for i in range(numtests):
+            test, suite = node.tests[i]
+            if is_constant_false(test):
+                # XXX will need to check generator stuff here
+                continue
+            self.set_lineno(test)
+            self.visit(test)
+            nextTest = self.newBlock()
+            self.emit('JUMP_IF_FALSE', nextTest)
+            self.nextBlock()
+            self.emit('POP_TOP')
+            self.visit(suite)
+            self.emit('JUMP_FORWARD', end)
+            self.startBlock(nextTest)
+            self.emit('POP_TOP')
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(end)
+
+    def visitWhile(self, node):
+        self.set_lineno(node)
+
+        loop = self.newBlock()
+        else_ = self.newBlock()
+
+        after = self.newBlock()
+        self.emit('SETUP_LOOP', after)
+
+        self.nextBlock(loop)
+        self.setups.push((LOOP, loop))
+
+        self.set_lineno(node, force=1)
+        self.visit(node.test)
+        self.emit('JUMP_IF_FALSE', else_ or after)
+
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.visit(node.body)
+        self.emit('JUMP_ABSOLUTE', loop)
+
+        self.startBlock(else_) # or just the POPs if there is no else clause
+        self.emit('POP_TOP')
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(after)
+
+    def visitFor(self, node):
+        start = self.newBlock()
+        anchor = self.newBlock()
+        after = self.newBlock()
+        self.setups.push((LOOP, start))
+
+        self.set_lineno(node)
+        self.emit('SETUP_LOOP', after)
+        self.visit(node.list)
+        self.emit('GET_ITER')
+
+        self.nextBlock(start)
+        self.set_lineno(node, force=1)
+        self.emit('FOR_ITER', anchor)
+        self.visit(node.assign)
+        self.visit(node.body)
+        self.emit('JUMP_ABSOLUTE', start)
+        self.nextBlock(anchor)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        if node.else_:
+            self.visit(node.else_)
+        self.nextBlock(after)
+
+    def visitBreak(self, node):
+        if not self.setups:
+            raise SyntaxError, "'break' outside loop (%s, %d)" % \
+                  (node.filename, node.lineno)
+        self.set_lineno(node)
+        self.emit('BREAK_LOOP')
+
+    def visitContinue(self, node):
+        if not self.setups:
+            raise SyntaxError, "'continue' outside loop (%s, %d)" % \
+                  (node.filename, node.lineno)
+        kind, block = self.setups.top()
+        if kind == LOOP:
+            self.set_lineno(node)
+            self.emit('JUMP_ABSOLUTE', block)
+            self.nextBlock()
+        elif kind == EXCEPT or kind == TRY_FINALLY:
+            self.set_lineno(node)
+            # find the block that starts the loop
+            top = len(self.setups)
+            while top > 0:
+                top = top - 1
+                kind, loop_block = self.setups[top]
+                if kind == LOOP:
+                    break
+            if kind != LOOP:
+                raise SyntaxError, "'continue' outside loop (%s, %d)" % \
+                      (node.filename, node.lineno)
+            self.emit('CONTINUE_LOOP', loop_block)
+            self.nextBlock()
+        elif kind == END_FINALLY:
+            msg = "'continue' not allowed inside 'finally' clause (%s, %d)"
+            raise SyntaxError, msg % (node.filename, node.lineno)
+
+    def visitTest(self, node, jump):
+        end = self.newBlock()
+        for child in node.nodes[:-1]:
+            self.visit(child)
+            self.emit(jump, end)
+            self.nextBlock()
+            self.emit('POP_TOP')
+        self.visit(node.nodes[-1])
+        self.nextBlock(end)
+
+    def visitAnd(self, node):
+        self.visitTest(node, 'JUMP_IF_FALSE')
+
+    def visitOr(self, node):
+        self.visitTest(node, 'JUMP_IF_TRUE')
+
+    def visitCompare(self, node):
+        self.visit(node.expr)
+        cleanup = self.newBlock()
+        for op, code in node.ops[:-1]:
+            self.visit(code)
+            self.emit('DUP_TOP')
+            self.emit('ROT_THREE')
+            self.emit('COMPARE_OP', op)
+            self.emit('JUMP_IF_FALSE', cleanup)
+            self.nextBlock()
+            self.emit('POP_TOP')
+        # now do the last comparison
+        if node.ops:
+            op, code = node.ops[-1]
+            self.visit(code)
+            self.emit('COMPARE_OP', op)
+        if len(node.ops) > 1:
+            end = self.newBlock()
+            self.emit('JUMP_FORWARD', end)
+            self.startBlock(cleanup)
+            self.emit('ROT_TWO')
+            self.emit('POP_TOP')
+            self.nextBlock(end)
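+
+    # Illustrative expansion (assumed source "a < b < c"): LOAD a; LOAD b;
+    # DUP_TOP; ROT_THREE; COMPARE_OP '<'; JUMP_IF_FALSE cleanup; POP_TOP;
+    # LOAD c; COMPARE_OP '<'; JUMP_FORWARD end; cleanup: ROT_TWO; POP_TOP;
+    # end: ...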
+
+    # list comprehensions
+    __list_count = 0
+
+    def visitListComp(self, node):
+        self.set_lineno(node)
+        # setup list
+        append = "$append%d" % self.__list_count
+        self.__list_count = self.__list_count + 1
+        self.emit('BUILD_LIST', 0)
+        self.emit('DUP_TOP')
+        self.emit('LOAD_ATTR', 'append')
+        self._implicitNameOp('STORE', append)
+
+        stack = []
+        for for_ in node.quals:
+            start, anchor = self.visit(for_)
+            cont = None
+            for if_ in for_.ifs:
+                if cont is None:
+                    cont = self.newBlock()
+                self.visit(if_, cont)
+            stack.insert(0, (start, cont, anchor))
+
+        self._implicitNameOp('LOAD', append)
+        self.visit(node.expr)
+        self.emit('CALL_FUNCTION', 1)
+        self.emit('POP_TOP')
+
+        for start, cont, anchor in stack:
+            if cont:
+                skip_one = self.newBlock()
+                self.emit('JUMP_FORWARD', skip_one)
+                self.startBlock(cont)
+                self.emit('POP_TOP')
+                self.nextBlock(skip_one)
+            self.emit('JUMP_ABSOLUTE', start)
+            self.startBlock(anchor)
+        self._implicitNameOp('DELETE', append)
+
+        self.__list_count = self.__list_count - 1
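+
+    # Illustrative shape (hedged summary): [expr for x in seq] builds an
+    # empty list, stashes its bound append method under the hidden name
+    # "$append0", then on each iteration loads "$append0", evaluates expr,
+    # calls it with one argument, and pops the result.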
+
+    def visitListCompFor(self, node):
+        start = self.newBlock()
+        anchor = self.newBlock()
+
+        self.visit(node.list)
+        self.emit('GET_ITER')
+        self.nextBlock(start)
+        self.emit('SET_LINENO', node.lineno)
+        self.emit('FOR_ITER', anchor)
+        self.nextBlock()
+        self.visit(node.assign)
+        return start, anchor
+
+    def visitListCompIf(self, node, branch):
+        self.set_lineno(node, force=1)
+        self.visit(node.test)
+        self.emit('JUMP_IF_FALSE', branch)
+        self.newBlock()
+        self.emit('POP_TOP')
+
+    # exception related
+
+    def visitAssert(self, node):
+        # XXX would be interesting to implement this via a
+        # transformation of the AST before this stage
+        end = self.newBlock()
+        self.set_lineno(node)
+        # XXX __debug__ and AssertionError appear to be special cases
+        # -- they are always loaded as globals even if there are local
+        # names.  I guess this is a sort of renaming op.
+        self.emit('LOAD_GLOBAL', '__debug__')
+        self.emit('JUMP_IF_FALSE', end)
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.visit(node.test)
+        self.emit('JUMP_IF_TRUE', end)
+        self.nextBlock()
+        self.emit('POP_TOP')
+        self.emit('LOAD_GLOBAL', 'AssertionError')
+        if node.fail:
+            self.visit(node.fail)
+            self.emit('RAISE_VARARGS', 2)
+        else:
+            self.emit('RAISE_VARARGS', 1)
+        self.nextBlock(end)
+        self.emit('POP_TOP')
+
+    def visitRaise(self, node):
+        self.set_lineno(node)
+        n = 0
+        if node.expr1:
+            self.visit(node.expr1)
+            n = n + 1
+        if node.expr2:
+            self.visit(node.expr2)
+            n = n + 1
+        if node.expr3:
+            self.visit(node.expr3)
+            n = n + 1
+        self.emit('RAISE_VARARGS', n)
+
+    def visitTryExcept(self, node):
+        body = self.newBlock()
+        handlers = self.newBlock()
+        end = self.newBlock()
+        if node.else_:
+            lElse = self.newBlock()
+        else:
+            lElse = end
+        self.set_lineno(node)
+        self.emit('SETUP_EXCEPT', handlers)
+        self.nextBlock(body)
+        self.setups.push((EXCEPT, body))
+        self.visit(node.body)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        self.emit('JUMP_FORWARD', lElse)
+        self.startBlock(handlers)
+
+        last = len(node.handlers) - 1
+        for i in range(len(node.handlers)):
+            expr, target, body = node.handlers[i]
+            self.set_lineno(expr)
+            if expr:
+                self.emit('DUP_TOP')
+                self.visit(expr)
+                self.emit('COMPARE_OP', 'exception match')
+                next = self.newBlock()
+                self.emit('JUMP_IF_FALSE', next)
+                self.nextBlock()
+                self.emit('POP_TOP')
+            self.emit('POP_TOP')
+            if target:
+                self.visit(target)
+            else:
+                self.emit('POP_TOP')
+            self.emit('POP_TOP')
+            self.visit(body)
+            self.emit('JUMP_FORWARD', end)
+            if expr:
+                self.nextBlock(next)
+            else:
+                self.nextBlock()
+            if expr: # XXX
+                self.emit('POP_TOP')
+        self.emit('END_FINALLY')
+        if node.else_:
+            self.nextBlock(lElse)
+            self.visit(node.else_)
+        self.nextBlock(end)
+
+    def visitTryFinally(self, node):
+        body = self.newBlock()
+        final = self.newBlock()
+        self.set_lineno(node)
+        self.emit('SETUP_FINALLY', final)
+        self.nextBlock(body)
+        self.setups.push((TRY_FINALLY, body))
+        self.visit(node.body)
+        self.emit('POP_BLOCK')
+        self.setups.pop()
+        self.emit('LOAD_CONST', None)
+        self.nextBlock(final)
+        self.setups.push((END_FINALLY, final))
+        self.visit(node.final)
+        self.emit('END_FINALLY')
+        self.setups.pop()
+
+    # misc
+
+    def visitDiscard(self, node):
+        self.set_lineno(node)
+        self.visit(node.expr)
+        self.emit('POP_TOP')
+
+    def visitConst(self, node):
+        self.emit('LOAD_CONST', node.value)
+
+    def visitKeyword(self, node):
+        self.emit('LOAD_CONST', node.name)
+        self.visit(node.expr)
+
+    def visitGlobal(self, node):
+        # no code to generate
+        pass
+
+    def visitName(self, node):
+        self.set_lineno(node)
+        self.loadName(node.name)
+
+    def visitPass(self, node):
+        self.set_lineno(node)
+
+    def visitImport(self, node):
+        self.set_lineno(node)
+        for name, alias in node.names:
+            if VERSION > 1:
+                self.emit('LOAD_CONST', None)
+            self.emit('IMPORT_NAME', name)
+            mod = string.split(name, ".")[0]
+            self.storeName(alias or mod)
+
+    def visitFrom(self, node):
+        self.set_lineno(node)
+        fromlist = [name for name, alias in node.names]
+        if VERSION > 1:
+            self.emit('LOAD_CONST', tuple(fromlist))
+        self.emit('IMPORT_NAME', node.modname)
+        for name, alias in node.names:
+            if VERSION > 1:
+                if name == '*':
+                    self.namespace = 0
+                    self.emit('IMPORT_STAR')
+                    # There can only be one name w/ from ... import *
+                    assert len(node.names) == 1
+                    return
+                else:
+                    self.emit('IMPORT_FROM', name)
+                    self._resolveDots(name)
+                    self.storeName(alias or name)
+            else:
+                self.emit('IMPORT_FROM', name)
+        self.emit('POP_TOP')
+
+    def _resolveDots(self, name):
+        elts = string.split(name, ".")
+        if len(elts) == 1:
+            return
+        for elt in elts[1:]:
+            self.emit('LOAD_ATTR', elt)
+
+    def visitGetattr(self, node):
+        self.visit(node.expr)
+        self.emit('LOAD_ATTR', self.mangle(node.attrname))
+
+    # next five implement assignments
+
+    def visitAssign(self, node):
+        self.set_lineno(node)
+        self.visit(node.expr)
+        dups = len(node.nodes) - 1
+        for i in range(len(node.nodes)):
+            elt = node.nodes[i]
+            if i < dups:
+                self.emit('DUP_TOP')
+            if isinstance(elt, ast.Node):
+                self.visit(elt)
+
+    def visitAssName(self, node):
+        if node.flags == 'OP_ASSIGN':
+            self.storeName(node.name)
+        elif node.flags == 'OP_DELETE':
+            self.set_lineno(node)
+            self.delName(node.name)
+        else:
+            print "oops", node.flags
+
+    def visitAssAttr(self, node):
+        self.visit(node.expr)
+        if node.flags == 'OP_ASSIGN':
+            self.emit('STORE_ATTR', self.mangle(node.attrname))
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_ATTR', self.mangle(node.attrname))
+        else:
+            print "warning: unexpected flags:", node.flags
+            print node
+
+    def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'):
+        if findOp(node) != 'OP_DELETE':
+            self.emit(op, len(node.nodes))
+        for child in node.nodes:
+            self.visit(child)
+
+    if VERSION > 1:
+        visitAssTuple = _visitAssSequence
+        visitAssList = _visitAssSequence
+    else:
+        def visitAssTuple(self, node):
+            self._visitAssSequence(node, 'UNPACK_TUPLE')
+
+        def visitAssList(self, node):
+            self._visitAssSequence(node, 'UNPACK_LIST')
+
+    # augmented assignment
+
+    def visitAugAssign(self, node):
+        self.set_lineno(node)
+        aug_node = wrap_aug(node.node)
+        self.visit(aug_node, "load")
+        self.visit(node.expr)
+        self.emit(self._augmented_opcode[node.op])
+        self.visit(aug_node, "store")
+
+    _augmented_opcode = {
+        '+=' : 'INPLACE_ADD',
+        '-=' : 'INPLACE_SUBTRACT',
+        '*=' : 'INPLACE_MULTIPLY',
+        '/=' : 'INPLACE_DIVIDE',
+        '//=': 'INPLACE_FLOOR_DIVIDE',
+        '%=' : 'INPLACE_MODULO',
+        '**=': 'INPLACE_POWER',
+        '>>=': 'INPLACE_RSHIFT',
+        '<<=': 'INPLACE_LSHIFT',
+        '&=' : 'INPLACE_AND',
+        '^=' : 'INPLACE_XOR',
+        '|=' : 'INPLACE_OR',
+        }
+
+    def visitAugName(self, node, mode):
+        if mode == "load":
+            self.loadName(node.name)
+        elif mode == "store":
+            self.storeName(node.name)
+
+    def visitAugGetattr(self, node, mode):
+        if mode == "load":
+            self.visit(node.expr)
+            self.emit('DUP_TOP')
+            self.emit('LOAD_ATTR', self.mangle(node.attrname))
+        elif mode == "store":
+            self.emit('ROT_TWO')
+            self.emit('STORE_ATTR', self.mangle(node.attrname))
+
+    def visitAugSlice(self, node, mode):
+        if mode == "load":
+            self.visitSlice(node, 1)
+        elif mode == "store":
+            slice = 0
+            if node.lower:
+                slice = slice | 1
+            if node.upper:
+                slice = slice | 2
+            if slice == 0:
+                self.emit('ROT_TWO')
+            elif slice == 3:
+                self.emit('ROT_FOUR')
+            else:
+                self.emit('ROT_THREE')
+            self.emit('STORE_SLICE+%d' % slice)
+
+    def visitAugSubscript(self, node, mode):
+        if len(node.subs) > 1:
+            raise SyntaxError, "augmented assignment to tuple is not possible"
+        if mode == "load":
+            self.visitSubscript(node, 1)
+        elif mode == "store":
+            self.emit('ROT_THREE')
+            self.emit('STORE_SUBSCR')
+
+    def visitExec(self, node):
+        self.visit(node.expr)
+        if node.locals is None:
+            self.emit('LOAD_CONST', None)
+        else:
+            self.visit(node.locals)
+        if node.globals is None:
+            self.emit('DUP_TOP')
+        else:
+            self.visit(node.globals)
+        self.emit('EXEC_STMT')
+
+    def visitCallFunc(self, node):
+        pos = 0
+        kw = 0
+        self.set_lineno(node)
+        self.visit(node.node)
+        for arg in node.args:
+            self.visit(arg)
+            if isinstance(arg, ast.Keyword):
+                kw = kw + 1
+            else:
+                pos = pos + 1
+        if node.star_args is not None:
+            self.visit(node.star_args)
+        if node.dstar_args is not None:
+            self.visit(node.dstar_args)
+        have_star = node.star_args is not None
+        have_dstar = node.dstar_args is not None
+        opcode = callfunc_opcode_info[have_star, have_dstar]
+        self.emit(opcode, kw << 8 | pos)
+
+    def visitPrint(self, node, newline=0):
+        self.set_lineno(node)
+        if node.dest:
+            self.visit(node.dest)
+        for child in node.nodes:
+            if node.dest:
+                self.emit('DUP_TOP')
+            self.visit(child)
+            if node.dest:
+                self.emit('ROT_TWO')
+                self.emit('PRINT_ITEM_TO')
+            else:
+                self.emit('PRINT_ITEM')
+        if node.dest and not newline:
+            self.emit('POP_TOP')
+
+    def visitPrintnl(self, node):
+        self.visitPrint(node, newline=1)
+        if node.dest:
+            self.emit('PRINT_NEWLINE_TO')
+        else:
+            self.emit('PRINT_NEWLINE')
+
+    def visitReturn(self, node):
+        self.set_lineno(node)
+        self.visit(node.value)
+        self.emit('RETURN_VALUE')
+
+    def visitYield(self, node):
+        self.set_lineno(node)
+        self.visit(node.value)
+        self.emit('YIELD_STMT')
+
+    # slice and subscript stuff
+
+    def visitSlice(self, node, aug_flag=None):
+        # aug_flag is used by visitAugSlice
+        self.visit(node.expr)
+        slice = 0
+        if node.lower:
+            self.visit(node.lower)
+            slice = slice | 1
+        if node.upper:
+            self.visit(node.upper)
+            slice = slice | 2
+        if aug_flag:
+            if slice == 0:
+                self.emit('DUP_TOP')
+            elif slice == 3:
+                self.emit('DUP_TOPX', 3)
+            else:
+                self.emit('DUP_TOPX', 2)
+        if node.flags == 'OP_APPLY':
+            self.emit('SLICE+%d' % slice)
+        elif node.flags == 'OP_ASSIGN':
+            self.emit('STORE_SLICE+%d' % slice)
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_SLICE+%d' % slice)
+        else:
+            print "weird slice", node.flags
+            raise
+
+    def visitSubscript(self, node, aug_flag=None):
+        self.visit(node.expr)
+        for sub in node.subs:
+            self.visit(sub)
+        if aug_flag:
+            self.emit('DUP_TOPX', 2)
+        if len(node.subs) > 1:
+            self.emit('BUILD_TUPLE', len(node.subs))
+        if node.flags == 'OP_APPLY':
+            self.emit('BINARY_SUBSCR')
+        elif node.flags == 'OP_ASSIGN':
+            self.emit('STORE_SUBSCR')
+        elif node.flags == 'OP_DELETE':
+            self.emit('DELETE_SUBSCR')
+
+    # binary ops
+
+    def binaryOp(self, node, op):
+        self.visit(node.left)
+        self.visit(node.right)
+        self.emit(op)
+
+    def visitAdd(self, node):
+        return self.binaryOp(node, 'BINARY_ADD')
+
+    def visitSub(self, node):
+        return self.binaryOp(node, 'BINARY_SUBTRACT')
+
+    def visitMul(self, node):
+        return self.binaryOp(node, 'BINARY_MULTIPLY')
+
+    def visitDiv(self, node):
+        return self.binaryOp(node, self._div_op)
+
+    def visitFloorDiv(self, node):
+        return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE')
+
+    def visitMod(self, node):
+        return self.binaryOp(node, 'BINARY_MODULO')
+
+    def visitPower(self, node):
+        return self.binaryOp(node, 'BINARY_POWER')
+
+    def visitLeftShift(self, node):
+        return self.binaryOp(node, 'BINARY_LSHIFT')
+
+    def visitRightShift(self, node):
+        return self.binaryOp(node, 'BINARY_RSHIFT')
+
+    # unary ops
+
+    def unaryOp(self, node, op):
+        self.visit(node.expr)
+        self.emit(op)
+
+    def visitInvert(self, node):
+        return self.unaryOp(node, 'UNARY_INVERT')
+
+    def visitUnarySub(self, node):
+        return self.unaryOp(node, 'UNARY_NEGATIVE')
+
+    def visitUnaryAdd(self, node):
+        return self.unaryOp(node, 'UNARY_POSITIVE')
+
+    def visitUnaryInvert(self, node):
+        return self.unaryOp(node, 'UNARY_INVERT')
+
+    def visitNot(self, node):
+        return self.unaryOp(node, 'UNARY_NOT')
+
+    def visitBackquote(self, node):
+        return self.unaryOp(node, 'UNARY_CONVERT')
+
+    # bit ops
+
+    def bitOp(self, nodes, op):
+        self.visit(nodes[0])
+        for node in nodes[1:]:
+            self.visit(node)
+            self.emit(op)
+
+    def visitBitand(self, node):
+        return self.bitOp(node.nodes, 'BINARY_AND')
+
+    def visitBitor(self, node):
+        return self.bitOp(node.nodes, 'BINARY_OR')
+
+    def visitBitxor(self, node):
+        return self.bitOp(node.nodes, 'BINARY_XOR')
+
+    # object constructors
+
+    def visitEllipsis(self, node):
+        self.emit('LOAD_CONST', Ellipsis)
+
+    def visitTuple(self, node):
+        self.set_lineno(node)
+        for elt in node.nodes:
+            self.visit(elt)
+        self.emit('BUILD_TUPLE', len(node.nodes))
+
+    def visitList(self, node):
+        self.set_lineno(node)
+        for elt in node.nodes:
+            self.visit(elt)
+        self.emit('BUILD_LIST', len(node.nodes))
+
+    def visitSliceobj(self, node):
+        for child in node.nodes:
+            self.visit(child)
+        self.emit('BUILD_SLICE', len(node.nodes))
+
+    def visitDict(self, node):
+        lineno = getattr(node, 'lineno', None)
+        if lineno:
+            self.emit('SET_LINENO', lineno)
+        self.emit('BUILD_MAP', 0)
+        for k, v in node.items:
+            lineno2 = getattr(v, 'lineno', None)
+            if lineno2 is not None and lineno != lineno2:
+                self.emit('SET_LINENO', lineno2)
+                lineno = lineno2
+            self.emit('DUP_TOP')
+            self.visit(v)
+            self.emit('ROT_TWO')
+            self.visit(k)
+            self.emit('STORE_SUBSCR')
+
+class NestedScopeMixin:
+    """Defines initClass() for nested scoping (Python 2.2-compatible)"""
+    def initClass(self):
+        self.__class__.NameFinder = LocalNameFinder
+        self.__class__.FunctionGen = FunctionCodeGenerator
+        self.__class__.ClassGen = ClassCodeGenerator
+
+class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator):
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<module>", tree.filename)
+        self.futures = future.find_futures(tree)
+        self.__super_init()
+        walk(tree, self)
+
+    def get_module(self):
+        return self
+
+class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator):
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+    futures = ()
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<expression>", tree.filename)
+        self.__super_init()
+        walk(tree, self)
+
+    def get_module(self):
+        return self
+
+class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator):
+
+    __super_init = CodeGenerator.__init__
+
+    scopes = None
+    futures = ()
+
+    def __init__(self, tree):
+        self.graph = pyassem.PyFlowGraph("<interactive>", tree.filename)
+        self.__super_init()
+        self.set_lineno(tree)
+        walk(tree, self)
+        self.emit('RETURN_VALUE')
+
+    def get_module(self):
+        return self
+
+    def visitDiscard(self, node):
+        # XXX Discard means it's an expression.  Perhaps this is a bad
+        # name.
+        self.visit(node.expr)
+        self.emit('PRINT_EXPR')
+
+class AbstractFunctionCode:
+    optimized = 1
+    lambdaCount = 0
+
+    def __init__(self, func, scopes, isLambda, class_name, mod):
+        self.class_name = class_name
+        self.module = mod
+        if isLambda:
+            klass = FunctionCodeGenerator
+            name = "<lambda.%d>" % klass.lambdaCount
+            klass.lambdaCount = klass.lambdaCount + 1
+        else:
+            name = func.name
+        args, hasTupleArg = generateArgList(func.argnames)
+        self.graph = pyassem.PyFlowGraph(name, func.filename, args,
+                                         optimized=1)
+        self.isLambda = isLambda
+        self.super_init()
+
+        if not isLambda and func.doc:
+            self.setDocstring(func.doc)
+
+        lnf = walk(func.code, self.NameFinder(args), verbose=0)
+        self.locals.push(lnf.getLocals())
+        if func.varargs:
+            self.graph.setFlag(CO_VARARGS)
+        if func.kwargs:
+            self.graph.setFlag(CO_VARKEYWORDS)
+        self.set_lineno(func)
+        if hasTupleArg:
+            self.generateArgUnpack(func.argnames)
+
+    def get_module(self):
+        return self.module
+
+    def finish(self):
+        self.graph.startExitBlock()
+        if not self.isLambda:
+            self.emit('LOAD_CONST', None)
+        self.emit('RETURN_VALUE')
+
+    def generateArgUnpack(self, args):
+        for i in range(len(args)):
+            arg = args[i]
+            if type(arg) == types.TupleType:
+                self.emit('LOAD_FAST', '.%d' % (i * 2))
+                self.unpackSequence(arg)
+
+    def unpackSequence(self, tup):
+        if VERSION > 1:
+            self.emit('UNPACK_SEQUENCE', len(tup))
+        else:
+            self.emit('UNPACK_TUPLE', len(tup))
+        for elt in tup:
+            if type(elt) == types.TupleType:
+                self.unpackSequence(elt)
+            else:
+                self._nameOp('STORE', elt)
+
+    unpackTuple = unpackSequence
+
+class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
+                            CodeGenerator):
+    super_init = CodeGenerator.__init__ # called by the other __init__
+    scopes = None
+
+    __super_init = AbstractFunctionCode.__init__
+
+    def __init__(self, func, scopes, isLambda, class_name, mod):
+        self.scopes = scopes
+        self.scope = scopes[func]
+        self.__super_init(func, scopes, isLambda, class_name, mod)
+        self.graph.setFreeVars(self.scope.get_free_vars())
+        self.graph.setCellVars(self.scope.get_cell_vars())
+        if self.graph.checkFlag(CO_GENERATOR_ALLOWED):
+            if self.scope.generator is not None:
+                self.graph.setFlag(CO_GENERATOR)
+
+class AbstractClassCode:
+
+    def __init__(self, klass, scopes, module):
+        self.class_name = klass.name
+        self.module = module
+        self.graph = pyassem.PyFlowGraph(klass.name, klass.filename,
+                                           optimized=0, klass=1)
+        self.super_init()
+        lnf = walk(klass.code, self.NameFinder(), verbose=0)
+        self.locals.push(lnf.getLocals())
+        self.graph.setFlag(CO_NEWLOCALS)
+        if klass.doc:
+            self.setDocstring(klass.doc)
+
+    def get_module(self):
+        return self.module
+
+    def finish(self):
+        self.graph.startExitBlock()
+        self.emit('LOAD_LOCALS')
+        self.emit('RETURN_VALUE')
+
+class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator):
+    super_init = CodeGenerator.__init__
+    scopes = None
+
+    __super_init = AbstractClassCode.__init__
+
+    def __init__(self, klass, scopes, module):
+        self.scopes = scopes
+        self.scope = scopes[klass]
+        self.__super_init(klass, scopes, module)
+        self.graph.setFreeVars(self.scope.get_free_vars())
+        self.graph.setCellVars(self.scope.get_cell_vars())
+        self.set_lineno(klass)
+        if klass.doc:
+            self.emit("LOAD_CONST", klass.doc)
+            self.storeName("__doc__")
+
+def generateArgList(arglist):
+    """Generate an arg list marking TupleArgs"""
+    args = []
+    extra = []
+    count = 0
+    for i in range(len(arglist)):
+        elt = arglist[i]
+        if type(elt) == types.StringType:
+            args.append(elt)
+        elif type(elt) == types.TupleType:
+            args.append(TupleArg(i * 2, elt))
+            extra.extend(misc.flatten(elt))
+            count = count + 1
+        else:
+            raise ValueError, "unexpect argument type:", elt
+    return args + extra, count
+
+def findOp(node):
+    """Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
+    v = OpFinder()
+    walk(node, v, verbose=0)
+    return v.op
+
+class OpFinder:
+    def __init__(self):
+        self.op = None
+    def visitAssName(self, node):
+        if self.op is None:
+            self.op = node.flags
+        elif self.op != node.flags:
+            raise ValueError, "mixed ops in stmt"
+    visitAssAttr = visitAssName
+    visitSubscript = visitAssName
+
+class Delegator:
+    """Base class to support delegation for augmented assignment nodes
+
+    To generate code for augmented assignments, we use the following
+    wrapper classes.  In visitAugAssign, the left-hand expression node
+    is visited twice.  The first visit uses the normal method for
+    that node.  The second visit uses a different method that generates
+    the appropriate code to perform the assignment.
+    These delegator classes wrap the original AST nodes in order to
+    support the variant visit methods.
+    """
+    def __init__(self, obj):
+        self.obj = obj
+
+    def __getattr__(self, attr):
+        return getattr(self.obj, attr)
+
+class AugGetattr(Delegator):
+    pass
+
+class AugName(Delegator):
+    pass
+
+class AugSlice(Delegator):
+    pass
+
+class AugSubscript(Delegator):
+    pass
+
+wrapper = {
+    ast.Getattr: AugGetattr,
+    ast.Name: AugName,
+    ast.Slice: AugSlice,
+    ast.Subscript: AugSubscript,
+    }
+
+def wrap_aug(node):
+    return wrapper[node.__class__](node)
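+
+# Illustrative usage (assumed node): wrap_aug(ast.Name('x')) returns
+# AugName(Name('x')); attribute reads on the wrapper (e.g. .name) are
+# delegated to the wrapped node via __getattr__.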
+
+if __name__ == "__main__":
+    import sys
+
+    for file in sys.argv[1:]:
+        compileFile(file)
diff --git a/lib-python/2.2/compiler/symbols.py b/lib-python/2.2/compiler/symbols.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/symbols.py
@@ -0,0 +1,419 @@
+"""Module symbol-table generator"""
+
+from compiler import ast
+from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
+from compiler.misc import mangle
+import types
+
+
+import sys
+
+MANGLE_LEN = 256
+
+class Scope:
+    # XXX how much information do I need about each name?
+    def __init__(self, name, module, klass=None):
+        self.name = name
+        self.module = module
+        self.defs = {}
+        self.uses = {}
+        self.globals = {}
+        self.params = {}
+        self.frees = {}
+        self.cells = {}
+        self.children = []
+        # nested is true if the scope could contain free variables,
+        # i.e. if it is nested within another function.
+        self.nested = None
+        self.generator = None
+        self.klass = None
+        if klass is not None:
+            for i in range(len(klass)):
+                if klass[i] != '_':
+                    self.klass = klass[i:]
+                    break
+
+    def __repr__(self):
+        return "<%s: %s>" % (self.__class__.__name__, self.name)
+
+    def mangle(self, name):
+        if self.klass is None:
+            return name
+        return mangle(name, self.klass)
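+
+    # Illustrative (standard CPython private-name mangling): inside class
+    # Spam, mangle('__eggs') returns '_Spam__eggs'; names that do not
+    # start with two underscores, or that also end with two, pass
+    # through unchanged.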
+
+    def add_def(self, name):
+        self.defs[self.mangle(name)] = 1
+
+    def add_use(self, name):
+        self.uses[self.mangle(name)] = 1
+
+    def add_global(self, name):
+        name = self.mangle(name)
+        if self.uses.has_key(name) or self.defs.has_key(name):
+            pass # XXX warn about global following def/use
+        if self.params.has_key(name):
+            raise SyntaxError, "%s in %s is global and parameter" % \
+                  (name, self.name)
+        self.globals[name] = 1
+        self.module.add_def(name)
+
+    def add_param(self, name):
+        name = self.mangle(name)
+        self.defs[name] = 1
+        self.params[name] = 1
+
+    def get_names(self):
+        d = {}
+        d.update(self.defs)
+        d.update(self.uses)
+        d.update(self.globals)
+        return d.keys()
+
+    def add_child(self, child):
+        self.children.append(child)
+
+    def get_children(self):
+        return self.children
+
+    def DEBUG(self):
+        print >> sys.stderr, self.name, self.nested and "nested" or ""
+        print >> sys.stderr, "\tglobals: ", self.globals
+        print >> sys.stderr, "\tcells: ", self.cells
+        print >> sys.stderr, "\tdefs: ", self.defs
+        print >> sys.stderr, "\tuses: ", self.uses
+        print >> sys.stderr, "\tfrees:", self.frees
+
+    def check_name(self, name):
+        """Return scope of name.
+
+        The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
+        """
+        if self.globals.has_key(name):
+            return SC_GLOBAL
+        if self.cells.has_key(name):
+            return SC_CELL
+        if self.defs.has_key(name):
+            return SC_LOCAL
+        if self.nested and (self.frees.has_key(name) or
+                            self.uses.has_key(name)):
+            return SC_FREE
+        if self.nested:
+            return SC_UNKNOWN
+        else:
+            return SC_GLOBAL
+
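+    # A small worked example (hypothetical) of what check_name reports:
+    # for
+    #
+    #     def f():
+    #         x = 1
+    #         def g():
+    #             return x
+    #
+    # the scope for f classifies 'x' as SC_CELL (g refers to it freely),
+    # while the nested scope for g classifies 'x' as SC_FREE.
+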
+    def get_free_vars(self):
+        if not self.nested:
+            return ()
+        free = {}
+        free.update(self.frees)
+        for name in self.uses.keys():
+            if not (self.defs.has_key(name) or
+                    self.globals.has_key(name)):
+                free[name] = 1
+        return free.keys()
+
+    def handle_children(self):
+        for child in self.children:
+            frees = child.get_free_vars()
+            globals = self.add_frees(frees)
+            for name in globals:
+                child.force_global(name)
+
+    def force_global(self, name):
+        """Force name to be global in scope.
+
+        Some child of the current node had a free reference to name.
+        When the child was processed, it was labelled a free
+        variable.  Now that all its enclosing scopes have been
+        processed, the name is known to be a global or builtin.  So
+        walk back down the child chain and set the name to be global
+        rather than free.
+
+        Be careful to stop if a child does not think the name is
+        free.
+        """
+        self.globals[name] = 1
+        if self.frees.has_key(name):
+            del self.frees[name]
+        for child in self.children:
+            if child.check_name(name) == SC_FREE:
+                child.force_global(name)
+
+    def add_frees(self, names):
+        """Process list of free vars from nested scope.
+
+        Returns a list of names that are either 1) declared global in the
+        parent or 2) undefined in a top-level parent.  In either case,
+        the nested scope should treat them as globals.
+        """
+        child_globals = []
+        for name in names:
+            sc = self.check_name(name)
+            if self.nested:
+                if sc == SC_UNKNOWN or sc == SC_FREE \
+                   or isinstance(self, ClassScope):
+                    self.frees[name] = 1
+                elif sc == SC_GLOBAL:
+                    child_globals.append(name)
+                elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
+                    self.cells[name] = 1
+                elif sc != SC_CELL:
+                    child_globals.append(name)
+            else:
+                if sc == SC_LOCAL:
+                    self.cells[name] = 1
+                elif sc != SC_CELL:
+                    child_globals.append(name)
+        return child_globals
+
+    def get_cell_vars(self):
+        return self.cells.keys()
+
+class ModuleScope(Scope):
+    __super_init = Scope.__init__
+
+    def __init__(self):
+        self.__super_init("global", self)
+
+class FunctionScope(Scope):
+    pass
+
+class LambdaScope(FunctionScope):
+    __super_init = Scope.__init__
+
+    __counter = 1
+
+    def __init__(self, module, klass=None):
+        i = self.__counter
+        self.__counter += 1
+        self.__super_init("lambda.%d" % i, module, klass)
+
+class ClassScope(Scope):
+    __super_init = Scope.__init__
+
+    def __init__(self, name, module):
+        self.__super_init(name, module, name)
+
+class SymbolVisitor:
+    def __init__(self):
+        self.scopes = {}
+        self.klass = None
+
+    # nodes that define new scopes
+
+    def visitModule(self, node):
+        scope = self.module = self.scopes[node] = ModuleScope()
+        self.visit(node.node, scope)
+
+    visitExpression = visitModule
+
+    def visitFunction(self, node, parent):
+        parent.add_def(node.name)
+        for n in node.defaults:
+            self.visit(n, parent)
+        scope = FunctionScope(node.name, self.module, self.klass)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        self.scopes[node] = scope
+        self._do_args(scope, node.argnames)
+        self.visit(node.code, scope)
+        self.handle_free_vars(scope, parent)
+
+    def visitLambda(self, node, parent):
+        for n in node.defaults:
+            self.visit(n, parent)
+        scope = LambdaScope(self.module, self.klass)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        self.scopes[node] = scope
+        self._do_args(scope, node.argnames)
+        self.visit(node.code, scope)
+        self.handle_free_vars(scope, parent)
+
+    def _do_args(self, scope, args):
+        for name in args:
+            if type(name) == types.TupleType:
+                self._do_args(scope, name)
+            else:
+                scope.add_param(name)
+
+    def handle_free_vars(self, scope, parent):
+        parent.add_child(scope)
+        scope.handle_children()
+
+    def visitClass(self, node, parent):
+        parent.add_def(node.name)
+        for n in node.bases:
+            self.visit(n, parent)
+        scope = ClassScope(node.name, self.module)
+        if parent.nested or isinstance(parent, FunctionScope):
+            scope.nested = 1
+        if node.doc is not None:
+            scope.add_def('__doc__')
+        self.scopes[node] = scope
+        prev = self.klass
+        self.klass = node.name
+        self.visit(node.code, scope)
+        self.klass = prev
+        self.handle_free_vars(scope, parent)
+
+    # name can be a def or a use
+
+    # XXX a few calls and nodes expect a third "assign" arg that is
+    # true if the name is being used as an assignment.  Only
+    # expressions contained within statements may have the assign arg.
+
+    def visitName(self, node, scope, assign=0):
+        if assign:
+            scope.add_def(node.name)
+        else:
+            scope.add_use(node.name)
+
+    # operations that bind new names
+
+    def visitFor(self, node, scope):
+        self.visit(node.assign, scope, 1)
+        self.visit(node.list, scope)
+        self.visit(node.body, scope)
+        if node.else_:
+            self.visit(node.else_, scope)
+
+    def visitFrom(self, node, scope):
+        for name, asname in node.names:
+            if name == "*":
+                continue
+            scope.add_def(asname or name)
+
+    def visitImport(self, node, scope):
+        for name, asname in node.names:
+            i = name.find(".")
+            if i > -1:
+                name = name[:i]
+            scope.add_def(asname or name)
+
+    def visitGlobal(self, node, scope):
+        for name in node.names:
+            scope.add_global(name)
+
+    def visitAssign(self, node, scope):
+        """Propagate assignment flag down to child nodes.
+
+        The Assign node doesn't itself contain the variables being
+        assigned to.  Instead, the children in node.nodes are visited
+        with the assign flag set to true.  When the names occur in
+        those nodes, they are marked as defs.
+
+        Some names that occur in an assignment target are not bound by
+        the assignment, e.g. a name occurring inside a slice.  The
+        visitor handles these nodes specially; they do not propagate
+        the assign flag to their children.
+        """
+        for n in node.nodes:
+            self.visit(n, scope, 1)
+        self.visit(node.expr, scope)
+
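+    # For example (illustrative): in "a, b[i] = expr", the AssName for
+    # 'a' records a def, while the Subscript target visits 'b' and 'i'
+    # as plain uses -- the assign flag is not propagated, because the
+    # assignment mutates b rather than binding a new name.
+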
+    def visitAssName(self, node, scope, assign=1):
+        scope.add_def(node.name)
+
+    def visitAssAttr(self, node, scope, assign=0):
+        self.visit(node.expr, scope, 0)
+
+    def visitSubscript(self, node, scope, assign=0):
+        self.visit(node.expr, scope, 0)
+        for n in node.subs:
+            self.visit(n, scope, 0)
+
+    def visitSlice(self, node, scope, assign=0):
+        self.visit(node.expr, scope, 0)
+        if node.lower:
+            self.visit(node.lower, scope, 0)
+        if node.upper:
+            self.visit(node.upper, scope, 0)
+
+    def visitAugAssign(self, node, scope):
+        # If the LHS is a name, then this counts as assignment.
+        # Otherwise, it's just use.
+        self.visit(node.node, scope)
+        if isinstance(node.node, ast.Name):
+            self.visit(node.node, scope, 1) # XXX worry about this
+        self.visit(node.expr, scope)
+
+    # prune if statements if tests are false
+
+    _const_types = types.StringType, types.IntType, types.FloatType
+
+    def visitIf(self, node, scope):
+        for test, body in node.tests:
+            if isinstance(test, ast.Const):
+                if type(test.value) in self._const_types:
+                    if not test.value:
+                        continue
+            self.visit(test, scope)
+            self.visit(body, scope)
+        if node.else_:
+            self.visit(node.else_, scope)
+
+    # a yield statement signals a generator
+
+    def visitYield(self, node, scope):
+        scope.generator = 1
+        self.visit(node.value, scope)
+
+def sort(l):
+    l = l[:]
+    l.sort()
+    return l
+
+def list_eq(l1, l2):
+    return sort(l1) == sort(l2)
+
+if __name__ == "__main__":
+    import sys
+    from compiler import parseFile, walk
+    import symtable
+
+    def get_names(syms):
+        return [s for s in [s.get_name() for s in syms.get_symbols()]
+                if not (s.startswith('_[') or s.startswith('.'))]
+
+    for file in sys.argv[1:]:
+        print file
+        f = open(file)
+        buf = f.read()
+        f.close()
+        syms = symtable.symtable(buf, file, "exec")
+        mod_names = get_names(syms)
+        tree = parseFile(file)
+        s = SymbolVisitor()
+        walk(tree, s)
+
+        # compare module-level symbols
+        names2 = s.scopes[tree].get_names()
+
+        if not list_eq(mod_names, names2):
+            print
+            print "oops", file
+            print sort(mod_names)
+            print sort(names2)
+            sys.exit(-1)
+
+        d = {}
+        d.update(s.scopes)
+        del d[tree]
+        scopes = d.values()
+        del d
+
+        for s in syms.get_symbols():
+            if s.is_namespace():
+                l = [sc for sc in scopes
+                     if sc.name == s.get_name()]
+                if len(l) > 1:
+                    print "skipping", s.get_name()
+                else:
+                    if not list_eq(get_names(s.get_namespace()),
+                                   l[0].get_names()):
+                        print s.get_name()
+                        print sort(get_names(s.get_namespace()))
+                        print sort(l[0].get_names())
+                        sys.exit(-1)
diff --git a/lib-python/2.2/compiler/syntax.py b/lib-python/2.2/compiler/syntax.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/syntax.py
@@ -0,0 +1,46 @@
+"""Check for errs in the AST.
+
+The Python parser does not catch all syntax errors.  Others, like
+assignments with invalid targets, are caught in the code generation
+phase.
+
+The compiler package catches some errors in the transformer module.
+But it seems clearer to write checkers that use the AST to detect
+errors.
+"""
+
+from compiler import ast, walk
+
+def check(tree, multi=None):
+    v = SyntaxErrorChecker(multi)
+    walk(tree, v)
+    return v.errors
+
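+# A minimal usage sketch (the filename is illustrative): parse a source
+# buffer and count the errors the checker can detect, printing a message
+# for each rather than raising on the first.
+#
+#     from compiler import parse, syntax
+#     tree = parse(open("example.py").read())
+#     nerrors = syntax.check(tree, multi=1)
+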
+class SyntaxErrorChecker:
+    """A visitor to find syntax errors in the AST."""
+
+    def __init__(self, multi=None):
+        """Create new visitor object.
+
+        If optional argument multi is not None, then print messages
+        for each error rather than raising a SyntaxError for the
+        first.
+        """
+        self.multi = multi
+        self.errors = 0
+
+    def error(self, node, msg):
+        self.errors = self.errors + 1
+        if self.multi is not None:
+            print "%s:%s: %s" % (node.filename, node.lineno, msg)
+        else:
+            raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
+
+    def visitAssign(self, node):
+        # the transformer module handles many of these
+        for target in node.nodes:
+            pass
+##            if isinstance(target, ast.AssList):
+##                if target.lineno is None:
+##                    target.lineno = node.lineno
+##                self.error(target, "can't assign to list comprehension")
diff --git a/lib-python/2.2/compiler/transformer.py b/lib-python/2.2/compiler/transformer.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/transformer.py
@@ -0,0 +1,1359 @@
+"""Parse tree transformation module.
+
+Transforms Python source code into an abstract syntax tree (AST)
+defined in the ast module.
+
+The simplest ways to invoke this module are via parse and parseFile.
+parse(buf) -> AST
+parseFile(path) -> AST
+"""
+
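+# A short usage sketch (the filename is illustrative):
+#
+#     from compiler.transformer import parse, parseFile
+#     tree = parse("x = 1\n")           # source text -> AST
+#     tree = parseFile("example.py")    # file contents -> AST
+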
+# Original version written by Greg Stein (gstein at lyra.org)
+#                         and Bill Tutt (rassilon at lima.mudlib.org)
+# February 1997.
+#
+# Modifications and improvements for Python 2.0 by Jeremy Hylton and
+# Mark Hammond
+
+# Portions of this file are:
+# Copyright (C) 1997-1998 Greg Stein. All Rights Reserved.
+#
+# This module is provided under a BSD-ish license. See
+#   http://www.opensource.org/licenses/bsd-license.html
+# and replace OWNER, ORGANIZATION, and YEAR as appropriate.
+
+from ast import *
+import parser
+# Care must be taken to use only symbols and tokens defined in Python
+# 1.5.2 for code branches executed in 1.5.2
+import symbol
+import token
+import string
+import sys
+
+error = 'walker.error'
+
+from consts import CO_VARARGS, CO_VARKEYWORDS
+from consts import OP_ASSIGN, OP_DELETE, OP_APPLY
+
+def parseFile(path):
+    f = open(path)
+    src = f.read()
+    f.close()
+    return parse(src)
+
+def parse(buf, mode="exec"):
+    if mode == "exec" or mode == "single":
+        return Transformer().parsesuite(buf)
+    elif mode == "eval":
+        return Transformer().parseexpr(buf)
+    else:
+        raise ValueError("compile() arg 3 must be"
+                         " 'exec' or 'eval' or 'single'")
+
+def asList(nodes):
+    l = []
+    for item in nodes:
+        if hasattr(item, "asList"):
+            l.append(item.asList())
+        else:
+            if type(item) is type( (None, None) ):
+                l.append(tuple(asList(item)))
+            elif type(item) is type( [] ):
+                l.append(asList(item))
+            else:
+                l.append(item)
+    return l
+
+def Node(*args):
+    kind = args[0]
+    if nodes.has_key(kind):
+        try:
+            return apply(nodes[kind], args[1:])
+        except TypeError:
+            print nodes[kind], len(args), args
+            raise
+    else:
+        raise error, "Can't find appropriate Node type: %s" % str(args)
+        #return apply(ast.Node, args)
+
+class Transformer:
+    """Utility object for transforming Python parse trees.
+
+    Exposes the following methods:
+        tree = transform(ast_tree)
+        tree = parsesuite(text)
+        tree = parseexpr(text)
+        tree = parsefile(fileob | filename)
+    """
+
+    def __init__(self):
+        self._dispatch = {}
+        for value, name in symbol.sym_name.items():
+            if hasattr(self, name):
+                self._dispatch[value] = getattr(self, name)
+        self._dispatch[token.NEWLINE] = self.com_NEWLINE
+        self._atom_dispatch = {token.LPAR: self.atom_lpar,
+                               token.LSQB: self.atom_lsqb,
+                               token.LBRACE: self.atom_lbrace,
+                               token.BACKQUOTE: self.atom_backquote,
+                               token.NUMBER: self.atom_number,
+                               token.STRING: self.atom_string,
+                               token.NAME: self.atom_name,
+                               }
+
+    def transform(self, tree):
+        """Transform an AST into a modified parse tree."""
+        if type(tree) != type(()) and type(tree) != type([]):
+            tree = parser.ast2tuple(tree, line_info=1)
+        return self.compile_node(tree)
+
+    def parsesuite(self, text):
+        """Return a modified parse tree for the given suite text."""
+        # Hack for handling non-native line endings on non-DOS-like OSs.
+        text = string.replace(text, '\x0d', '')
+        return self.transform(parser.suite(text))
+
+    def parseexpr(self, text):
+        """Return a modified parse tree for the given expression text."""
+        return self.transform(parser.expr(text))
+
+    def parsefile(self, file):
+        """Return a modified parse tree for the contents of the given file."""
+        if type(file) == type(''):
+            file = open(file)
+        return self.parsesuite(file.read())
+
+    # --------------------------------------------------------------
+    #
+    # PRIVATE METHODS
+    #
+
+    def compile_node(self, node):
+        ### emit a line-number node?
+        n = node[0]
+        if n == symbol.single_input:
+            return self.single_input(node[1:])
+        if n == symbol.file_input:
+            return self.file_input(node[1:])
+        if n == symbol.eval_input:
+            return self.eval_input(node[1:])
+        if n == symbol.lambdef:
+            return self.lambdef(node[1:])
+        if n == symbol.funcdef:
+            return self.funcdef(node[1:])
+        if n == symbol.classdef:
+            return self.classdef(node[1:])
+
+        raise error, ('unexpected node type', n)
+
+    def single_input(self, node):
+        ### do we want to do anything about being "interactive" ?
+
+        # NEWLINE | simple_stmt | compound_stmt NEWLINE
+        n = node[0][0]
+        if n != token.NEWLINE:
+            return self.com_stmt(node[0])
+
+        return Pass()
+
+    def file_input(self, nodelist):
+        doc = self.get_docstring(nodelist, symbol.file_input)
+        if doc is not None:
+            i = 1
+        else:
+            i = 0
+        stmts = []
+        for node in nodelist[i:]:
+            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
+                self.com_append_stmt(stmts, node)
+        return Module(doc, Stmt(stmts))
+
+    def eval_input(self, nodelist):
+        # from the built-in function input()
+        ### is this sufficient?
+        return Expression(self.com_node(nodelist[0]))
+
+    def funcdef(self, nodelist):
+        # funcdef: 'def' NAME parameters ':' suite
+        # parameters: '(' [varargslist] ')'
+
+        lineno = nodelist[1][2]
+        name = nodelist[1][1]
+        args = nodelist[2][2]
+
+        if args[0] == symbol.varargslist:
+            names, defaults, flags = self.com_arglist(args[1:])
+        else:
+            names = defaults = ()
+            flags = 0
+        doc = self.get_docstring(nodelist[4])
+
+        # code for function
+        code = self.com_node(nodelist[4])
+
+        if doc is not None:
+            assert isinstance(code, Stmt)
+            assert isinstance(code.nodes[0], Discard)
+            del code.nodes[0]
+        n = Function(name, names, defaults, flags, doc, code)
+        n.lineno = lineno
+        return n
+
+    def lambdef(self, nodelist):
+        # lambdef: 'lambda' [varargslist] ':' test
+        if nodelist[2][0] == symbol.varargslist:
+            names, defaults, flags = self.com_arglist(nodelist[2][1:])
+        else:
+            names = defaults = ()
+            flags = 0
+
+        # code for lambda
+        code = self.com_node(nodelist[-1])
+
+        n = Lambda(names, defaults, flags, code)
+        n.lineno = nodelist[1][2]
+        return n
+
+    def classdef(self, nodelist):
+        # classdef: 'class' NAME ['(' testlist ')'] ':' suite
+
+        name = nodelist[1][1]
+        doc = self.get_docstring(nodelist[-1])
+        if nodelist[2][0] == token.COLON:
+            bases = []
+        else:
+            bases = self.com_bases(nodelist[3])
+
+        # code for class
+        code = self.com_node(nodelist[-1])
+
+        if doc is not None:
+            assert isinstance(code, Stmt)
+            assert isinstance(code.nodes[0], Discard)
+            del code.nodes[0]
+
+        n = Class(name, bases, doc, code)
+        n.lineno = nodelist[1][2]
+        return n
+
+    def stmt(self, nodelist):
+        return self.com_stmt(nodelist[0])
+
+    small_stmt = stmt
+    flow_stmt = stmt
+    compound_stmt = stmt
+
+    def simple_stmt(self, nodelist):
+        # small_stmt (';' small_stmt)* [';'] NEWLINE
+        stmts = []
+        for i in range(0, len(nodelist), 2):
+            self.com_append_stmt(stmts, nodelist[i])
+        return Stmt(stmts)
+
+    def parameters(self, nodelist):
+        raise error
+
+    def varargslist(self, nodelist):
+        raise error
+
+    def fpdef(self, nodelist):
+        raise error
+
+    def fplist(self, nodelist):
+        raise error
+
+    def dotted_name(self, nodelist):
+        raise error
+
+    def comp_op(self, nodelist):
+        raise error
+
+    def trailer(self, nodelist):
+        raise error
+
+    def sliceop(self, nodelist):
+        raise error
+
+    def argument(self, nodelist):
+        raise error
+
+    # --------------------------------------------------------------
+    #
+    # STATEMENT NODES  (invoked by com_node())
+    #
+
+    def expr_stmt(self, nodelist):
+        # augassign testlist | testlist ('=' testlist)*
+        en = nodelist[-1]
+        exprNode = self.lookup_node(en)(en[1:])
+        if len(nodelist) == 1:
+            n = Discard(exprNode)
+            n.lineno = exprNode.lineno
+            return n
+        if nodelist[1][0] == token.EQUAL:
+            nodes = []
+            for i in range(0, len(nodelist) - 2, 2):
+                nodes.append(self.com_assign(nodelist[i], OP_ASSIGN))
+            n = Assign(nodes, exprNode)
+            n.lineno = nodelist[1][2]
+        else:
+            lval = self.com_augassign(nodelist[0])
+            op = self.com_augassign_op(nodelist[1])
+            n = AugAssign(lval, op[1], exprNode)
+            n.lineno = op[2]
+        return n
+
+    def print_stmt(self, nodelist):
+        # print ([ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ])
+        items = []
+        if len(nodelist) == 1:
+            start = 1
+            dest = None
+        elif nodelist[1][0] == token.RIGHTSHIFT:
+            assert len(nodelist) == 3 \
+                   or nodelist[3][0] == token.COMMA
+            dest = self.com_node(nodelist[2])
+            start = 4
+        else:
+            dest = None
+            start = 1
+        for i in range(start, len(nodelist), 2):
+            items.append(self.com_node(nodelist[i]))
+        if nodelist[-1][0] == token.COMMA:
+            n = Print(items, dest)
+            n.lineno = nodelist[0][2]
+            return n
+        n = Printnl(items, dest)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def del_stmt(self, nodelist):
+        return self.com_assign(nodelist[1], OP_DELETE)
+
+    def pass_stmt(self, nodelist):
+        n = Pass()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def break_stmt(self, nodelist):
+        n = Break()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def continue_stmt(self, nodelist):
+        n = Continue()
+        n.lineno = nodelist[0][2]
+        return n
+
+    def return_stmt(self, nodelist):
+        # return: [testlist]
+        if len(nodelist) < 2:
+            n = Return(Const(None))
+            n.lineno = nodelist[0][2]
+            return n
+        n = Return(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def yield_stmt(self, nodelist):
+        n = Yield(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def raise_stmt(self, nodelist):
+        # raise: [test [',' test [',' test]]]
+        if len(nodelist) > 5:
+            expr3 = self.com_node(nodelist[5])
+        else:
+            expr3 = None
+        if len(nodelist) > 3:
+            expr2 = self.com_node(nodelist[3])
+        else:
+            expr2 = None
+        if len(nodelist) > 1:
+            expr1 = self.com_node(nodelist[1])
+        else:
+            expr1 = None
+        n = Raise(expr1, expr2, expr3)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def import_stmt(self, nodelist):
+        # import_stmt: 'import' dotted_as_name (',' dotted_as_name)* |
+        # from: 'from' dotted_name 'import'
+        #                        ('*' | import_as_name (',' import_as_name)*)
+        if nodelist[0][1] == 'from':
+            names = []
+            if nodelist[3][0] == token.NAME:
+                for i in range(3, len(nodelist), 2):
+                    names.append((nodelist[i][1], None))
+            else:
+                for i in range(3, len(nodelist), 2):
+                    names.append(self.com_import_as_name(nodelist[i]))
+            n = From(self.com_dotted_name(nodelist[1]), names)
+            n.lineno = nodelist[0][2]
+            return n
+
+        if nodelist[1][0] == symbol.dotted_name:
+            names = [(self.com_dotted_name(nodelist[1][1:]), None)]
+        else:
+            names = []
+            for i in range(1, len(nodelist), 2):
+                names.append(self.com_dotted_as_name(nodelist[i]))
+        n = Import(names)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def global_stmt(self, nodelist):
+        # global: NAME (',' NAME)*
+        names = []
+        for i in range(1, len(nodelist), 2):
+            names.append(nodelist[i][1])
+        n = Global(names)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def exec_stmt(self, nodelist):
+        # exec_stmt: 'exec' expr ['in' expr [',' expr]]
+        expr1 = self.com_node(nodelist[1])
+        if len(nodelist) >= 4:
+            expr2 = self.com_node(nodelist[3])
+            if len(nodelist) >= 6:
+                expr3 = self.com_node(nodelist[5])
+            else:
+                expr3 = None
+        else:
+            expr2 = expr3 = None
+
+        n = Exec(expr1, expr2, expr3)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def assert_stmt(self, nodelist):
+        # 'assert': test, [',' test]
+        expr1 = self.com_node(nodelist[1])
+        if (len(nodelist) == 4):
+            expr2 = self.com_node(nodelist[3])
+        else:
+            expr2 = None
+        n = Assert(expr1, expr2)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def if_stmt(self, nodelist):
+        # if: test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        tests = []
+        for i in range(0, len(nodelist) - 3, 4):
+            testNode = self.com_node(nodelist[i + 1])
+            suiteNode = self.com_node(nodelist[i + 3])
+            tests.append((testNode, suiteNode))
+
+        if len(nodelist) % 4 == 3:
+            elseNode = self.com_node(nodelist[-1])
+##      elseNode.lineno = nodelist[-1][1][2]
+        else:
+            elseNode = None
+        n = If(tests, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def while_stmt(self, nodelist):
+        # 'while' test ':' suite ['else' ':' suite]
+
+        testNode = self.com_node(nodelist[1])
+        bodyNode = self.com_node(nodelist[3])
+
+        if len(nodelist) > 4:
+            elseNode = self.com_node(nodelist[6])
+        else:
+            elseNode = None
+
+        n = While(testNode, bodyNode, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def for_stmt(self, nodelist):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+
+        assignNode = self.com_assign(nodelist[1], OP_ASSIGN)
+        listNode = self.com_node(nodelist[3])
+        bodyNode = self.com_node(nodelist[5])
+
+        if len(nodelist) > 8:
+            elseNode = self.com_node(nodelist[8])
+        else:
+            elseNode = None
+
+        n = For(assignNode, listNode, bodyNode, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def try_stmt(self, nodelist):
+        # 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        # | 'try' ':' suite 'finally' ':' suite
+        if nodelist[3][0] != symbol.except_clause:
+            return self.com_try_finally(nodelist)
+
+        return self.com_try_except(nodelist)
+
+    def suite(self, nodelist):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if len(nodelist) == 1:
+            return self.com_stmt(nodelist[0])
+
+        stmts = []
+        for node in nodelist:
+            if node[0] == symbol.stmt:
+                self.com_append_stmt(stmts, node)
+        return Stmt(stmts)
+
+    # --------------------------------------------------------------
+    #
+    # EXPRESSION NODES  (invoked by com_node())
+    #
+
+    def testlist(self, nodelist):
+        # testlist: expr (',' expr)* [',']
+        # testlist_safe: test [(',' test)+ [',']]
+        # exprlist: expr (',' expr)* [',']
+        return self.com_binary(Tuple, nodelist)
+
+    testlist_safe = testlist # XXX
+    exprlist = testlist
+
+    def test(self, nodelist):
+        # and_test ('or' and_test)* | lambdef
+        if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
+            return self.lambdef(nodelist[0])
+        return self.com_binary(Or, nodelist)
+
+    def and_test(self, nodelist):
+        # not_test ('and' not_test)*
+        return self.com_binary(And, nodelist)
+
+    def not_test(self, nodelist):
+        # 'not' not_test | comparison
+        result = self.com_node(nodelist[-1])
+        if len(nodelist) == 2:
+            n = Not(result)
+            n.lineno = nodelist[0][2]
+            return n
+        return result
+
+    def comparison(self, nodelist):
+        # comparison: expr (comp_op expr)*
+        node = self.com_node(nodelist[0])
+        if len(nodelist) == 1:
+            return node
+
+        results = []
+        for i in range(2, len(nodelist), 2):
+            nl = nodelist[i-1]
+
+            # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
+            #          | 'in' | 'not' 'in' | 'is' | 'is' 'not'
+            n = nl[1]
+            if n[0] == token.NAME:
+                type = n[1]
+                if len(nl) == 3:
+                    if type == 'not':
+                        type = 'not in'
+                    else:
+                        type = 'is not'
+            else:
+                type = _cmp_types[n[0]]
+
+            lineno = nl[1][2]
+            results.append((type, self.com_node(nodelist[i])))
+
+        # we need a special "compare" node so that we can distinguish
+        #   3 < x < 5   from    (3 < x) < 5
+        # the two have very different semantics and results (note that the
+        # latter form is always true)
+
+        n = Compare(node, results)
+        n.lineno = lineno
+        return n
+
+    def expr(self, nodelist):
+        # xor_expr ('|' xor_expr)*
+        return self.com_binary(Bitor, nodelist)
+
+    def xor_expr(self, nodelist):
+        # and_expr ('^' and_expr)*
+        return self.com_binary(Bitxor, nodelist)
+
+    def and_expr(self, nodelist):
+        # shift_expr ('&' shift_expr)*
+        return self.com_binary(Bitand, nodelist)
+
+    def shift_expr(self, nodelist):
+        # arith_expr (('<<'|'>>') arith_expr)*
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            if nodelist[i-1][0] == token.LEFTSHIFT:
+                node = LeftShift([node, right])
+                node.lineno = nodelist[1][2]
+            elif nodelist[i-1][0] == token.RIGHTSHIFT:
+                node = RightShift([node, right])
+                node.lineno = nodelist[1][2]
+            else:
+                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
+        return node
+
+    def arith_expr(self, nodelist):
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            if nodelist[i-1][0] == token.PLUS:
+                node = Add([node, right])
+                node.lineno = nodelist[1][2]
+            elif nodelist[i-1][0] == token.MINUS:
+                node = Sub([node, right])
+                node.lineno = nodelist[1][2]
+            else:
+                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
+        return node
+
+    def term(self, nodelist):
+        node = self.com_node(nodelist[0])
+        for i in range(2, len(nodelist), 2):
+            right = self.com_node(nodelist[i])
+            t = nodelist[i-1][0]
+            if t == token.STAR:
+                node = Mul([node, right])
+            elif t == token.SLASH:
+                node = Div([node, right])
+            elif t == token.PERCENT:
+                node = Mod([node, right])
+            elif t == token.DOUBLESLASH:
+                node = FloorDiv([node, right])
+            else:
+                raise ValueError, "unexpected token: %s" % t
+            node.lineno = nodelist[1][2]
+        return node
+
+    def factor(self, nodelist):
+        elt = nodelist[0]
+        t = elt[0]
+        node = self.com_node(nodelist[-1])
+        if t == token.PLUS:
+            node = UnaryAdd(node)
+            node.lineno = elt[2]
+        elif t == token.MINUS:
+            node = UnarySub(node)
+            node.lineno = elt[2]
+        elif t == token.TILDE:
+            node = Invert(node)
+            node.lineno = elt[2]
+        return node
+
+    def power(self, nodelist):
+        # power: atom trailer* ('**' factor)*
+        node = self.com_node(nodelist[0])
+        for i in range(1, len(nodelist)):
+            elt = nodelist[i]
+            if elt[0] == token.DOUBLESTAR:
+                n = Power([node, self.com_node(nodelist[i+1])])
+                n.lineno = elt[2]
+                return n
+
+            node = self.com_apply_trailer(node, elt)
+
+        return node
+
+    def atom(self, nodelist):
+        n = self._atom_dispatch[nodelist[0][0]](nodelist)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_lpar(self, nodelist):
+        if nodelist[1][0] == token.RPAR:
+            n = Tuple(())
+            n.lineno = nodelist[0][2]
+            return n
+        return self.com_node(nodelist[1])
+
+    def atom_lsqb(self, nodelist):
+        if nodelist[1][0] == token.RSQB:
+            n = List(())
+            n.lineno = nodelist[0][2]
+            return n
+        return self.com_list_constructor(nodelist[1])
+
+    def atom_lbrace(self, nodelist):
+        if nodelist[1][0] == token.RBRACE:
+            return Dict(())
+        return self.com_dictmaker(nodelist[1])
+
+    def atom_backquote(self, nodelist):
+        n = Backquote(self.com_node(nodelist[1]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_number(self, nodelist):
+        ### need to verify this matches compile.c
+        k = eval(nodelist[0][1])
+        n = Const(k)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_string(self, nodelist):
+        ### need to verify this matches compile.c
+        k = ''
+        for node in nodelist:
+            k = k + eval(node[1])
+        n = Const(k)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def atom_name(self, nodelist):
+        ### any processing to do?
+        n = Name(nodelist[0][1])
+        n.lineno = nodelist[0][2]
+        return n
+
+    # --------------------------------------------------------------
+    #
+    # INTERNAL PARSING UTILITIES
+    #
+
+    # The use of com_node() introduces a lot of extra stack frames,
+    # enough to cause a stack overflow compiling test.test_parser with
+    # the standard interpreter recursionlimit.  The com_node() is a
+    # convenience function that hides the dispatch details, but comes
+    # at a very high cost.  It is more efficient to dispatch directly
+    # in the callers.  In these cases, use lookup_node() and call the
+    # dispatched node directly.
+
+    def lookup_node(self, node):
+        return self._dispatch[node[0]]
+
+    def com_node(self, node):
+        # Note: compile.c has handling in com_node for del_stmt, pass_stmt,
+        #       break_stmt, stmt, small_stmt, flow_stmt, simple_stmt,
+        #       and compound_stmt.
+        #       We'll just dispatch them.
+        return self._dispatch[node[0]](node[1:])
+
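+    # Illustrative pattern for the direct dispatch described above:
+    # instead of self.com_node(n), a hot caller can write
+    #
+    #     handler = self.lookup_node(n)
+    #     result = handler(n[1:])
+    #
+    # which saves the extra com_node stack frame on each node.
+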
+    def com_NEWLINE(self, *args):
+        # A ';' at the end of a line can make a NEWLINE token appear
+        # here.  Render it harmless.  (genc discards ('discard',
+        # ('const', xxxx)) Nodes)
+        return Discard(Const(None))
+
+    def com_arglist(self, nodelist):
+        # varargslist:
+        #     (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
+        #   | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+        # fpdef: NAME | '(' fplist ')'
+        # fplist: fpdef (',' fpdef)* [',']
+        names = []
+        defaults = []
+        flags = 0
+
+        i = 0
+        while i < len(nodelist):
+            node = nodelist[i]
+            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
+                if node[0] == token.STAR:
+                    node = nodelist[i+1]
+                    if node[0] == token.NAME:
+                        names.append(node[1])
+                        flags = flags | CO_VARARGS
+                        i = i + 3
+
+                if i < len(nodelist):
+                    # should be DOUBLESTAR
+                    t = nodelist[i][0]
+                    if t == token.DOUBLESTAR:
+                        node = nodelist[i+1]
+                    else:
+                        raise ValueError, "unexpected token: %s" % t
+                    names.append(node[1])
+                    flags = flags | CO_VARKEYWORDS
+
+                break
+
+            # fpdef: NAME | '(' fplist ')'
+            names.append(self.com_fpdef(node))
+
+            i = i + 1
+            if i >= len(nodelist):
+                break
+
+            if nodelist[i][0] == token.EQUAL:
+                defaults.append(self.com_node(nodelist[i + 1]))
+                i = i + 2
+            elif len(defaults):
+                # Treat "(a=1, b)" as "(a=1, b=None)"
+                defaults.append(Const(None))
+
+            i = i + 1
+
+        return names, defaults, flags
+
+    def com_fpdef(self, node):
+        # fpdef: NAME | '(' fplist ')'
+        if node[1][0] == token.LPAR:
+            return self.com_fplist(node[2])
+        return node[1][1]
+
+    def com_fplist(self, node):
+        # fplist: fpdef (',' fpdef)* [',']
+        if len(node) == 2:
+            return self.com_fpdef(node[1])
+        list = []
+        for i in range(1, len(node), 2):
+            list.append(self.com_fpdef(node[i]))
+        return tuple(list)
+
+    def com_dotted_name(self, node):
+        # String together the dotted names and return the string
+        name = ""
+        for n in node:
+            if type(n) == type(()) and n[0] == 1:
+                name = name + n[1] + '.'
+        return name[:-1]
+
+    def com_dotted_as_name(self, node):
+        dot = self.com_dotted_name(node[1])
+        if len(node) <= 2:
+            return dot, None
+        if node[0] == symbol.dotted_name:
+            pass
+        else:
+            assert node[2][1] == 'as'
+            assert node[3][0] == token.NAME
+            return dot, node[3][1]
+
+    def com_import_as_name(self, node):
+        if node[0] == token.STAR:
+            return '*', None
+        assert node[0] == symbol.import_as_name
+        node = node[1:]
+        if len(node) == 1:
+            assert node[0][0] == token.NAME
+            return node[0][1], None
+
+        assert node[1][1] == 'as', node
+        assert node[2][0] == token.NAME
+        return node[0][1], node[2][1]
+
+    def com_bases(self, node):
+        bases = []
+        for i in range(1, len(node), 2):
+            bases.append(self.com_node(node[i]))
+        return bases
+
+    def com_try_finally(self, nodelist):
+        # try_fin_stmt: "try" ":" suite "finally" ":" suite
+        n = TryFinally(self.com_node(nodelist[2]),
+                       self.com_node(nodelist[5]))
+        n.lineno = nodelist[0][2]
+        return n
+
+    def com_try_except(self, nodelist):
+        # try_except: 'try' ':' suite (except_clause ':' suite)* ['else' suite]
+        # tryexcept: [TryNode, [except_clauses], elseNode]
+        stmt = self.com_node(nodelist[2])
+        clauses = []
+        elseNode = None
+        for i in range(3, len(nodelist), 3):
+            node = nodelist[i]
+            if node[0] == symbol.except_clause:
+                # except_clause: 'except' [expr [',' expr]] */
+                if len(node) > 2:
+                    expr1 = self.com_node(node[2])
+                    if len(node) > 4:
+                        expr2 = self.com_assign(node[4], OP_ASSIGN)
+                    else:
+                        expr2 = None
+                else:
+                    expr1 = expr2 = None
+                clauses.append((expr1, expr2, self.com_node(nodelist[i+2])))
+
+            if node[0] == token.NAME:
+                elseNode = self.com_node(nodelist[i+2])
+        n = TryExcept(self.com_node(nodelist[2]), clauses, elseNode)
+        n.lineno = nodelist[0][2]
+        return n
+
+    def com_augassign_op(self, node):
+        assert node[0] == symbol.augassign
+        return node[1]
+
+    def com_augassign(self, node):
+        """Return node suitable for lvalue of augmented assignment
+
+        Names, slices, and attributes are the only allowable nodes.
+        """
+        l = self.com_node(node)
+        if l.__class__ in (Name, Slice, Subscript, Getattr):
+            return l
+        raise SyntaxError, "can't assign to %s" % l.__class__.__name__
+
+    def com_assign(self, node, assigning):
+        # return a node suitable for use as an "lvalue"
+        # loop to avoid trivial recursion
+        while 1:
+            t = node[0]
+            if t == symbol.exprlist or t == symbol.testlist:
+                if len(node) > 2:
+                    return self.com_assign_tuple(node, assigning)
+                node = node[1]
+            elif t in _assign_types:
+                if len(node) > 2:
+                    raise SyntaxError, "can't assign to operator"
+                node = node[1]
+            elif t == symbol.power:
+                if node[1][0] != symbol.atom:
+                    raise SyntaxError, "can't assign to operator"
+                if len(node) > 2:
+                    primary = self.com_node(node[1])
+                    for i in range(2, len(node)-1):
+                        ch = node[i]
+                        if ch[0] == token.DOUBLESTAR:
+                            raise SyntaxError, "can't assign to operator"
+                        primary = self.com_apply_trailer(primary, ch)
+                    return self.com_assign_trailer(primary, node[-1],
+                                                   assigning)
+                node = node[1]
+            elif t == symbol.atom:
+                t = node[1][0]
+                if t == token.LPAR:
+                    node = node[2]
+                    if node[0] == token.RPAR:
+                        raise SyntaxError, "can't assign to ()"
+                elif t == token.LSQB:
+                    node = node[2]
+                    if node[0] == token.RSQB:
+                        raise SyntaxError, "can't assign to []"
+                    return self.com_assign_list(node, assigning)
+                elif t == token.NAME:
+                    return self.com_assign_name(node[1], assigning)
+                else:
+                    raise SyntaxError, "can't assign to literal"
+            else:
+                raise SyntaxError, "bad assignment"
+
+    def com_assign_tuple(self, node, assigning):
+        assigns = []
+        for i in range(1, len(node), 2):
+            assigns.append(self.com_assign(node[i], assigning))
+        return AssTuple(assigns)
+
+    def com_assign_list(self, node, assigning):
+        assigns = []
+        for i in range(1, len(node), 2):
+            if i + 1 < len(node):
+                if node[i + 1][0] == symbol.list_for:
+                    raise SyntaxError, "can't assign to list comprehension"
+                assert node[i + 1][0] == token.COMMA, node[i + 1]
+            assigns.append(self.com_assign(node[i], assigning))
+        return AssList(assigns)
+
+    def com_assign_name(self, node, assigning):
+        n = AssName(node[1], assigning)
+        n.lineno = node[2]
+        return n
+
+    def com_assign_trailer(self, primary, node, assigning):
+        t = node[1][0]
+        if t == token.DOT:
+            return self.com_assign_attr(primary, node[2], assigning)
+        if t == token.LSQB:
+            return self.com_subscriptlist(primary, node[2], assigning)
+        if t == token.LPAR:
+            raise SyntaxError, "can't assign to function call"
+        raise SyntaxError, "unknown trailer type: %s" % t
+
+    def com_assign_attr(self, primary, node, assigning):
+        return AssAttr(primary, node[1], assigning)
+
+    def com_binary(self, constructor, nodelist):
+        "Compile 'NODE (OP NODE)*' into (type, [ node1, ..., nodeN ])."
+        l = len(nodelist)
+        if l == 1:
+            n = nodelist[0]
+            return self.lookup_node(n)(n[1:])
+        items = []
+        for i in range(0, l, 2):
+            n = nodelist[i]
+            items.append(self.lookup_node(n)(n[1:]))
+        return constructor(items)
+
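+    # For example (illustrative): for the source "a or b or c", test()
+    # reaches com_binary(Or, ...) and the result is a single
+    # Or([Name('a'), Name('b'), Name('c')]) node rather than a nested
+    # pair of Or nodes.
+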
+    def com_stmt(self, node):
+        result = self.lookup_node(node)(node[1:])
+        assert result is not None
+        if isinstance(result, Stmt):
+            return result
+        return Stmt([result])
+
+    def com_append_stmt(self, stmts, node):
+        result = self.com_node(node)
+        assert result is not None
+        if isinstance(result, Stmt):
+            stmts.extend(result.nodes)
+        else:
+            stmts.append(result)
+
+    if hasattr(symbol, 'list_for'):
+        def com_list_constructor(self, nodelist):
+            # listmaker: test ( list_for | (',' test)* [','] )
+            values = []
+            for i in range(1, len(nodelist)):
+                if nodelist[i][0] == symbol.list_for:
+                    assert len(nodelist[i:]) == 1
+                    return self.com_list_comprehension(values[0],
+                                                       nodelist[i])
+                elif nodelist[i][0] == token.COMMA:
+                    continue
+                values.append(self.com_node(nodelist[i]))
+            return List(values)
+
+        def com_list_comprehension(self, expr, node):
+            # list_iter: list_for | list_if
+            # list_for: 'for' exprlist 'in' testlist [list_iter]
+            # list_if: 'if' test [list_iter]
+
+            # XXX should raise SyntaxError for assignment
+
+            lineno = node[1][2]
+            fors = []
+            while node:
+                t = node[1][1]
+                if t == 'for':
+                    assignNode = self.com_assign(node[2], OP_ASSIGN)
+                    listNode = self.com_node(node[4])
+                    newfor = ListCompFor(assignNode, listNode, [])
+                    newfor.lineno = node[1][2]
+                    fors.append(newfor)
+                    if len(node) == 5:
+                        node = None
+                    else:
+                        node = self.com_list_iter(node[5])
+                elif t == 'if':
+                    test = self.com_node(node[2])
+                    newif = ListCompIf(test)
+                    newif.lineno = node[1][2]
+                    newfor.ifs.append(newif)
+                    if len(node) == 3:
+                        node = None
+                    else:
+                        node = self.com_list_iter(node[3])
+                else:
+                    raise SyntaxError, \
+                          ("unexpected list comprehension element: %s %d"
+                           % (node, lineno))
+            n = ListComp(expr, fors)
+            n.lineno = lineno
+            return n
+
+        def com_list_iter(self, node):
+            assert node[0] == symbol.list_iter
+            return node[1]
+    else:
+        def com_list_constructor(self, nodelist):
+            values = []
+            for i in range(1, len(nodelist), 2):
+                values.append(self.com_node(nodelist[i]))
+            return List(values)
+
+    def com_dictmaker(self, nodelist):
+        # dictmaker: test ':' test (',' test ':' test)* [',']
+        items = []
+        for i in range(1, len(nodelist), 4):
+            items.append((self.com_node(nodelist[i]),
+                          self.com_node(nodelist[i+2])))
+        return Dict(items)
+
+    def com_apply_trailer(self, primaryNode, nodelist):
+        t = nodelist[1][0]
+        if t == token.LPAR:
+            return self.com_call_function(primaryNode, nodelist[2])
+        if t == token.DOT:
+            return self.com_select_member(primaryNode, nodelist[2])
+        if t == token.LSQB:
+            return self.com_subscriptlist(primaryNode, nodelist[2], OP_APPLY)
+
+        raise SyntaxError, 'unknown node type: %s' % t
+
+    def com_select_member(self, primaryNode, nodelist):
+        if nodelist[0] != token.NAME:
+            raise SyntaxError, "member must be a name"
+        n = Getattr(primaryNode, nodelist[1])
+        n.lineno = nodelist[2]
+        return n
+
+    def com_call_function(self, primaryNode, nodelist):
+        if nodelist[0] == token.RPAR:
+            return CallFunc(primaryNode, [])
+        args = []
+        kw = 0
+        len_nodelist = len(nodelist)
+        for i in range(1, len_nodelist, 2):
+            node = nodelist[i]
+            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
+                break
+            kw, result = self.com_argument(node, kw)
+            args.append(result)
+        else:
+            # Not broken by a star arg, so skip past the last one we processed.
+            i = i + 1
+        if i < len_nodelist and nodelist[i][0] == token.COMMA:
+            # need to accept an application that looks like "f(a, b,)"
+            i = i + 1
+        star_node = dstar_node = None
+        while i < len_nodelist:
+            tok = nodelist[i]
+            ch = nodelist[i+1]
+            i = i + 3
+            if tok[0]==token.STAR:
+                if star_node is not None:
+                    raise SyntaxError, 'already have the varargs identifier'
+                star_node = self.com_node(ch)
+            elif tok[0]==token.DOUBLESTAR:
+                if dstar_node is not None:
+                    raise SyntaxError, 'already have the kwargs identifier'
+                dstar_node = self.com_node(ch)
+            else:
+                raise SyntaxError, 'unknown node type: %s' % tok
+
+        return CallFunc(primaryNode, args, star_node, dstar_node)
+
+    def com_argument(self, nodelist, kw):
+        if len(nodelist) == 2:
+            if kw:
+                raise SyntaxError, "non-keyword arg after keyword arg"
+            return 0, self.com_node(nodelist[1])
+        result = self.com_node(nodelist[3])
+        n = nodelist[1]
+        while len(n) == 2 and n[0] != token.NAME:
+            n = n[1]
+        if n[0] != token.NAME:
+            raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
+        node = Keyword(n[1], result)
+        node.lineno = n[2]
+        return 1, node
+
+    def com_subscriptlist(self, primary, nodelist, assigning):
+        # slicing:      simple_slicing | extended_slicing
+        # simple_slicing:   primary "[" short_slice "]"
+        # extended_slicing: primary "[" slice_list "]"
+        # slice_list:   slice_item ("," slice_item)* [","]
+
+        # backwards compat slice for '[i:j]'
+        if len(nodelist) == 2:
+            sub = nodelist[1]
+            if (sub[1][0] == token.COLON or \
+                            (len(sub) > 2 and sub[2][0] == token.COLON)) and \
+                            sub[-1][0] != symbol.sliceop:
+                return self.com_slice(primary, sub, assigning)
+
+        subscripts = []
+        for i in range(1, len(nodelist), 2):
+            subscripts.append(self.com_subscript(nodelist[i]))
+
+        return Subscript(primary, assigning, subscripts)
+
+    def com_subscript(self, node):
+        # slice_item: expression | proper_slice | ellipsis
+        ch = node[1]
+        t = ch[0]
+        if t == token.DOT and node[2][0] == token.DOT:
+            return Ellipsis()
+        if t == token.COLON or len(node) > 2:
+            return self.com_sliceobj(node)
+        return self.com_node(ch)
+
+    def com_sliceobj(self, node):
+        # proper_slice: short_slice | long_slice
+        # short_slice:  [lower_bound] ":" [upper_bound]
+        # long_slice:   short_slice ":" [stride]
+        # lower_bound:  expression
+        # upper_bound:  expression
+        # stride:       expression
+        #
+        # Note: a stride may be further slicing...
+
+        items = []
+
+        if node[1][0] == token.COLON:
+            items.append(Const(None))
+            i = 2
+        else:
+            items.append(self.com_node(node[1]))
+            # i == 2 is a COLON
+            i = 3
+
+        if i < len(node) and node[i][0] == symbol.test:
+            items.append(self.com_node(node[i]))
+            i = i + 1
+        else:
+            items.append(Const(None))
+
+        # a short_slice has been built. look for long_slice now by looking
+        # for strides...
+        for j in range(i, len(node)):
+            ch = node[j]
+            if len(ch) == 2:
+                items.append(Const(None))
+            else:
+                items.append(self.com_node(ch[2]))
+
+        return Sliceobj(items)
+
+    def com_slice(self, primary, node, assigning):
+        # short_slice:  [lower_bound] ":" [upper_bound]
+        lower = upper = None
+        if len(node) == 3:
+            if node[1][0] == token.COLON:
+                upper = self.com_node(node[2])
+            else:
+                lower = self.com_node(node[1])
+        elif len(node) == 4:
+            lower = self.com_node(node[1])
+            upper = self.com_node(node[3])
+        return Slice(primary, assigning, lower, upper)
+
+    def get_docstring(self, node, n=None):
+        if n is None:
+            n = node[0]
+            node = node[1:]
+        if n == symbol.suite:
+            if len(node) == 1:
+                return self.get_docstring(node[0])
+            for sub in node:
+                if sub[0] == symbol.stmt:
+                    return self.get_docstring(sub)
+            return None
+        if n == symbol.file_input:
+            for sub in node:
+                if sub[0] == symbol.stmt:
+                    return self.get_docstring(sub)
+            return None
+        if n == symbol.atom:
+            if node[0][0] == token.STRING:
+                s = ''
+                for t in node:
+                    s = s + eval(t[1])
+                return s
+            return None
+        if n == symbol.stmt or n == symbol.simple_stmt \
+           or n == symbol.small_stmt:
+            return self.get_docstring(node[0])
+        if n in _doc_nodes and len(node) == 1:
+            return self.get_docstring(node[0])
+        return None
+
+
+_doc_nodes = [
+    symbol.expr_stmt,
+    symbol.testlist,
+    symbol.testlist_safe,
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    symbol.power,
+    ]
+
+# comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
+#             | 'in' | 'not' 'in' | 'is' | 'is' 'not'
+_cmp_types = {
+    token.LESS : '<',
+    token.GREATER : '>',
+    token.EQEQUAL : '==',
+    token.EQUAL : '==',
+    token.LESSEQUAL : '<=',
+    token.GREATEREQUAL : '>=',
+    token.NOTEQUAL : '!=',
+    }
+
+_legal_node_types = [
+    symbol.funcdef,
+    symbol.classdef,
+    symbol.stmt,
+    symbol.small_stmt,
+    symbol.flow_stmt,
+    symbol.simple_stmt,
+    symbol.compound_stmt,
+    symbol.expr_stmt,
+    symbol.print_stmt,
+    symbol.del_stmt,
+    symbol.pass_stmt,
+    symbol.break_stmt,
+    symbol.continue_stmt,
+    symbol.return_stmt,
+    symbol.raise_stmt,
+    symbol.import_stmt,
+    symbol.global_stmt,
+    symbol.exec_stmt,
+    symbol.assert_stmt,
+    symbol.if_stmt,
+    symbol.while_stmt,
+    symbol.for_stmt,
+    symbol.try_stmt,
+    symbol.suite,
+    symbol.testlist,
+    symbol.testlist_safe,
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.exprlist,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    symbol.power,
+    symbol.atom,
+    ]
+
+if hasattr(symbol, 'yield_stmt'):
+    _legal_node_types.append(symbol.yield_stmt)
+
+_assign_types = [
+    symbol.test,
+    symbol.and_test,
+    symbol.not_test,
+    symbol.comparison,
+    symbol.expr,
+    symbol.xor_expr,
+    symbol.and_expr,
+    symbol.shift_expr,
+    symbol.arith_expr,
+    symbol.term,
+    symbol.factor,
+    ]
+
+import types
+_names = {}
+for k, v in symbol.sym_name.items():
+    _names[k] = v
+for k, v in token.tok_name.items():
+    _names[k] = v
+
+def debug_tree(tree):
+    l = []
+    for elt in tree:
+        if type(elt) == types.IntType:
+            l.append(_names.get(elt, elt))
+        elif type(elt) == types.StringType:
+            l.append(elt)
+        else:
+            l.append(debug_tree(elt))
+    return l
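
For reference, a minimal sketch (not part of the patch) showing how
debug_tree() can label the raw tuple tree produced by the parser
module; it assumes CPython's parser module and its ast2tuple() helper:

    import parser
    from compiler import transformer

    # parser.ast2tuple() flattens the parse tree into nested tuples of
    # symbol/token numbers and strings; debug_tree() maps the numbers
    # back to their names.
    st = parser.suite("x = 1\n")
    print transformer.debug_tree(parser.ast2tuple(st))
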
diff --git a/lib-python/2.2/compiler/visitor.py b/lib-python/2.2/compiler/visitor.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/compiler/visitor.py
@@ -0,0 +1,121 @@
+from compiler import ast
+
+# XXX should probably rename ASTVisitor to ASTWalker
+# XXX can it be made even more generic?
+
+class ASTVisitor:
+    """Performs a depth-first walk of the AST
+
+    The ASTVisitor will walk the AST, performing either a preorder or
+    postorder traversal depending on which method is called.
+
+    methods:
+    preorder(tree, visitor)
+    postorder(tree, visitor)
+        tree: an instance of ast.Node
+        visitor: an instance with visitXXX methods
+
+    The ASTVisitor is responsible for walking over the tree in the
+    correct order.  For each node, it checks the visitor argument for
+    a method named 'visitNodeType' where NodeType is the name of the
+    node's class, e.g. Class.  If the method exists, it is called
+    with the node as its sole argument.
+
+    The visitor method for a particular node type can control how
+    child nodes are visited during a preorder walk.  (It can't control
+    the order during a postorder walk, because it is called _after_
+    the walk has occurred.)  The ASTVisitor modifies the visitor
+    argument by adding a visit method to the visitor; this method can
+    be used to visit a particular child node.  If the visitor method
+    returns a true value, the ASTVisitor will not traverse the child
+    nodes.
+
+    XXX The interface for controlling the preorder walk needs to be
+    re-considered.  The current interface is convenient for visitors
+    that mostly let the ASTVisitor do everything.  For something like
+    a code generator, where you want the walk to occur in a specific
+    order, it's a pain to add "return 1" to the end of each method.
+    """
+
+    VERBOSE = 0
+
+    def __init__(self):
+        self.node = None
+        self._cache = {}
+
+    def default(self, node, *args):
+        for child in node.getChildNodes():
+            self.dispatch(child, *args)
+
+    def dispatch(self, node, *args):
+        self.node = node
+        klass = node.__class__
+        meth = self._cache.get(klass, None)
+        if meth is None:
+            className = klass.__name__
+            meth = getattr(self.visitor, 'visit' + className, self.default)
+            self._cache[klass] = meth
+##        if self.VERBOSE > 0:
+##            className = klass.__name__
+##            if self.VERBOSE == 1:
+##                if meth == 0:
+##                    print "dispatch", className
+##            else:
+##                print "dispatch", className, (meth and meth.__name__ or '')
+        return meth(node, *args)
+
+    def preorder(self, tree, visitor, *args):
+        """Do preorder walk of tree using visitor"""
+        self.visitor = visitor
+        visitor.visit = self.dispatch
+        self.dispatch(tree, *args) # XXX *args make sense?
+
+class ExampleASTVisitor(ASTVisitor):
+    """Prints examples of the nodes that aren't visited
+
+    This visitor-driver is only useful for development, when it's
+    helpful to develop a visitor incrementally, and get feedback on what
+    you still have to do.
+    """
+    examples = {}
+
+    def dispatch(self, node, *args):
+        self.node = node
+        meth = self._cache.get(node.__class__, None)
+        className = node.__class__.__name__
+        if meth is None:
+            meth = getattr(self.visitor, 'visit' + className, 0)
+            self._cache[node.__class__] = meth
+        if self.VERBOSE > 1:
+            print "dispatch", className, (meth and meth.__name__ or '')
+        if meth:
+            meth(node, *args)
+        elif self.VERBOSE > 0:
+            klass = node.__class__
+            if not self.examples.has_key(klass):
+                self.examples[klass] = klass
+                print
+                print self.visitor
+                print klass
+                for attr in dir(node):
+                    if attr[0] != '_':
+                        print "\t", "%-12.12s" % attr, getattr(node, attr)
+                print
+            return self.default(node, *args)
+
+# XXX this is an API change
+
+_walker = ASTVisitor
+def walk(tree, visitor, walker=None, verbose=None):
+    if walker is None:
+        walker = _walker()
+    if verbose is not None:
+        walker.VERBOSE = verbose
+    walker.preorder(tree, visitor)
+    return walker.visitor
+
+def dumpNode(node):
+    print node.__class__
+    for attr in dir(node):
+        if attr[0] != '_':
+            print "\t", "%-10.10s" % attr, getattr(node, attr)
diff --git a/lib-python/2.2/copy.py b/lib-python/2.2/copy.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/copy.py
@@ -0,0 +1,381 @@
+"""Generic (shallow and deep) copying operations.
+
+Interface summary:
+
+        import copy
+
+        x = copy.copy(y)        # make a shallow copy of y
+        x = copy.deepcopy(y)    # make a deep copy of y
+
+For module specific errors, copy.error is raised.
+
+The difference between shallow and deep copying is only relevant for
+compound objects (objects that contain other objects, like lists or
+class instances).
+
+- A shallow copy constructs a new compound object and then (to the
+  extent possible) inserts *the same objects* into it that the
+  original contains.
+
+- A deep copy constructs a new compound object and then, recursively,
+  inserts *copies* into it of the objects found in the original.
+
+Two problems often exist with deep copy operations that don't exist
+with shallow copy operations:
+
+ a) recursive objects (compound objects that, directly or indirectly,
+    contain a reference to themselves) may cause a recursive loop
+
+ b) because deep copy copies *everything* it may copy too much, e.g.
+    administrative data structures that should be shared even between
+    copies
+
+Python's deep copy operation avoids these problems by:
+
+ a) keeping a table of objects already copied during the current
+    copying pass
+
+ b) letting user-defined classes override the copying operation or the
+    set of components copied
+
+This version does not copy types like module, class, function, method,
+stack trace, stack frame, file, socket, window, array, or any
+similar types.
+
+Classes can use the same interfaces to control copying that they use
+to control pickling: they can define methods called __getinitargs__(),
+__getstate__() and __setstate__().  See the documentation for module
+"pickle" for information on these methods.
+"""
+
+# XXX need to support copy_reg here too...
+
+import types
+
+class Error(Exception):
+    pass
+error = Error   # backward compatibility
+
+try:
+    from org.python.core import PyStringMap
+except ImportError:
+    PyStringMap = None
+
+__all__ = ["Error", "error", "copy", "deepcopy"]
+
+def copy(x):
+    """Shallow copy operation on arbitrary Python objects.
+
+    See the module's __doc__ string for more info.
+    """
+
+    try:
+        copierfunction = _copy_dispatch[type(x)]
+    except KeyError:
+        try:
+            copier = x.__copy__
+        except AttributeError:
+            try:
+                reductor = x.__reduce__
+            except AttributeError:
+                raise error, \
+                      "un(shallow)copyable object of type %s" % type(x)
+            else:
+                y = _reconstruct(x, reductor(), 0)
+        else:
+            y = copier()
+    else:
+        y = copierfunction(x)
+    return y
+
+_copy_dispatch = d = {}
+
+def _copy_atomic(x):
+    return x
+d[types.NoneType] = _copy_atomic
+d[types.IntType] = _copy_atomic
+d[types.LongType] = _copy_atomic
+d[types.FloatType] = _copy_atomic
+try:
+    d[types.ComplexType] = _copy_atomic
+except AttributeError:
+    pass
+d[types.StringType] = _copy_atomic
+try:
+    d[types.UnicodeType] = _copy_atomic
+except AttributeError:
+    pass
+try:
+    d[types.CodeType] = _copy_atomic
+except AttributeError:
+    pass
+d[types.TypeType] = _copy_atomic
+d[types.XRangeType] = _copy_atomic
+d[types.ClassType] = _copy_atomic
+d[types.BuiltinFunctionType] = _copy_atomic
+
+def _copy_list(x):
+    return x[:]
+d[types.ListType] = _copy_list
+
+def _copy_tuple(x):
+    return x[:]
+d[types.TupleType] = _copy_tuple
+
+def _copy_dict(x):
+    return x.copy()
+d[types.DictionaryType] = _copy_dict
+if PyStringMap is not None:
+    d[PyStringMap] = _copy_dict
+
+def _copy_inst(x):
+    if hasattr(x, '__copy__'):
+        return x.__copy__()
+    if hasattr(x, '__getinitargs__'):
+        args = x.__getinitargs__()
+        y = apply(x.__class__, args)
+    else:
+        y = _EmptyClass()
+        y.__class__ = x.__class__
+    if hasattr(x, '__getstate__'):
+        state = x.__getstate__()
+    else:
+        state = x.__dict__
+    if hasattr(y, '__setstate__'):
+        y.__setstate__(state)
+    else:
+        y.__dict__.update(state)
+    return y
+d[types.InstanceType] = _copy_inst
+
+del d
+
+def deepcopy(x, memo = None):
+    """Deep copy operation on arbitrary Python objects.
+
+    See the module's __doc__ string for more info.
+    """
+
+    if memo is None:
+        memo = {}
+    d = id(x)
+    if memo.has_key(d):
+        return memo[d]
+    try:
+        copierfunction = _deepcopy_dispatch[type(x)]
+    except KeyError:
+        try:
+            issc = issubclass(type(x), type)
+        except TypeError:
+            issc = 0
+        if issc:
+            y = _deepcopy_dispatch[type](x, memo)
+        else:
+            try:
+                copier = x.__deepcopy__
+            except AttributeError:
+                try:
+                    reductor = x.__reduce__
+                except AttributeError:
+                    raise error, \
+                       "un-deep-copyable object of type %s" % type(x)
+                else:
+                    y = _reconstruct(x, reductor(), 1, memo)
+            else:
+                y = copier(memo)
+    else:
+        y = copierfunction(x, memo)
+    memo[d] = y
+    _keep_alive(x, memo) # Make sure x lives at least as long as d
+    return y
+
+_deepcopy_dispatch = d = {}
+
+def _deepcopy_atomic(x, memo):
+    return x
+d[types.NoneType] = _deepcopy_atomic
+d[types.IntType] = _deepcopy_atomic
+d[types.LongType] = _deepcopy_atomic
+d[types.FloatType] = _deepcopy_atomic
+try:
+    d[types.ComplexType] = _deepcopy_atomic
+except AttributeError:
+    pass
+d[types.StringType] = _deepcopy_atomic
+try:
+    d[types.UnicodeType] = _deepcopy_atomic
+except AttributeError:
+    pass
+try:
+    d[types.CodeType] = _deepcopy_atomic
+except AttributeError:
+    pass
+d[types.TypeType] = _deepcopy_atomic
+d[types.XRangeType] = _deepcopy_atomic
+d[types.ClassType] = _deepcopy_atomic
+d[types.BuiltinFunctionType] = _deepcopy_atomic
+
+def _deepcopy_list(x, memo):
+    y = []
+    memo[id(x)] = y
+    for a in x:
+        y.append(deepcopy(a, memo))
+    return y
+d[types.ListType] = _deepcopy_list
+
+def _deepcopy_tuple(x, memo):
+    y = []
+    for a in x:
+        y.append(deepcopy(a, memo))
+    d = id(x)
+    try:
+        return memo[d]
+    except KeyError:
+        pass
+    for i in range(len(x)):
+        if x[i] is not y[i]:
+            y = tuple(y)
+            break
+    else:
+        y = x
+    memo[d] = y
+    return y
+d[types.TupleType] = _deepcopy_tuple
+
+def _deepcopy_dict(x, memo):
+    y = {}
+    memo[id(x)] = y
+    for key in x.keys():
+        y[deepcopy(key, memo)] = deepcopy(x[key], memo)
+    return y
+d[types.DictionaryType] = _deepcopy_dict
+if PyStringMap is not None:
+    d[PyStringMap] = _deepcopy_dict
+
+def _keep_alive(x, memo):
+    """Keeps a reference to the object x in the memo.
+
+    Because we remember objects by their id, we have
+    to ensure that possibly temporary objects are kept
+    alive by referencing them.
+    We store a reference at the id of the memo, which should
+    normally not be used unless someone tries to deepcopy
+    the memo itself...
+    """
+    try:
+        memo[id(memo)].append(x)
+    except KeyError:
+        # aha, this is the first one :-)
+        memo[id(memo)]=[x]
+
+def _deepcopy_inst(x, memo):
+    if hasattr(x, '__deepcopy__'):
+        return x.__deepcopy__(memo)
+    if hasattr(x, '__getinitargs__'):
+        args = x.__getinitargs__()
+        args = deepcopy(args, memo)
+        y = apply(x.__class__, args)
+    else:
+        y = _EmptyClass()
+        y.__class__ = x.__class__
+    memo[id(x)] = y
+    if hasattr(x, '__getstate__'):
+        state = x.__getstate__()
+    else:
+        state = x.__dict__
+    state = deepcopy(state, memo)
+    if hasattr(y, '__setstate__'):
+        y.__setstate__(state)
+    else:
+        y.__dict__.update(state)
+    return y
+d[types.InstanceType] = _deepcopy_inst
+
+def _reconstruct(x, info, deep, memo=None):
+    if isinstance(info, str):
+        return x
+    assert isinstance(info, tuple)
+    if memo is None:
+        memo = {}
+    n = len(info)
+    assert n in (2, 3)
+    callable, args = info[:2]
+    if n > 2:
+        state = info[2]
+    else:
+        state = {}
+    if deep:
+        args = deepcopy(args, memo)
+    y = callable(*args)
+    if state:
+        if deep:
+            state = deepcopy(state, memo)
+        if hasattr(y, '__setstate__'):
+            y.__setstate__(state)
+        else:
+            y.__dict__.update(state)
+    return y
+
+del d
+
+del types
+
+# Helper for instance creation without calling __init__
+class _EmptyClass:
+    pass
+
+def _test():
+    l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
+         {'abc': 'ABC'}, (), [], {}]
+    l1 = copy(l)
+    print l1==l
+    l1 = map(copy, l)
+    print l1==l
+    l1 = deepcopy(l)
+    print l1==l
+    class C:
+        def __init__(self, arg=None):
+            self.a = 1
+            self.arg = arg
+            if __name__ == '__main__':
+                import sys
+                file = sys.argv[0]
+            else:
+                file = __file__
+            self.fp = open(file)
+            self.fp.close()
+        def __getstate__(self):
+            return {'a': self.a, 'arg': self.arg}
+        def __setstate__(self, state):
+            for key in state.keys():
+                setattr(self, key, state[key])
+        def __deepcopy__(self, memo = None):
+            new = self.__class__(deepcopy(self.arg, memo))
+            new.a = self.a
+            return new
+    c = C('argument sketch')
+    l.append(c)
+    l2 = copy(l)
+    print l == l2
+    print l
+    print l2
+    l2 = deepcopy(l)
+    print l == l2
+    print l
+    print l2
+    l.append({l[1]: l, 'xyz': l[2]})
+    l3 = copy(l)
+    import repr
+    print map(repr.repr, l)
+    print map(repr.repr, l1)
+    print map(repr.repr, l2)
+    print map(repr.repr, l3)
+    l3 = deepcopy(l)
+    import repr
+    print map(repr.repr, l)
+    print map(repr.repr, l1)
+    print map(repr.repr, l2)
+    print map(repr.repr, l3)
+
+if __name__ == '__main__':
+    _test()
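
A small sketch (not part of the patch) of the shallow/deep distinction
the module docstring describes:

    import copy

    a = [[1, 2], [3, 4]]
    b = copy.copy(a)        # new outer list, *same* inner lists
    c = copy.deepcopy(a)    # new outer list, *copied* inner lists
    a[0].append(99)
    print b[0]              # [1, 2, 99] -- inner list shared with a
    print c[0]              # [1, 2]     -- fully independent
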
diff --git a/lib-python/2.2/copy_reg.py b/lib-python/2.2/copy_reg.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/copy_reg.py
@@ -0,0 +1,78 @@
+"""Helper to provide extensibility for pickle/cPickle.
+
+This is only useful to add pickle support for extension types defined in
+C, not for instances of user-defined classes.
+"""
+
+from types import ClassType as _ClassType
+
+__all__ = ["pickle","constructor"]
+
+dispatch_table = {}
+safe_constructors = {}
+
+def pickle(ob_type, pickle_function, constructor_ob=None):
+    if type(ob_type) is _ClassType:
+        raise TypeError("copy_reg is not intended for use with classes")
+
+    if not callable(pickle_function):
+        raise TypeError("reduction functions must be callable")
+    dispatch_table[ob_type] = pickle_function
+
+    if constructor_ob is not None:
+        constructor(constructor_ob)
+
+def constructor(object):
+    if not callable(object):
+        raise TypeError("constructors must be callable")
+    safe_constructors[object] = 1
+
+# Example: provide pickling support for complex numbers.
+
+try:
+    complex
+except NameError:
+    pass
+else:
+
+    def pickle_complex(c):
+        return complex, (c.real, c.imag)
+
+    pickle(complex, pickle_complex, complex)
+
+# Support for pickling new-style objects
+
+def _reconstructor(cls, base, state):
+    obj = base.__new__(cls, state)
+    base.__init__(obj, state)
+    return obj
+_reconstructor.__safe_for_unpickling__ = 1
+
+_HEAPTYPE = 1<<9
+
+def _reduce(self):
+    for base in self.__class__.__mro__:
+        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
+            break
+    else:
+        base = object # not really reachable
+    if base is object:
+        state = None
+    else:
+        if base is self.__class__:
+            raise TypeError, "can't pickle %s objects" % base.__name__
+        state = base(self)
+    args = (self.__class__, base, state)
+    try:
+        getstate = self.__getstate__
+    except AttributeError:
+        try:
+            dict = self.__dict__
+        except AttributeError:
+            dict = None
+    else:
+        dict = getstate()
+    if dict:
+        return _reconstructor, args, dict
+    else:
+        return _reconstructor, args
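
Following the pickle_complex example above, a hedged sketch of how an
extension type would be registered; Point is hypothetical, standing in
for a type implemented in C:

    import copy_reg

    def pickle_point(p):
        # reduce a Point to a constructor plus its arguments
        return Point, (p.x, p.y)

    # the third argument also registers Point as a safe constructor
    copy_reg.pickle(Point, pickle_point, Point)
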
diff --git a/lib-python/2.2/curses/__init__.py b/lib-python/2.2/curses/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/__init__.py
@@ -0,0 +1,54 @@
+"""curses
+
+The main package for curses support for Python.  Normally used by importing
+the package, and perhaps a particular module inside it.
+
+   import curses
+   from curses import textpad
+   curses.initscr()
+   ...
+   
+"""
+
+__revision__ = "$Id$"
+
+from _curses import *
+from curses.wrapper import wrapper
+
+# Some constants, most notably the ACS_* ones, are only added to the C
+# _curses module's dictionary after initscr() is called.  (Some
+# versions of SGI's curses don't define values for those constants
+# until initscr() has been called.)  This wrapper function calls the
+# underlying C initscr(), and then copies the constants from the
+# _curses module to the curses package's dictionary.  Don't do 'from
+# curses import *' if you'll be needing the ACS_* constants.
+
+def initscr():
+    import _curses, curses
+    stdscr = _curses.initscr()
+    for key, value in _curses.__dict__.items():
+        if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
+            setattr(curses, key, value)
+    
+    return stdscr
+
+# This is a similar wrapper for start_color(), which adds the COLORS and
+# COLOR_PAIRS variables which are only available after start_color() is
+# called.
+ 
+def start_color():
+    import _curses, curses
+    retval = _curses.start_color()
+    if hasattr(_curses, 'COLORS'):
+        curses.COLORS = _curses.COLORS
+    if hasattr(_curses, 'COLOR_PAIRS'):
+        curses.COLOR_PAIRS = _curses.COLOR_PAIRS
+    return retval 
+
+# Import the Python has_key() implementation if _curses doesn't provide one
+
+try:
+    has_key
+except NameError:
+    from has_key import has_key
+
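
A short sketch (not part of the patch) of why the initscr() wrapper
above matters: the ACS_* names appear on the curses package only after
initscr() has run:

    import curses

    stdscr = curses.initscr()
    # curses.ACS_ULCORNER exists only now, after initscr()
    stdscr.addch(0, 0, curses.ACS_ULCORNER)
    stdscr.refresh()
    curses.endwin()
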
diff --git a/lib-python/2.2/curses/ascii.py b/lib-python/2.2/curses/ascii.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/ascii.py
@@ -0,0 +1,100 @@
+"""Constants and membership tests for ASCII characters"""
+
+NUL	= 0x00	# ^@
+SOH	= 0x01	# ^A
+STX	= 0x02	# ^B
+ETX	= 0x03	# ^C
+EOT	= 0x04	# ^D
+ENQ	= 0x05	# ^E
+ACK	= 0x06	# ^F
+BEL	= 0x07	# ^G
+BS	= 0x08	# ^H
+TAB	= 0x09	# ^I
+HT	= 0x09	# ^I
+LF	= 0x0a	# ^J
+NL	= 0x0a	# ^J
+VT	= 0x0b	# ^K
+FF	= 0x0c	# ^L
+CR	= 0x0d	# ^M
+SO	= 0x0e	# ^N
+SI	= 0x0f	# ^O
+DLE	= 0x10	# ^P
+DC1	= 0x11	# ^Q
+DC2	= 0x12	# ^R
+DC3	= 0x13	# ^S
+DC4	= 0x14	# ^T
+NAK	= 0x15	# ^U
+SYN	= 0x16	# ^V
+ETB	= 0x17	# ^W
+CAN	= 0x18	# ^X
+EM	= 0x19	# ^Y
+SUB	= 0x1a	# ^Z
+ESC	= 0x1b	# ^[
+FS	= 0x1c	# ^\
+GS	= 0x1d	# ^]
+RS	= 0x1e	# ^^
+US	= 0x1f	# ^_
+SP	= 0x20	# space
+DEL	= 0x7f	# delete
+
+controlnames = [
+"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
+"BS",  "HT",  "LF",  "VT",  "FF",  "CR",  "SO",  "SI",
+"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
+"CAN", "EM",  "SUB", "ESC", "FS",  "GS",  "RS",  "US",
+"SP"
+]
+
+def _ctoi(c):
+    if type(c) == type(""):
+        return ord(c)
+    else:
+        return c
+
+def isalnum(c): return isalpha(c) or isdigit(c)
+def isalpha(c): return isupper(c) or islower(c)
+def isascii(c): return _ctoi(c) <= 127		# ?
+def isblank(c): return _ctoi(c) in (9, 32)      # tab or space
+def iscntrl(c): return _ctoi(c) <= 31
+def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
+def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
+def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
+def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
+def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
+def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
+def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
+def isxdigit(c): return isdigit(c) or \
+    (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
+def isctrl(c): return _ctoi(c) < 32
+def ismeta(c): return _ctoi(c) > 127
+
+def ascii(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x7f)
+    else:
+        return _ctoi(c) & 0x7f
+
+def ctrl(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x1f)
+    else:
+        return _ctoi(c) & 0x1f
+
+def alt(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) | 0x80)
+    else:
+        return _ctoi(c) | 0x80
+
+def unctrl(c):
+    bits = _ctoi(c)
+    if bits == 0x7f:
+        rep = "^?"
+    elif bits & 0x20:
+        rep = chr((bits & 0x7f) | 0x20)
+    else:
+        rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
+    if bits & 0x80:
+        return "!" + rep
+    return rep
+
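
A few hedged examples (not from the patch) of the classification and
conversion helpers; each accepts a one-character string or an integer:

    from curses import ascii

    print ascii.isprint('a'), ascii.isprint(ascii.ESC)   # 1 0
    print ascii.ctrl('g') == chr(ascii.BEL)              # 1 (^G is BEL)
    print ascii.unctrl(ascii.BEL)                        # ^G
    print ascii.alt('a') == chr(0xe1)                    # 1 (bit 7 set)
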
diff --git a/lib-python/2.2/curses/has_key.py b/lib-python/2.2/curses/has_key.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/has_key.py
@@ -0,0 +1,189 @@
+
+#
+# Emulation of has_key() function for platforms that don't use ncurses
+#
+
+import _curses
+
+# Table mapping curses keys to the terminfo capability name
+
+_capability_names = {
+    _curses.KEY_A1: 'ka1', 
+    _curses.KEY_A3: 'ka3', 
+    _curses.KEY_B2: 'kb2', 
+    _curses.KEY_BACKSPACE: 'kbs', 
+    _curses.KEY_BEG: 'kbeg', 
+    _curses.KEY_BTAB: 'kcbt', 
+    _curses.KEY_C1: 'kc1', 
+    _curses.KEY_C3: 'kc3', 
+    _curses.KEY_CANCEL: 'kcan', 
+    _curses.KEY_CATAB: 'ktbc', 
+    _curses.KEY_CLEAR: 'kclr', 
+    _curses.KEY_CLOSE: 'kclo', 
+    _curses.KEY_COMMAND: 'kcmd', 
+    _curses.KEY_COPY: 'kcpy', 
+    _curses.KEY_CREATE: 'kcrt', 
+    _curses.KEY_CTAB: 'kctab', 
+    _curses.KEY_DC: 'kdch1', 
+    _curses.KEY_DL: 'kdl1', 
+    _curses.KEY_DOWN: 'kcud1', 
+    _curses.KEY_EIC: 'krmir', 
+    _curses.KEY_END: 'kend', 
+    _curses.KEY_ENTER: 'kent', 
+    _curses.KEY_EOL: 'kel', 
+    _curses.KEY_EOS: 'ked', 
+    _curses.KEY_EXIT: 'kext', 
+    _curses.KEY_F0: 'kf0', 
+    _curses.KEY_F1: 'kf1', 
+    _curses.KEY_F10: 'kf10', 
+    _curses.KEY_F11: 'kf11', 
+    _curses.KEY_F12: 'kf12', 
+    _curses.KEY_F13: 'kf13', 
+    _curses.KEY_F14: 'kf14', 
+    _curses.KEY_F15: 'kf15', 
+    _curses.KEY_F16: 'kf16', 
+    _curses.KEY_F17: 'kf17', 
+    _curses.KEY_F18: 'kf18', 
+    _curses.KEY_F19: 'kf19', 
+    _curses.KEY_F2: 'kf2', 
+    _curses.KEY_F20: 'kf20', 
+    _curses.KEY_F21: 'kf21', 
+    _curses.KEY_F22: 'kf22', 
+    _curses.KEY_F23: 'kf23', 
+    _curses.KEY_F24: 'kf24', 
+    _curses.KEY_F25: 'kf25', 
+    _curses.KEY_F26: 'kf26', 
+    _curses.KEY_F27: 'kf27', 
+    _curses.KEY_F28: 'kf28', 
+    _curses.KEY_F29: 'kf29', 
+    _curses.KEY_F3: 'kf3', 
+    _curses.KEY_F30: 'kf30', 
+    _curses.KEY_F31: 'kf31', 
+    _curses.KEY_F32: 'kf32', 
+    _curses.KEY_F33: 'kf33', 
+    _curses.KEY_F34: 'kf34', 
+    _curses.KEY_F35: 'kf35', 
+    _curses.KEY_F36: 'kf36', 
+    _curses.KEY_F37: 'kf37', 
+    _curses.KEY_F38: 'kf38', 
+    _curses.KEY_F39: 'kf39', 
+    _curses.KEY_F4: 'kf4', 
+    _curses.KEY_F40: 'kf40', 
+    _curses.KEY_F41: 'kf41', 
+    _curses.KEY_F42: 'kf42', 
+    _curses.KEY_F43: 'kf43', 
+    _curses.KEY_F44: 'kf44', 
+    _curses.KEY_F45: 'kf45', 
+    _curses.KEY_F46: 'kf46', 
+    _curses.KEY_F47: 'kf47', 
+    _curses.KEY_F48: 'kf48', 
+    _curses.KEY_F49: 'kf49', 
+    _curses.KEY_F5: 'kf5', 
+    _curses.KEY_F50: 'kf50', 
+    _curses.KEY_F51: 'kf51', 
+    _curses.KEY_F52: 'kf52', 
+    _curses.KEY_F53: 'kf53', 
+    _curses.KEY_F54: 'kf54', 
+    _curses.KEY_F55: 'kf55', 
+    _curses.KEY_F56: 'kf56', 
+    _curses.KEY_F57: 'kf57', 
+    _curses.KEY_F58: 'kf58', 
+    _curses.KEY_F59: 'kf59', 
+    _curses.KEY_F6: 'kf6', 
+    _curses.KEY_F60: 'kf60', 
+    _curses.KEY_F61: 'kf61', 
+    _curses.KEY_F62: 'kf62', 
+    _curses.KEY_F63: 'kf63', 
+    _curses.KEY_F7: 'kf7', 
+    _curses.KEY_F8: 'kf8', 
+    _curses.KEY_F9: 'kf9', 
+    _curses.KEY_FIND: 'kfnd', 
+    _curses.KEY_HELP: 'khlp', 
+    _curses.KEY_HOME: 'khome', 
+    _curses.KEY_IC: 'kich1', 
+    _curses.KEY_IL: 'kil1', 
+    _curses.KEY_LEFT: 'kcub1', 
+    _curses.KEY_LL: 'kll', 
+    _curses.KEY_MARK: 'kmrk', 
+    _curses.KEY_MESSAGE: 'kmsg', 
+    _curses.KEY_MOVE: 'kmov', 
+    _curses.KEY_NEXT: 'knxt', 
+    _curses.KEY_NPAGE: 'knp', 
+    _curses.KEY_OPEN: 'kopn', 
+    _curses.KEY_OPTIONS: 'kopt', 
+    _curses.KEY_PPAGE: 'kpp', 
+    _curses.KEY_PREVIOUS: 'kprv', 
+    _curses.KEY_PRINT: 'kprt', 
+    _curses.KEY_REDO: 'krdo', 
+    _curses.KEY_REFERENCE: 'kref', 
+    _curses.KEY_REFRESH: 'krfr', 
+    _curses.KEY_REPLACE: 'krpl', 
+    _curses.KEY_RESTART: 'krst', 
+    _curses.KEY_RESUME: 'kres', 
+    _curses.KEY_RIGHT: 'kcuf1', 
+    _curses.KEY_SAVE: 'ksav', 
+    _curses.KEY_SBEG: 'kBEG', 
+    _curses.KEY_SCANCEL: 'kCAN', 
+    _curses.KEY_SCOMMAND: 'kCMD', 
+    _curses.KEY_SCOPY: 'kCPY', 
+    _curses.KEY_SCREATE: 'kCRT', 
+    _curses.KEY_SDC: 'kDC', 
+    _curses.KEY_SDL: 'kDL', 
+    _curses.KEY_SELECT: 'kslt', 
+    _curses.KEY_SEND: 'kEND', 
+    _curses.KEY_SEOL: 'kEOL', 
+    _curses.KEY_SEXIT: 'kEXT', 
+    _curses.KEY_SF: 'kind', 
+    _curses.KEY_SFIND: 'kFND', 
+    _curses.KEY_SHELP: 'kHLP', 
+    _curses.KEY_SHOME: 'kHOM', 
+    _curses.KEY_SIC: 'kIC', 
+    _curses.KEY_SLEFT: 'kLFT', 
+    _curses.KEY_SMESSAGE: 'kMSG', 
+    _curses.KEY_SMOVE: 'kMOV', 
+    _curses.KEY_SNEXT: 'kNXT', 
+    _curses.KEY_SOPTIONS: 'kOPT', 
+    _curses.KEY_SPREVIOUS: 'kPRV', 
+    _curses.KEY_SPRINT: 'kPRT', 
+    _curses.KEY_SR: 'kri', 
+    _curses.KEY_SREDO: 'kRDO', 
+    _curses.KEY_SREPLACE: 'kRPL', 
+    _curses.KEY_SRIGHT: 'kRIT', 
+    _curses.KEY_SRSUME: 'kRES', 
+    _curses.KEY_SSAVE: 'kSAV', 
+    _curses.KEY_SSUSPEND: 'kSPD', 
+    _curses.KEY_STAB: 'khts', 
+    _curses.KEY_SUNDO: 'kUND', 
+    _curses.KEY_SUSPEND: 'kspd', 
+    _curses.KEY_UNDO: 'kund', 
+    _curses.KEY_UP: 'kcuu1'
+    }
+
+def has_key(ch):
+    if type(ch) == type( '' ): ch = ord(ch)
+
+    # Figure out the correct capability name for the keycode.
+    capability_name = _capability_names[ch]
+
+    # Check the current terminal description for that capability;
+    # if present, return true, else return false.
+    if _curses.tigetstr( capability_name ): return 1
+    else: return 0
+
+if __name__ == '__main__':
+    # Compare the output of this implementation and the ncurses has_key,
+    # on platforms where has_key is already available
+    try:
+        L = []
+        _curses.initscr()
+        for key in _capability_names.keys():
+            system = _curses.has_key(key)
+            python = has_key(key)
+            if system != python:
+                L.append( 'Mismatch for key %s, system=%i, Python=%i'
+                          % (_curses.keyname( key ), system, python) )
+    finally:
+        _curses.endwin()
+        for i in L: print i
+        
+    
diff --git a/lib-python/2.2/curses/panel.py b/lib-python/2.2/curses/panel.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/panel.py
@@ -0,0 +1,9 @@
+"""curses.panel
+
+Module for using panels with curses.
+"""
+
+__revision__ = "$Id$"
+
+from _curses_panel import *
+
diff --git a/lib-python/2.2/curses/textpad.py b/lib-python/2.2/curses/textpad.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/textpad.py
@@ -0,0 +1,167 @@
+"""Simple textbox editing widget with Emacs-like keybindings."""
+
+import sys, curses, ascii
+
+def rectangle(win, uly, ulx, lry, lrx):
+    "Draw a rectangle."
+    win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1)
+    win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
+    win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
+    win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1)
+    win.addch(uly, ulx, curses.ACS_ULCORNER)
+    win.addch(uly, lrx, curses.ACS_URCORNER)
+    win.addch(lry, lrx, curses.ACS_LRCORNER)
+    win.addch(lry, ulx, curses.ACS_LLCORNER)
+
+class Textbox:
+    """Editing widget using the interior of a window object.
+     Supports the following Emacs-like key bindings:
+
+    Ctrl-A      Go to left edge of window.
+    Ctrl-B      Cursor left, wrapping to previous line if appropriate.
+    Ctrl-D      Delete character under cursor.
+    Ctrl-E      Go to right edge (stripspaces off) or end of line (stripspaces on).
+    Ctrl-F      Cursor right, wrapping to next line when appropriate.
+    Ctrl-G      Terminate, returning the window contents.
+    Ctrl-H      Delete character backward.
+    Ctrl-J      Terminate if the window is 1 line, otherwise insert newline.
+    Ctrl-K      If line is blank, delete it, otherwise clear to end of line.
+    Ctrl-L      Refresh screen.
+    Ctrl-N      Cursor down; move down one line.
+    Ctrl-O      Insert a blank line at cursor location.
+    Ctrl-P      Cursor up; move up one line.
+
+    Move operations do nothing if the cursor is at an edge where the movement
+    is not possible.  The following synonyms are supported where possible:
+
+    KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
+    KEY_BACKSPACE = Ctrl-h
+    """
+    def __init__(self, win):
+        self.win = win
+        (self.maxy, self.maxx) = win.getmaxyx()
+        self.maxy = self.maxy - 1
+        self.maxx = self.maxx - 1
+        self.stripspaces = 1
+        self.lastcmd = None
+        win.keypad(1)
+
+    def _end_of_line(self, y):
+        "Go to the location of the first blank on the given line."
+        last = self.maxx
+        while 1:
+            if ascii.ascii(self.win.inch(y, last)) != ascii.SP:
+                last = last + 1
+                break
+            elif last == 0:
+                break
+            last = last - 1
+        return last
+
+    def do_command(self, ch):
+        "Process a single editing command."
+        (y, x) = self.win.getyx()
+        self.lastcmd = ch
+        if ascii.isprint(ch):
+            if y < self.maxy or x < self.maxx:
+                # The try/except ignores the error we trigger from some curses
+                # versions by trying to write into the lowest-rightmost spot
+                # in the window.
+                try:
+                    self.win.addch(ch)
+                except curses.error:
+                    pass
+        elif ch == ascii.SOH:				# ^a
+            self.win.move(y, 0)
+        elif ch in (ascii.STX,curses.KEY_LEFT, ascii.BS,curses.KEY_BACKSPACE):
+            if x > 0:
+                self.win.move(y, x-1)
+            elif y == 0:
+                pass
+            elif self.stripspaces:
+                self.win.move(y-1, self._end_of_line(y-1))
+            else:
+                self.win.move(y-1, self.maxx)
+            if ch in (ascii.BS, curses.KEY_BACKSPACE):
+                self.win.delch()
+        elif ch == ascii.EOT:				# ^d
+            self.win.delch()
+        elif ch == ascii.ENQ:				# ^e
+            if self.stripspaces:
+                self.win.move(y, self._end_of_line(y))
+            else:
+                self.win.move(y, self.maxx)
+        elif ch in (ascii.ACK, curses.KEY_RIGHT):	# ^f
+            if x < self.maxx:
+                self.win.move(y, x+1)
+            elif y == self.maxy:
+                pass
+            else:
+                self.win.move(y+1, 0)
+        elif ch == ascii.BEL:				# ^g
+            return 0
+        elif ch == ascii.NL:				# ^j
+            if self.maxy == 0:
+                return 0
+            elif y < self.maxy:
+                self.win.move(y+1, 0)
+        elif ch == ascii.VT:				# ^k
+            if x == 0 and self._end_of_line(y) == 0:
+                self.win.deleteln()
+            else:
+                self.win.clrtoeol()
+        elif ch == ascii.FF:				# ^l
+            self.win.refresh()
+        elif ch in (ascii.SO, curses.KEY_DOWN):		# ^n
+            if y < self.maxy:
+                self.win.move(y+1, x)
+                if x > self._end_of_line(y+1):
+                    self.win.move(y+1, self._end_of_line(y+1))
+        elif ch == ascii.SI:				# ^o
+            self.win.insertln()
+        elif ch in (ascii.DLE, curses.KEY_UP):		# ^p
+            if y > 0:
+                self.win.move(y-1, x)
+                if x > self._end_of_line(y-1):
+                    self.win.move(y-1, self._end_of_line(y-1))
+        return 1
+        
+    def gather(self):
+        "Collect and return the contents of the window."
+        result = ""
+        for y in range(self.maxy+1):
+            self.win.move(y, 0)
+            stop = self._end_of_line(y)
+            #sys.stderr.write("y=%d, _end_of_line(y)=%d\n" % (y, stop))
+            if stop == 0 and self.stripspaces:
+                continue
+            for x in range(self.maxx+1):
+                if self.stripspaces and x == stop:
+                    break
+                result = result + chr(ascii.ascii(self.win.inch(y, x)))
+            if self.maxy > 0:
+                result = result + "\n"
+        return result
+
+    def edit(self, validate=None):
+        "Edit in the widget window and collect the results."
+        while 1:
+            ch = self.win.getch()
+            if validate:
+                ch = validate(ch)
+            if not ch:
+                continue
+            if not self.do_command(ch):
+                break
+            self.win.refresh()
+        return self.gather()
+
+if __name__ == '__main__':
+    def test_editbox(stdscr):
+        win = curses.newwin(4, 9, 15, 20)
+        rectangle(stdscr, 14, 19, 19, 29)
+        stdscr.refresh()
+        return Textbox(win).edit()
+
+    str = curses.wrapper(test_editbox)
+    print str
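
The validate hook of edit() can remap keys before they are processed;
a hedged sketch (enter_is_terminate is illustrative, not from the
patch) that makes Enter finish editing by translating it to Ctrl-G:

    from curses import ascii

    def enter_is_terminate(ch):
        # do_command() treats ascii.BEL (^G) as "terminate"
        if ch == ascii.NL:
            return ascii.BEL
        return ch

    # inside a running curses application, with win a curses window:
    #     text = Textbox(win).edit(enter_is_terminate)
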
diff --git a/lib-python/2.2/curses/wrapper.py b/lib-python/2.2/curses/wrapper.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/curses/wrapper.py
@@ -0,0 +1,63 @@
+"""curses.wrapper
+
+Contains one function, wrapper(), which runs another function that
+should be the rest of your curses-based application.  If the
+application raises an exception, wrapper() will restore the terminal
+to a sane state so you can read the resulting traceback.
+
+"""
+
+import sys, curses
+
+def wrapper(func, *rest):
+    """Wrapper function that initializes curses and calls another function,
+    restoring normal keyboard/screen behavior on error.
+    The callable object 'func' is then passed the main window 'stdscr'
+    as its first argument, followed by any other arguments passed to
+    wrapper().
+    """
+    
+    res = None
+    try:
+	# Initialize curses
+        stdscr=curses.initscr()
+        
+	# Turn off echoing of keys, and enter cbreak mode,
+	# where no buffering is performed on keyboard input
+        curses.noecho()
+        curses.cbreak()
+
+	# In keypad mode, escape sequences for special keys
+	# (like the cursor keys) will be interpreted and
+	# a special value like curses.KEY_LEFT will be returned
+        stdscr.keypad(1)
+
+        # Start color, too.  Harmless if the terminal doesn't have
+        # color; user can test with has_colors() later on.  The try/except
+        # works around a minor bit of over-conscientiousness in the curses
+        # module -- the error return from C start_color() is ignorable.
+        try:
+            curses.start_color()
+        except:
+            pass
+
+        res = apply(func, (stdscr,) + rest)
+    except:
+	# In the event of an error, restore the terminal
+	# to a sane state.
+        stdscr.keypad(0)
+        curses.echo()
+        curses.nocbreak()
+        curses.endwin()
+        
+        # Pass the exception upwards
+        (exc_type, exc_value, exc_traceback) = sys.exc_info()
+        raise exc_type, exc_value, exc_traceback
+    else:
+	# Set everything back to normal
+        stdscr.keypad(0)
+        curses.echo()
+        curses.nocbreak()
+        curses.endwin()		 # Terminate curses
+
+        return res
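
A minimal usage sketch (not from the patch); arguments after the
callable are forwarded to it following stdscr:

    import curses

    def main(stdscr, greeting):
        stdscr.addstr(0, 0, greeting)
        stdscr.refresh()
        stdscr.getch()

    curses.wrapper(main, "hello, curses")
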
diff --git a/lib-python/2.2/dbhash.py b/lib-python/2.2/dbhash.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dbhash.py
@@ -0,0 +1,16 @@
+"""Provide a (g)dbm-compatible interface to bsdhash.hashopen."""
+
+import sys
+try:
+    import bsddb
+except ImportError:
+    # prevent a second import of this module from spuriously succeeding
+    del sys.modules[__name__]
+    raise
+
+__all__ = ["error","open"]
+
+error = bsddb.error                     # Exported for anydbm
+
+def open(file, flag = 'r', mode=0666):
+    return bsddb.hashopen(file, flag, mode)
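
A hedged sketch of the (g)dbm-style interface this provides; the
filename is illustrative and the bsddb module must be available:

    import dbhash

    db = dbhash.open('example.db', 'c')   # 'c' creates if missing
    db['greeting'] = 'hello'
    print db['greeting']
    db.close()
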
diff --git a/lib-python/2.2/difflib.py b/lib-python/2.2/difflib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/difflib.py
@@ -0,0 +1,1088 @@
+#! /usr/bin/env python
+
+from __future__ import generators
+
+"""
+Module difflib -- helpers for computing deltas between objects.
+
+Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
+    Use SequenceMatcher to return list of the best "good enough" matches.
+
+Function ndiff(a, b):
+    Return a delta: the difference between `a` and `b` (lists of strings).
+
+Function restore(delta, which):
+    Return one of the two sequences that generated an ndiff delta.
+
+Class SequenceMatcher:
+    A flexible class for comparing pairs of sequences of any type.
+
+Class Differ:
+    For producing human-readable deltas from sequences of lines of text.
+"""
+
+__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
+           'Differ', 'IS_CHARACTER_JUNK', 'IS_LINE_JUNK']
+
+def _calculate_ratio(matches, length):
+    if length:
+        return 2.0 * matches / length
+    return 1.0
+
+class SequenceMatcher:
+
+    """
+    SequenceMatcher is a flexible class for comparing pairs of sequences of
+    any type, so long as the sequence elements are hashable.  The basic
+    algorithm predates, and is a little fancier than, an algorithm
+    published in the late 1980's by Ratcliff and Obershelp under the
+    hyperbolic name "gestalt pattern matching".  The basic idea is to find
+    the longest contiguous matching subsequence that contains no "junk"
+    elements (R-O doesn't address junk).  The same idea is then applied
+    recursively to the pieces of the sequences to the left and to the right
+    of the matching subsequence.  This does not yield minimal edit
+    sequences, but does tend to yield matches that "look right" to people.
+
+    SequenceMatcher tries to compute a "human-friendly diff" between two
+    sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+    longest *contiguous* & junk-free matching subsequence.  That's what
+    catches people's eyes.  The Windows(tm) windiff has another interesting
+    notion, pairing up elements that appear uniquely in each sequence.
+    That, and the method here, appear to yield more intuitive difference
+    reports than does diff.  This method appears to be the least vulnerable
+    to synching up on blocks of "junk lines", though (like blank lines in
+    ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+    because this is the only method of the 3 that has a *concept* of
+    "junk" <wink>.
+
+    Example, comparing two strings, and considering blanks to be "junk":
+
+    >>> s = SequenceMatcher(lambda x: x == " ",
+    ...                     "private Thread currentThread;",
+    ...                     "private volatile Thread currentThread;")
+    >>>
+
+    .ratio() returns a float in [0, 1], measuring the "similarity" of the
+    sequences.  As a rule of thumb, a .ratio() value over 0.6 means the
+    sequences are close matches:
+
+    >>> print round(s.ratio(), 3)
+    0.866
+    >>>
+
+    If you're only interested in where the sequences match,
+    .get_matching_blocks() is handy:
+
+    >>> for block in s.get_matching_blocks():
+    ...     print "a[%d] and b[%d] match for %d elements" % block
+    a[0] and b[0] match for 8 elements
+    a[8] and b[17] match for 6 elements
+    a[14] and b[23] match for 15 elements
+    a[29] and b[38] match for 0 elements
+
+    Note that the last tuple returned by .get_matching_blocks() is always a
+    dummy, (len(a), len(b), 0), and this is the only case in which the last
+    tuple element (number of elements matched) is 0.
+
+    If you want to know how to change the first sequence into the second,
+    use .get_opcodes():
+
+    >>> for opcode in s.get_opcodes():
+    ...     print "%6s a[%d:%d] b[%d:%d]" % opcode
+     equal a[0:8] b[0:8]
+    insert a[8:8] b[8:17]
+     equal a[8:14] b[17:23]
+     equal a[14:29] b[23:38]
+
+    See the Differ class for a fancy human-friendly file differencer, which
+    uses SequenceMatcher both to compare sequences of lines, and to compare
+    sequences of characters within similar (near-matching) lines.
+
+    See also function get_close_matches() in this module, which shows how
+    simple code building on SequenceMatcher can be used to do useful work.
+
+    Timing:  Basic R-O is cubic time worst case and quadratic time expected
+    case.  SequenceMatcher is quadratic time for the worst case and has
+    expected-case behavior dependent in a complicated way on how many
+    elements the sequences have in common; best case time is linear.
+
+    Methods:
+
+    __init__(isjunk=None, a='', b='')
+        Construct a SequenceMatcher.
+
+    set_seqs(a, b)
+        Set the two sequences to be compared.
+
+    set_seq1(a)
+        Set the first sequence to be compared.
+
+    set_seq2(b)
+        Set the second sequence to be compared.
+
+    find_longest_match(alo, ahi, blo, bhi)
+        Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+    get_matching_blocks()
+        Return list of triples describing matching subsequences.
+
+    get_opcodes()
+        Return list of 5-tuples describing how to turn a into b.
+
+    ratio()
+        Return a measure of the sequences' similarity (float in [0,1]).
+
+    quick_ratio()
+        Return an upper bound on .ratio() relatively quickly.
+
+    real_quick_ratio()
+        Return an upper bound on ratio() very quickly.
+    """
+
+    def __init__(self, isjunk=None, a='', b=''):
+        """Construct a SequenceMatcher.
+
+        Optional arg isjunk is None (the default), or a one-argument
+        function that takes a sequence element and returns true iff the
+        element is junk.  None is equivalent to passing "lambda x: 0", i.e.
+        no elements are considered to be junk.  For example, pass
+            lambda x: x in " \\t"
+        if you're comparing lines as sequences of characters, and don't
+        want to synch up on blanks or hard tabs.
+
+        Optional arg a is the first of two sequences to be compared.  By
+        default, an empty string.  The elements of a must be hashable.  See
+        also .set_seqs() and .set_seq1().
+
+        Optional arg b is the second of two sequences to be compared.  By
+        default, an empty string.  The elements of b must be hashable. See
+        also .set_seqs() and .set_seq2().
+        """
+
+        # Members:
+        # a
+        #      first sequence
+        # b
+        #      second sequence; differences are computed as "what do
+        #      we need to do to 'a' to change it into 'b'?"
+        # b2j
+        #      for x in b, b2j[x] is a list of the indices (into b)
+        #      at which x appears; junk elements do not appear
+        # b2jhas
+        #      b2j.has_key
+        # fullbcount
+        #      for x in b, fullbcount[x] == the number of times x
+        #      appears in b; only materialized if really needed (used
+        #      only for computing quick_ratio())
+        # matching_blocks
+        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
+        #      ascending & non-overlapping in i and in j; terminated by
+        #      a dummy (len(a), len(b), 0) sentinel
+        # opcodes
+        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
+        #      one of
+        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
+        #          'delete'    a[i1:i2] should be deleted
+        #          'insert'    b[j1:j2] should be inserted
+        #          'equal'     a[i1:i2] == b[j1:j2]
+        # isjunk
+        #      a user-supplied function taking a sequence element and
+        #      returning true iff the element is "junk" -- this has
+        #      subtle but helpful effects on the algorithm, which I'll
+        #      get around to writing up someday <0.9 wink>.
+        #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
+        # isbjunk
+        #      for x in b, isbjunk(x) == isjunk(x) but much faster;
+        #      it's really the has_key method of a hidden dict.
+        #      DOES NOT WORK for x in a!
+
+        self.isjunk = isjunk
+        self.a = self.b = None
+        self.set_seqs(a, b)
+
+    def set_seqs(self, a, b):
+        """Set the two sequences to be compared.
+
+        >>> s = SequenceMatcher()
+        >>> s.set_seqs("abcd", "bcde")
+        >>> s.ratio()
+        0.75
+        """
+
+        self.set_seq1(a)
+        self.set_seq2(b)
+
+    def set_seq1(self, a):
+        """Set the first sequence to be compared.
+
+        The second sequence to be compared is not changed.
+
+        >>> s = SequenceMatcher(None, "abcd", "bcde")
+        >>> s.ratio()
+        0.75
+        >>> s.set_seq1("bcde")
+        >>> s.ratio()
+        1.0
+        >>>
+
+        SequenceMatcher computes and caches detailed information about the
+        second sequence, so if you want to compare one sequence S against
+        many sequences, use .set_seq2(S) once and call .set_seq1(x)
+        repeatedly for each of the other sequences.
+
+        See also set_seqs() and set_seq2().
+        """
+
+        if a is self.a:
+            return
+        self.a = a
+        self.matching_blocks = self.opcodes = None
+
+    def set_seq2(self, b):
+        """Set the second sequence to be compared.
+
+        The first sequence to be compared is not changed.
+
+        >>> s = SequenceMatcher(None, "abcd", "bcde")
+        >>> s.ratio()
+        0.75
+        >>> s.set_seq2("abcd")
+        >>> s.ratio()
+        1.0
+        >>>
+
+        SequenceMatcher computes and caches detailed information about the
+        second sequence, so if you want to compare one sequence S against
+        many sequences, use .set_seq2(S) once and call .set_seq1(x)
+        repeatedly for each of the other sequences.
+
+        See also set_seqs() and set_seq1().
+        """
+
+        if b is self.b:
+            return
+        self.b = b
+        self.matching_blocks = self.opcodes = None
+        self.fullbcount = None
+        self.__chain_b()
+
+    # For each element x in b, set b2j[x] to a list of the indices in
+    # b where x appears; the indices are in increasing order; note that
+    # the number of times x appears in b is len(b2j[x]) ...
+    # when self.isjunk is defined, junk elements don't show up in this
+    # map at all, which stops the central find_longest_match method
+    # from starting any matching block at a junk element ...
+    # also creates the fast isbjunk function ...
+    # note that this is only called when b changes; so for cross-product
+    # kinds of matches, it's best to call set_seq2 once, then set_seq1
+    # repeatedly
+
+    def __chain_b(self):
+        # Because isjunk is a user-defined (not C) function, and we test
+        # for junk a LOT, it's important to minimize the number of calls.
+        # Before the tricks described here, __chain_b was by far the most
+        # time-consuming routine in the whole module!  If anyone sees
+        # Jim Roskind, thank him again for profile.py -- I never would
+        # have guessed that.
+        # The first trick is to build b2j ignoring the possibility
+        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
+        # out the junk later is much cheaper than building b2j "right"
+        # from the start.
+        b = self.b
+        self.b2j = b2j = {}
+        self.b2jhas = b2jhas = b2j.has_key
+        for i in xrange(len(b)):
+            elt = b[i]
+            if b2jhas(elt):
+                b2j[elt].append(i)
+            else:
+                b2j[elt] = [i]
+
+        # Now b2j.keys() contains elements uniquely, and especially when
+        # the sequence is a string, that's usually a good deal smaller
+        # than len(string).  The difference is the number of isjunk calls
+        # saved.
+        isjunk, junkdict = self.isjunk, {}
+        if isjunk:
+            for elt in b2j.keys():
+                if isjunk(elt):
+                    junkdict[elt] = 1   # value irrelevant; it's a set
+                    del b2j[elt]
+
+        # Now for x in b, isjunk(x) == junkdict.has_key(x), but the
+        # latter is much faster.  Note too that while there may be a
+        # lot of junk in the sequence, the number of *unique* junk
+        # elements is probably small.  So the memory burden of keeping
+        # this dict alive is likely trivial compared to the size of b2j.
+        self.isbjunk = junkdict.has_key
+
+    def find_longest_match(self, alo, ahi, blo, bhi):
+        """Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+        If isjunk is not defined:
+
+        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+            alo <= i <= i+k <= ahi
+            blo <= j <= j+k <= bhi
+        and for all (i',j',k') meeting those conditions,
+            k >= k'
+            i <= i'
+            and if i == i', j <= j'
+
+        In other words, of all maximal matching blocks, return one that
+        starts earliest in a, and of all those maximal matching blocks that
+        start earliest in a, return the one that starts earliest in b.
+
+        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
+        >>> s.find_longest_match(0, 5, 0, 9)
+        (0, 4, 5)
+
+        If isjunk is defined, first the longest matching block is
+        determined as above, but with the additional restriction that no
+        junk element appears in the block.  Then that block is extended as
+        far as possible by matching (only) junk elements on both sides.  So
+        the resulting block never matches on junk except as identical junk
+        happens to be adjacent to an "interesting" match.
+
+        Here's the same example as before, but considering blanks to be
+        junk.  That prevents " abcd" from matching the " abcd" at the tail
+        end of the second sequence directly.  Instead only the "abcd" can
+        match, and matches the leftmost "abcd" in the second sequence:
+
+        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
+        >>> s.find_longest_match(0, 5, 0, 9)
+        (1, 0, 4)
+
+        If no blocks match, return (alo, blo, 0).
+
+        >>> s = SequenceMatcher(None, "ab", "c")
+        >>> s.find_longest_match(0, 2, 0, 1)
+        (0, 0, 0)
+        """
+
+        # CAUTION:  stripping common prefix or suffix would be incorrect.
+        # E.g.,
+        #    ab
+        #    acab
+        # Longest matching block is "ab", but if common prefix is
+        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+        # strip, so ends up claiming that ab is changed to acab by
+        # inserting "ca" in the middle.  That's minimal but unintuitive:
+        # "it's obvious" that someone inserted "ac" at the front.
+        # Windiff ends up at the same place as diff, but by pairing up
+        # the unique 'b's and then matching the first two 'a's.
+
+        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
+        besti, bestj, bestsize = alo, blo, 0
+        # find longest junk-free match
+        # during an iteration of the loop, j2len[j] = length of longest
+        # junk-free match ending with a[i-1] and b[j]
+        j2len = {}
+        nothing = []
+        for i in xrange(alo, ahi):
+            # look at all instances of a[i] in b; note that because
+            # b2j has no junk keys, the loop is skipped if a[i] is junk
+            j2lenget = j2len.get
+            newj2len = {}
+            for j in b2j.get(a[i], nothing):
+                # a[i] matches b[j]
+                if j < blo:
+                    continue
+                if j >= bhi:
+                    break
+                k = newj2len[j] = j2lenget(j-1, 0) + 1
+                if k > bestsize:
+                    besti, bestj, bestsize = i-k+1, j-k+1, k
+            j2len = newj2len
+
+        # Now that we have a wholly interesting match (albeit possibly
+        # empty!), we may as well suck up the matching junk on each
+        # side of it too.  Can't think of a good reason not to, and it
+        # saves post-processing the (possibly considerable) expense of
+        # figuring out what to do with it.  In the case of an empty
+        # interesting match, this is clearly the right thing to do,
+        # because no other kind of match is possible in the regions.
+        while besti > alo and bestj > blo and \
+              isbjunk(b[bestj-1]) and \
+              a[besti-1] == b[bestj-1]:
+            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+        while besti+bestsize < ahi and bestj+bestsize < bhi and \
+              isbjunk(b[bestj+bestsize]) and \
+              a[besti+bestsize] == b[bestj+bestsize]:
+            bestsize = bestsize + 1
+
+        return besti, bestj, bestsize
+
+    def get_matching_blocks(self):
+        """Return list of triples describing matching subsequences.
+
+        Each triple is of the form (i, j, n), and means that
+        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+        i and in j.
+
+        The last triple is a dummy, (len(a), len(b), 0), and is the only
+        triple with n==0.
+
+        >>> s = SequenceMatcher(None, "abxcd", "abcd")
+        >>> s.get_matching_blocks()
+        [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
+        """
+
+        if self.matching_blocks is not None:
+            return self.matching_blocks
+        self.matching_blocks = []
+        la, lb = len(self.a), len(self.b)
+        self.__helper(0, la, 0, lb, self.matching_blocks)
+        self.matching_blocks.append( (la, lb, 0) )
+        return self.matching_blocks
+
+    # builds list of matching blocks covering a[alo:ahi] and
+    # b[blo:bhi], appending them in increasing order to answer
+
+    def __helper(self, alo, ahi, blo, bhi, answer):
+        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
+        # a[alo:i] vs b[blo:j] unknown
+        # a[i:i+k] same as b[j:j+k]
+        # a[i+k:ahi] vs b[j+k:bhi] unknown
+        if k:
+            if alo < i and blo < j:
+                self.__helper(alo, i, blo, j, answer)
+            answer.append(x)
+            if i+k < ahi and j+k < bhi:
+                self.__helper(i+k, ahi, j+k, bhi, answer)
+
+    def get_opcodes(self):
+        """Return list of 5-tuples describing how to turn a into b.
+
+        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+        tuple preceding it, and likewise for j1 == the previous j2.
+
+        The tags are strings, with these meanings:
+
+        'replace':  a[i1:i2] should be replaced by b[j1:j2]
+        'delete':   a[i1:i2] should be deleted.
+                    Note that j1==j2 in this case.
+        'insert':   b[j1:j2] should be inserted at a[i1:i1].
+                    Note that i1==i2 in this case.
+        'equal':    a[i1:i2] == b[j1:j2]
+
+        >>> a = "qabxcd"
+        >>> b = "abycdf"
+        >>> s = SequenceMatcher(None, a, b)
+        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
+        ...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
+        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
+         delete a[0:1] (q) b[0:0] ()
+          equal a[1:3] (ab) b[0:2] (ab)
+        replace a[3:4] (x) b[2:3] (y)
+          equal a[4:6] (cd) b[3:5] (cd)
+         insert a[6:6] () b[5:6] (f)
+        """
+
+        if self.opcodes is not None:
+            return self.opcodes
+        i = j = 0
+        self.opcodes = answer = []
+        for ai, bj, size in self.get_matching_blocks():
+            # invariant:  we've pumped out correct diffs to change
+            # a[:i] into b[:j], and the next matching block is
+            # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
+            # out a diff to change a[i:ai] into b[j:bj], pump out
+            # the matching block, and move (i,j) beyond the match
+            tag = ''
+            if i < ai and j < bj:
+                tag = 'replace'
+            elif i < ai:
+                tag = 'delete'
+            elif j < bj:
+                tag = 'insert'
+            if tag:
+                answer.append( (tag, i, ai, j, bj) )
+            i, j = ai+size, bj+size
+            # the list of matching blocks is terminated by a
+            # sentinel with size 0
+            if size:
+                answer.append( ('equal', ai, i, bj, j) )
+        return answer
+
+    def ratio(self):
+        """Return a measure of the sequences' similarity (float in [0,1]).
+
+        Where T is the total number of elements in both sequences, and
+        M is the number of matches, this is 2.0*M / T.
+        Note that this is 1 if the sequences are identical, and 0 if
+        they have nothing in common.
+
+        .ratio() is expensive to compute if you haven't already computed
+        .get_matching_blocks() or .get_opcodes(), in which case you may
+        want to try .quick_ratio() or .real_quick_ratio() first to get an
+        upper bound.
+
+        >>> s = SequenceMatcher(None, "abcd", "bcde")
+        >>> s.ratio()
+        0.75
+        >>> s.quick_ratio()
+        0.75
+        >>> s.real_quick_ratio()
+        1.0
+        """
+
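+        # Worked example (matches the doctest above): for "abcd" vs.
+        # "bcde" the matching blocks cover "bcd", so M == 3, T == 8,
+        # and ratio() == 2.0 * 3 / 8 == 0.75.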
+        matches = reduce(lambda sum, triple: sum + triple[-1],
+                         self.get_matching_blocks(), 0)
+        return _calculate_ratio(matches, len(self.a) + len(self.b))
+
+    def quick_ratio(self):
+        """Return an upper bound on ratio() relatively quickly.
+
+        This isn't defined beyond that it is an upper bound on .ratio(), and
+        is faster to compute.
+        """
+
+        # viewing a and b as multisets, set matches to the cardinality
+        # of their intersection; this counts the number of matches
+        # without regard to order, so is clearly an upper bound
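+        # (e.g. for a="ab", b="ba" the multiset intersection has size 2,
+        # so quick_ratio() returns 1.0 although ratio() is only 0.5)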
+        if self.fullbcount is None:
+            self.fullbcount = fullbcount = {}
+            for elt in self.b:
+                fullbcount[elt] = fullbcount.get(elt, 0) + 1
+        fullbcount = self.fullbcount
+        # avail[x] is the number of times x appears in 'b' less the
+        # number of times we've seen it in 'a' so far ... kinda
+        avail = {}
+        availhas, matches = avail.has_key, 0
+        for elt in self.a:
+            if availhas(elt):
+                numb = avail[elt]
+            else:
+                numb = fullbcount.get(elt, 0)
+            avail[elt] = numb - 1
+            if numb > 0:
+                matches = matches + 1
+        return _calculate_ratio(matches, len(self.a) + len(self.b))
+
+    def real_quick_ratio(self):
+        """Return an upper bound on ratio() very quickly.
+
+        This isn't defined beyond that it is an upper bound on .ratio(), and
+        is faster to compute than either .ratio() or .quick_ratio().
+        """
+
+        la, lb = len(self.a), len(self.b)
+        # can't have more matches than the number of elements in the
+        # shorter sequence
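+        # e.g. for a="ab", b="xyz" this returns 2.0*2/5 == 0.8, even
+        # though ratio() is 0.0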
+        return _calculate_ratio(min(la, lb), la + lb)
+
+def get_close_matches(word, possibilities, n=3, cutoff=0.6):
+    """Use SequenceMatcher to return list of the best "good enough" matches.
+
+    word is a sequence for which close matches are desired (typically a
+    string).
+
+    possibilities is a list of sequences against which to match word
+    (typically a list of strings).
+
+    Optional arg n (default 3) is the maximum number of close matches to
+    return.  n must be > 0.
+
+    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
+    that don't score at least that similar to word are ignored.
+
+    The best (no more than n) matches among the possibilities are returned
+    in a list, sorted by similarity score, most similar first.
+
+    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
+    ['apple', 'ape']
+    >>> import keyword as _keyword
+    >>> get_close_matches("wheel", _keyword.kwlist)
+    ['while']
+    >>> get_close_matches("apple", _keyword.kwlist)
+    []
+    >>> get_close_matches("accept", _keyword.kwlist)
+    ['except']
+    """
+
+    if not n > 0:
+        raise ValueError("n must be > 0: " + `n`)
+    if not 0.0 <= cutoff <= 1.0:
+        raise ValueError("cutoff must be in [0.0, 1.0]: " + `cutoff`)
+    result = []
+    s = SequenceMatcher()
+    s.set_seq2(word)
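+    # note: word is kept as seq2 because SequenceMatcher caches detailed
+    # information about its second sequence, so fixing seq2 and varying
+    # seq1 across the possibilities avoids recomputing that cache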
+    for x in possibilities:
+        s.set_seq1(x)
+        if s.real_quick_ratio() >= cutoff and \
+           s.quick_ratio() >= cutoff and \
+           s.ratio() >= cutoff:
+            result.append((s.ratio(), x))
+    # Sort by score.
+    result.sort()
+    # Retain only the best n.
+    result = result[-n:]
+    # Move best-scorer to head of list.
+    result.reverse()
+    # Strip scores.
+    return [x for score, x in result]
+
+
+def _count_leading(line, ch):
+    """
+    Return number of `ch` characters at the start of `line`.
+
+    Example:
+
+    >>> _count_leading('   abc', ' ')
+    3
+    """
+
+    i, n = 0, len(line)
+    while i < n and line[i] == ch:
+        i += 1
+    return i
+
+class Differ:
+    r"""
+    Differ is a class for comparing sequences of lines of text, and
+    producing human-readable differences or deltas.  Differ uses
+    SequenceMatcher both to compare sequences of lines, and to compare
+    sequences of characters within similar (near-matching) lines.
+
+    Each line of a Differ delta begins with a two-letter code:
+
+        '- '    line unique to sequence 1
+        '+ '    line unique to sequence 2
+        '  '    line common to both sequences
+        '? '    line not present in either input sequence
+
+    Lines beginning with '? ' attempt to guide the eye to intraline
+    differences, and were not present in either input sequence.  These lines
+    can be confusing if the sequences contain tab characters.
+
+    Note that Differ makes no claim to produce a *minimal* diff.  To the
+    contrary, minimal diffs are often counter-intuitive, because they synch
+    up anywhere possible, sometimes on accidental matches 100 pages apart.
+    Restricting synch points to contiguous matches preserves some notion of
+    locality, at the occasional cost of producing a longer diff.
+
+    Example: Comparing two texts.
+
+    First we set up the texts, sequences of individual single-line strings
+    ending with newlines (such sequences can also be obtained from the
+    `readlines()` method of file-like objects):
+
+    >>> text1 = '''  1. Beautiful is better than ugly.
+    ...   2. Explicit is better than implicit.
+    ...   3. Simple is better than complex.
+    ...   4. Complex is better than complicated.
+    ... '''.splitlines(1)
+    >>> len(text1)
+    4
+    >>> text1[0][-1]
+    '\n'
+    >>> text2 = '''  1. Beautiful is better than ugly.
+    ...   3.   Simple is better than complex.
+    ...   4. Complicated is better than complex.
+    ...   5. Flat is better than nested.
+    ... '''.splitlines(1)
+
+    Next we instantiate a Differ object:
+
+    >>> d = Differ()
+
+    Note that when instantiating a Differ object we may pass functions to
+    filter out line and character 'junk'.  See Differ.__init__ for details.
+
+    Finally, we compare the two:
+
+    >>> result = list(d.compare(text1, text2))
+
+    'result' is a list of strings, so let's pretty-print it:
+
+    >>> from pprint import pprint as _pprint
+    >>> _pprint(result)
+    ['    1. Beautiful is better than ugly.\n',
+     '-   2. Explicit is better than implicit.\n',
+     '-   3. Simple is better than complex.\n',
+     '+   3.   Simple is better than complex.\n',
+     '?     ++\n',
+     '-   4. Complex is better than complicated.\n',
+     '?            ^                     ---- ^\n',
+     '+   4. Complicated is better than complex.\n',
+     '?           ++++ ^                      ^\n',
+     '+   5. Flat is better than nested.\n']
+
+    As a single multi-line string it looks like this:
+
+    >>> print ''.join(result),
+        1. Beautiful is better than ugly.
+    -   2. Explicit is better than implicit.
+    -   3. Simple is better than complex.
+    +   3.   Simple is better than complex.
+    ?     ++
+    -   4. Complex is better than complicated.
+    ?            ^                     ---- ^
+    +   4. Complicated is better than complex.
+    ?           ++++ ^                      ^
+    +   5. Flat is better than nested.
+
+    Methods:
+
+    __init__(linejunk=None, charjunk=None)
+        Construct a text differencer, with optional filters.
+
+    compare(a, b)
+        Compare two sequences of lines; generate the resulting delta.
+    """
+
+    def __init__(self, linejunk=None, charjunk=None):
+        """
+        Construct a text differencer, with optional filters.
+
+        The two optional keyword parameters are for filter functions:
+
+        - `linejunk`: A function that should accept a single string argument,
+          and return true iff the string is junk. The module-level function
+          `IS_LINE_JUNK` may be used to filter out lines without visible
+          characters, except for at most one splat ('#').
+
+        - `charjunk`: A function that should accept a string of length 1. The
+          module-level function `IS_CHARACTER_JUNK` may be used to filter out
+          whitespace characters (a blank or tab; **note**: bad idea to include
+          newline in this!).
+        """
+
+        self.linejunk = linejunk
+        self.charjunk = charjunk
+
+    def compare(self, a, b):
+        r"""
+        Compare two sequences of lines; generate the resulting delta.
+
+        Each sequence must contain individual single-line strings ending with
+        newlines. Such sequences can be obtained from the `readlines()` method
+        of file-like objects.  The delta generated also consists of newline-
+        terminated strings, ready to be printed as-is via the writelines()
+        method of a file-like object.
+
+        Example:
+
+        >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
+        ...                                'ore\ntree\nemu\n'.splitlines(1))),
+        - one
+        ?  ^
+        + ore
+        ?  ^
+        - two
+        - three
+        ?  -
+        + tree
+        + emu
+        """
+
+        cruncher = SequenceMatcher(self.linejunk, a, b)
+        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
+            if tag == 'replace':
+                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
+            elif tag == 'delete':
+                g = self._dump('-', a, alo, ahi)
+            elif tag == 'insert':
+                g = self._dump('+', b, blo, bhi)
+            elif tag == 'equal':
+                g = self._dump(' ', a, alo, ahi)
+            else:
+                raise ValueError, 'unknown tag ' + `tag`
+
+            for line in g:
+                yield line
+
+    def _dump(self, tag, x, lo, hi):
+        """Generate comparison results for a same-tagged range."""
+        for i in xrange(lo, hi):
+            yield '%s %s' % (tag, x[i])
+
+    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
+        assert alo < ahi and blo < bhi
+        # dump the shorter block first -- reduces the burden on short-term
+        # memory if the blocks are of very different sizes
+        if bhi - blo < ahi - alo:
+            first  = self._dump('+', b, blo, bhi)
+            second = self._dump('-', a, alo, ahi)
+        else:
+            first  = self._dump('-', a, alo, ahi)
+            second = self._dump('+', b, blo, bhi)
+
+        for g in first, second:
+            for line in g:
+                yield line
+
+    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
+        r"""
+        When replacing one block of lines with another, search the blocks
+        for *similar* lines; the best-matching pair (if any) is used as a
+        synch point, and intraline difference marking is done on the
+        similar pair. Lots of work, but often worth it.
+
+        Example:
+
+        >>> d = Differ()
+        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
+        ...                            ['abcdefGhijkl\n'], 0, 1)
+        >>> print ''.join(results),
+        - abcDefghiJkl
+        ?    ^  ^  ^
+        + abcdefGhijkl
+        ?    ^  ^  ^
+        """
+
+        # don't synch up unless the lines have a similarity score of at
+        # least cutoff; best_ratio tracks the best score seen so far
+        best_ratio, cutoff = 0.74, 0.75
+        cruncher = SequenceMatcher(self.charjunk)
+        eqi, eqj = None, None   # 1st indices of equal lines (if any)
+
+        # search for the pair that matches best without being identical
+        # (identical lines must be junk lines, & we don't want to synch up
+        # on junk -- unless we have to)
+        for j in xrange(blo, bhi):
+            bj = b[j]
+            cruncher.set_seq2(bj)
+            for i in xrange(alo, ahi):
+                ai = a[i]
+                if ai == bj:
+                    if eqi is None:
+                        eqi, eqj = i, j
+                    continue
+                cruncher.set_seq1(ai)
+                # computing similarity is expensive, so use the quick
+                # upper bounds first -- have seen this speed up messy
+                # compares by a factor of 3.
+                # note that ratio() is only expensive to compute the first
+                # time it's called on a sequence pair; the expensive part
+                # of the computation is cached by cruncher
+                if cruncher.real_quick_ratio() > best_ratio and \
+                      cruncher.quick_ratio() > best_ratio and \
+                      cruncher.ratio() > best_ratio:
+                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
+        if best_ratio < cutoff:
+            # no non-identical "pretty close" pair
+            if eqi is None:
+                # no identical pair either -- treat it as a straight replace
+                for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
+                    yield line
+                return
+            # no close pair, but an identical pair -- synch up on that
+            best_i, best_j, best_ratio = eqi, eqj, 1.0
+        else:
+            # there's a close pair, so forget the identical pair (if any)
+            eqi = None
+
+        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
+        # identical
+
+        # pump out diffs from before the synch point
+        for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
+            yield line
+
+        # do intraline marking on the synch pair
+        aelt, belt = a[best_i], b[best_j]
+        if eqi is None:
+            # pump out a '-', '?', '+', '?' quad for the synched lines
+            atags = btags = ""
+            cruncher.set_seqs(aelt, belt)
+            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
+                la, lb = ai2 - ai1, bj2 - bj1
+                if tag == 'replace':
+                    atags += '^' * la
+                    btags += '^' * lb
+                elif tag == 'delete':
+                    atags += '-' * la
+                elif tag == 'insert':
+                    btags += '+' * lb
+                elif tag == 'equal':
+                    atags += ' ' * la
+                    btags += ' ' * lb
+                else:
+                    raise ValueError, 'unknown tag ' + `tag`
+            for line in self._qformat(aelt, belt, atags, btags):
+                yield line
+        else:
+            # the synch pair is identical
+            yield '  ' + aelt
+
+        # pump out diffs from after the synch point
+        for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
+            yield line
+
+    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
+        g = []
+        if alo < ahi:
+            if blo < bhi:
+                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
+            else:
+                g = self._dump('-', a, alo, ahi)
+        elif blo < bhi:
+            g = self._dump('+', b, blo, bhi)
+
+        for line in g:
+            yield line
+
+    def _qformat(self, aline, bline, atags, btags):
+        r"""
+        Format "?" output and deal with leading tabs.
+
+        Example:
+
+        >>> d = Differ()
+        >>> results = d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
+        ...                      '  ^ ^  ^      ', '+  ^ ^  ^      ')
+        >>> for line in results: print repr(line)
+        ...
+        '- \tabcDefghiJkl\n'
+        '? \t ^ ^  ^\n'
+        '+ \t\tabcdefGhijkl\n'
+        '? \t  ^ ^  ^\n'
+        """
+
+        # Can hurt, but will probably help most of the time.
+        common = min(_count_leading(aline, "\t"),
+                     _count_leading(bline, "\t"))
+        common = min(common, _count_leading(atags[:common], " "))
+        atags = atags[common:].rstrip()
+        btags = btags[common:].rstrip()
+
+        yield "- " + aline
+        if atags:
+            yield "? %s%s\n" % ("\t" * common, atags)
+
+        yield "+ " + bline
+        if btags:
+            yield "? %s%s\n" % ("\t" * common, btags)
+
+# With respect to junk, an earlier version of ndiff simply refused to
+# *start* a match with a junk element.  The result was cases like this:
+#     before: private Thread currentThread;
+#     after:  private volatile Thread currentThread;
+# If you consider whitespace to be junk, the longest contiguous match
+# not starting with junk is "e Thread currentThread".  So ndiff reported
+# that "e volatil" was inserted between the 't' and the 'e' in "private".
+# While an accurate view, to people that's absurd.  The current version
+# looks for matching blocks that are entirely junk-free, then extends the
+# longest one of those as far as possible but only with matching junk.
+# So now "currentThread" is matched, then extended to suck up the
+# preceding blank; then "private" is matched, and extended to suck up the
+# following blank; then "Thread" is matched; and finally ndiff reports
+# that "volatile " was inserted before "Thread".  The only quibble
+# remaining is that perhaps it was really the case that " volatile"
+# was inserted after "private".  I can live with that <wink>.
+
+import re
+
+def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
+    r"""
+    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
+
+    Examples:
+
+    >>> IS_LINE_JUNK('\n')
+    1
+    >>> IS_LINE_JUNK('  #   \n')
+    1
+    >>> IS_LINE_JUNK('hello\n')
+    0
+    """
+
+    return pat(line) is not None
+
+def IS_CHARACTER_JUNK(ch, ws=" \t"):
+    r"""
+    Return 1 for ignorable character: iff `ch` is a space or tab.
+
+    Examples:
+
+    >>> IS_CHARACTER_JUNK(' ')
+    1
+    >>> IS_CHARACTER_JUNK('\t')
+    1
+    >>> IS_CHARACTER_JUNK('\n')
+    0
+    >>> IS_CHARACTER_JUNK('x')
+    0
+    """
+
+    return ch in ws
+
+del re
+
+def ndiff(a, b, linejunk=IS_LINE_JUNK, charjunk=IS_CHARACTER_JUNK):
+    r"""
+    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
+
+    Optional keyword parameters `linejunk` and `charjunk` are for filter
+    functions (or None):
+
+    - linejunk: A function that should accept a single string argument, and
+      return true iff the string is junk. The default is module-level function
+      IS_LINE_JUNK, which filters out lines without visible characters, except
+      for at most one splat ('#').
+
+    - charjunk: A function that should accept a string of length 1. The
+      default is module-level function IS_CHARACTER_JUNK, which filters out
+      whitespace characters (a blank or tab; note: bad idea to include newline
+      in this!).
+
+    Tools/scripts/ndiff.py is a command-line front-end to this function.
+
+    Example:
+
+    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
+    ...              'ore\ntree\nemu\n'.splitlines(1))
+    >>> print ''.join(diff),
+    - one
+    ?  ^
+    + ore
+    ?  ^
+    - two
+    - three
+    ?  -
+    + tree
+    + emu
+    """
+    return Differ(linejunk, charjunk).compare(a, b)
+
+def restore(delta, which):
+    r"""
+    Generate one of the two sequences that generated a delta.
+
+    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
+    lines originating from file 1 or 2 (parameter `which`), stripping off line
+    prefixes.
+
+    Examples:
+
+    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
+    ...              'ore\ntree\nemu\n'.splitlines(1))
+    >>> diff = list(diff)
+    >>> print ''.join(restore(diff, 1)),
+    one
+    two
+    three
+    >>> print ''.join(restore(diff, 2)),
+    ore
+    tree
+    emu
+    """
+    try:
+        tag = {1: "- ", 2: "+ "}[int(which)]
+    except KeyError:
+        raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
+                           % which)
+    prefixes = ("  ", tag)
+    for line in delta:
+        if line[:2] in prefixes:
+            yield line[2:]
+
+def _test():
+    import doctest, difflib
+    return doctest.testmod(difflib)
+
+if __name__ == "__main__":
+    _test()
diff --git a/lib-python/2.2/dircache.py b/lib-python/2.2/dircache.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dircache.py
@@ -0,0 +1,44 @@
+"""Read and cache directory listings.
+
+The listdir() routine returns a sorted list of the files in a directory,
+using a cache to avoid reading the directory more often than necessary.
+The annotate() routine appends slashes to directories."""
+
+import os
+
+__all__ = ["listdir", "opendir", "annotate", "reset"]
+
+cache = {}
+
+def reset():
+    """Reset the cache completely."""
+    global cache
+    cache = {}
+
+def listdir(path):
+    """List directory contents, using cache."""
+    try:
+        cached_mtime, list = cache[path]
+        del cache[path]
+    except KeyError:
+        cached_mtime, list = -1, []
+    try:
+        mtime = os.stat(path)[8]  # index 8 == st_mtime
+    except os.error:
+        return []
+    if mtime != cached_mtime:
+        try:
+            list = os.listdir(path)
+        except os.error:
+            return []
+        list.sort()
+    cache[path] = mtime, list
+    return list
+
+opendir = listdir # XXX backward compatibility
+
+def annotate(head, list):
+    """Add '/' suffixes to directories."""
+    for i in range(len(list)):
+        if os.path.isdir(os.path.join(head, list[i])):
+            list[i] = list[i] + '/'
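+
+# Usage sketch (hypothetical path).  Repeated listdir() calls are served
+# from the cache until the directory's mtime changes.  Note that
+# listdir() returns the cached list itself and annotate() mutates its
+# argument in place, so copy the list before annotating it:
+#
+#     >>> import dircache
+#     >>> names = dircache.listdir('/tmp')[:]  # copy of the cached list
+#     >>> dircache.annotate('/tmp', names)     # dirs get a '/' appended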
diff --git a/lib-python/2.2/dis.py b/lib-python/2.2/dis.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/dis.py
@@ -0,0 +1,327 @@
+"""Disassembler of Python byte code into mnemonics."""
+
+import sys
+import types
+
+__all__ = ["dis","disassemble","distb","disco","opname","cmp_op",
+           "hasconst","hasname","hasjrel","hasjabs","haslocal",
+           "hascompare", "hasfree"]
+
+def dis(x=None):
+    """Disassemble classes, methods, functions, or code.
+
+    With no argument, disassemble the last traceback.
+
+    """
+    if not x:
+        distb()
+        return
+    if type(x) is types.InstanceType:
+        x = x.__class__
+    if hasattr(x, 'im_func'):
+        x = x.im_func
+    if hasattr(x, 'func_code'):
+        x = x.func_code
+    if hasattr(x, '__dict__'):
+        items = x.__dict__.items()
+        items.sort()
+        for name, x1 in items:
+            if type(x1) in (types.MethodType,
+                            types.FunctionType,
+                            types.CodeType):
+                print "Disassembly of %s:" % name
+                try:
+                    dis(x1)
+                except TypeError, msg:
+                    print "Sorry:", msg
+                print
+    elif hasattr(x, 'co_code'):
+        disassemble(x)
+    else:
+        raise TypeError, \
+              "don't know how to disassemble %s objects" % \
+              type(x).__name__
+
+def distb(tb=None):
+    """Disassemble a traceback (default: last traceback)."""
+    if not tb:
+        try:
+            tb = sys.last_traceback
+        except AttributeError:
+            raise RuntimeError, "no last traceback to disassemble"
+        while tb.tb_next: tb = tb.tb_next
+    disassemble(tb.tb_frame.f_code, tb.tb_lasti)
+
+def disassemble(co, lasti=-1):
+    """Disassemble a code object."""
+    code = co.co_code
+    labels = findlabels(code)
+    n = len(code)
+    i = 0
+    extended_arg = 0
+    free = None
+    while i < n:
+        c = code[i]
+        op = ord(c)
+        if op == SET_LINENO and i > 0: print # Extra blank line
+        if i == lasti: print '-->',
+        else: print '   ',
+        if i in labels: print '>>',
+        else: print '  ',
+        print `i`.rjust(4),
+        print opname[op].ljust(20),
+        i = i+1
+        if op >= HAVE_ARGUMENT:
+            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
+            extended_arg = 0
+            i = i+2
+            if op == EXTENDED_ARG:
+                extended_arg = oparg*65536L
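+                # e.g. an EXTENDED_ARG operand of 2 contributes
+                # 2*65536 == 131072, so the following arg bytes (1, 0)
+                # decode to 1 + 0*256 + 131072 == 131073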
+            print `oparg`.rjust(5),
+            if op in hasconst:
+                print '(' + `co.co_consts[oparg]` + ')',
+            elif op in hasname:
+                print '(' + co.co_names[oparg] + ')',
+            elif op in hasjrel:
+                print '(to ' + `i + oparg` + ')',
+            elif op in haslocal:
+                print '(' + co.co_varnames[oparg] + ')',
+            elif op in hascompare:
+                print '(' + cmp_op[oparg] + ')',
+            elif op in hasfree:
+                if free is None:
+                    free = co.co_cellvars + co.co_freevars
+                print '(' + free[oparg] + ')',
+        print
+
+disco = disassemble                     # XXX For backwards compatibility
+
+def findlabels(code):
+    """Detect all offsets in a byte code which are jump targets.
+
+    Return the list of offsets.
+
+    """
+    labels = []
+    n = len(code)
+    i = 0
+    while i < n:
+        c = code[i]
+        op = ord(c)
+        i = i+1
+        if op >= HAVE_ARGUMENT:
+            oparg = ord(code[i]) + ord(code[i+1])*256
+            i = i+2
+            label = -1
+            if op in hasjrel:
+                label = i+oparg
+            elif op in hasjabs:
+                label = oparg
+            if label >= 0:
+                if label not in labels:
+                    labels.append(label)
+    return labels
+
+cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
+        'is not', 'exception match', 'BAD')
+
+hasconst = []
+hasname = []
+hasjrel = []
+hasjabs = []
+haslocal = []
+hascompare = []
+hasfree = []
+
+opname = [''] * 256
+for op in range(256): opname[op] = '<' + `op` + '>'
+
+def def_op(name, op):
+    opname[op] = name
+
+def name_op(name, op):
+    opname[op] = name
+    hasname.append(op)
+
+def jrel_op(name, op):
+    opname[op] = name
+    hasjrel.append(op)
+
+def jabs_op(name, op):
+    opname[op] = name
+    hasjabs.append(op)
+
+# Instruction opcodes for compiled code
+
+def_op('STOP_CODE', 0)
+def_op('POP_TOP', 1)
+def_op('ROT_TWO', 2)
+def_op('ROT_THREE', 3)
+def_op('DUP_TOP', 4)
+def_op('ROT_FOUR', 5)
+
+def_op('UNARY_POSITIVE', 10)
+def_op('UNARY_NEGATIVE', 11)
+def_op('UNARY_NOT', 12)
+def_op('UNARY_CONVERT', 13)
+
+def_op('UNARY_INVERT', 15)
+
+def_op('BINARY_POWER', 19)
+
+def_op('BINARY_MULTIPLY', 20)
+def_op('BINARY_DIVIDE', 21)
+def_op('BINARY_MODULO', 22)
+def_op('BINARY_ADD', 23)
+def_op('BINARY_SUBTRACT', 24)
+def_op('BINARY_SUBSCR', 25)
+def_op('BINARY_FLOOR_DIVIDE', 26)
+def_op('BINARY_TRUE_DIVIDE', 27)
+def_op('INPLACE_FLOOR_DIVIDE', 28)
+def_op('INPLACE_TRUE_DIVIDE', 29)
+
+def_op('SLICE+0', 30)
+def_op('SLICE+1', 31)
+def_op('SLICE+2', 32)
+def_op('SLICE+3', 33)
+
+def_op('STORE_SLICE+0', 40)
+def_op('STORE_SLICE+1', 41)
+def_op('STORE_SLICE+2', 42)
+def_op('STORE_SLICE+3', 43)
+
+def_op('DELETE_SLICE+0', 50)
+def_op('DELETE_SLICE+1', 51)
+def_op('DELETE_SLICE+2', 52)
+def_op('DELETE_SLICE+3', 53)
+
+def_op('INPLACE_ADD', 55)
+def_op('INPLACE_SUBTRACT', 56)
+def_op('INPLACE_MULTIPLY', 57)
+def_op('INPLACE_DIVIDE', 58)
+def_op('INPLACE_MODULO', 59)
+def_op('STORE_SUBSCR', 60)
+def_op('DELETE_SUBSCR', 61)
+
+def_op('BINARY_LSHIFT', 62)
+def_op('BINARY_RSHIFT', 63)
+def_op('BINARY_AND', 64)
+def_op('BINARY_XOR', 65)
+def_op('BINARY_OR', 66)
+def_op('INPLACE_POWER', 67)
+def_op('GET_ITER', 68)
+
+def_op('PRINT_EXPR', 70)
+def_op('PRINT_ITEM', 71)
+def_op('PRINT_NEWLINE', 72)
+def_op('PRINT_ITEM_TO', 73)
+def_op('PRINT_NEWLINE_TO', 74)
+def_op('INPLACE_LSHIFT', 75)
+def_op('INPLACE_RSHIFT', 76)
+def_op('INPLACE_AND', 77)
+def_op('INPLACE_XOR', 78)
+def_op('INPLACE_OR', 79)
+def_op('BREAK_LOOP', 80)
+
+def_op('LOAD_LOCALS', 82)
+def_op('RETURN_VALUE', 83)
+def_op('IMPORT_STAR', 84)
+def_op('EXEC_STMT', 85)
+def_op('YIELD_STMT', 86)
+
+def_op('POP_BLOCK', 87)
+def_op('END_FINALLY', 88)
+def_op('BUILD_CLASS', 89)
+
+HAVE_ARGUMENT = 90              # Opcodes from here have an argument:
+
+name_op('STORE_NAME', 90)       # Index in name list
+name_op('DELETE_NAME', 91)      # ""
+def_op('UNPACK_SEQUENCE', 92)   # Number of tuple items
+jrel_op('FOR_ITER', 93)
+
+name_op('STORE_ATTR', 95)       # Index in name list
+name_op('DELETE_ATTR', 96)      # ""
+name_op('STORE_GLOBAL', 97)     # ""
+name_op('DELETE_GLOBAL', 98)    # ""
+def_op('DUP_TOPX', 99)          # number of items to duplicate
+def_op('LOAD_CONST', 100)       # Index in const list
+hasconst.append(100)
+name_op('LOAD_NAME', 101)       # Index in name list
+def_op('BUILD_TUPLE', 102)      # Number of tuple items
+def_op('BUILD_LIST', 103)       # Number of list items
+def_op('BUILD_MAP', 104)        # Always zero for now
+name_op('LOAD_ATTR', 105)       # Index in name list
+def_op('COMPARE_OP', 106)       # Comparison operator
+hascompare.append(106)
+name_op('IMPORT_NAME', 107)     # Index in name list
+name_op('IMPORT_FROM', 108)     # Index in name list
+
+jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
+jrel_op('JUMP_IF_FALSE', 111)   # ""
+jrel_op('JUMP_IF_TRUE', 112)    # ""
+jabs_op('JUMP_ABSOLUTE', 113)   # Target byte offset from beginning of code
+jrel_op('FOR_LOOP', 114)        # Number of bytes to skip
+
+name_op('LOAD_GLOBAL', 116)     # Index in name list
+
+jabs_op('CONTINUE_LOOP', 119)   # Target address
+jrel_op('SETUP_LOOP', 120)      # Distance to target address
+jrel_op('SETUP_EXCEPT', 121)    # ""
+jrel_op('SETUP_FINALLY', 122)   # ""
+
+def_op('LOAD_FAST', 124)        # Local variable number
+haslocal.append(124)
+def_op('STORE_FAST', 125)       # Local variable number
+haslocal.append(125)
+def_op('DELETE_FAST', 126)      # Local variable number
+haslocal.append(126)
+
+def_op('SET_LINENO', 127)       # Current line number
+SET_LINENO = 127
+
+def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
+def_op('CALL_FUNCTION', 131)    # #args + (#kwargs << 8)
+def_op('MAKE_FUNCTION', 132)    # Number of args with default values
+def_op('BUILD_SLICE', 133)      # Number of items
+
+def_op('MAKE_CLOSURE', 134)
+def_op('LOAD_CLOSURE', 135)
+hasfree.append(135)
+def_op('LOAD_DEREF', 136)
+hasfree.append(136)
+def_op('STORE_DEREF', 137)
+hasfree.append(137)
+
+def_op('CALL_FUNCTION_VAR', 140)     # #args + (#kwargs << 8)
+def_op('CALL_FUNCTION_KW', 141)      # #args + (#kwargs << 8)
+def_op('CALL_FUNCTION_VAR_KW', 142)  # #args + (#kwargs << 8)
+
+def_op('EXTENDED_ARG', 143)
+EXTENDED_ARG = 143
+
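+# Example (a sketch): disassembling a compiled code object.  Exact
+# output depends on the compiler (offsets, SET_LINENO entries), so none
+# is shown here.
+#
+#     >>> import dis
+#     >>> code = compile("x = 1 + 2", "<example>", "exec")
+#     >>> dis.dis(code)
+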
+def _test():
+    """Simple test program to disassemble a file."""
+    if sys.argv[1:]:
+        if sys.argv[2:]:
+            sys.stderr.write("usage: python dis.py [-|file]\n")
+            sys.exit(2)
+        fn = sys.argv[1]
+        if not fn or fn == "-":
+            fn = None
+    else:
+        fn = None
+    if not fn:
+        f = sys.stdin
+    else:
+        f = open(fn)
+    source = f.read()
+    if fn:
+        f.close()
+    else:
+        fn = "<stdin>"
+    code = compile(source, fn, "exec")
+    dis(code)
+
+if __name__ == "__main__":
+    _test()
diff --git a/lib-python/2.2/distutils/README b/lib-python/2.2/distutils/README
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/README
@@ -0,0 +1,18 @@
+This directory contains only a subset of the Distutils, specifically the
+Python modules in the 'distutils' and 'distutils.command' packages.
+Technically, this is all you need to distribute and install Python modules
+using the Distutils.  Most people will want some documentation and other
+help, though.  Currently, everything can be found at the Distutils web page:
+
+    http://www.python.org/sigs/distutils-sig/
+
+From there you can access the latest documentation, or download a standalone
+Distutils release that includes all the code in this directory, plus
+documentation, test scripts, examples, etc.
+
+The Distutils documentation isn't yet part of the standard Python
+documentation set, but will be soon.
+
+        Greg Ward (gward@python.net)
+
+$Id$
diff --git a/lib-python/2.2/distutils/__init__.py b/lib-python/2.2/distutils/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/__init__.py
@@ -0,0 +1,13 @@
+"""distutils
+
+The main package for the Python Module Distribution Utilities.  Normally
+used from a setup script as
+
+   from distutils.core import setup
+
+   setup (...)
+"""
+
+__revision__ = "$Id$"
+
+__version__ = "1.0.3"
diff --git a/lib-python/2.2/distutils/archive_util.py b/lib-python/2.2/distutils/archive_util.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/archive_util.py
@@ -0,0 +1,173 @@
+"""distutils.archive_util
+
+Utility functions for creating archive files (tarballs, zip files,
+that sort of thing)."""
+
+# created 2000/04/03, Greg Ward (extracted from util.py)
+
+__revision__ = "$Id$"
+
+import os
+from distutils.errors import DistutilsExecError
+from distutils.spawn import spawn
+from distutils.dir_util import mkpath
+
+def make_tarball (base_name, base_dir, compress="gzip",
+                  verbose=0, dry_run=0):
+    """Create a (possibly compressed) tar file from all the files under
+    'base_dir'.  'compress' must be "gzip" (the default), "compress",
+    "bzip2", or None.  Both "tar" and the compression utility named by
+    'compress' must be on the default program search path, so this is
+    probably Unix-specific.  The output tar file will be named 'base_dir' +
+    ".tar", possibly plus the appropriate compression extension (".gz",
+    ".bz2" or ".Z").  Return the output filename.
+    """
+    # XXX GNU tar 1.13 has a nifty option to add a prefix directory.
+    # It's pretty new, though, so we certainly can't require it --
+    # but it would be nice to take advantage of it to skip the
+    # "create a tree of hardlinks" step!  (Would also be nice to
+    # detect GNU tar to use its 'z' option and save a step.)
+
+    compress_ext = { 'gzip': ".gz",
+                     'bzip2': '.bz2',
+                     'compress': ".Z" }
+
+    # flags for compression program, each element of list will be an argument
+    compress_flags = {'gzip': ["-f9"],
+                      'compress': ["-f"],
+                      'bzip2': ['-f9']}
+
+    if compress is not None and compress not in compress_ext.keys():
+        raise ValueError, \
+              "bad value for 'compress': must be None, 'gzip', or 'compress'"
+
+    archive_name = base_name + ".tar"
+    mkpath(os.path.dirname(archive_name), verbose=verbose, dry_run=dry_run)
+    cmd = ["tar", "-cf", archive_name, base_dir]
+    spawn(cmd, verbose=verbose, dry_run=dry_run)
+
+    if compress:
+        spawn([compress] + compress_flags[compress] + [archive_name],
+              verbose=verbose, dry_run=dry_run)
+        return archive_name + compress_ext[compress]
+    else:
+        return archive_name
+
+# make_tarball ()
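+
+# Usage sketch (hypothetical paths; needs 'tar' and 'gzip' on the
+# default program search path):
+#
+#     >>> from distutils.archive_util import make_tarball
+#     >>> make_tarball('dist/myproj-1.0', 'myproj-1.0')
+#     'dist/myproj-1.0.tar.gz'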
+
+
+def make_zipfile (base_name, base_dir, verbose=0, dry_run=0):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    zip file will be named 'base_name' + ".zip".  Uses either the InfoZIP
+    "zip" utility (if installed and found on the default search path) or
+    the "zipfile" Python module (if available).  If neither tool is
+    available, raises DistutilsExecError.  Returns the name of the output
+    zip file.
+    """
+    # This initially assumed the Unix 'zip' utility -- but
+    # apparently InfoZIP's zip.exe works the same under Windows, so
+    # no changes needed!
+
+    zip_filename = base_name + ".zip"
+    mkpath(os.path.dirname(zip_filename), verbose=verbose, dry_run=dry_run)
+    try:
+        spawn(["zip", "-rq", zip_filename, base_dir],
+              verbose=verbose, dry_run=dry_run)
+    except DistutilsExecError:
+
+        # XXX really should distinguish between "couldn't find
+        # external 'zip' command" and "zip failed" -- shouldn't try
+        # again in the latter case.  (I think fixing this will
+        # require some cooperation from the spawn module -- perhaps
+        # a utility function to search the path, so we can fallback
+        # on zipfile.py without the failed spawn.)
+        try:
+            import zipfile
+        except ImportError:
+            raise DistutilsExecError, \
+                  ("unable to create zip file '%s': " +
+                   "could neither find a standalone zip utility nor " +
+                   "import the 'zipfile' module") % zip_filename
+
+        if verbose:
+            print "creating '%s' and adding '%s' to it" % \
+                  (zip_filename, base_dir)
+
+        def visit (z, dirname, names):
+            for name in names:
+                path = os.path.normpath(os.path.join(dirname, name))
+                if os.path.isfile(path):
+                    z.write(path, path)
+
+        if not dry_run:
+            z = zipfile.ZipFile(zip_filename, "w",
+                                compression=zipfile.ZIP_DEFLATED)
+
+            os.path.walk(base_dir, visit, z)
+            z.close()
+
+    return zip_filename
+
+# make_zipfile ()
+
+
+ARCHIVE_FORMATS = {
+    'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
+    'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
+    'ztar':  (make_tarball, [('compress', 'compress')], "compressed tar file"),
+    'tar':   (make_tarball, [('compress', None)], "uncompressed tar file"),
+    'zip':   (make_zipfile, [], "ZIP file")
+    }
+
+def check_archive_formats (formats):
+    for format in formats:
+        if not ARCHIVE_FORMATS.has_key(format):
+            return format
+    else:
+        return None
+
+def make_archive (base_name, format,
+                  root_dir=None, base_dir=None,
+                  verbose=0, dry_run=0):
+    """Create an archive file (eg. zip or tar).  'base_name' is the name
+    of the file to create, minus any format-specific extension; 'format'
+    is the archive format: one of "zip", "tar", "gztar", "bztar", or "ztar".
+    'root_dir' is a directory that will be the root directory of the
+    archive; ie. we typically chdir into 'root_dir' before creating the
+    archive.  'base_dir' is the directory where we start archiving from;
+    ie. 'base_dir' will be the common prefix of all files and
+    directories in the archive.  'root_dir' and 'base_dir' both default
+    to the current directory.  Returns the name of the archive file.
+    """
+    save_cwd = os.getcwd()
+    if root_dir is not None:
+        if verbose:
+            print "changing into '%s'" % root_dir
+        base_name = os.path.abspath(base_name)
+        if not dry_run:
+            os.chdir(root_dir)
+
+    if base_dir is None:
+        base_dir = os.curdir
+
+    kwargs = { 'verbose': verbose,
+               'dry_run': dry_run }
+
+    try:
+        format_info = ARCHIVE_FORMATS[format]
+    except KeyError:
+        raise ValueError, "unknown archive format '%s'" % format
+
+    func = format_info[0]
+    for (arg,val) in format_info[1]:
+        kwargs[arg] = val
+    filename = apply(func, (base_name, base_dir), kwargs)
+
+    if root_dir is not None:
+        if verbose:
+            print "changing back to '%s'" % save_cwd
+        os.chdir(save_cwd)
+
+    return filename
+
+# make_archive ()
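+
+# Usage sketch (hypothetical paths): archive ./build/myproj-1.0 into a
+# zip file with member paths relative to ./build.  The returned filename
+# is absolute here, since base_name is absolutized when root_dir is
+# given.
+#
+#     >>> from distutils.archive_util import make_archive
+#     >>> make_archive('myproj-1.0', 'zip', root_dir='build',
+#     ...              base_dir='myproj-1.0')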
diff --git a/lib-python/2.2/distutils/bcppcompiler.py b/lib-python/2.2/distutils/bcppcompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/bcppcompiler.py
@@ -0,0 +1,409 @@
+"""distutils.bcppcompiler
+
+Contains BorlandCCompiler, an implementation of the abstract CCompiler class
+for the Borland C++ compiler.
+"""
+
+# This implementation by Lyle Johnson, based on the original msvccompiler.py
+# module and using the directions originally published by Gordon Williams.
+
+# XXX looks like there's a LOT of overlap between these two classes:
+# someone should sit down and factor out the common code as
+# WindowsCCompiler!  --GPW
+
+__revision__ = "$Id$"
+
+
+import sys, os
+from distutils.errors import \
+     DistutilsExecError, DistutilsPlatformError, \
+     CompileError, LibError, LinkError, UnknownFileError
+from distutils.ccompiler import \
+     CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.file_util import write_file
+from distutils.dep_util import newer
+
+class BCPPCompiler(CCompiler):
+    """Concrete class that implements an interface to the Borland C/C++
+    compiler, as defined by the CCompiler abstract class.
+    """
+
+    compiler_type = 'bcpp'
+
+    # Just set this so CCompiler's constructor doesn't barf.  We currently
+    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+    # as it really isn't necessary for this sort of single-compiler class.
+    # Would be nice to have a consistent interface with UnixCCompiler,
+    # though, so it's worth thinking about.
+    executables = {}
+
+    # Private class data (need to distinguish C from C++ source for compiler)
+    _c_extensions = ['.c']
+    _cpp_extensions = ['.cc', '.cpp', '.cxx']
+
+    # Needed for the filename generation methods provided by the
+    # base class, CCompiler.
+    src_extensions = _c_extensions + _cpp_extensions
+    obj_extension = '.obj'
+    static_lib_extension = '.lib'
+    shared_lib_extension = '.dll'
+    static_lib_format = shared_lib_format = '%s%s'
+    exe_extension = '.exe'
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+
+        CCompiler.__init__ (self, verbose, dry_run, force)
+
+        # These executables are assumed to all be in the path.
+        # Borland doesn't seem to use any special registry settings to
+        # indicate their installation locations.
+
+        self.cc = "bcc32.exe"
+        self.linker = "ilink32.exe"
+        self.lib = "tlib.exe"
+
+        self.preprocess_options = None
+        self.compile_options = ['/tWM', '/O2', '/q', '/g0']
+        self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
+
+        self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
+        self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
+        self.ldflags_static = []
+        self.ldflags_exe = ['/Gn', '/q', '/x']
+        self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
+
+
+    # -- Worker methods ------------------------------------------------
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+
+        (output_dir, macros, include_dirs) = \
+            self._fix_compile_args (output_dir, macros, include_dirs)
+        (objects, skip_sources) = self._prep_compile (sources, output_dir)
+
+        if extra_postargs is None:
+            extra_postargs = []
+
+        pp_opts = gen_preprocess_options (macros, include_dirs)
+        compile_opts = extra_preargs or []
+        compile_opts.append ('-c')
+        if debug:
+            compile_opts.extend (self.compile_options_debug)
+        else:
+            compile_opts.extend (self.compile_options)
+
+        for i in range (len (sources)):
+            src = sources[i] ; obj = objects[i]
+            ext = (os.path.splitext (src))[1]
+
+            if skip_sources[src]:
+                self.announce ("skipping %s (%s up-to-date)" % (src, obj))
+            else:
+                src = os.path.normpath(src)
+                obj = os.path.normpath(obj)
+                self.mkpath(os.path.dirname(obj))
+
+                if ext == '.res':
+                    # This is already a binary file -- skip it.
+                    continue # the 'for' loop
+                if ext == '.rc':
+                    # This needs to be compiled to a .res file -- do it now.
+                    try:
+                        self.spawn (["brcc32", "-fo", obj, src])
+                    except DistutilsExecError, msg:
+                        raise CompileError, msg
+                    continue # the 'for' loop
+
+                # The next two are both for the real compiler.
+                if ext in self._c_extensions:
+                    input_opt = ""
+                elif ext in self._cpp_extensions:
+                    input_opt = "-P"
+                else:
+                    # Unknown file type -- no extra options.  The compiler
+                    # will probably fail, but let it just in case this is a
+                    # file the compiler recognizes even if we don't.
+                    input_opt = ""
+
+                output_opt = "-o" + obj
+
+                # Compiler command line syntax is: "bcc32 [options] file(s)".
+                # Note that the source file names must appear at the end of
+                # the command line.
+                try:
+                    self.spawn ([self.cc] + compile_opts + pp_opts +
+                                [input_opt, output_opt] +
+                                extra_postargs + [src])
+                except DistutilsExecError, msg:
+                    raise CompileError, msg
+
+        return objects
+
+    # compile ()
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0,
+                           extra_preargs=None,
+                           extra_postargs=None):
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        output_filename = \
+            self.library_filename (output_libname, output_dir=output_dir)
+
+        if self._need_link (objects, output_filename):
+            lib_args = [output_filename, '/u'] + objects
+            if debug:
+                pass                    # XXX what goes here?
+            if extra_preargs:
+                lib_args[:0] = extra_preargs
+            if extra_postargs:
+                lib_args.extend (extra_postargs)
+            try:
+                self.spawn ([self.lib] + lib_args)
+            except DistutilsExecError, msg:
+                raise LibError, msg
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # create_static_lib ()
+
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+
+        # XXX this ignores 'build_temp'!  should follow the lead of
+        # msvccompiler.py
+
+        (objects, output_dir) = self._fix_object_args (objects, output_dir)
+        (libraries, library_dirs, runtime_library_dirs) = \
+            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+        if runtime_library_dirs:
+            self.warn ("I don't know what to do with 'runtime_library_dirs': "
+                       + str (runtime_library_dirs))
+
+        if output_dir is not None:
+            output_filename = os.path.join (output_dir, output_filename)
+
+        if self._need_link (objects, output_filename):
+
+            # Figure out linker args based on type of target.
+            if target_desc == CCompiler.EXECUTABLE:
+                startup_obj = 'c0w32'
+                if debug:
+                    ld_args = self.ldflags_exe_debug[:]
+                else:
+                    ld_args = self.ldflags_exe[:]
+            else:
+                startup_obj = 'c0d32'
+                if debug:
+                    ld_args = self.ldflags_shared_debug[:]
+                else:
+                    ld_args = self.ldflags_shared[:]
+
+
+            # Create a temporary exports file for use by the linker
+            if export_symbols is None:
+                def_file = ''
+            else:
+                head, tail = os.path.split (output_filename)
+                modname, ext = os.path.splitext (tail)
+                temp_dir = os.path.dirname(objects[0]) # preserve tree structure
+                def_file = os.path.join (temp_dir, '%s.def' % modname)
+                contents = ['EXPORTS']
+                for sym in (export_symbols or []):
+                    contents.append('  %s=_%s' % (sym, sym))
+                self.execute(write_file, (def_file, contents),
+                             "writing %s" % def_file)
+
+            # Borland C++ has problems with '/' in paths
+            objects2 = map(os.path.normpath, objects)
+            # split objects in .obj and .res files
+            # Borland C++ needs them at different positions in the command line
+            objects = [startup_obj]
+            resources = []
+            for file in objects2:
+                (base, ext) = os.path.splitext(os.path.normcase(file))
+                if ext == '.res':
+                    resources.append(file)
+                else:
+                    objects.append(file)
+
+
+            for l in library_dirs:
+                ld_args.append("/L%s" % os.path.normpath(l))
+            ld_args.append("/L.") # we sometimes use relative paths
+
+            # list of object files
+            ld_args.extend(objects)
+
+            # XXX the command-line syntax for Borland C++ is a bit wonky;
+            # certain filenames are jammed together in one big string, but
+            # comma-delimited.  This doesn't mesh too well with the
+            # Unix-centric attitude (with a DOS/Windows quoting hack) of
+            # 'spawn()', so constructing the argument list is a bit
+            # awkward.  Note that doing the obvious thing and jamming all
+            # the filenames and commas into one argument would be wrong,
+            # because 'spawn()' would quote any filenames with spaces in
+            # them.  Arghghh!  Apparently it works fine as coded...
+
+            # name of dll/exe file
+            ld_args.extend([',',output_filename])
+            # no map file and start libraries
+            ld_args.append(',,')
+
+            for lib in libraries:
+                # see if we find it and if there is a bcpp specific lib
+                # (xxx_bcpp.lib)
+                libfile = self.find_library_file(library_dirs, lib, debug)
+                if libfile is None:
+                    ld_args.append(lib)
+                    # probably a BCPP internal library -- don't warn
+                    #    self.warn('library %s not found.' % lib)
+                else:
+                    # full name which prefers bcpp_xxx.lib over xxx.lib
+                    ld_args.append(libfile)
+
+            # some default libraries
+            ld_args.append ('import32')
+            ld_args.append ('cw32mt')
+
+            # def file for export symbols
+            ld_args.extend([',',def_file])
+            # add resource files
+            ld_args.append(',')
+            ld_args.extend(resources)
+
+
+            if extra_preargs:
+                ld_args[:0] = extra_preargs
+            if extra_postargs:
+                ld_args.extend(extra_postargs)
+
+            self.mkpath (os.path.dirname (output_filename))
+            try:
+                self.spawn ([self.linker] + ld_args)
+            except DistutilsExecError, msg:
+                raise LinkError, msg
+
+        else:
+            self.announce ("skipping %s (up-to-date)" % output_filename)
+
+    # link ()
+
+    # -- Miscellaneous methods -----------------------------------------
+
+
+    def find_library_file (self, dirs, lib, debug=0):
+        # List of effective library names to try, in order of preference:
+        # xxx_bcpp.lib is better than xxx.lib
+        # and xxx_d.lib is better than xxx.lib if debug is set
+        #
+        # The "_bcpp" suffix is to handle a Python installation for people
+        # with multiple compilers (primarily Distutils hackers, I suspect
+        # ;-).  The idea is they'd have one static library for each
+        # compiler they care about, since (almost?) every Windows compiler
+        # seems to have a different format for static libraries.
+        if debug:
+            dlib = (lib + "_d")
+            try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
+        else:
+            try_names = (lib + "_bcpp", lib)
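+        # e.g. for lib "foo" with debug set, the search order is
+        # foo_d_bcpp.lib, foo_bcpp.lib, foo_d.lib, foo.lib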
+
+        for dir in dirs:
+            for name in try_names:
+                libfile = os.path.join(dir, self.library_filename(name))
+                if os.path.exists(libfile):
+                    return libfile
+        else:
+            # Oops, didn't find it in *any* of 'dirs'
+            return None
+
+    # overwrite the one from CCompiler to support rc and res-files
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+            (base, ext) = os.path.splitext (os.path.normcase(src_name))
+            if ext not in (self.src_extensions + ['.rc','.res']):
+                raise UnknownFileError, \
+                      "unknown file type '%s' (from '%s')" % \
+                      (ext, src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            if ext == '.res':
+                # these can go unchanged
+                obj_names.append (os.path.join (output_dir, base + ext))
+            elif ext == '.rc':
+                # these need to be compiled to .res-files
+                obj_names.append (os.path.join (output_dir, base + '.res'))
+            else:
+                obj_names.append (os.path.join (output_dir,
+                                            base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+    def preprocess (self,
+                    source,
+                    output_file=None,
+                    macros=None,
+                    include_dirs=None,
+                    extra_preargs=None,
+                    extra_postargs=None):
+
+        (_, macros, include_dirs) = \
+            self._fix_compile_args(None, macros, include_dirs)
+        pp_opts = gen_preprocess_options(macros, include_dirs)
+        pp_args = ['cpp32.exe'] + pp_opts
+        if output_file is not None:
+            pp_args.append('-o' + output_file)
+        if extra_preargs:
+            pp_args[:0] = extra_preargs
+        if extra_postargs:
+            pp_args.extend(extra_postargs)
+        pp_args.append(source)
+
+        # We need to preprocess: either we're being forced to, or the
+        # source file is newer than the target (or the target doesn't
+        # exist).
+        if self.force or output_file is None or newer(source, output_file):
+            if output_file:
+                self.mkpath(os.path.dirname(output_file))
+            try:
+                self.spawn(pp_args)
+            except DistutilsExecError, msg:
+                print msg
+                raise CompileError, msg
+
+    # preprocess()
diff --git a/lib-python/2.2/distutils/ccompiler.py b/lib-python/2.2/distutils/ccompiler.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/ccompiler.py
@@ -0,0 +1,1046 @@
+"""distutils.ccompiler
+
+Contains CCompiler, an abstract base class that defines the interface
+for the Distutils compiler abstraction model."""
+
+# created 1999/07/05, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, re
+from types import *
+from copy import copy
+from distutils.errors import *
+from distutils.spawn import spawn
+from distutils.file_util import move_file
+from distutils.dir_util import mkpath
+from distutils.dep_util import newer_pairwise, newer_group
+from distutils.util import split_quoted, execute
+
+
+class CCompiler:
+    """Abstract base class to define the interface that must be implemented
+    by real compiler classes.  Also has some utility methods used by
+    several compiler classes.
+
+    The basic idea behind a compiler abstraction class is that each
+    instance can be used for all the compile/link steps in building a
+    single project.  Thus, attributes common to all of those compile and
+    link steps -- include directories, macros to define, libraries to link
+    against, etc. -- are attributes of the compiler instance.  To allow for
+    variability in how individual files are treated, most of those
+    attributes may be varied on a per-compilation or per-link basis.
+    """
+
+    # 'compiler_type' is a class attribute that identifies this class.  It
+    # keeps code that wants to know what kind of compiler it's dealing with
+    # from having to import all possible compiler classes just to do an
+    # 'isinstance'.  In concrete CCompiler subclasses, 'compiler_type'
+    # should really, really be one of the keys of the 'compiler_class'
+    # dictionary (see below -- used by the 'new_compiler()' factory
+    # function) -- authors of new compiler interface classes are
+    # responsible for updating 'compiler_class'!
+    compiler_type = None
+
+    # XXX things not handled by this compiler abstraction model:
+    #   * client can't provide additional options for a compiler,
+    #     e.g. warning, optimization, debugging flags.  Perhaps this
+    #     should be the domain of concrete compiler abstraction classes
+    #     (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
+    #     class should have methods for the common ones.
+    #   * can't completely override the include or library search
+    #     path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
+    #     I'm not sure how widely supported this is even by Unix
+    #     compilers, much less on other platforms.  And I'm even less
+    #     sure how useful it is; maybe for cross-compiling, but
+    #     support for that is a ways off.  (And anyways, cross
+    #     compilers probably have a dedicated binary with the
+    #     right paths compiled in.  I hope.)
+    #   * can't do really freaky things with the library list/library
+    #     dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
+    #     different versions of libfoo.a in different locations.  I
+    #     think this is useless without the ability to null out the
+    #     library search path anyways.
+
+
+    # Subclasses that rely on the standard filename generation methods
+    # implemented below should override these; see the comment near
+    # those methods ('object_filenames()' et al.) for details:
+    src_extensions = None               # list of strings
+    obj_extension = None                # string
+    static_lib_extension = None
+    shared_lib_extension = None         # string
+    static_lib_format = None            # format string
+    shared_lib_format = None            # prob. same as static_lib_format
+    exe_extension = None                # string
+
+
+    def __init__ (self,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+
+        self.verbose = verbose
+        self.dry_run = dry_run
+        self.force = force
+
+        # 'output_dir': a common output directory for object, library,
+        # shared object, and shared library files
+        self.output_dir = None
+
+        # 'macros': a list of macro definitions (or undefinitions).  A
+        # macro definition is a 2-tuple (name, value), where the value is
+        # either a string or None (no explicit value).  A macro
+        # undefinition is a 1-tuple (name,).
+        self.macros = []
+
+        # 'include_dirs': a list of directories to search for include files
+        self.include_dirs = []
+
+        # 'libraries': a list of libraries to include in any link
+        # (library names, not filenames: eg. "foo" not "libfoo.a")
+        self.libraries = []
+
+        # 'library_dirs': a list of directories to search for libraries
+        self.library_dirs = []
+
+        # 'runtime_library_dirs': a list of directories to search for
+        # shared libraries/objects at runtime
+        self.runtime_library_dirs = []
+
+        # 'objects': a list of object files (or similar, such as explicitly
+        # named library files) to include on any link
+        self.objects = []
+
+        for key in self.executables.keys():
+            self.set_executable(key, self.executables[key])
+
+    # __init__ ()
+
+
+    def set_executables (self, **args):
+
+        """Define the executables (and options for them) that will be run
+        to perform the various stages of compilation.  The exact set of
+        executables that may be specified here depends on the compiler
+        class (via the 'executables' class attribute), but most will have:
+          compiler      the C/C++ compiler
+          linker_so     linker used to create shared objects and libraries
+          linker_exe    linker used to create binary executables
+          archiver      static library creator
+
+        On platforms with a command-line (Unix, DOS/Windows), each of these
+        is a string that will be split into executable name and (optional)
+        list of arguments.  (Splitting the string is done similarly to how
+        Unix shells operate: words are delimited by spaces, but quotes and
+        backslashes can override this.  See
+        'distutils.util.split_quoted()'.)
+        """
+
+        # Note that some CCompiler implementation classes will define class
+        # attributes 'cpp', 'cc', etc. with hard-coded executable names;
+        # this is appropriate when a compiler class is for exactly one
+        # compiler/OS combination (eg. MSVCCompiler).  Other compiler
+        # classes (UnixCCompiler, in particular) are driven by information
+        # discovered at run-time, since there are many different ways to do
+        # basically the same things with Unix C compilers.
+
+        for key in args.keys():
+            if not self.executables.has_key(key):
+                raise ValueError, \
+                      "unknown executable '%s' for class %s" % \
+                      (key, self.__class__.__name__)
+            self.set_executable(key, args[key])
+
+    # set_executables ()
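# Editorial illustration (not part of the original file); a
# hypothetical call -- each string is split like a Unix shell command
# line (see distutils.util.split_quoted()):
#   compiler.set_executables(compiler='gcc -O2 -Wall',
#                            linker_so='gcc -shared')
# afterwards compiler.compiler == ['gcc', '-O2', '-Wall'].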
+
+    def set_executable(self, key, value):
+        if type(value) is StringType:
+            setattr(self, key, split_quoted(value))
+        else:
+            setattr(self, key, value)
+
+
+
+    def _find_macro (self, name):
+        i = 0
+        for defn in self.macros:
+            if defn[0] == name:
+                return i
+            i = i + 1
+
+        return None
+
+
+    def _check_macro_definitions (self, definitions):
+        """Ensures that every element of 'definitions' is a valid macro
+        definition, ie. either a (name,value) 2-tuple or a (name,) tuple.  Do
+        nothing if all definitions are OK, raise TypeError otherwise.
+        """
+        for defn in definitions:
+            if not (type (defn) is TupleType and
+                    (len (defn) == 1 or
+                     (len (defn) == 2 and
+                      (type (defn[1]) is StringType or defn[1] is None))) and
+                    type (defn[0]) is StringType):
+                raise TypeError, \
+                      ("invalid macro definition '%s': " % defn) + \
+                      "must be tuple (string,), (string, string), or " + \
+                      "(string, None)"
+
+
+    # -- Bookkeeping methods -------------------------------------------
+
+    def define_macro (self, name, value=None):
+        """Define a preprocessor macro for all compilations driven by this
+        compiler object.  The optional parameter 'value' should be a
+        string; if it is not supplied, then the macro will be defined
+        without an explicit value and the exact outcome depends on the
+        compiler used (XXX true? does ANSI say anything about this?)
+        """
+        # Delete from the list of macro definitions/undefinitions if
+        # already there (so that this one will take precedence).
+        i = self._find_macro (name)
+        if i is not None:
+            del self.macros[i]
+
+        defn = (name, value)
+        self.macros.append (defn)
+
+
+    def undefine_macro (self, name):
+        """Undefine a preprocessor macro for all compilations driven by
+        this compiler object.  If the same macro is defined by
+        'define_macro()' and undefined by 'undefine_macro()' the last call
+        takes precedence (including multiple redefinitions or
+        undefinitions).  If the macro is redefined/undefined on a
+        per-compilation basis (ie. in the call to 'compile()'), then that
+        takes precedence.
+        """
+        # Delete from the list of macro definitions/undefinitions if
+        # already there (so that this one will take precedence).
+        i = self._find_macro (name)
+        if i is not None:
+            del self.macros[i]
+
+        undefn = (name,)
+        self.macros.append (undefn)
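# Editorial illustration (not part of the original file): the last
# call wins, so after
#   cc.define_macro('DEBUG', '1')
#   cc.undefine_macro('DEBUG')
# cc.macros ends with ('DEBUG',) -- a 1-tuple, i.e. an undefinition.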
+
+
+    def add_include_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        header files.  The compiler is instructed to search directories in
+        the order in which they are supplied by successive calls to
+        'add_include_dir()'.
+        """
+        self.include_dirs.append (dir)
+
+    def set_include_dirs (self, dirs):
+        """Set the list of directories that will be searched to 'dirs' (a
+        list of strings).  Overrides any preceding calls to
+        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
+        to the list passed to 'set_include_dirs()'.  This does not affect
+        any list of standard include directories that the compiler may
+        search by default.
+        """
+        self.include_dirs = copy (dirs)
+
+
+    def add_library (self, libname):
+        """Add 'libname' to the list of libraries that will be included in
+        all links driven by this compiler object.  Note that 'libname'
+        should *not* be the name of a file containing a library, but the
+        name of the library itself: the actual filename will be inferred by
+        the linker, the compiler, or the compiler class (depending on the
+        platform).
+
+        The linker will be instructed to link against libraries in the
+        order they were supplied to 'add_library()' and/or
+        'set_libraries()'.  It is perfectly valid to duplicate library
+        names; the linker will be instructed to link against libraries as
+        many times as they are mentioned.
+        """
+        self.libraries.append (libname)
+
+    def set_libraries (self, libnames):
+        """Set the list of libraries to be included in all links driven by
+        this compiler object to 'libnames' (a list of strings).  This does
+        not affect any standard system libraries that the linker may
+        include by default.
+        """
+        self.libraries = copy (libnames)
+
+
+    def add_library_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        libraries specified to 'add_library()' and 'set_libraries()'.  The
+        linker will be instructed to search for libraries in the order they
+        are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
+        """
+        self.library_dirs.append (dir)
+
+    def set_library_dirs (self, dirs):
+        """Set the list of library search directories to 'dirs' (a list of
+        strings).  This does not affect any standard library search path
+        that the linker may search by default.
+        """
+        self.library_dirs = copy (dirs)
+
+
+    def add_runtime_library_dir (self, dir):
+        """Add 'dir' to the list of directories that will be searched for
+        shared libraries at runtime.
+        """
+        self.runtime_library_dirs.append (dir)
+
+    def set_runtime_library_dirs (self, dirs):
+        """Set the list of directories to search for shared libraries at
+        runtime to 'dirs' (a list of strings).  This does not affect any
+        standard search path that the runtime linker may search by
+        default.
+        """
+        self.runtime_library_dirs = copy (dirs)
+
+
+    def add_link_object (self, object):
+        """Add 'object' to the list of object files (or analogues, such as
+        explicitly named library files or the output of "resource
+        compilers") to be included in every link driven by this compiler
+        object.
+        """
+        self.objects.append (object)
+
+    def set_link_objects (self, objects):
+        """Set the list of object files (or analogues) to be included in
+        every link to 'objects'.  This does not affect any standard object
+        files that the linker may include by default (such as system
+        libraries).
+        """
+        self.objects = copy (objects)
+
+
+    # -- Private utility methods ---------------------------------------
+    # (here for the convenience of subclasses)
+
+    def _fix_compile_args (self, output_dir, macros, include_dirs):
+        """Typecheck and fix-up some of the arguments to the 'compile()'
+        method, and return fixed-up values.  Specifically: if 'output_dir'
+        is None, replaces it with 'self.output_dir'; ensures that 'macros'
+        is a list, and augments it with 'self.macros'; ensures that
+        'include_dirs' is a list, and augments it with 'self.include_dirs'.
+        Guarantees that the returned values are of the correct type,
+        i.e. for 'output_dir' either string or None, and for 'macros' and
+        'include_dirs' either list or None.
+        """
+        if output_dir is None:
+            output_dir = self.output_dir
+        elif type (output_dir) is not StringType:
+            raise TypeError, "'output_dir' must be a string or None"
+
+        if macros is None:
+            macros = self.macros
+        elif type (macros) is ListType:
+            macros = macros + (self.macros or [])
+        else:
+            raise TypeError, \
+                  "'macros' (if supplied) must be a list of tuples"
+
+        if include_dirs is None:
+            include_dirs = self.include_dirs
+        elif type (include_dirs) in (ListType, TupleType):
+            include_dirs = list (include_dirs) + (self.include_dirs or [])
+        else:
+            raise TypeError, \
+                  "'include_dirs' (if supplied) must be a list of strings"
+
+        return (output_dir, macros, include_dirs)
+
+    # _fix_compile_args ()
+
+
+    def _prep_compile (self, sources, output_dir):
+        """Determine the list of object files corresponding to 'sources',
+        and figure out which ones really need to be recompiled.  Return a
+        list of all object files and a dictionary telling which source
+        files can be skipped.
+        """
+        # Get the list of expected output (object) files
+        objects = self.object_filenames (sources,
+                                         strip_dir=1,
+                                         output_dir=output_dir)
+
+        if self.force:
+            skip_source = {}            # rebuild everything
+            for source in sources:
+                skip_source[source] = 0
+        else:
+            # Figure out which source files we have to recompile according
+            # to a simplistic check -- we just compare the source and
+            # object file, no deep dependency checking involving header
+            # files.
+            skip_source = {}            # rebuild everything
+            for source in sources:      # no wait, rebuild nothing
+                skip_source[source] = 1
+
+            (n_sources, n_objects) = newer_pairwise (sources, objects)
+            for source in n_sources:    # no really, only rebuild what's
+                skip_source[source] = 0 # out-of-date
+
+        return (objects, skip_source)
+
+    # _prep_compile ()
+
+
+    def _fix_object_args (self, objects, output_dir):
+        """Typecheck and fix up some arguments supplied to various methods.
+        Specifically: ensure that 'objects' is a list; if output_dir is
+        None, replace with self.output_dir.  Return fixed versions of
+        'objects' and 'output_dir'.
+        """
+        if type (objects) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'objects' must be a list or tuple of strings"
+        objects = list (objects)
+
+        if output_dir is None:
+            output_dir = self.output_dir
+        elif type (output_dir) is not StringType:
+            raise TypeError, "'output_dir' must be a string or None"
+
+        return (objects, output_dir)
+
+
+    def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
+        """Typecheck and fix up some of the arguments supplied to the
+        'link_*' methods.  Specifically: ensure that all arguments are
+        lists, and augment them with their permanent versions
+        (eg. 'self.libraries' augments 'libraries').  Return a tuple with
+        fixed versions of all arguments.
+        """
+        if libraries is None:
+            libraries = self.libraries
+        elif type (libraries) in (ListType, TupleType):
+            libraries = list (libraries) + (self.libraries or [])
+        else:
+            raise TypeError, \
+                  "'libraries' (if supplied) must be a list of strings"
+
+        if library_dirs is None:
+            library_dirs = self.library_dirs
+        elif type (library_dirs) in (ListType, TupleType):
+            library_dirs = list (library_dirs) + (self.library_dirs or [])
+        else:
+            raise TypeError, \
+                  "'library_dirs' (if supplied) must be a list of strings"
+
+        if runtime_library_dirs is None:
+            runtime_library_dirs = self.runtime_library_dirs
+        elif type (runtime_library_dirs) in (ListType, TupleType):
+            runtime_library_dirs = (list (runtime_library_dirs) +
+                                    (self.runtime_library_dirs or []))
+        else:
+            raise TypeError, \
+                  "'runtime_library_dirs' (if supplied) " + \
+                  "must be a list of strings"
+
+        return (libraries, library_dirs, runtime_library_dirs)
+
+    # _fix_lib_args ()
+
+
+    def _need_link (self, objects, output_file):
+        """Return true if we need to relink the files listed in 'objects'
+        to recreate 'output_file'.
+        """
+        if self.force:
+            return 1
+        else:
+            if self.dry_run:
+                newer = newer_group (objects, output_file, missing='newer')
+            else:
+                newer = newer_group (objects, output_file)
+            return newer
+
+    # _need_link ()
+
+
+    # -- Worker methods ------------------------------------------------
+    # (must be implemented by subclasses)
+
+    def preprocess (self,
+                    source,
+                    output_file=None,
+                    macros=None,
+                    include_dirs=None,
+                    extra_preargs=None,
+                    extra_postargs=None):
+        """Preprocess a single C/C++ source file, named in 'source'.
+        Output will be written to file named 'output_file', or stdout if
+        'output_file' not supplied.  'macros' is a list of macro
+        definitions as for 'compile()', which will augment the macros set
+        with 'define_macro()' and 'undefine_macro()'.  'include_dirs' is a
+        list of directory names that will be added to the default list.
+
+        Raises PreprocessError on failure.
+        """
+        pass
+
+    def compile (self,
+                 sources,
+                 output_dir=None,
+                 macros=None,
+                 include_dirs=None,
+                 debug=0,
+                 extra_preargs=None,
+                 extra_postargs=None):
+        """Compile one or more source files.  'sources' must be a list of
+        filenames, most likely C/C++ files, but in reality anything that
+        can be handled by a particular compiler and compiler class
+        (eg. MSVCCompiler can handle resource files in 'sources').  Return
+        a list of object filenames, one per source filename in 'sources'.
+        Depending on the implementation, not all source files will
+        necessarily be compiled, but all corresponding object filenames
+        will be returned.
+
+        If 'output_dir' is given, object files will be put under it, while
+        retaining their original path component.  That is, "foo/bar.c"
+        normally compiles to "foo/bar.o" (for a Unix implementation); if
+        'output_dir' is "build", then it would compile to
+        "build/foo/bar.o".
+
+        'macros', if given, must be a list of macro definitions.  A macro
+        definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
+        The former defines a macro; if the value is None, the macro is
+        defined without an explicit value.  The 1-tuple case undefines a
+        macro.  Later definitions/redefinitions/undefinitions take
+        precedence.
+
+        'include_dirs', if given, must be a list of strings, the
+        directories to add to the default include file search path for this
+        compilation only.
+
+        'debug' is a boolean; if true, the compiler will be instructed to
+        output debug symbols in (or alongside) the object file(s).
+
+        'extra_preargs' and 'extra_postargs' are implementation-dependent.
+        On platforms that have the notion of a command-line (e.g. Unix,
+        DOS/Windows), they are most likely lists of strings: extra
+        command-line arguments to prepend/append to the compiler command
+        line.  On other platforms, consult the implementation class
+        documentation.  In any event, they are intended as an escape hatch
+        for those occasions when the abstract compiler framework doesn't
+        cut the mustard.
+
+        Raises CompileError on failure.
+        """
+        pass
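# Editorial illustration (not part of the original file) of the
# 'macros' format described above (all names hypothetical):
#   cc.compile(['src/spam.c'],
#              output_dir='build',
#              macros=[('NDEBUG', None),      # define, no explicit value
#                      ('VERSION', '"1.0"'),  # define with a value
#                      ('DEBUG',)],           # undefine
#              include_dirs=['include'])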
+
+
+    def create_static_lib (self,
+                           objects,
+                           output_libname,
+                           output_dir=None,
+                           debug=0):
+        """Link a bunch of stuff together to create a static library file.
+        The "bunch of stuff" consists of the list of object files supplied
+        as 'objects', the extra object files supplied to
+        'add_link_object()' and/or 'set_link_objects()', the libraries
+        supplied to 'add_library()' and/or 'set_libraries()', and the
+        libraries supplied as 'libraries' (if any).
+
+        'output_libname' should be a library name, not a filename; the
+        filename will be inferred from the library name.  'output_dir' is
+        the directory where the library file will be put.
+
+        'debug' is a boolean; if true, debugging information will be
+        included in the library (note that on most platforms, it is the
+        compile step where this matters: the 'debug' flag is included here
+        just for consistency).
+
+        Raises LibError on failure.
+        """
+        pass
+
+
+    # values for target_desc parameter in link()
+    SHARED_OBJECT = "shared_object"
+    SHARED_LIBRARY = "shared_library"
+    EXECUTABLE = "executable"
+
+    def link (self,
+              target_desc,
+              objects,
+              output_filename,
+              output_dir=None,
+              libraries=None,
+              library_dirs=None,
+              runtime_library_dirs=None,
+              export_symbols=None,
+              debug=0,
+              extra_preargs=None,
+              extra_postargs=None,
+              build_temp=None):
+        """Link a bunch of stuff together to create an executable or
+        shared library file.
+
+        The "bunch of stuff" consists of the list of object files supplied
+        as 'objects'.  'output_filename' should be a filename.  If
+        'output_dir' is supplied, 'output_filename' is relative to it
+        (i.e. 'output_filename' can provide directory components if
+        needed).
+
+        'libraries' is a list of libraries to link against.  These are
+        library names, not filenames, since they're translated into
+        filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
+        on Unix and "foo.lib" on DOS/Windows).  However, they can include a
+        directory component, which means the linker will look in that
+        specific directory rather than searching all the normal locations.
+
+        'library_dirs', if supplied, should be a list of directories to
+        search for libraries that were specified as bare library names
+        (ie. no directory component).  These are on top of the system
+        default and those supplied to 'add_library_dir()' and/or
+        'set_library_dirs()'.  'runtime_library_dirs' is a list of
+        directories that will be embedded into the shared library and used
+        to search for other shared libraries that *it* depends on at
+        run-time.  (This may only be relevant on Unix.)
+
+        'export_symbols' is a list of symbols that the shared library will
+        export.  (This appears to be relevant only on Windows.)
+
+        'debug' is as for 'compile()' and 'create_static_lib()', with the
+        slight distinction that it actually matters on most platforms (as
+        opposed to 'create_static_lib()', which includes a 'debug' flag
+        mostly for form's sake).
+
+        'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
+        of course that they supply command-line arguments for the
+        particular linker being used).
+
+        Raises LinkError on failure.
+        """
+        raise NotImplementedError
+
+
+    # Old 'link_*()' methods, rewritten to use the new 'link()' method.
+
+    def link_shared_lib (self,
+                         objects,
+                         output_libname,
+                         output_dir=None,
+                         libraries=None,
+                         library_dirs=None,
+                         runtime_library_dirs=None,
+                         export_symbols=None,
+                         debug=0,
+                         extra_preargs=None,
+                         extra_postargs=None,
+                         build_temp=None):
+        self.link(CCompiler.SHARED_LIBRARY, objects,
+                  self.library_filename(output_libname, lib_type='shared'),
+                  output_dir,
+                  libraries, library_dirs, runtime_library_dirs,
+                  export_symbols, debug,
+                  extra_preargs, extra_postargs, build_temp)
+
+
+    def link_shared_object (self,
+                            objects,
+                            output_filename,
+                            output_dir=None,
+                            libraries=None,
+                            library_dirs=None,
+                            runtime_library_dirs=None,
+                            export_symbols=None,
+                            debug=0,
+                            extra_preargs=None,
+                            extra_postargs=None,
+                            build_temp=None):
+        self.link(CCompiler.SHARED_OBJECT, objects,
+                  output_filename, output_dir,
+                  libraries, library_dirs, runtime_library_dirs,
+                  export_symbols, debug,
+                  extra_preargs, extra_postargs, build_temp)
+
+
+    def link_executable (self,
+                         objects,
+                         output_progname,
+                         output_dir=None,
+                         libraries=None,
+                         library_dirs=None,
+                         runtime_library_dirs=None,
+                         debug=0,
+                         extra_preargs=None,
+                         extra_postargs=None):
+        self.link(CCompiler.EXECUTABLE, objects,
+                  self.executable_filename(output_progname), output_dir,
+                  libraries, library_dirs, runtime_library_dirs, None,
+                  debug, extra_preargs, extra_postargs, None)
+
+
+    # -- Miscellaneous methods -----------------------------------------
+    # These are all used by the 'gen_lib_options()' function; there is
+    # no appropriate default implementation so subclasses should
+    # implement all of these.
+
+    def library_dir_option (self, dir):
+        """Return the compiler option to add 'dir' to the list of
+        directories searched for libraries.
+        """
+        raise NotImplementedError
+
+    def runtime_library_dir_option (self, dir):
+        """Return the compiler option to add 'dir' to the list of
+        directories searched for runtime libraries.
+        """
+        raise NotImplementedError
+
+    def library_option (self, lib):
+        """Return the compiler option to add 'dir' to the list of libraries
+        linked into the shared library or executable.
+        """
+        raise NotImplementedError
+
+    def find_library_file (self, dirs, lib, debug=0):
+        """Search the specified list of directories for a static or shared
+        library file 'lib' and return the full path to that file.  If
+        'debug' true, look for a debugging version (if that makes sense on
+        the current platform).  Return None if 'lib' wasn't found in any of
+        the specified directories.
+        """
+        raise NotImplementedError
+
+
+    # -- Filename generation methods -----------------------------------
+
+    # The default implementation of the filename generating methods are
+    # prejudiced towards the Unix/DOS/Windows view of the world:
+    #   * object files are named by replacing the source file extension
+    #     (eg. .c/.cpp -> .o/.obj)
+    #   * library files (shared or static) are named by plugging the
+    #     library name and extension into a format string, eg.
+    #     "lib%s.%s" % (lib_name, ".a") for Unix static libraries
+    #   * executables are named by appending an extension (possibly
+    #     empty) to the program name: eg. progname + ".exe" for
+    #     Windows
+    #
+    # To reduce redundant code, these methods expect to find
+    # several attributes in the current object (presumably defined
+    # as class attributes):
+    #   * src_extensions -
+    #     list of C/C++ source file extensions, eg. ['.c', '.cpp']
+    #   * obj_extension -
+    #     object file extension, eg. '.o' or '.obj'
+    #   * static_lib_extension -
+    #     extension for static library files, eg. '.a' or '.lib'
+    #   * shared_lib_extension -
+    #     extension for shared library/object files, eg. '.so', '.dll'
+    #   * static_lib_format -
+    #     format string for generating static library filenames,
+    #     eg. 'lib%s.%s' or '%s.%s'
+    #   * shared_lib_format
+    #     format string for generating shared library filenames
+    #     (probably same as static_lib_format, since the extension
+    #     is one of the intended parameters to the format string)
+    #   * exe_extension -
+    #     extension for executable files, eg. '' or '.exe'
+
+    def object_filenames (self,
+                          source_filenames,
+                          strip_dir=0,
+                          output_dir=''):
+        if output_dir is None: output_dir = ''
+        obj_names = []
+        for src_name in source_filenames:
+            (base, ext) = os.path.splitext (src_name)
+            if ext not in self.src_extensions:
+                raise UnknownFileError, \
+                      "unknown file type '%s' (from '%s')" % \
+                      (ext, src_name)
+            if strip_dir:
+                base = os.path.basename (base)
+            obj_names.append (os.path.join (output_dir,
+                                            base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+
+    def shared_object_filename (self,
+                                basename,
+                                strip_dir=0,
+                                output_dir=''):
+        if output_dir is None: output_dir = ''
+        if strip_dir:
+            basename = os.path.basename (basename)
+        return os.path.join (output_dir, basename + self.shared_lib_extension)
+
+    def executable_filename (self,
+                                basename,
+                                strip_dir=0,
+                                output_dir=''):
+        if output_dir is None: output_dir = ''
+        if strip_dir:
+            basename = os.path.basename (basename)
+        return os.path.join(output_dir, basename + (self.exe_extension or ''))
+
+    def library_filename (self,
+                          libname,
+                          lib_type='static',     # or 'shared'
+                          strip_dir=0,
+                          output_dir=''):
+
+        if output_dir is None: output_dir = ''
+        if lib_type not in ("static","shared","dylib"):
+            raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
+        fmt = getattr (self, lib_type + "_lib_format")
+        ext = getattr (self, lib_type + "_lib_extension")
+
+        (dir, base) = os.path.split (libname)
+        filename = fmt % (base, ext)
+        if strip_dir:
+            dir = ''
+
+        return os.path.join (output_dir, dir, filename)
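# Editorial illustration (not part of the original file); assuming
# Unix-style attribute values (src_extensions containing '.c',
# obj_extension='.o', static_lib_format='lib%s%s',
# static_lib_extension='.a'):
#   object_filenames(['foo/bar.c'])  ->  ['foo/bar.o']
#   library_filename('spam')         ->  'libspam.a'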
+
+
+    # -- Utility methods -----------------------------------------------
+
+    def announce (self, msg, level=1):
+        if self.verbose >= level:
+            print msg
+
+    def debug_print (self, msg):
+        from distutils.core import DEBUG
+        if DEBUG:
+            print msg
+
+    def warn (self, msg):
+        sys.stderr.write ("warning: %s\n" % msg)
+
+    def execute (self, func, args, msg=None, level=1):
+        execute(func, args, msg, self.verbose >= level, self.dry_run)
+
+    def spawn (self, cmd):
+        spawn (cmd, verbose=self.verbose, dry_run=self.dry_run)
+
+    def move_file (self, src, dst):
+        return move_file (src, dst, verbose=self.verbose, dry_run=self.dry_run)
+
+    def mkpath (self, name, mode=0777):
+        mkpath (name, mode, self.verbose, self.dry_run)
+
+
+# class CCompiler
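# Editorial illustration (not part of the original file): typical
# end-to-end use of the interface defined above, with hypothetical
# paths and names:
#
#   from distutils.ccompiler import new_compiler
#   cc = new_compiler(verbose=1)
#   cc.add_include_dir('include')
#   cc.define_macro('NDEBUG')
#   objs = cc.compile(['src/spam.c'], output_dir='build')
#   cc.link_shared_object(objs, 'build/spam.so', libraries=['m'])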
+
+
+# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
+# type for that platform. Keys are interpreted as re match
+# patterns. Order is important; platform mappings are preferred over
+# OS names.
+_default_compilers = (
+
+    # Platform string mappings
+
+    # on a cygwin built python we can use gcc like an ordinary UNIXish
+    # compiler
+    ('cygwin.*', 'unix'),
+
+    # OS name mappings
+    ('posix', 'unix'),
+    ('nt', 'msvc'),
+    ('mac', 'mwerks'),
+
+    )
+
+def get_default_compiler(osname=None, platform=None):
+
+    """ Determine the default compiler to use for the given platform.
+
+        osname should be one of the standard Python OS names (i.e. the
+        ones returned by os.name) and platform the common value
+        returned by sys.platform for the platform in question.
+
+        The default values are os.name and sys.platform in case the
+        parameters are not given.
+
+    """
+    if osname is None:
+        osname = os.name
+    if platform is None:
+        platform = sys.platform
+    for pattern, compiler in _default_compilers:
+        if re.match(pattern, platform) is not None or \
+           re.match(pattern, osname) is not None:
+            return compiler
+    # Default to Unix compiler
+    return 'unix'
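# Editorial illustration (not part of the original file):
#   get_default_compiler('posix', 'linux2')  ->  'unix'
#   get_default_compiler('nt', 'win32')      ->  'msvc'
#   get_default_compiler('nt', 'cygwin')     ->  'unix'  (the 'cygwin.*'
#                                                 pattern is tried first)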
+
+# Map compiler types to (module_name, class_name) pairs -- ie. where to
+# find the code that implements an interface to this compiler.  (The module
+# is assumed to be in the 'distutils' package.)
+compiler_class = { 'unix':    ('unixccompiler', 'UnixCCompiler',
+                               "standard UNIX-style compiler"),
+                   'msvc':    ('msvccompiler', 'MSVCCompiler',
+                               "Microsoft Visual C++"),
+                   'cygwin':  ('cygwinccompiler', 'CygwinCCompiler',
+                               "Cygwin port of GNU C Compiler for Win32"),
+                   'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
+                               "Mingw32 port of GNU C Compiler for Win32"),
+                   'bcpp':    ('bcppcompiler', 'BCPPCompiler',
+                               "Borland C++ Compiler"),
+                   'mwerks':  ('mwerkscompiler', 'MWerksCompiler',
+                               "MetroWerks CodeWarrior"),
+                 }
+
+def show_compilers():
+    """Print list of available compilers (used by the "--help-compiler"
+    options to "build", "build_ext", "build_clib").
+    """
+    # XXX this "knows" that the compiler option it's describing is
+    # "--compiler", which just happens to be the case for the three
+    # commands that use it.
+    from distutils.fancy_getopt import FancyGetopt
+    compilers = []
+    for compiler in compiler_class.keys():
+        compilers.append(("compiler="+compiler, None,
+                          compiler_class[compiler][2]))
+    compilers.sort()
+    pretty_printer = FancyGetopt(compilers)
+    pretty_printer.print_help("List of available compilers:")
+
+
+def new_compiler (plat=None,
+                  compiler=None,
+                  verbose=0,
+                  dry_run=0,
+                  force=0):
+    """Generate an instance of some CCompiler subclass for the supplied
+    platform/compiler combination.  'plat' defaults to 'os.name'
+    (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
+    for that platform.  Currently only 'posix' and 'nt' are supported, and
+    the default compilers are "traditional Unix interface" (UnixCCompiler
+    class) and Visual C++ (MSVCCompiler class).  Note that it's perfectly
+    possible to ask for a Unix compiler object under Windows, and a
+    Microsoft compiler object under Unix -- if you supply a value for
+    'compiler', 'plat' is ignored.
+    """
+    if plat is None:
+        plat = os.name
+
+    try:
+        if compiler is None:
+            compiler = get_default_compiler(plat)
+
+        (module_name, class_name, long_description) = compiler_class[compiler]
+    except KeyError:
+        msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+        if compiler is not None:
+            msg = msg + " with '%s' compiler" % compiler
+        raise DistutilsPlatformError, msg
+
+    try:
+        module_name = "distutils." + module_name
+        __import__ (module_name)
+        module = sys.modules[module_name]
+        klass = vars(module)[class_name]
+    except ImportError:
+        raise DistutilsModuleError, \
+              "can't compile C/C++ code: unable to load module '%s'" % \
+              module_name
+    except KeyError:
+        raise DistutilsModuleError, \
+              ("can't compile C/C++ code: unable to find class '%s' " +
+               "in module '%s'") % (class_name, module_name)
+
+    return klass (verbose, dry_run, force)
+
+
+def gen_preprocess_options (macros, include_dirs):
+    """Generate C pre-processor options (-D, -U, -I) as used by at least
+    two types of compilers: the typical Unix compiler and Visual C++.
+    'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
+    means undefine (-U) macro 'name', and (name,value) means define (-D)
+    macro 'name' to 'value'.  'include_dirs' is just a list of directory
+    names to be added to the header file search path (-I).  Returns a list
+    of command-line options suitable for either Unix compilers or Visual
+    C++.
+    """
+    # XXX it would be nice (mainly aesthetic, and so we don't generate
+    # stupid-looking command lines) to go over 'macros' and eliminate
+    # redundant definitions/undefinitions (ie. ensure that only the
+    # latest mention of a particular macro winds up on the command
+    # line).  I don't think it's essential, though, since most (all?)
+    # Unix C compilers only pay attention to the latest -D or -U
+    # mention of a macro on their command line.  Similar situation for
+    # 'include_dirs'.  I'm punting on both for now.  Anyways, weeding out
+    # redundancies like this should probably be the province of
+    # CCompiler, since the data structures used are inherited from it
+    # and therefore common to all CCompiler classes.
+
+    pp_opts = []
+    for macro in macros:
+
+        if not (type (macro) is TupleType and
+                1 <= len (macro) <= 2):
+            raise TypeError, \
+                  ("bad macro definition '%s': " +
+                   "each element of 'macros' list must be a 1- or 2-tuple") % \
+                  macro
+
+        if len (macro) == 1:        # undefine this macro
+            pp_opts.append ("-U%s" % macro[0])
+        elif len (macro) == 2:
+            if macro[1] is None:    # define with no explicit value
+                pp_opts.append ("-D%s" % macro[0])
+            else:
+                # XXX *don't* need to be clever about quoting the
+                # macro value here, because we're going to avoid the
+                # shell at all costs when we spawn the command!
+                pp_opts.append ("-D%s=%s" % macro)
+
+    for dir in include_dirs:
+        pp_opts.append ("-I%s" % dir)
+
+    return pp_opts
+
+# gen_preprocess_options ()
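# Editorial illustration (not part of the original file):
#   gen_preprocess_options([('NDEBUG', None), ('VERSION', '1.0'), ('FOO',)],
#                          ['include'])
#   ->  ['-DNDEBUG', '-DVERSION=1.0', '-UFOO', '-Iinclude']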
+
+
+def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
+    """Generate linker options for searching library directories and
+    linking with specific libraries.  'libraries' and 'library_dirs' are,
+    respectively, lists of library names (not filenames!) and search
+    directories.  Returns a list of command-line options suitable for use
+    with some compiler (depending on the two format strings passed in).
+    """
+    lib_opts = []
+
+    for dir in library_dirs:
+        lib_opts.append (compiler.library_dir_option (dir))
+
+    for dir in runtime_library_dirs:
+        lib_opts.append (compiler.runtime_library_dir_option (dir))
+
+    # XXX it's important that we *not* remove redundant library mentions!
+    # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
+    # resolve all symbols.  I just hope we never have to say "-lfoo obj.o
+    # -lbar" to get things to work -- that's certainly a possibility, but a
+    # pretty nasty way to arrange your C code.
+
+    for lib in libraries:
+        (lib_dir, lib_name) = os.path.split (lib)
+        if lib_dir:
+            lib_file = compiler.find_library_file ([lib_dir], lib_name)
+            if lib_file:
+                lib_opts.append (lib_file)
+            else:
+                compiler.warn ("no library file corresponding to "
+                               "'%s' found (skipping)" % lib)
+        else:
+            lib_opts.append (compiler.library_option (lib))
+
+    return lib_opts
+
+# gen_lib_options ()
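# Editorial illustration (not part of the original file); with a
# Unix-flavoured compiler object whose library_dir_option() yields
# '-Ldir' and whose library_option() yields '-llib':
#   gen_lib_options(cc, ['/opt/lib'], [], ['m', 'foo'])
#   ->  ['-L/opt/lib', '-lm', '-lfoo']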
diff --git a/lib-python/2.2/distutils/cmd.py b/lib-python/2.2/distutils/cmd.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/cmd.py
@@ -0,0 +1,486 @@
+"""distutils.cmd
+
+Provides the Command class, the base class for the command classes
+in the distutils.command package.
+"""
+
+# created 2000/04/03, Greg Ward
+# (extricated from core.py; actually dates back to the beginning)
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.errors import *
+from distutils import util, dir_util, file_util, archive_util, dep_util
+
+
+class Command:
+    """Abstract base class for defining command classes, the "worker bees"
+    of the Distutils.  A useful analogy for command classes is to think of
+    them as subroutines with local variables called "options".  The options
+    are "declared" in 'initialize_options()' and "defined" (given their
+    final values, aka "finalized") in 'finalize_options()', both of which
+    must be defined by every command class.  The distinction between the
+    two is necessary because option values might come from the outside
+    world (command line, config file, ...), and any options dependent on
+    other options must be computed *after* these outside influences have
+    been processed -- hence 'finalize_options()'.  The "body" of the
+    subroutine, where it does all its work based on the values of its
+    options, is the 'run()' method, which must also be implemented by every
+    command class.
+    """
+
+    # 'sub_commands' formalizes the notion of a "family" of commands,
+    # eg. "install" as the parent with sub-commands "install_lib",
+    # "install_headers", etc.  The parent of a family of commands
+    # defines 'sub_commands' as a class attribute; it's a list of
+    #    (command_name : string, predicate : unbound_method | string | None)
+    # tuples, where 'predicate' is a method of the parent command that
+    # determines whether the corresponding command is applicable in the
+    # current situation.  (Eg. "install_headers" is only applicable if
+    # we have any C header files to install.)  If 'predicate' is None,
+    # that command is always applicable.
+    #
+    # 'sub_commands' is usually defined at the *end* of a class, because
+    # predicates can be unbound methods, so they must already have been
+    # defined.  The canonical example is the "install" command.
+    sub_commands = []
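# Editorial illustration (not part of the original file): the minimal
# shape of a concrete command class, following the protocol described
# in the class docstring above (all names hypothetical):
#
#   class frob (Command):
#       description = "frobnicate the distribution"
#       user_options = [('level=', 'l', "frobnication level")]
#
#       def initialize_options (self):
#           self.level = None                # declare
#       def finalize_options (self):
#           if self.level is None:           # apply defaults last
#               self.level = 1
#       def run (self):
#           self.announce("frobbing at level %s" % self.level)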
+
+
+    # -- Creation/initialization methods -------------------------------
+
+    def __init__ (self, dist):
+        """Create and initialize a new Command object.  Most importantly,
+        invokes the 'initialize_options()' method, which is the real
+        initializer and depends on the actual command being
+        instantiated.
+        """
+        # late import because of mutual dependence between these classes
+        from distutils.dist import Distribution
+
+        if not isinstance(dist, Distribution):
+            raise TypeError, "dist must be a Distribution instance"
+        if self.__class__ is Command:
+            raise RuntimeError, "Command is an abstract class"
+
+        self.distribution = dist
+        self.initialize_options()
+
+        # Per-command versions of the global flags, so that the user can
+        # customize Distutils' behaviour command-by-command and let some
+        # commands fallback on the Distribution's behaviour.  None means
+        # "not defined, check self.distribution's copy", while 0 or 1 mean
+        # false and true (duh).  Note that this means figuring out the real
+        # value of each flag is a touch complicated -- hence "self.verbose"
+        # (etc.) will be handled by __getattr__, below.
+        self._verbose = None
+        self._dry_run = None
+
+        # Some commands define a 'self.force' option to ignore file
+        # timestamps, but methods defined *here* assume that
+        # 'self.force' exists for all commands.  So define it here
+        # just to be safe.
+        self.force = None
+
+        # The 'help' flag is just used for command-line parsing, so
+        # none of that complicated bureaucracy is needed.
+        self.help = 0
+
+        # 'finalized' records whether or not 'finalize_options()' has been
+        # called.  'finalize_options()' itself should not pay attention to
+        # this flag: it is the business of 'ensure_finalized()', which
+        # always calls 'finalize_options()', to respect/update it.
+        self.finalized = 0
+
+    # __init__ ()
+
+
+    def __getattr__ (self, attr):
+        if attr in ('verbose', 'dry_run'):
+            myval = getattr(self, "_" + attr)
+            if myval is None:
+                return getattr(self.distribution, attr)
+            else:
+                return myval
+        else:
+            raise AttributeError, attr
+
+
+    def ensure_finalized (self):
+        if not self.finalized:
+            self.finalize_options()
+        self.finalized = 1
+
+
+    # Subclasses must define:
+    #   initialize_options()
+    #     provide default values for all options; may be customized by
+    #     setup script, by options from config file(s), or by command-line
+    #     options
+    #   finalize_options()
+    #     decide on the final values for all options; this is called
+    #     after all possible intervention from the outside world
+    #     (command-line, option file, etc.) has been processed
+    #   run()
+    #     run the command: do whatever it is we're here to do,
+    #     controlled by the command's various option values
+
+    def initialize_options (self):
+        """Set default values for all the options that this command
+        supports.  Note that these defaults may be overridden by other
+        commands, by the setup script, by config files, or by the
+        command-line.  Thus, this is not the place to code dependencies
+        between options; generally, 'initialize_options()' implementations
+        are just a bunch of "self.foo = None" assignments.
+
+        This method must be implemented by all command classes.
+        """
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+    def finalize_options (self):
+        """Set final values for all the options that this command supports.
+        This is always called as late as possible, ie.  after any option
+        assignments from the command-line or from other commands have been
+        done.  Thus, this is the place to code option dependencies: if
+        'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
+        long as 'foo' still has the same value it was assigned in
+        'initialize_options()'.
+
+        This method must be implemented by all command classes.
+        """
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+
+    def dump_options (self, header=None, indent=""):
+        from distutils.fancy_getopt import longopt_xlate
+        if header is None:
+            header = "command options for '%s':" % self.get_command_name()
+        print indent + header
+        indent = indent + "  "
+        for (option, _, _) in self.user_options:
+            option = string.translate(option, longopt_xlate)
+            if option[-1] == "=":
+                option = option[:-1]
+            value = getattr(self, option)
+            print indent + "%s = %s" % (option, value)
+
+
+    def run (self):
+        """A command's raison d'etre: carry out the action it exists to
+        perform, controlled by the options initialized in
+        'initialize_options()', customized by other commands, the setup
+        script, the command-line, and config files, and finalized in
+        'finalize_options()'.  All terminal output and filesystem
+        interaction should be done by 'run()'.
+
+        This method must be implemented by all command classes.
+        """
+
+        raise RuntimeError, \
+              "abstract method -- subclass %s must override" % self.__class__
+
+    def announce (self, msg, level=1):
+        """If the current verbosity level is of greater than or equal to
+        'level' print 'msg' to stdout.
+        """
+        if self.verbose >= level:
+            print msg
+            sys.stdout.flush()
+
+    def debug_print (self, msg):
+        """Print 'msg' to stdout if the global DEBUG (taken from the
+        DISTUTILS_DEBUG environment variable) flag is true.
+        """
+        from distutils.core import DEBUG
+        if DEBUG:
+            print msg
+            sys.stdout.flush()
+
+
+
+    # -- Option validation methods -------------------------------------
+    # (these are very handy in writing the 'finalize_options()' method)
+    #
+    # NB. the general philosophy here is to ensure that a particular option
+    # value meets certain type and value constraints.  If not, we try to
+    # force it into conformance (eg. if we expect a list but have a string,
+    # split the string on comma and/or whitespace).  If we can't force the
+    # option into conformance, raise DistutilsOptionError.  Thus, command
+    # classes need do nothing more than (eg.)
+    #   self.ensure_string_list('foo')
+    # and they can be guaranteed that thereafter, self.foo will be
+    # a list of strings.
+
+    def _ensure_stringlike (self, option, what, default=None):
+        val = getattr(self, option)
+        if val is None:
+            setattr(self, option, default)
+            return default
+        elif type(val) is not StringType:
+            raise DistutilsOptionError, \
+                  "'%s' must be a %s (got `%s`)" % (option, what, val)
+        return val
+
+    def ensure_string (self, option, default=None):
+        """Ensure that 'option' is a string; if not defined, set it to
+        'default'.
+        """
+        self._ensure_stringlike(option, "string", default)
+
+    def ensure_string_list (self, option):
+        """Ensure that 'option' is a list of strings.  If 'option' is
+        currently a string, we split it either on /,\s*/ or /\s+/, so
+        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
+        ["foo", "bar", "baz"].
+        """
+        val = getattr(self, option)
+        if val is None:
+            return
+        elif type(val) is StringType:
+            setattr(self, option, re.split(r',\s*|\s+', val))
+        else:
+            if type(val) is ListType:
+                types = map(type, val)
+                ok = (types == [StringType] * len(val))
+            else:
+                ok = 0
+
+            if not ok:
+                raise DistutilsOptionError, \
+                      "'%s' must be a list of strings (got %s)" % \
+                      (option, `val`)
+
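# Editorial illustration (not part of the original file): after
# self.foo = "foo, bar baz", calling self.ensure_string_list('foo')
# leaves self.foo == ['foo', 'bar', 'baz'].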
+    def _ensure_tested_string (self, option, tester,
+                               what, error_fmt, default=None):
+        val = self._ensure_stringlike(option, what, default)
+        if val is not None and not tester(val):
+            raise DistutilsOptionError, \
+                  ("error in '%s' option: " + error_fmt) % (option, val)
+
+    def ensure_filename (self, option):
+        """Ensure that 'option' is the name of an existing file."""
+        self._ensure_tested_string(option, os.path.isfile,
+                                   "filename",
+                                   "'%s' does not exist or is not a file")
+
+    def ensure_dirname (self, option):
+        self._ensure_tested_string(option, os.path.isdir,
+                                   "directory name",
+                                   "'%s' does not exist or is not a directory")
+
+
+    # -- Convenience methods for commands ------------------------------
+
+    def get_command_name (self):
+        if hasattr(self, 'command_name'):
+            return self.command_name
+        else:
+            return self.__class__.__name__
+
+
+    def set_undefined_options (self, src_cmd, *option_pairs):
+        """Set the values of any "undefined" options from corresponding
+        option values in some other command object.  "Undefined" here means
+        "is None", which is the convention used to indicate that an option
+        has not been changed between 'initialize_options()' and
+        'finalize_options()'.  Usually called from 'finalize_options()' for
+        options that depend on some other command rather than another
+        option of the same command.  'src_cmd' is the other command from
+        which option values will be taken (a command object will be created
+        for it if necessary); the remaining arguments are
+        '(src_option,dst_option)' tuples which mean "take the value of
+        'src_option' in the 'src_cmd' command object, and copy it to
+        'dst_option' in the current command object".
+        """
+
+        # Option_pairs: list of (src_option, dst_option) tuples
+
+        src_cmd_obj = self.distribution.get_command_obj(src_cmd)
+        src_cmd_obj.ensure_finalized()
+        for (src_option, dst_option) in option_pairs:
+            if getattr(self, dst_option) is None:
+                setattr(self, dst_option,
+                        getattr(src_cmd_obj, src_option))
+
+
+    def get_finalized_command (self, command, create=1):
+        """Wrapper around Distribution's 'get_command_obj()' method: find
+        (create if necessary and 'create' is true) the command object for
+        'command', call its 'ensure_finalized()' method, and return the
+        finalized command object.
+        """
+        cmd_obj = self.distribution.get_command_obj(command, create)
+        cmd_obj.ensure_finalized()
+        return cmd_obj
+
+    # XXX rename to 'get_reinitialized_command()'? (should do the
+    # same in dist.py, if so)
+    def reinitialize_command (self, command, reinit_subcommands=0):
+        return self.distribution.reinitialize_command(
+            command, reinit_subcommands)
+
+    def run_command (self, command):
+        """Run some other command: uses the 'run_command()' method of
+        Distribution, which creates and finalizes the command object if
+        necessary and then invokes its 'run()' method.
+        """
+        self.distribution.run_command(command)
+
+
+    def get_sub_commands (self):
+        """Determine the sub-commands that are relevant in the current
+        distribution (ie., that need to be run).  This is based on the
+        'sub_commands' class attribute: each tuple in that list may include
+        a method that we call to determine if the subcommand needs to be
+        run for the current distribution.  Return a list of command names.
+        """
+        commands = []
+        for (cmd_name, method) in self.sub_commands:
+            if method is None or method(self):
+                commands.append(cmd_name)
+        return commands
+
+
+    # -- External world manipulation -----------------------------------
+
+    def warn (self, msg):
+        sys.stderr.write("warning: %s: %s\n" %
+                         (self.get_command_name(), msg))
+
+
+    def execute (self, func, args, msg=None, level=1):
+        util.execute(func, args, msg, self.verbose >= level, self.dry_run)
+
+
+    def mkpath (self, name, mode=0777):
+        dir_util.mkpath(name, mode,
+                        self.verbose, self.dry_run)
+
+
+    def copy_file (self, infile, outfile,
+                   preserve_mode=1, preserve_times=1, link=None, level=1):
+        """Copy a file respecting verbose, dry-run and force flags.  (The
+        former two default to whatever is in the Distribution object, and
+        the latter defaults to false for commands that don't define it.)"""
+
+        return file_util.copy_file(
+            infile, outfile,
+            preserve_mode, preserve_times,
+            not self.force,
+            link,
+            self.verbose >= level,
+            self.dry_run)
+
+
+    def copy_tree (self, infile, outfile,
+                   preserve_mode=1, preserve_times=1, preserve_symlinks=0,
+                   level=1):
+        """Copy an entire directory tree respecting verbose, dry-run,
+        and force flags.
+        """
+        return dir_util.copy_tree(
+            infile, outfile,
+            preserve_mode, preserve_times, preserve_symlinks,
+            not self.force,
+            self.verbose >= level,
+            self.dry_run)
+
+
+    def move_file (self, src, dst, level=1):
+        """Move a file respecting verbose and dry-run flags."""
+        return file_util.move_file(src, dst,
+                                   self.verbose >= level,
+                                   self.dry_run)
+
+
+    def spawn (self, cmd, search_path=1, level=1):
+        """Spawn an external command respecting verbose and dry-run flags."""
+        from distutils.spawn import spawn
+        spawn(cmd, search_path,
+              self.verbose >= level,
+              self.dry_run)
+
+
+    def make_archive (self, base_name, format,
+                      root_dir=None, base_dir=None):
+        return archive_util.make_archive(
+            base_name, format, root_dir, base_dir,
+            self.verbose, self.dry_run)
+
+
+    def make_file (self, infiles, outfile, func, args,
+                   exec_msg=None, skip_msg=None, level=1):
+        """Special case of 'execute()' for operations that process one or
+        more input files and generate one output file.  Works just like
+        'execute()', except the operation is skipped and a different
+        message is printed if 'outfile' already exists and is newer than
+        all files listed in 'infiles'.  If the command defines
+        'self.force' and it is true, the action is run unconditionally,
+        with no timestamp checks.
+        """
+        if exec_msg is None:
+            exec_msg = "generating %s from %s" % \
+                       (outfile, string.join(infiles, ', '))
+        if skip_msg is None:
+            skip_msg = "skipping %s (inputs unchanged)" % outfile
+
+
+        # Allow 'infiles' to be a single string
+        if type(infiles) is StringType:
+            infiles = (infiles,)
+        elif type(infiles) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'infiles' must be a string, or a list or tuple of strings"
+
+        # If 'outfile' must be regenerated (either because it doesn't
+        # exist, is out-of-date, or the 'force' flag is true) then
+        # perform the action that presumably regenerates it
+        if self.force or dep_util.newer_group (infiles, outfile):
+            self.execute(func, args, exec_msg, level)
+
+        # Otherwise, print the "skip" message
+        else:
+            self.announce(skip_msg, level)
+
+    # make_file ()
+
+# class Command
+
+
+# XXX 'install_misc' class not currently used -- it was the base class for
+# both 'install_scripts' and 'install_data', but they outgrew it.  It might
+# still be useful for 'install_headers', though, so I'm keeping it around
+# for the time being.
+
+class install_misc (Command):
+    """Common base class for installing some files in a subdirectory.
+    Currently used by install_data and install_scripts.
+    """
+
+    user_options = [('install-dir=', 'd', "directory to install the files to")]
+
+    def initialize_options (self):
+        self.install_dir = None
+        self.outfiles = []
+
+    def _install_dir_from (self, dirname):
+        self.set_undefined_options('install', (dirname, 'install_dir'))
+
+    def _copy_files (self, filelist):
+        self.outfiles = []
+        if not filelist:
+            return
+        self.mkpath(self.install_dir)
+        for f in filelist:
+            self.copy_file(f, self.install_dir)
+            self.outfiles.append(os.path.join(self.install_dir, f))
+
+    def get_outputs (self):
+        return self.outfiles
+
+
+if __name__ == "__main__":
+    print "ok"
diff --git a/lib-python/2.2/distutils/command/__init__.py b/lib-python/2.2/distutils/command/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/__init__.py
@@ -0,0 +1,24 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands."""
+
+__revision__ = "$Id$"
+
+__all__ = ['build',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'clean',
+           'install',
+           'install_lib',
+           'install_headers',
+           'install_scripts',
+           'install_data',
+           'sdist',
+           'bdist',
+           'bdist_dumb',
+           'bdist_rpm',
+           'bdist_wininst',
+          ]
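
This package relies on a naming convention rather than a registry: each
command name listed in '__all__' corresponds to a module
'distutils.command.<name>' that defines a class of the same name.  A
rough sketch of the lookup (not the exact code in
'Distribution.get_command_class()'):

    def find_command_class(name):
        # import distutils.command.<name> and pull out the class
        # with the same name as the module
        module = __import__('distutils.command.' + name,
                            globals(), locals(), [name])
        return getattr(module, name)

    # e.g. find_command_class('bdist') returns the bdist class below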
diff --git a/lib-python/2.2/distutils/command/bdist.py b/lib-python/2.2/distutils/command/bdist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist.py
@@ -0,0 +1,139 @@
+"""distutils.command.bdist
+
+Implements the Distutils 'bdist' command (create a built [binary]
+distribution)."""
+
+# created 2000/03/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import os, string
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import get_platform
+
+
+def show_formats ():
+    """Print list of available formats (arguments to "--format" option).
+    """
+    from distutils.fancy_getopt import FancyGetopt
+    formats = []
+    for format in bdist.format_commands:
+        formats.append(("formats=" + format, None,
+                        bdist.format_command[format][1]))
+    pretty_printer = FancyGetopt(formats)
+    pretty_printer.print_help("List of available distribution formats:")
+
+
+class bdist (Command):
+
+    description = "create a built (binary) distribution"
+
+    user_options = [('bdist-base=', 'b',
+                     "temporary directory for creating built distributions"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('formats=', None,
+                     "formats for distribution (comma-separated list)"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in "
+                     "[default: dist]"),
+                   ]
+
+    help_options = [
+        ('help-formats', None,
+         "lists available distribution formats", show_formats),
+        ]
+
+    # The following commands do not take a format option from bdist
+    no_format_option = ('bdist_rpm',)
+
+    # This won't do in reality: will need to distinguish RPM-ish Linux,
+    # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip', }
+
+    # Establish the preferred order (for the --help-formats option).
+    format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
+                       'wininst', 'zip']
+
+    # And the real information.
+    format_command = { 'rpm':   ('bdist_rpm',  "RPM distribution"),
+                       'gztar': ('bdist_dumb', "gzip'ed tar file"),
+                       'bztar': ('bdist_dumb', "bzip2'ed tar file"),
+                       'ztar':  ('bdist_dumb', "compressed tar file"),
+                       'tar':   ('bdist_dumb', "tar file"),
+                       'wininst': ('bdist_wininst',
+                                   "Windows executable installer"),
+                       'zip':   ('bdist_dumb', "ZIP file"),
+                     }
+
+
+    def initialize_options (self):
+        self.bdist_base = None
+        self.plat_name = None
+        self.formats = None
+        self.dist_dir = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        # have to finalize 'plat_name' before 'bdist_base'
+        if self.plat_name is None:
+            self.plat_name = get_platform()
+
+        # 'bdist_base' -- parent of per-built-distribution-format
+        # temporary directories (eg. we'll probably have
+        # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
+        if self.bdist_base is None:
+            build_base = self.get_finalized_command('build').build_base
+            self.bdist_base = os.path.join(build_base,
+                                           'bdist.' + self.plat_name)
+
+        self.ensure_string_list('formats')
+        if self.formats is None:
+            try:
+                self.formats = [self.default_format[os.name]]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      "don't know how to create built distributions " + \
+                      "on platform %s" % os.name
+
+        if self.dist_dir is None:
+            self.dist_dir = "dist"
+
+    # finalize_options()
+
+
+    def run (self):
+
+        # Figure out which sub-commands we need to run.
+        commands = []
+        for format in self.formats:
+            try:
+                commands.append(self.format_command[format][0])
+            except KeyError:
+                raise DistutilsOptionError, "invalid format '%s'" % format
+
+        # Reinitialize and run each command.
+        for i in range(len(self.formats)):
+            cmd_name = commands[i]
+            sub_cmd = self.reinitialize_command(cmd_name)
+            if cmd_name not in self.no_format_option:
+                sub_cmd.format = self.formats[i]
+
+            print ("bdist.run: format=%s, command=%s, rest=%s" %
+                   (self.formats[i], cmd_name, commands[i+1:]))
+
+            # If we're going to need to run this command again, tell it to
+            # keep its temporary files around so subsequent runs go faster.
+            if cmd_name in commands[i+1:]:
+                sub_cmd.keep_temp = 1
+            self.run_command(cmd_name)
+
+    # run()
+
+# class bdist
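
'run()' above is purely a dispatcher: each requested format is looked up
in 'format_command' and the matching sub-command is run with its 'format'
attribute set.  A quick illustration of the table (comments paraphrase
the dictionary above):

    from distutils.command.bdist import bdist

    for fmt in ['gztar', 'zip']:
        cmd_name, desc = bdist.format_command[fmt]
        print "%s -> %s (%s)" % (fmt, cmd_name, desc)
    # gztar -> bdist_dumb (gzip'ed tar file)
    # zip -> bdist_dumb (ZIP file)

So 'setup.py bdist --formats=gztar,zip' runs 'bdist_dumb' twice, with
'keep_temp' forced on for the first run, as the loop in 'run()' shows.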
diff --git a/lib-python/2.2/distutils/command/bdist_dumb.py b/lib-python/2.2/distutils/command/bdist_dumb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_dumb.py
@@ -0,0 +1,96 @@
+"""distutils.command.bdist_dumb
+
+Implements the Distutils 'bdist_dumb' command (create a "dumb" built
+distribution -- i.e., just an archive to be unpacked under $prefix or
+$exec_prefix)."""
+
+# created 2000/03/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import create_tree, remove_tree
+from distutils.errors import *
+
+class bdist_dumb (Command):
+
+    description = "create a \"dumb\" built distribution"
+
+    user_options = [('bdist-dir=', None,
+                     "temporary directory for creating the distribution"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('format=', 'f',
+                     "archive format to create (tar, ztar, gztar, zip)"),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                   ]
+
+    boolean_options = ['keep-temp']
+
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip', }
+
+
+    def initialize_options (self):
+        self.bdist_dir = None
+        self.plat_name = None
+        self.format = None
+        self.keep_temp = 0
+        self.dist_dir = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'dumb')
+
+        if self.format is None:
+            try:
+                self.format = self.default_format[os.name]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      ("don't know how to create dumb built distributions " +
+                       "on platform %s") % os.name
+
+        self.set_undefined_options('bdist',
+                                   ('dist_dir', 'dist_dir'),
+                                   ('plat_name', 'plat_name'))
+
+    # finalize_options()
+
+
+    def run (self):
+
+        self.run_command('build')
+
+        install = self.reinitialize_command('install', reinit_subcommands=1)
+        install.root = self.bdist_dir
+        install.warn_dir = 0
+
+        self.announce("installing to %s" % self.bdist_dir)
+        self.run_command('install')
+
+        # And make an archive relative to the root of the
+        # pseudo-installation tree.
+        archive_basename = "%s.%s" % (self.distribution.get_fullname(),
+                                      self.plat_name)
+        self.make_archive(os.path.join(self.dist_dir, archive_basename),
+                          self.format,
+                          root_dir=self.bdist_dir)
+
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
+
+    # run()
+
+# class bdist_dumb
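
The archive name produced by 'run()' is just the distribution's full
name plus the platform string, dropped into 'dist_dir'.  A sketch with a
hypothetical distribution name:

    import os
    from distutils.util import get_platform

    fullname = "Example-1.0"                       # hypothetical
    archive_basename = "%s.%s" % (fullname, get_platform())
    print os.path.join("dist", archive_basename)
    # e.g. dist/Example-1.0.linux-i686, to which make_archive()
    # appends the extension for the chosen format (.tar.gz for the
    # posix default 'gztar')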
diff --git a/lib-python/2.2/distutils/command/bdist_rpm.py b/lib-python/2.2/distutils/command/bdist_rpm.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_rpm.py
@@ -0,0 +1,488 @@
+"""distutils.command.bdist_rpm
+
+Implements the Distutils 'bdist_rpm' command (create RPM source and binary
+distributions)."""
+
+# created 2000/04/25, by Harry Henry Gebel
+
+__revision__ = "$Id$"
+
+import sys, os, string
+import glob
+from types import *
+from distutils.core import Command, DEBUG
+from distutils.util import get_platform
+from distutils.file_util import write_file
+from distutils.errors import *
+
+class bdist_rpm (Command):
+
+    description = "create an RPM distribution"
+
+    user_options = [
+        ('bdist-base=', None,
+         "base directory for creating built distributions"),
+        ('rpm-base=', None,
+         "base directory for creating RPMs (defaults to \"rpm\" under "
+         "--bdist-base; must be specified for RPM 2)"),
+        ('dist-dir=', 'd',
+         "directory to put final RPM files in "
+         "(and .spec files if --spec-only)"),
+        ('python=', None,
+         "path to Python interpreter to hard-code in the .spec file "
+         "(default: \"python\")"),
+        ('fix-python', None,
+         "hard-code the exact path to the current Python interpreter in "
+         "the .spec file"),
+        ('spec-only', None,
+         "only regenerate spec file"),
+        ('source-only', None,
+         "only generate source RPM"),
+        ('binary-only', None,
+         "only generate binary RPM"),
+        ('use-bzip2', None,
+         "use bzip2 instead of gzip to create source distribution"),
+
+        # More meta-data: too RPM-specific to put in the setup script,
+        # but needs to go in the .spec file -- so we make these options
+        # to "bdist_rpm".  The idea is that packagers would put this
+        # info in setup.cfg, although they are of course free to
+        # supply it on the command line.
+        ('distribution-name=', None,
+         "name of the (Linux) distribution to which this "
+         "RPM applies (*not* the name of the module distribution!)"),
+        ('group=', None,
+         "package classification [default: \"Development/Libraries\"]"),
+        ('release=', None,
+         "RPM release number"),
+        ('serial=', None,
+         "RPM serial number"),
+        ('vendor=', None,
+         "RPM \"vendor\" (eg. \"Joe Blow <joe at example.com>\") "
+         "[default: maintainer or author from setup script]"),
+        ('packager=', None,
+         "RPM packager (eg. \"Jane Doe <jane at example.net>\")"
+         "[default: vendor]"),
+        ('doc-files=', None,
+         "list of documentation files (space or comma-separated)"),
+        ('changelog=', None,
+         "RPM changelog"),
+        ('icon=', None,
+         "name of icon file"),
+        ('provides=', None,
+         "capabilities provided by this package"),
+        ('requires=', None,
+         "capabilities required by this package"),
+        ('conflicts=', None,
+         "capabilities which conflict with this package"),
+        ('build-requires=', None,
+         "capabilities required to build this package"),
+        ('obsoletes=', None,
+         "capabilities made obsolete by this package"),
+
+        # Actions to take when building RPM
+        ('keep-temp', 'k',
+         "don't clean up RPM build directory"),
+        ('no-keep-temp', None,
+         "clean up RPM build directory [default]"),
+        ('use-rpm-opt-flags', None,
+         "compile with RPM_OPT_FLAGS when building from source RPM"),
+        ('no-rpm-opt-flags', None,
+         "do not pass any RPM CFLAGS to compiler"),
+        ('rpm3-mode', None,
+         "RPM 3 compatibility mode (default)"),
+        ('rpm2-mode', None,
+         "RPM 2 compatibility mode"),
+       ]
+
+    boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode']
+
+    negative_opt = {'no-keep-temp': 'keep-temp',
+                    'no-rpm-opt-flags': 'use-rpm-opt-flags',
+                    'rpm2-mode': 'rpm3-mode'}
+
+
+    def initialize_options (self):
+        self.bdist_base = None
+        self.rpm_base = None
+        self.dist_dir = None
+        self.python = None
+        self.fix_python = None
+        self.spec_only = None
+        self.binary_only = None
+        self.source_only = None
+        self.use_bzip2 = None
+
+        self.distribution_name = None
+        self.group = None
+        self.release = None
+        self.serial = None
+        self.vendor = None
+        self.packager = None
+        self.doc_files = None
+        self.changelog = None
+        self.icon = None
+
+        self.prep_script = None
+        self.build_script = None
+        self.install_script = None
+        self.clean_script = None
+        self.pre_install = None
+        self.post_install = None
+        self.pre_uninstall = None
+        self.post_uninstall = None
+        self.prep = None
+        self.provides = None
+        self.requires = None
+        self.conflicts = None
+        self.build_requires = None
+        self.obsoletes = None
+
+        self.keep_temp = 0
+        self.use_rpm_opt_flags = 1
+        self.rpm3_mode = 1
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
+        if self.rpm_base is None:
+            if not self.rpm3_mode:
+                raise DistutilsOptionError, \
+                      "you must specify --rpm-base in RPM 2 mode"
+            self.rpm_base = os.path.join(self.bdist_base, "rpm")
+
+        if self.python is None:
+            if self.fix_python:
+                self.python = sys.executable
+            else:
+                self.python = "python"
+        elif self.fix_python:
+            raise DistutilsOptionError, \
+                  "--python and --fix-python are mutually exclusive options"
+
+        if os.name != 'posix':
+            raise DistutilsPlatformError, \
+                  ("don't know how to create RPM "
+                   "distributions on platform %s" % os.name)
+        if self.binary_only and self.source_only:
+            raise DistutilsOptionError, \
+                  "cannot supply both '--source-only' and '--binary-only'"
+
+        # don't pass CFLAGS to pure python distributions
+        if not self.distribution.has_ext_modules():
+            self.use_rpm_opt_flags = 0
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+        self.finalize_package_data()
+
+    # finalize_options()
+
+    def finalize_package_data (self):
+        self.ensure_string('group', "Development/Libraries")
+        self.ensure_string('vendor',
+                           "%s <%s>" % (self.distribution.get_contact(),
+                                        self.distribution.get_contact_email()))
+        self.ensure_string('packager')
+        self.ensure_string_list('doc_files')
+        if type(self.doc_files) is ListType:
+            for readme in ('README', 'README.txt'):
+                if os.path.exists(readme) and readme not in self.doc_files:
+                    self.doc_files.append(readme)
+
+        self.ensure_string('release', "1")
+        self.ensure_string('serial')   # should it be an int?
+
+        self.ensure_string('distribution_name')
+
+        self.ensure_string('changelog')
+        # format the changelog correctly
+        self.changelog = self._format_changelog(self.changelog)
+
+        self.ensure_filename('icon')
+
+        self.ensure_filename('prep_script')
+        self.ensure_filename('build_script')
+        self.ensure_filename('install_script')
+        self.ensure_filename('clean_script')
+        self.ensure_filename('pre_install')
+        self.ensure_filename('post_install')
+        self.ensure_filename('pre_uninstall')
+        self.ensure_filename('post_uninstall')
+
+        # XXX don't forget we punted on summaries and descriptions -- they
+        # should be handled here eventually!
+
+        # Now *this* is some meta-data that belongs in the setup script...
+        self.ensure_string_list('provides')
+        self.ensure_string_list('requires')
+        self.ensure_string_list('conflicts')
+        self.ensure_string_list('build_requires')
+        self.ensure_string_list('obsoletes')
+
+    # finalize_package_data ()
+
+
+    def run (self):
+
+        if DEBUG:
+            print "before _get_package_data():"
+            print "vendor =", self.vendor
+            print "packager =", self.packager
+            print "doc_files =", self.doc_files
+            print "changelog =", self.changelog
+
+        # make directories
+        if self.spec_only:
+            spec_dir = self.dist_dir
+            self.mkpath(spec_dir)
+        else:
+            rpm_dir = {}
+            for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
+                rpm_dir[d] = os.path.join(self.rpm_base, d)
+                self.mkpath(rpm_dir[d])
+            spec_dir = rpm_dir['SPECS']
+
+        # Spec file goes into 'dist_dir' if '--spec-only' specified,
+        # build/rpm.<plat> otherwise.
+        spec_path = os.path.join(spec_dir,
+                                 "%s.spec" % self.distribution.get_name())
+        self.execute(write_file,
+                     (spec_path,
+                      self._make_spec_file()),
+                     "writing '%s'" % spec_path)
+
+        if self.spec_only: # stop if requested
+            return
+
+        # Make a source distribution and copy to SOURCES directory with
+        # optional icon.
+        sdist = self.reinitialize_command('sdist')
+        if self.use_bzip2:
+            sdist.formats = ['bztar']
+        else:
+            sdist.formats = ['gztar']
+        self.run_command('sdist')
+
+        source = sdist.get_archive_files()[0]
+        source_dir = rpm_dir['SOURCES']
+        self.copy_file(source, source_dir)
+
+        if self.icon:
+            if os.path.exists(self.icon):
+                self.copy_file(self.icon, source_dir)
+            else:
+                raise DistutilsFileError, \
+                      "icon file '%s' does not exist" % self.icon
+
+
+        # build package
+        self.announce('building RPMs')
+        rpm_cmd = ['rpm']
+        if os.path.exists('/usr/bin/rpmbuild') or \
+           os.path.exists('/bin/rpmbuild'):
+            rpm_cmd = ['rpmbuild']
+        if self.source_only: # what kind of RPMs?
+            rpm_cmd.append('-bs')
+        elif self.binary_only:
+            rpm_cmd.append('-bb')
+        else:
+            rpm_cmd.append('-ba')
+        if self.rpm3_mode:
+            rpm_cmd.extend(['--define',
+                             '_topdir %s/%s' % (os.getcwd(), self.rpm_base),])
+        if not self.keep_temp:
+            rpm_cmd.append('--clean')
+        rpm_cmd.append(spec_path)
+        self.spawn(rpm_cmd)
+
+        # XXX this is a nasty hack -- we really should have a proper way to
+        # find out the names of the RPM files created; also, this assumes
+        # that RPM creates exactly one source and one binary RPM.
+        if not self.dry_run:
+            if not self.binary_only:
+                srpms = glob.glob(os.path.join(rpm_dir['SRPMS'], "*.rpm"))
+                assert len(srpms) == 1, \
+                       "unexpected number of SRPM files found: %s" % srpms
+                self.move_file(srpms[0], self.dist_dir)
+
+            if not self.source_only:
+                rpms = glob.glob(os.path.join(rpm_dir['RPMS'], "*/*.rpm"))
+                assert len(rpms) == 1, \
+                       "unexpected number of RPM files found: %s" % rpms
+                self.move_file(rpms[0], self.dist_dir)
+
+    # run()
+
+
+    def _make_spec_file(self):
+        """Generate the text of an RPM spec file and return it as a
+        list of strings (one per line).
+        """
+        # definitions and headers
+        spec_file = [
+            '%define name ' + self.distribution.get_name(),
+            '%define version ' + self.distribution.get_version(),
+            '%define release ' + self.release,
+            '',
+            'Summary: ' + self.distribution.get_description(),
+            ]
+
+        # put locale summaries into spec file
+        # XXX not supported for now (hard to put a dictionary
+        # in a config file -- arg!)
+        #for locale in self.summaries.keys():
+        #    spec_file.append('Summary(%s): %s' % (locale,
+        #                                          self.summaries[locale]))
+
+        spec_file.extend([
+            'Name: %{name}',
+            'Version: %{version}',
+            'Release: %{release}',])
+
+        # XXX yuck! this filename is available from the "sdist" command,
+        # but only after it has run: and we create the spec file before
+        # running "sdist", in case of --spec-only.
+        if self.use_bzip2:
+            spec_file.append('Source0: %{name}-%{version}.tar.bz2')
+        else:
+            spec_file.append('Source0: %{name}-%{version}.tar.gz')
+
+        spec_file.extend([
+            'Copyright: ' + self.distribution.get_license(),
+            'Group: ' + self.group,
+            'BuildRoot: %{_tmppath}/%{name}-buildroot',
+            'Prefix: %{_prefix}', ])
+
+        # noarch if no extension modules
+        if not self.distribution.has_ext_modules():
+            spec_file.append('BuildArchitectures: noarch')
+
+        for field in ('Vendor',
+                      'Packager',
+                      'Provides',
+                      'Requires',
+                      'Conflicts',
+                      'Obsoletes',
+                      ):
+            val = getattr(self, string.lower(field))
+            if type(val) is ListType:
+                spec_file.append('%s: %s' % (field, string.join(val)))
+            elif val is not None:
+                spec_file.append('%s: %s' % (field, val))
+
+
+        if self.distribution.get_url() != 'UNKNOWN':
+            spec_file.append('Url: ' + self.distribution.get_url())
+
+        if self.distribution_name:
+            spec_file.append('Distribution: ' + self.distribution_name)
+
+        if self.build_requires:
+            spec_file.append('BuildRequires: ' +
+                             string.join(self.build_requires))
+
+        if self.icon:
+            spec_file.append('Icon: ' + os.path.basename(self.icon))
+
+        spec_file.extend([
+            '',
+            '%description',
+            self.distribution.get_long_description()
+            ])
+
+        # put locale descriptions into spec file
+        # XXX again, suppressed because config file syntax doesn't
+        # easily support this ;-(
+        #for locale in self.descriptions.keys():
+        #    spec_file.extend([
+        #        '',
+        #        '%description -l ' + locale,
+        #        self.descriptions[locale],
+        #        ])
+
+        # rpm scripts
+        # figure out default build script
+        def_build = "%s setup.py build" % self.python
+        if self.use_rpm_opt_flags:
+            def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
+
+        # insert contents of files
+
+        # XXX this is kind of misleading: user-supplied options are files
+        # that we open and interpolate into the spec file, but the defaults
+        # are just text that we drop in as-is.  Hmmm.
+
+        script_options = [
+            ('prep', 'prep_script', "%setup"),
+            ('build', 'build_script', def_build),
+            ('install', 'install_script',
+             ("%s setup.py install "
+              "--root=$RPM_BUILD_ROOT "
+              "--record=INSTALLED_FILES") % self.python),
+            ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
+            ('pre', 'pre_install', None),
+            ('post', 'post_install', None),
+            ('preun', 'pre_uninstall', None),
+            ('postun', 'post_uninstall', None),
+        ]
+
+        for (rpm_opt, attr, default) in script_options:
+            # Insert the contents of the file referred to; if no file is
+            # referred to, use 'default' as the contents of the script
+            val = getattr(self, attr)
+            if val or default:
+                spec_file.extend([
+                    '',
+                    '%' + rpm_opt,])
+                if val:
+                    spec_file.extend(string.split(open(val, 'r').read(), '\n'))
+                else:
+                    spec_file.append(default)
+
+
+        # files section
+        spec_file.extend([
+            '',
+            '%files -f INSTALLED_FILES',
+            '%defattr(-,root,root)',
+            ])
+
+        if self.doc_files:
+            spec_file.append('%doc ' + string.join(self.doc_files))
+
+        if self.changelog:
+            spec_file.extend([
+                '',
+                '%changelog',])
+            spec_file.extend(self.changelog)
+
+        return spec_file
+
+    # _make_spec_file ()
+
+    def _format_changelog(self, changelog):
+        """Format the changelog correctly and convert it to a list of strings
+        """
+        if not changelog:
+            return changelog
+        new_changelog = []
+        for line in string.split(string.strip(changelog), '\n'):
+            line = string.strip(line)
+            if line[0] == '*':
+                new_changelog.extend(['', line])
+            elif line[0] == '-':
+                new_changelog.append(line)
+            else:
+                new_changelog.append('  ' + line)
+
+        # strip the leading blank line inserted by the first changelog entry
+        if not new_changelog[0]:
+            del new_changelog[0]
+
+        return new_changelog
+
+    # _format_changelog()
+
+# class bdist_rpm
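
As the comment block in 'user_options' says, the RPM-specific metadata
is normally supplied in 'setup.cfg' rather than on the command line.  A
sketch of such a section (all values hypothetical):

    [bdist_rpm]
    release = 1
    group = Development/Libraries
    vendor = Joe Blow <joe@example.com>
    packager = Jane Doe <jane@example.net>
    doc_files = CHANGELOG.txt README.txt
    requires = python >= 2.2

'finalize_package_data()' above then fills in defaults (group, vendor,
release) for anything left unset.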
diff --git a/lib-python/2.2/distutils/command/bdist_wininst.py b/lib-python/2.2/distutils/command/bdist_wininst.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/bdist_wininst.py
@@ -0,0 +1,570 @@
+"""distutils.command.bdist_wininst
+
+Implements the Distutils 'bdist_wininst' command: create a windows installer
+exe-program."""
+
+# created 2000/06/02, Thomas Heller
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import create_tree, remove_tree
+from distutils.errors import *
+
+class bdist_wininst (Command):
+
+    description = "create an executable installer for MS Windows"
+
+    user_options = [('bdist-dir=', None,
+                     "temporary directory for creating the distribution"),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    ('target-version=', 'v',
+                     "require a specific python version" +
+                     " on the target system"),
+                    ('no-target-compile', 'c',
+                     "do not compile .py to .pyc on the target system"),
+                    ('no-target-optimize', 'o',
+                     "do not compile .py to .pyo (optimized)"
+                     "on the target system"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                    ('bitmap=', 'b',
+                     "bitmap to use for the installer instead of python-powered logo"),
+                    ('title=', 't',
+                     "title to display on the installer background instead of default"),
+                   ]
+
+    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+                       'skip-build']
+
+    def initialize_options (self):
+        self.bdist_dir = None
+        self.keep_temp = 0
+        self.no_target_compile = 0
+        self.no_target_optimize = 0
+        self.target_version = None
+        self.dist_dir = None
+        self.bitmap = None
+        self.title = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'wininst')
+        if not self.target_version:
+            self.target_version = ""
+        if self.distribution.has_ext_modules():
+            short_version = sys.version[:3]
+            if self.target_version and self.target_version != short_version:
+                raise DistutilsOptionError, \
+                      "target version can only be" + short_version
+            self.target_version = short_version
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+    # finalize_options()
+
+
+    def run (self):
+        if (sys.platform != "win32" and
+            (self.distribution.has_ext_modules() or
+             self.distribution.has_c_libraries())):
+            raise DistutilsPlatformError \
+                  ("distribution contains extensions and/or C libraries; "
+                   "must be compiled on a Windows 32 platform")
+
+        self.run_command('build')
+
+        install = self.reinitialize_command('install', reinit_subcommands=1)
+        install.root = self.bdist_dir
+        install.warn_dir = 0
+
+        install_lib = self.reinitialize_command('install_lib')
+        # we do not want to include pyc or pyo files
+        install_lib.compile = 0
+        install_lib.optimize = 0
+
+        # Use a custom scheme for the zip-file, because we have to decide
+        # at installation time which scheme to use.
+        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
+            value = string.upper(key)
+            if key == 'headers':
+                value = value + '/Include/$dist_name'
+            setattr(install,
+                    'install_' + key,
+                    value)
+
+        self.announce("installing to %s" % self.bdist_dir)
+        install.ensure_finalized()
+
+        # avoid warning of 'install_lib' about installing
+        # into a directory not in sys.path
+        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+        install.run()
+
+        del sys.path[0]
+
+        # And make an archive relative to the root of the
+        # pseudo-installation tree.
+        from tempfile import mktemp
+        archive_basename = mktemp()
+        fullname = self.distribution.get_fullname()
+        arcname = self.make_archive(archive_basename, "zip",
+                                    root_dir=self.bdist_dir)
+        # create an exe containing the zip-file
+        self.create_exe(arcname, fullname, self.bitmap)
+        # remove the zip-file again
+        self.announce("removing temporary file '%s'" % arcname)
+        os.remove(arcname)
+
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
+
+    # run()
+
+    def get_inidata (self):
+        # Return data describing the installation.
+
+        lines = []
+        metadata = self.distribution.metadata
+
+        # Write the [metadata] section.  Values are written with
+        # repr()[1:-1], so they do not contain unprintable characters, and
+        # are not surrounded by quote chars.
+        lines.append("[metadata]")
+
+        # 'info' will be displayed in the installer's dialog box,
+        # describing the items to be installed.
+        info = (metadata.long_description or '') + '\n'
+
+        for name in ["author", "author_email", "description", "maintainer",
+                     "maintainer_email", "name", "url", "version"]:
+            data = getattr(metadata, name, "")
+            if data:
+                info = info + ("\n    %s: %s" % \
+                               (string.capitalize(name), data))
+                lines.append("%s=%s" % (name, repr(data)[1:-1]))
+
+        # The [setup] section contains entries controlling
+        # the installer runtime.
+        lines.append("\n[Setup]")
+        lines.append("info=%s" % repr(info)[1:-1])
+        lines.append("target_compile=%d" % (not self.no_target_compile))
+        lines.append("target_optimize=%d" % (not self.no_target_optimize))
+        if self.target_version:
+            lines.append("target_version=%s" % self.target_version)
+
+        title = self.title or self.distribution.get_fullname()
+        lines.append("title=%s" % repr(title)[1:-1])
+        import time
+        import distutils
+        build_info = "Build %s with distutils-%s" % \
+                     (time.ctime(time.time()), distutils.__version__)
+        lines.append("build_info=%s" % build_info)
+        return string.join(lines, "\n")
+
+    # get_inidata()
+
+    def create_exe (self, arcname, fullname, bitmap=None):
+        import struct
+
+        self.mkpath(self.dist_dir)
+
+        cfgdata = self.get_inidata()
+
+        if self.target_version:
+            # if we create an installer for a specific python version,
+            # it's better to include this in the name
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.win32-py%s.exe" %
+                                           (fullname, self.target_version))
+        else:
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.win32.exe" % fullname)
+        self.announce("creating %s" % installer_name)
+
+        if bitmap:
+            bitmapdata = open(bitmap, "rb").read()
+            bitmaplen = len(bitmapdata)
+        else:
+            bitmaplen = 0
+
+        file = open(installer_name, "wb")
+        file.write(self.get_exe_bytes())
+        if bitmap:
+            file.write(bitmapdata)
+
+        file.write(cfgdata)
+        header = struct.pack("<iii",
+                             0x1234567A,       # tag
+                             len(cfgdata),     # length
+                             bitmaplen,        # number of bytes in bitmap
+                             )
+        file.write(header)
+        file.write(open(arcname, "rb").read())
+
+    # create_exe()
+
+    def get_exe_bytes (self):
+        import base64
+        return base64.decodestring(EXEDATA)
+# class bdist_wininst
+
+if __name__ == '__main__':
+    # recreate EXEDATA from wininst.exe by rewriting this file
+    import re, base64
+    moddata = open("bdist_wininst.py", "r").read()
+    exedata = open("../../misc/wininst.exe", "rb").read()
+    print "wininst.exe length is %d bytes" % len(exedata)
+    print "wininst.exe encoded length is %d bytes" % len(base64.encodestring(exedata))
+    exp = re.compile('EXE'+'DATA = """\\\\(\n.*)*\n"""', re.M)
+    data = exp.sub('EXE' + 'DATA = """\\\\\n%s"""' %
+                    base64.encodestring(exedata), moddata)
+    open("bdist_wininst.py", "w").write(data)
+    print "bdist_wininst.py recreated"
+
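
A note on the layout 'create_exe()' produces: the installer is the stub
executable followed by the optional bitmap, the INI-style config data,
the 12-byte struct header packed above, and finally the zip archive.
Because a zip file is located through the end-of-central-directory
record at the very end of the file, the finished .exe should still open
directly with the zipfile module (installer name hypothetical):

    import zipfile

    exe = zipfile.ZipFile("dist/Example-1.0.win32.exe")
    print exe.namelist()    # contents of the pseudo-installation tree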
+EXEDATA = """\
+TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAA8AAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1v
+ZGUuDQ0KJAAAAAAAAAA/SHa+eykY7XspGO17KRjtADUU7XkpGO0UNhLtcCkY7fg1Fu15KRjtFDYc
+7XkpGO0ZNgvtcykY7XspGe0GKRjteykY7XYpGO19ChLteSkY7bwvHu16KRjtUmljaHspGO0AAAAA
+AAAAAAAAAAAAAAAAUEUAAEwBAwCUrh88AAAAAAAAAADgAA8BCwEGAABQAAAAEAAAAKAAANDuAAAA
+sAAAAAABAAAAQAAAEAAAAAIAAAQAAAAAAAAABAAAAAAAAAAAEAEAAAQAAAAAAAACAAAAAAAQAAAQ
+AAAAABAAABAAAAAAAAAQAAAAAAAAAAAAAAAwAQEAbAEAAAAAAQAwAQAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVUFgwAAAAAACgAAAAEAAAAAAAAAAEAAAA
+AAAAAAAAAAAAAACAAADgVVBYMQAAAAAAUAAAALAAAABCAAAABAAAAAAAAAAAAAAAAAAAQAAA4C5y
+c3JjAAAAABAAAAAAAQAABAAAAEYAAAAAAAAAAAAAAAAAAEAAAMAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAkSW5mbzogVGhpcyBmaWxlIGlz
+IHBhY2tlZCB3aXRoIHRoZSBVUFggZXhlY3V0YWJsZSBwYWNrZXIgaHR0cDovL3VweC50c3gub3Jn
+ICQKACRJZDogVVBYIDEuMDEgQ29weXJpZ2h0IChDKSAxOTk2LTIwMDAgdGhlIFVQWCBUZWFtLiBB
+bGwgUmlnaHRzIFJlc2VydmVkLiAkCgBVUFghDAkCCjD69l3lQx/kVsgAAME+AAAAsAAAJgEA4P/b
+//9TVVaLdCQUhfZXdH2LbCQci3wMgD4AdHBqXFb/5vZv/xU0YUAAi/BZHVl0X4AmAFcRvGD9v/n+
+2IP7/3Unag+4hcB1E4XtdA9XaBBw/d/+vw1qBf/Vg8QM6wdXagEJWVn2wxB1HGi3ABOyna0ALbQp
+Dcb3/3/7BlxGdYssWF9eXVvDVYvsg+wMU1ZXiz3ALe/uf3cz9rs5wDl1CHUHx0UIAQxWaIBMsf9v
+bxFWVlMFDP/Xg/j/iUX8D4WIY26+vZnUEQN1GyEg/3UQ6Bf/b7s31wBopw+EA0HrsR9QdAmPbduz
+UI/rL1wgGOpTDGoCrM2W7f9VIPDALmcQZronYy91JS67aFTH6Xbf891TAes7B1kO8yR0Cq3QHvkT
+A41F9G4GAgx7n4UYQtB9/BIDvO7NNEioNBR1CQvIlgbTfTN/DlZqBFYQxBD7GlyEyHyJfg9hOIKz
+3drmPOsmpSsCUyqs+b5tW1OnCCWLBDvGdRcnEMKGNuEoco4KM8BsC+3/5FvJOIN9EAhTi10IaUOS
+druwffI4k8jdUOjITCJFsnzb3AwvUMgIFEBqAcz+c7ftGF4G2CVoqFEq8VCJXdS/sHDrLSIbfRw7
+dGn/dChQaO72+b6QmBlLBCPsjnQTGnOd+5YNfIsEyYr2IR8byFn3Inw6Lh9kQ+2w0VoDxUUSPsgP
+3ea+U5ccGY1e8MwUxuPO8GHOgewo4auLVRBExv/tf4tMAvqNXALqV5/gK0MMK8GD6BaLG//L7f/P
+gTtQSwUGiX3o8GsCg2UUAGaDewoA/5v77g+OYA7rCYtN7D/M6ItEESqNNBEDttkubzP6gT4BAjA6
+gT8Lv3Wf7QMEPC7BLpSJMQPKD79WbY/dth4I9AZOIAwcA1UV0W370rwITxyJwVcaA9CbEBYjNP72
+6I1EAipF3I2F2P6baTShezdgCy7dgLwF1w9cMseY4VkYaLATHShPFz4bMyvtlGX4hoQFtrRhexFS
+5PaDOMA+Cn5jL9vwDfzw/zBSUAp19HyWzNYNNOwPEMoA2zn3Py38/0X4g8AILzU9dciruTo3e0ca
+UGU0aEAKsIG8zJWwBXitrPuG25p0SqZmi0YMUAQOQ5prbDh2ueRQVCyrvwb30UclIicbCBt2FMyz
+/bZRDdxKAfqZGNJ7+9g2mRjJFXlQKUMKUEO7PbexagbBGLwPtRQ5Aiq4rnsPjE1h6ZFw+7pg7rFf
+pjTIjRzIlv9zBNSo2W/uNig3XxrwJvQDyCvYGSY5hIWz/HYQKksgHMDeL1mNT8H+tr2KCID5MwUE
+L3UCQEtT9p1hKQg3W+A6oQQbj9elLHjrA+quXCT937eGBAcRO4TJdAs6A8YAXEB175OtLyeXQAwP
+dBfGFI2d2VzxTthIBmncTd/YSzbAV1AU1GPYnF1b/m+2DAzQagqZWff5M8lolHFRAPmc4zYeaLyu
+CZVgakvNVk8wUELXGrHtZusxFBVIvuCPBFb0d7fa1llQUg8BGh04GP/TaGAdvAzH9BdgIzvwvnYK
+ARXTqShfmjlzpjyfnszwvm331vnywhAA2rgABgA9PI6tWejhTumUgQkQGrsbcEtSrAz8fQhXIQdD
+3Y6G7aF0ozxZJRQ8aHIiaDWd++ABBAB5pFUG7FBsLrT/Ko/HBCSAoQyfAPCgcRvGZ8Ea6DXkvBrs
+9u/WXegDQdZoQJAWaP0MeBvZHOChAARfrF7rJ7oCT+93gXgIOAEZW35wNfO95x3RdFzgKZ9tw/3V
+gz1sp6tCCHR1OW8kuq1WxyQpqEGhHbh1629Ai1AKjUgOAlFS21FWs3TvWUZEozAsDPwN0nBCXvwQ
+3vCwq7BFiMPXKNaswRpotFoFnsKS9FvfNt4RK9ArflIP+CtV8GNSs612+pkrwtH46xW0xw3LjcgI
+hax1g3wC2K7Q8X4GuOjAw64OCAIMxUH4+30FuLgTuAwRnouEdQEXrgwLIzdOV/2wu7ks5QK0JCAT
+iy1/LcIATWdY2PQrSLYVRS4ov91utrv+C2Y7xxQAwegQHoQBaYjwo062C13PzhB79gsJC9W7MKJA
+OlNoZgwHWbiAV1bbuAtsvOnaANkWAUZIixMvbZ0Z1ViJUxB0EqqD7WADRUjCaIYZiyvQvbvDdDSA
+PWWxUy/GOpeMX324ciZMFRw2hpthIWk3fFHkkLGFz0NAKSBu4QCZdOtKRBc8H7r3ahBoNB5JtMMd
+7ANjwehmSV/TQ8OI2MK5ILvECE7x11JW6ChBkb+ojjtkG9v6aO+j0wjwYJsVpIuBSgqgjgQWwdJ4
+dnav1hvsGGiZeLs+DtNcss2WNFNfKeSKYMh0g0BAWCbTQcaOb1pQHolo0Eps0LnuZKMEHP4bHHPc
+sfZ+m90CdR//NSEFIrpDiJkMrkQQMBDLXqHZDFDrQOvNS08Da5c79u4NjegmUWiDj0MKKAu80RDK
+x3wEF5MRdU8IdBMYrv93BBMcL1n4gg4KWfHw60uMQzIdySz9O/TT7AgfM/9XV6do/i0D4dbChq91
+BKvrIANXYJejhDUf1PmjdNZpgcRU/oHc4O14+5Le+FMz23cZAALxUHPSEVigKb38XBhgIUfthjYH
+NrgMxFMAKmyGZn5TUI1FmMc5oyf41lkgshwx9R+FZkeHPCO8YUU/C3ywWxM7wy90GDgYP43M9mjs
+TZhR8nLeNpx4NhfoU1AvSP8oc9bWRi32wxBk/gHubJ5tcNcc6Cn4/vxys7O1kmVi7MdmS9ZmIKrG
+Newzy9be8P1OABbwDAiahtgbEB8bKXBZLmDD7jfoaJp09xhYwT2w/PKEG6BYEsGETLxGUP2mg4+W
+WbboW7iu6fpZpV4t8QKsPaZjlShvuGplXQIEAJGwHUSYcs7EaOwQ2+B25uROEsBhjA6dHbBOWZNG
+ATXr2dgGkyXkFiBomlYGkwO4c5TuH33JdGybxUAIPTHsnqUeW3k9iZ/rAxJjIQyLNVM9PiT+NL7M
+iT1EoqiAuF8jFSjkC58XC1ZjZ7AgHKAUE+I1paQCYZK7nQvP0e0jaKS7U2ako2gML77fYiApQKAF
+xDggpptK7TMCybGAOTW8vfBKBdyjcEiTODpyd/uL2mcOoVke1KEZE2hs0G5WSGcaBRV1iUlMwiRd
+EVpbPGlzpI5XdRkPVs906/BOaA+sNeg6S/3bLyZw4fiFVQWDyP/rclNv5CY2IZhg7qx0QJgHSXPS
+bdH4Coz8eOg0J83waPRYVnczUWB1o2jAhIkJ2HRbiX8C+O70D1mqRCthx+UQsblUHtmPnF5bX+lQ
+AWoFE2+htkMSToUFKOTZlQYG2B9aq/8EQev2D7fBweBn2MNWuw2zMR5SNlPQKb1rXbIfoVZVEEEU
+yVFPROLthT2EkQB1Gfe4u9742BvAg+D0wGOJ7/82/wURSMYPGGjYdG0dwB0p9/+UJHQv/3QEJrAC
+LXZvKjS5LDCb2pgsywOSEF4kuGUzAogseZfLvX2LdgSUdYSLUvXzhTcCPP2UANTc0AObrVsQEAD8
+VQyiq8OLbW0xtcR9RnbDKQbpAJt0e6yb27ICXiEPhf6hZKEFhc+T4JmDPHURS05kOGBoXucCVjub
+9MgGNIem1i59LdmpV/QQqDmtHIMXnJNQPScR28imd6wZajAbtTR1CPZ00sButPRzinDS2dfchiAg
+ZNZqfIHZHBUNYE5H7iTwSgsfgHU2H8avYH9osuuWfWlvWWdFQibs6xtXCCaxlKuH6nMIgcYReBiT
+eD+JBmkYo10ikkYEAyJev4VLS3wyVjlivgp0NRaa4wuCTQhQUbwFgxHe7GxUiYyGFXBWkSWu5HOQ
+MwmxXY4GiB0olLqvATudK/B5ElY04PG6mE0j/FS5z9QhsiFAMVa8f6Xm0gFe+qCOPdMHAnQ4GO7F
+jWrE3HU1BFO3EiNxtF0GwQQHdad3Q+zNxwUq8+vBiT5l08RJucNRMBwC3EuVaKWaX6Y95LnONTTW
+Qx8Km8gbaJzMnAnEIuvbAUAD5Sw4HDy/2cCd7alLiP705NsZLPIMdw0ISWwNOTg0o6qooYTC7Hsj
+gGgP0wVipHNn7th2ETgFYylU9gyzQ9EXJEYswjB3rF1ozDART0etzcVIKkQ/ZSVZRMrVGNYfJwPj
+HTYZFbg1n5jAbALIYc4JM0jc+6x2vBRbYe2ssPxQQVxQAIzU7OyJVwBdGxM/JFL2SpRff4N98AEk
+7CzYdRw9jHzChGYOaApVIPwQms0mbI4ZuN+RGvYYQIkCYHZq/LM3zCwUZBRpDQQbkEBXYqYwuSQP
+hHawLHZUGbQ4sS48HZy0RpZGts147GR0QLDSO8w1/gMjpRuwxw8cQL7w36pWpsKwsiNWonkYDWUy
+qwyDVjwbsXdyNfxsBTwgMl5Innf0ETYL3SvWGzO7JLReJoh9p3TByllnZSiLJ8F0SWF1ZamWeMBS
+knDBllBgaCHkegsGcYRcSBYEXfF20yG4dFRqC1kRjX3EpRvdLgPzqwb0iQCrqwDbNvahaLsMqxqQ
+E4wbv9bcyMEACO5UwDCJL7WgFYUveUh7O7CwHNwcJJsb2HEWB2DigVsGzGv+1qzoTOdcaCsSbCAT
+nlwy1kEZ9G3LHM6TS074buEldOdm2J+JjlyMNHyYy2baHJgFlCwFrIzbsv12f5Agm3W0AryoD6Qo
+QvmgBHco2eINE8uE9D81aKPdxFsaa2y9GVcUVbvow6Ve1hO+yGLAd9+ptHs41xgQaoQqPhimztyB
+iUoTQVWQ2AvasrgoDr0n2wvabCM9xSisjtTBJnstVCNonHSHTsYV1aOaFIznm6GEkEUKaDCi30g3
+kHxbgHSyaLB9WDMZqhkOUCGigSaZD6IWWlWZqw0Xr6E7DW9b0QzGc0A2E4AH5BaPRygrKgAQLfHW
+1LdWGld0b7wQFP8pDG0aZoP/AnZhl7e3v191TopIAUAIMHxKBDN+Hm5032L/lwxydTtAxgYNRusz
+BgMKRk9PxBIOGqfkJlH8NXrSOXw8CgsfT4gG2qJ/M9QG6wWIDkZAT72ZjNXbFXhrgCaoRiiPwqEk
+wbW8qOn4yI1WK9gD3JYVQADkFsCxYeADAH+xIYkuA4++P/CAnVzhH2YTlUxghdi7CHjvmq0AmyL4
+5CX0ZoR52Op3Fx/8TdxdHYOBJnbY7yrQ02Y4kHdWjYzQZRKlWKLtEWlktNasuQQLLUauV8gRDbhi
+cbgKEhLtTAQG+GEhni2Tg+OTVaQEI1sbYIhTuHikRGRzO36g/VR4xjoZ7AsRUGytZD07FfvtRNk9
+O78420hm4BA3ETkcEoFgL5gQRR1o8MKxngepakQlqF5WNcDBSMYiMTpXXXMjXdSeThxQty3cZru3
+U1NEKlNmTdgfps7APqlDFufx7bXdAV3Wag8YoC8KW5ztUMYNZLdgNvdtI+wsyAjWLBNcujiEdzUj
+U0w0hZZ6fWpb2PfYsrJ036Db2UNqXVMN+P8IuSr9PIAnAEcsTOAcskTrA4AXqQhTSzwISKVEMgQU
+0k2eYAhpXXKUHDI9LQjVKzZSLTpi313oJdF1AjmhmA5GgzgBftx/PvgQD74GajaUWesRiw2QCcdW
+re4ViwmKqlkIrwYCO2nQVsBeT3w0kjxFdBSObaEaG7IIwLX4AhNcbpT0CV388ArGmtrRbQlT7+R5
+kD2LG+AsSbXqCf92VR4/Y+xnzB5oyBsIrFk7w1mkprtUFXUWH7BTaUzqI8HXH95qKE+Rdu83cDv7
+dQtooCIZHpiLNWl6neyii2gWs818gLM5Wx8X8hChcFkMBAMVQw4E94Q16xoIFv7GEmYaDUBO68SA
+pDWiJMT7SwBSGOGtwd/TiQSPQTtNhAl8G4MKHDMHvIPDKFNssySJA5nZjcathVWxTXIFgzNcJHRr
+neoSMecvaJhMZlORoox+ipB5ajO1hEwmhT+xuV0Mnlj4uk+OHw91LfEsc9wCSvjxXxdMRtwe/zBT
+ZISubIxgOxSLGNC2PFj04fdu1mo7x3VFLhwzuxv/AMfhofONSn7YFgEKSYagmS+2AmC2UpjfCI85
+sMhmMfxeKIEc2AF0GngQwF6ezxAb46L060Mp2CMD8vx4CA/rICH2yIEc6Ad5WYPqIhcKhUula/P+
+ve+sMX5YnwVkFBG0kTCgs2fs/LhmBhpRCl+dQGCpFlZuePpLvpl7FOsbFh8ccME3CA+xQPRcFmrU
+EieCRZgSwKF0hWFqmQUMnoNYXTpZw1e+bQBiiwPvVjT/VaAOOPExHCC6WaPGquGZaYMOCHpLtOt7
+wI5fEGKi9cIhMmCW6ILEo2jTEBdaYYv0iA2mKCEHOZqOtW8sDDAOfRyDBz9/SPACYB4fQHQcagzp
+cI0Gc2e1MFDClSpZABKq6FQDo5ESQUlAJM5fUaKrMToXrTPRCIDiqaX4iMACgz0rTQkoTYBkgCiA
+BLja+wxXNAFLdQShG0wE5GIJgl/XljCQ2KiECAhGAJbK/TeLVQgai0zAW9HeZCtBEAJ2IoE5d9TG
+HV6NNBAIw9s+elZqbjLbNBILtziMrwBOo/5vUfLZDYvWK1YEK9GJFSC1sVu1K0a9ELtX/gzUkkS/
+gIkBK34EarFGd5Yoe1H8m4hWtmYld1Lq0hbajGugsxM/hBs2dHbICALQmiLyeQnYvRUISAx0LhdX
+UEkjxBcEqsyG68bCcFszbEWiRt/+Pw7MzEgz0jvCVnQzi0hLynQsiUL/L9tQFAIIGItxDPfeG/ZS
+g+bYF7C7/Ykxi0AcIBRRPyhMcDPAUoWvJ7icCAVHMOyQAH0JZqED+PZ0OotGEzMXJEtD7bYsPRQN
+ClbmNgjptnhzHhooUFHkJA3H+AhgbgAAVOJWsDVfLQMp94oBDVBmYRemOsF/597BYbvNGDgK3ILA
+O/d1Ck3fYsQ/TmQgiX4YzwprfENvYCDwR7x+KDl+JIQOcI1deSQQSIFqGGGEpGubIUMniYY+/PcX
+fptMJYl4FItWF8+Jegx9DLR1b/9+99nHQAwBePkIfFkED39UH7h/YWv/EdPgiUoQUtdRN9ob0lD3
+0gTu0/aB4sBGZVJ+KMwZHYL/NXhBT1Y5ehR1DyOxBvCWbg5PC4zwZLNWG8lfuPppECrPE5ZxU1UQ
+ux3Kpc4EBHYK+QOhE4Xmtj4AE/ADVCOC/fsOevoEv/tzlcNLvQXB4/uJXB3w7aEZiQjIDQ+HxBok
+NFvh4Y0QOBkEtj2ISbe2o20eiQ3fQYsvBYsO9RvfxooRHAQ1FhAEg+EPQuDc3yixLhZ0FccADVXd
+fXfJvGwY5Hpy66Iii1AQwenJgd24KMEIXXYYJNDztbaB8CQuFwW9BG+7ws0RSDPJjmYIQHaLXhzY
+HremiUsGib0fAxOJ93+Jt3ZDBMFmA8H39YXSdCHHA1Y8Xcy9lNHdX7hoZ3Mbn/bBICWBYykHtByx
+YSYc2HHeKrh+2il8X6Ri/XUYZigE+6MCVfNaLLa1DQqCApIiAU8Al9rsaQJzoDONSDbnIm0CUh4S
+RFQMyTfbOvkL2Aw54wh7wZx5LQJj5O3hzzXemkrcweEYSAvk+Lpka0k0CfhKVqEw1m6DSEKJBjoc
+FJAG7G9dgUg34hADyolIOQpILpJLvgibLblkC4Q2P5Y53Jg5SDQSNoLZDBHr5TNZ6QMhIAegpDvo
+h2xoAnUJi8dlwggOzrllp2dyamN3JrPQpBZQR27HAQOWEB5gORZITzfOlqXhigobUOHRPlaTHCBH
+AgQO0mGHECYgiSizEkJKGCEfstdsF3hOMPMGuPg7hmlYRmkskHAsm80KACVqW5JvKwD9DEMBKf3w
+i7klBjgLRzTZLLtGAgO0Nu4tN9Msm2ZotDU1otfLLJtlETZL7Df4W4sHksB/01fyKgGH23o8iUNC
+rcXWFrIEDwQFTL46sMEb60coUqZXygqcut51BnUNPldPKtuNdwTqKMfyAUY0AjBsLQhsDjjuUQgg
+1oUJ+HQOMbLQH4FkbbdgRzDAw9/8nWsQX21qqmRjIFDiixK+SfbYjkYXcsEHTyhcSYEDrgUYGl/Z
+s9IFQ5d6VyiMkEXuMQbqw3JAc9ActrNQKCgfnyusgXOnUR4uojZ1qxokAiAD2JCYLbweiV4svDjI
+BHrZi8U9qgCD7NBadB+VOFNvOFX7bq2xNSlDsmsSSC5LNLb4ZgJeEDBWO8iwVNeWf9cKFURzBSvB
+SOsFLAce8Ll8AYwDg/gJGQyFLDDUb3BAfhiD/QNzPIkN15Oh0JYNxuR//9/2SIoPxxRMlIvRi83T
+4oPFCGML8kfae9d1MYk4iS9yzusEN6+mFvitmQeLyNHotQFyWeCm+4lLGHeRY1SD7QMZAWx6/+3N
+HAfB7gPT7ivpP7MpvirUUR1BSFFSFyd2t41xjQ0wUQ44Us46el3DRxwkXCE0+NpRPlDi7Q8sUhDe
+EDgcOfMV6BSJrrXvXMBiZuxYcQZhFHWHN+QD+P1YFHBuXbzOIHMsqfr6oAY9ly3QP0wsT/Z8QOJC
+m8EnAPLUiovOFnhXaoLhB3LqEDPRr6K6tbf/OO2LwTvF+gSJbFxLJgFbYthBi4kD6UzSF4dNdG68
+KsccBYWdFqsbvmt8GkQ71nUjv4t7KIsUvmu8GYvXO7EVcwcrwkhX1x3bLmQr8nOJNXVntExB7XCh
+QkgE91M0KA6s+2JrB0cwatajTDocbcPbMSvKSf9LLAcEkJHv9j5VdSBi99Znm9x88k6LzsKLyKRe
+oWGYcLALBclp6N5gdp3CO8EFwT4U+yUKrUQwJIEC86WLyi07PP6CjeEDK9DzpNpcJbvRtrdEA1IN
+S10V8CsMlaEzXRaJeBwpwli1uf5o/UMYkwcqOZDHGJYOczgyDxnjKg6S0iX/bLE5uj8lyCCYH4cd
+3bHQtwbW0DzgCIH6oGxdwc0FE/IFegV9H82Z6BtGjYQIAjp3A3w2ztJIKPlQYQyNBSxgvvEOSA7H
+QwhKA0bT2PvrCK5xU5IIEXm60OgKg2Itc2hZMkymkt++NAYDJToiLSwITrGL/Nh+bIoYpEsMxQSR
+YQjDXKE1CAOGamdk74fdcpgwuBOhyHMhPDRzhffaxzFpNaA3IPSl7XRy33AaJG9DEI1TNmdosFFS
+NFfx41BdAzibUUAsEPCFWMi2mSH7COYFguHDV09l0DTiHzc1byfezgJdD4N70lk76HNr78ejM+NK
+OwXr+vmE5t5rSpj29PkHl441N/ou+c2LyUCOXHsHf7kUI8bmVMEBjeY0u9tq0Ha0VRCXNHMbySy4
+1nYr6tEMRYQSiu+2wzVxQKQ3L4AjErnNeDy+0HQDM/KD6BLNWdhfcpcrJPgLH8ALO+lzO5lg3QJ5
+4AQfMJ1cezRy6cnsfHf2Gv3uVYsMjakjziYOFDXea7Vi1JAb1+lcRjgVHOGMCh7c6936A9A7Koep
+ddMqoUaJpTkQ6ZnwfHMxd4KTFQ3aHYr86wIP314qAKgMQUiZj/x19XeJTBxIaF56goWY+kC3vRVA
+JCZRUECNrWMzZt8JLCRRElI8E/DXcDY7P1FCBUOByEYBa88UZcsO86wJB0AGD0WMJHfS9DgfFUwk
+CmuzjyYZCCU0z3c9bN/TgJ88ICsceVDluWMIpE6EVwQEPMC2kAYpSA9zLVoLC15rPDCX2PF1q4sE
+0CudOANWTEGrOXjozk3urdGpr+dRXEmxe12JZp5AdFZdtlQDLl6cAB0nTWcGicI+DSMYsZAmDgUp
+zCEYBuZrJYnSACzjYC7JAKGdz4smttt6bWialtrplUxRdxJozb2F2hewkMNuh1yhMwYww+DNtHFo
+UVxh/csz7blZ4xhgez9VUfLkksF++Ndq/SvRwwPqUE5LWLYnu0yNMYtpOVHQtwibaysBZpLqLxVS
+UdosgWw6Q4Uyrb1l32rHQRhAg0tGQIYw92NISFGJeQRGRBg24cAREUsg6LOsmEVwjfKEp4Qjgb1B
+FVLIxlQ+Ay7eysQAzjkFhU58QQSTiuCewb3R9wPug1FP0VqCKYFYuEXwISwYE5/Pnmr8UNJQCIGU
+eZAcQuEdUozPK44bCaRAGJ39PYyy2XUGW6VPUesYbJGoOtciaJTYMiIkFHyeXasyRruRUgbhwGXd
+UAY1z4J7CWkA2v6BGGKFTP1fLaRzISRMEFkg4YDsGFKEPiOFPUIJO1w9Wyl5SFBSpgcMd4fX60Cm
+ZudBUFZT98hyQnRLU9F0N6HtI7S5e+ggNy6JVgR/ZFuq/FAr1YtuCONufT7GAfKtZggYMbXwPTJD
+LovHTFZVxWmyNWhjQ0tWmRCSXgo7nYQJAemYoJcNQSaBIRiRU2PtqwlPsP5FQ0g3nsJeKkP/1DkU
+zTpyuVxuA0A7azwaPQE+bJbL5VBA90ReRaI4OsyATbNWuDlBGyADXFLvDKIU4AAXuFZXGEfAU7hS
+WGndi36vUS5YRigYDRgIV9gBDnBj6U8xSDUWt7vvQM+AG911CuzCDOO7d7HAXPnbD4bvEVWB+7AV
+mY+7+L3DcgW4CCvYgg+Moa3xW7Ti6MHt22EQihaDxnL2LgobrFbxA/kI8sghhxzz9PUhhxxy9vf4
+hxxyyPn6+/wccsgh/f7/lWCD7QNNvGSf3rbOQFkVFhJGE0h19G3sbqWxDbnx8vfxTL93q233CIs1
+9/fri/WHEzEOLxFbXRdbCV8LwQgm+DEJn5UIUG5QtxDKTdZQHwhGx7s0dEwEww8fHKHRFC2pN7mK
+3nFXqE+jRYhQEFoMiEgR3EFvBHUAAA9IGMNXPBwG3xR/IHbBhKGFzgNGkvCiLUFjVsjabgzC1cLm
+wQw0wX7FB9unabwQwkYsB4kzxQ0o0E063/4GQoc2fmzoT089HBqztiPQnc4QCgqSbGr5g5EoRnos
+iX47Swts5YwpKyJ7rfktldpWhYkGZdxVDTu6LQpFlFZSIk0RT1XdU8eAEHdIfOrIo34zXSuZHLhI
+nSgNGWsFrkCumaMw+r9GA3KldBNJ99kbyf1iqzcZAoPB701hOJ2pVqLPZmMQuBK26q2xYkWyRVj4
+c0TnYsXDQFwEug61AV6xi+0wALKOnPuSe8/T4NAAxwgLyDZ52eiu/eAsQT8KLHK8roVjS3Xf+CMg
+CFbISRh49CsEJRTT6LiCS79GbsFFK/hAigE0WyTexRaLSY+VCAZ0F3rMr6gQdNXgD66Lki7WSq8F
+Ih8C2qK6bUCvRcOo/+O5IWcOJx8Hgr3PHNLaQhqvSNz5hgXsedDn2Ahv5uQjvosETLlNBANda629
+yM6tkbDUcgO1qDYz19OOJBgMzfVFzGVeJYwiOZYDRAFpmIRkDEQEhJsFw4XwUmUMjQzBiAB5gBBB
+2ALkkEOGDAwFQwEKDG9+Azfg2IBrFdV1A8IrOVOTejdA1h8NrxDt7SOWsVoBVeL5Uc2FlywtoLTZ
+Po51IT4wO8ERPTip1FQtKQz7ceqIsAjrD39nhtIkShsUUoVyYpIhMzI8DG1iG+zkBl1jYSJebons
+kI9intsB90kYIZBC8wmISv8R9xQ590FIO1AIZgdOYHN0PAxmSWHPKAIb0mA3sADjk/FRgeBNCogV
+YWDvCkJIRL32LQNPwM8UiysK4gMFjtHHQx8rzRMXJ4mQNRGq9BTDIPDNdEoJMBgofEBiyI/AG1Bl
+av0rzVNtrjA3VlBJEOu08iALlZiKiQN/74eyPoP/B3YVPzyD7whneK8EkUyJTDfqUFhCULaLstgx
+F7TqYrNOIDr4S5neK21uPPlTK/2La0ZYoTdk74kLW/4kEwmPEkEBi2QiWzv+s9xu1ZC0vnFJA0xK
+0C6Xy22OSwcETCJN905vT68MgFkt3/nokUWQDCBRU6djoXhsIPcTdhBVN4NEZ9jbdQnAj4+OoVtZ
+dRyyVlXcQF0XWY26U+sgUlUTla4FowET9LbaMtHcotP+NxoTtX+JW1NSx0cYdI2KV/jtXYo0XV5M
+Hvt0BoN9i5puo5gMH1C+wmGxsJgwKc+7yv09gezwoowk9Ab8tCRugX4Q8O1Xz0QDmqZpmkhMUFRY
+XGmapmlgZGhscFzAm6Z0eHyJrCRvv1BigzIB735chESNRF2gS28DQ0qJuu05CHUf6L/y1XEYgZRu
+wIkpiSrGFl6EFI8anBe5G+ipLxGNmDtDOSjQDeALPUGDwAQmdvNuPD5tdvnNcwaaYroPG/0f+yu0
+eDkudQhKg+4EO9UFO7/Ntov6pSx2JVT6vlGJO+7t/8bT5q9zEo1cjEQrM3glU8ME0REZIrTBcvJv
+laOFF+hWuBwMRI0DK/G6QHm6k82oEBGiA87liPe3NvgsC/ZKhzPbA0wcSEkW4X435YwcF3Xv3T3I
+QF8xi7TN/wHb4dYcFYyEHD0oPN6OdXKMDYlceEKJERIjvmkoexwIQzvZcsVXNjJ2u4vf90KMFDWU
+iSGmocBpXQNxJHOO6HseYcffABJ8xG+nxB08D4+BAjM0hyJRaGWHDbm3wHuBCjtJhdLsKz4gwfbe
+Nv07TQ+OB2AUOFhys8jWLC34bDPR/y+6OAPfK9NFA8871/AmdNQt0RrXHCBJy5n+nwS4jX0BO8d2
+J4PP//fAbViiGi3HbhhBBLtbWFiufb7FbeAfByvHEmPLUrRy7aEkvzvnyFFftouxfAP4gf+ITx/O
+2NjvJiArLMIvjajewd6UhNg2iTgTYden3tkqdDhDiEygtIQsmth+EdbLiAUxvca18Osl14tK/O+L
+9dPB3Y2Fb0Mr8IkUO3Sf6wlKGIoN7z0o4PAGj//tDUfoWoxuitAJHCrTiD0Db3y6MYsIDJF/cgfG
+Du+KbmPA6583KQyT8XMUgXYX/qP+yRvSg+Kg9mCIcesgkPtNVyAUweYCihQxDC3erR1sgMJLNDEh
+sRa+aLkE9g6HJEe62MTWRuK8tDsVcx7Gb9Fdt8UAgzB3iTmNPNWkhG5nOHEEhh1y5tUUel9wZWKN
+wjGBhcJ0CLQW4fYz0NHoB3X4WEoO0UZoOChgjByNBe8K7YMxJE8j+ss6XxiD6AQX7Ee5T4gmK985
+M4xx4lgII3XcdRXIqaEOT0ogK9LCHKePj4dSkEDrwZowvY1XHk6RG0KydFff1zv1dBeRLAF0Tfu4
+gLUWAQwKhMCwCCQPXx7LA62jYThoEncGkAZkGAtfNHA4gWY0VWQY8FaPkzRS09hoGGPYQe4CwJhi
+BBVVUowb1BJwQIXTRVhEIeAk80DamWywTChIOHtGd24nFkwQZFFWHlu/B/aoUlFLdSQngzoWCAAY
+gN+B/Wp3Ez8sJ/CWHavkT1HIhy3ZII4e+3UfHlkQeASO4yP8dMhekg8C4C8jwM6AwUu8QpglMJic
+RSMvLpAkD98NSPyAd4No3gChTAq7m3IXnIkCEJTHAVARxwJxOuDhUIxAyFHtDGoAG7Bja9d7wNt+
+nNp2/cF3dgMVLBFE0PVGe+876FjokYatwzcyIPcI6iCF2kr8VhQrxQPV5jBWllSIX4I4cA6LSzxV
+BT1uAm42QzwSzYv3pKk+YlKmWcqmO8e/cgPFF0ssA/2iCnV+0bmtakFEKA2RdVvYnW4fczTqmivu
+nxCEkOXkCldHV9RYBzlWRzB8zfdaiy1e+IR7guSMnHpRsIphWr5SXTAoVIlRcjUYvXjBEl4fzBdu
+Nw5Z+YtpnFEgO3EwHLtRCzc4HTvuUUEculzUPzlzCSv1Tv7OSSj3qqUxzYE2fEnTTbQOHCwgg/hR
+J5pLPCKLSUEKKLHVEYulyBpb7O3e6QvWRx1y4liiVzDciL/BI8rIihzOjTTOLISOuAK8c8IyTgHT
+6gRnFqB1Ecc5BL4j3T7AD2sMnWBeBDYDy/0DyIE4VXTHg+MPK8P2FeiCNDFODavLIyaZSLakDw8g
+yJQtaTScMTNlI+QFAZTPLuwBeDvDcytZGIP51X4OaOfVh9dBJi1nS3yXcgc8WU76bI7WqM9wwe7H
+9RAKuKJI15QH3+BCvEkoETv3cheLGnywf/dFig5GiE3/BoPrAusB8O1YI+sncSwfO992Ezv7WyuL
+HRwARUZPdfYYKBCWbGcGS57rGb8GCvT83AQZcEVJgWGrH7EjEnI6DnIz+WrrHLVI2LWcEEkEbY5f
+FRN0K/M+rPAR4Bfqsq078w+C3CcCzc22S9h0LdnFZQV67O3B6x7ZcwLeOCv5MzE2xuqNFM2awsQc
++t5BXIIWU0YI6s+JPiuskisUZ1YNVukAe+HUc2IgdFZX2GxWpM9a2712gFwocj8QlWoy8mb+9YhB
+t7adaAMrQVhAizE6eC+xQTl3X4lBZ5r9jeKmd2af/yU4fYyMjGwFPERITFvxit7MzFE90wtyHPt9
+C4fpCy0EhQEXc+xNJW5dmMQMi+Fgz1CpMCPbw8w9UFxFfZcffGr/aIhTEF5koaFQVNx6S3QlBxho
+U7lf/r+lZegz24ld/GoC/xX4WYMNeKM/7Si2swZ8FPy0Dbh35Lk98QgNAGG0oQQMd+8K9wCjgCjr
+/TkdkBh1DGj3z9zK/l1OCGEY6GgMcIIN1PtsCHAn4qGwP/OU280tUWCsDAmcUAOQR7RUNKBcEPUX
+gCFfBDIATqEUu79BfW4wxYA+InU6RgiKBh6Qb/s6w3QEPA3yEgQgdvKLu2bb1NBOpLDB9kXQM+ft
+rWoRvtTrDisgdtgsFS366/VqCliV62jXoJ5Uih/3kTMI9IZfGGtF7FQJiU2Iy7C94OLcWQou/3WI
+HyAVjYyNYyQFHAxhdu2NmAMELC9OEi4krLCsw5IA3fRgqJLtfPBgAABpvgKpVBUQEZqmG6QSCAMH
+CQZpmqZpCgULBAym6ZqmAw0CPw4Bf/t/kA8gaW5mbGF0ZSAxLgEzIENvcHn/3337cmlnaHQPOTk1
+LQQ4IE1hcmsgQWRsZXIg7733ZktXY297g7733nt/e3drX6cTaZqm6bMXGx8jK6ZpmqYzO0NTY56m
+aZpzg6PD4wEZsosQJQEDAiEZkiEDBGWnGZIFAHBft4RZskcvf/eapum+8xk/ITFBYdl1p2mBwUCB
+AwECpmmapgMEBggMmqZpmhAYIDBAYMhGtsLn18eEJCzhBqerrxnkW8KzAwsM0QBBBg3muqoozDWX
+zgMAv12AD0NyZaVEaQZjdG9yeez/n/ogKCVzKY9NYXBWaWV3T2ZGaWxlFbJ3bxYrEB1waW5nF/YT
+YJYQ+kVuZCAZwoK5/3R1cm5zICVkUxcUYGA/WBNJbml0MhjBYNU9NjNcHIywjoBSV4iEB8iyGWx8
+D3RocWDJkwM2AC9McUxxu3/7V1NvZnR3YYBcTWljcm9zDVxX/2/tb5tkb3dzXEOTF250VmVyc2lv
+blxVb+3tl25zdGFsbFdpYlwSvC1wYb3F3v1ja2FnZXOsREFUQU9FaXB0f/v/7hELQ1JJUFRTAEhF
+QURFUgdQTEFUTEn2t5+XQlVSRVRpbTsgUm9tYW4LdqFt7WhpCnl6ijx3aWTeWiHYIGwTFnwgeW/f
+frvdjCBjKXB1dnIuIENsrWsgTmXC1lzheHQgvRelLnVg23trhcgZS2NlbBUcaQzWsHUdaBVTXXBb
+Lq3Q2gd/eRYybAENNtbcLmTOjw8g6CA3uxvBFrYAS25vdIkna4fN2k5UKhJhdpuG1wylZvESbMoZ
+7DW2Z8h0UGhXdtZ27A5zHXF1cmQs4+8p7LXtY2gFYRNiQnXLumFDO2k+L3JHNwjOKhGBLuRsyRLe
+sDCYBHVzZTrjN3ew2UwGQ28RV1xJJZdtZzJQM2izVuw0LNkonJgoUyoYDCs3p8J24Wt6J2Ybc4cu
+c28uAJtFjrAbY4kcuAvhLRTpYoHgWsImJOiLqLrX8LgDSWYnVG4srnbaVniYyRRpEmczLCzG2wR5
+KktAYaztLiV0dHZzLCpvQlYYwBiGZVF3w9tvy0v3U3lzX0c/T2JqgKs1GjsPX0//2CEY2y50W1xn
+D1I9X1MQcNCt/VxhUztkM19GCHz9UsdzIwufUHpncmFtTve+nqECPhMXaSEPRphx+ExvYWQUtyoA
+1G3u3e8lY39Y4HQaX80GrOEdNTsLLgcjfth2nnInMCe3MTAwgAsMXW1kEvo6NasjXm6DgAAyF8mx
+c6002BhF/1sfG81MOyZPyndy+SCSDWvO2ekWJx7tSSgcKV3HPwoK4O0fXmgG7FlFU0dBTFdBWQnf
+sYewby4sCnAtTk8sTiKksNZFVjsrgxxxaMt3u873dwxCsq10IulSZW32yu9wRylleGUiIC0UAt/C
+scItziwubIQiT3et8JC1YgMuADA0AxDWsJVudURCG1V1AVsZaK0J210CPUL/lV5JOlzhYXnBs0dh
+T7IZKDsyS2V5ORiMdNMKC3VsZP9jSayCe+0gax1LkoOFswJu2SPbjCFGG4SOU8BjgyoA97u2JYzK
+CnJKd1kvKZ777yVtL4BIOiVNICenO02ZS9n1E0dmXFgK2x5zaEgrYWtbizSLZP4WZBVmwNad8QBu
+zgCRZxZfFqTJggcPbycPLG/BGKzzYnVpX4X3HE0hb98FQ97DsAh8GgDMB1xqswbCACOhZ9ZoemCh
+w81hSCvOYNhhxTfhQzxmPMUcQ2ZVD87QsG0XZ0dvrnCR6JHse6Zk+hbzOhUKGO3TIwAuYg5rg7Wd
+YCU0IRtk4GEVLDoDOwxkaQD2caCRxlhkI01YS3IKFh9jvmQFkvMTUJNkscwQMqYiE9lKeu9+ESfS
+F8KaLWsGUzLgHYF2AEFvaHN1CAYGX0JxhwqZcCGx1b0bbb4/O7HQIjdjfWW63t0DzXRybcMZm21B
+cuhYGE8EY/ekZhwFYsUbj5oxvld6JxAfx08FV6/dwtVqFwhtYmRMCZwRcyS/K3BjRWiggfh2WGRQ
+2YsIrg6iN38iSWpob1mV0XlPaVYLxmJ5VFIYm0mvbSknY0QX12vtQEsCpR9CxDs9vB1+ZKxuZWXw
+Yz8YnB42h+fxct4gPW3Z2xyxCmuXFxHGsGENg3IZxejcFjgNc0eOa3R3bmVwByJoQVpQ0Bxc1otk
+L2LCgj49DK0mFa3NW29vmzE70SccGGr37IXNgfdYeU1vbHM/WuHgmHN/DZCFY8sOwS9jXxh0poAZ
+tXlaX7Sm2Z7RBHxz+HsD6Nzam22ayLigexvnta9kObpOYnwpC7hvBt1mZvVlYmdzEcMwHC03aZkt
+Mcsa2rAhn3JtLy3hyA5wG24PBazQluh+XcfDZpujA6kJL+IdTbSMROMFYPwBa5qzI1AABxBUcx82
+yMmmUh8AcDBAMkjTDcAfUApgglGDDCCgiBlkkME/gEDgZJDBBgYfWBhkkKYbkH9TO3ikaQYZONBR
+EZBBBhloKLBBBhlkCIhIBhtkkPAEVAcUBhmsaVXjfyt0GWSQQTTIDWSQQQZkJKiQQQYZBIREDDbZ
+ZOifXB8cDNI0g5hUU3wNwiCDPNifFzLIIIP/bCy4yCCDDAyMTCCDDDL4A1KDDDLIEqMjcgwyyCAy
+xAsyyCCDYiKkyCCDDAKCQiCDDDLkB1qDDDLIGpRDegwyyCA61BMyyCCDaiq0yCCDDAqKSiCDDDL0
+BVYggzTNFsAAM4MMMsh2NswPDDLIIGYmrDLIIIMGhkbIIIMM7AleIIMMMh6cY4MMMsh+PtwbDTLI
+YB9uLrwyyGCDDw4fjk6DMCQN/P9R/xEgQ9Igg/9xIEMyyDHCYYMMMsghogGBQzLIIEHiWUMyyCAZ
+knlDMsggOdJpDDLIICmyCTLIIIOJSfKb3iBDVRUX/wIBgwxyIXU1yoMMMiRlJaoMMsggBYVFDDIk
+g+pdHQwyJIOafT0MMiSD2m0tMsggg7oNjTIkgwxN+lMyJIMME8NzMiSDDDPGY8gggwwjpgMkgwwy
+g0PmJIMMMlsbliSDDDJ7O9Yggwwyayu2gwwyyAuLS/aEDDIkVxckgwwydzfOIIMMMmcnroMMMsgH
+h0fugwwyJF8fnoMMMiR/P96DDDYkbx8vvmSwyWYPn48fT5Khkhj+/8EoGUqGoeGQmKFkkdFQyVBy
+sfEMJUPJyanpyVAylJnZlQwlQ7n5UDKUDMWlDCVDyeWV1clQMpS19SVDyVDNrVAylAztnQwlQ8nd
+vf0ylAyVw6MlQ8lQ45NQMpQM07NDyVDJ88urMpQMJeubJUPJUNu7lAyVDPvHQ8lQMqfnlzKUDCXX
+t8lQyVD3z5QMJUOv70PJUDKf37+d9A0l/38Fn1f3NN3jB+8PEVsQ35rlaToPBVkEVUGe7uxpXUA/
+Aw9YAs49TeevDyFcIJ8PmmZ5mglaCFaBwEEGOXtgfwKBOeTkkBkYBwZDTg45YWAE5OSQkwMxMA1D
+LDk5DMGvoBvhotPdZHmFWkZc6GljWtZVb6LScmXVtHN1YnOxbIW9EmJlZCdLRhYLCXYeR4hLkcAj
+YXR5cKV4Sc0UGx7Llg2Mo7MoL2Upez1jHwOapmmaAQMHDx8/aZqnaX//AQMHq2iapg8fP39toUgY
+xW/8UoEqCnuQUAAEjeCAgCirfIJ4lm4sBEWgCVwut5UAAOcA3gDWy+VyuQC9AIQAQgA5ALlcLpcx
+ACkAGAAQAAhBdvJbP97/AKVj7gAVjqBsN+9elB2YmwYABf8X3KxL2P83D/4GCNlbWcAFFw83LGVv
+Mu8GABfdzle2N/+2vwamphc2c64IDA4LF6b77wN7Bjf7UltK+lJBQloFYtsbu1lSWgtbFyfvC3g+
+sPcRBjf2ICalFed2iWgVrwUUEN4b2W1Axhf+7iYFBna7+cA3+kBK+1ExUTFaBbEB+7oAWgtaF1oF
+1lxb2BBKb2C6dQVz/+u2VBVuFAVldYamEBY3FxuysVgLHRZvEdnd5t6eXQNHQEYBBRHNWI3sZGNv
++gv5QG97g7nXuhVdeQEAEugAczODRgsdb7mTB/lBMVhIUlgQBU/ZZ66FDQtK+lHfFGVk9xv55BAl
+EBampmR1FZUXYYB1MwsKAG9DkG122HVICxcxLmhk3wUxb+rBDOYJsxWmzwuQfcMKWRcFFN9z54zH
++wojWgMLYTfMMToXBUJXTxvGGSF6/pMIW4Y7rL8LtgWfbyRLHSHw/HL+YfaGvQ0DBgTJYEla2G8R
+B+8lm70FA3cL9xs2I2Q3+QcFISVb2OcP78I3G3buSQcF9lct7M0SD/s3Qjh777nZBwX6x4yQvVkP
+IW/542z2WmoHBQMVQw2wZQybbxmzy4JVb0cFm3Q6pWxvgfK5L9nMAWtpdRbna4pxgW8RE+xab5DP
+Jg0Fb0dRMaTZsoYAW291GGGvl28Db0wr28bzWQJbbxd9b4E9m9/NciYX2CuA3w1vSZMlbML8+T0D
+b1rxIiSS+rcJAtlk7/tph/bfGa9tkOtS1xG/L4xJK0s38YcyWq/oFehVnxmTVrY38fMigOTcWgsM
+D5ek00pvZusLsm8htQz3C/43hL1ksOIJC2IgymKHAX1Gv6y5QADASAl7AbJoIYoE5bt0dzCohd9w
+sAFNE+peR10gA2E9cwkhcvTCaCNhZjZQfSAa1Eb99zFzG9QPDf+CQ2glMU23ue5XB3o/NWQNd2yd
+uc91ASAHUXQZDyW3uc2NLW8VBXkHhXIJus91TWNtj3UpeS4TQ+a6rusvaRlrC04VeBsp3OfOzHQv
+bgtddRtk3dj3UUdDwWMRbCuWvcG+OWk7aCv/uidsyLcu7AQIsO8ftstGboMA/YEcAgMOL2gzXFAG
+P1OjK7uw1g4PA30AAkPhzQymo2cjFJ9kIpApCAyhe92XJ2wDY/9PeQPppoTDO5lhGWmwrpswN39z
+OTpgoLaon4AIgVC/WbU82UhYZe8T74kANzdh38l2g1B1RGWE7CFYcpGzeWGM3DQvdwMBoRhqAP6D
+GTlLhaed8IQBeQqeAEJJDyNaZSmzHSL87L5CAQcAMm8CBIAARmHeRzCeDW95oS4BPFBIyzWn9gAf
+6w6SkktiD2erlMJY0iEb7zQk95dJbbvpi2kz3WVNcj92BXeVvtjnJmNVJWdbCXlExpKxA2aPse69
+j4d0D0MNLFOR9dxl0UItCTUV1gKsDQFrbpqHS4CdDgDrbX10Dd2HBWwHX5dy82fZR9WNcwEzK1AV
+BmlkDDEpI/ayRYZr7FN7Y2QkQjo6C1+EDARyA/cPZgwhV/8dCJxujGhlddV0mRJYyRB3e6wSmgEp
+gmd6cCAZgS2D3Amue7dziWMBeWYNAWFQ+zV5jXogArAAAIoNuJzEAFRQmEe2AsWmbWl2lgZvtdu7
+Ih1JbnRBFkRlCfHvIgHLDFJlc3VtZVRo28i2FS5kMVNvAnSAXiyKeTJD9sF2kxxDY2USTW9kdUSo
+WNn5SGFuZGiQqcLFiRnPcg6KEkMQDWCDxQJFSEFJ8RwVL4xPkA0L6EFxrZ+B4pslH1P3DFRAw5Zt
+IXAwEeag6AzUDUbMVR9rULhf7oBjYWxGzba2a0w6bHOVNW4yFuzF/oRBZGRy0R+l8WEWEAYVChuQ
+eewNEpNUaW2txQZCSED/SgsVSxSISdFBYlW0YyxMYXw70B5gEwTgQXSfKAhJvip1dGVzpAl/IyGV
+E2xvc4Fuu7C7clVubYNEHEQyMbDLfp9Ub6lHiT0U4gAceXNnxF6omGI0RXhBECoG5mYlEA5irZ0d
+aBBRCLwPudh7wzGRMAzzsBbFhBxPNl1t1qIYRboOhtCScZreJB4rwiYYvj15U2hlpsUTC+g0XTLr
+MAs0YQbQkKjxO7wEPkNvbGgKT3XTJMyW8SVNbwxmKDyEjUlC1kJC3w7rOEJrlGUaU0xpZEJyo3Ga
+7XVzaHb13DRVXMe3QtsHX3NucOl0Ct9luztcbmNw/F92FF8Vad7NdY1jnQpjcMZsZgvcEb7UmQFw
+dF9ovnIzERdFw9YpeF/cX0/di725DwlfZm2HCz1turWNYA2GaowrZmTCYwtWcDcOZfQbc1tzhdYR
+ecp0EByjornCPNUQHYDa1tw5iG5uCHOP1pncDliudyuRWhSmFxMr1NmBucFyXzYLduQWhfu9zQhj
+aDeW5GDuvfQHiCBhdPpmp9kHcw8oZjcb43eKDWZ0kW1xER3C2bBYWWZDZiY4Ss7EvUlBUQr32LUx
+KGZjbgeWlmKn0jhObPBsPOxsdlsFc0hxc7OD8GsV93BjY2lzCXYrlQ1hbWL0BmF4DblhmLWhk+dl
+pFHL2r4Qp0RsZ0lnbVmAXKZNS0RD/K0xzmIRZBIKUmg2C/ZgK0JveC5CT1xrJGxIjH3jWSuYgFk0
+/htmILp1VJNucz0Sliu1bqtFOhTQZ1DXFXt5c5M4Yz9CZh0zRzMd82aLLls4velCd2tXUJINCBQ7
+JFObzYLQnTMQdzSdBiZwoFENzBoeQJMMRsR/zcxfDyewVXBkcqTDq+Id9CBGtVKk2Rj+xAiK7QSa
+DhhFA0wbKmbfkJSuHzwR9w8BOklR7gsBBhxAfFwXgc6KxGCZC5YsEr0D/wcXnc2KydD2DBCIl72B
+BwYAlGSCBeIs97D3EsJ2K0CSpwwCHg22M7wudGwHIE6QUALavZCYG0UuctkSZsdltA5TAwLT7F5z
+QC4mPIQzcI/blL0HJ8BPc3LdW9lgY+uwJ5BPKQAoz1skLGcAxgAAAAAAAAAk/wAAAAAAAAAAAAAA
+AAAAAGC+ALBAAI2+AGD//1eDzf/rEJCQkJCQkIoGRogHRwHbdQeLHoPu/BHbcu24AQAAAAHbdQeL
+HoPu/BHbEcAB23PvdQmLHoPu/BHbc+QxyYPoA3INweAIigZGg/D/dHSJxQHbdQeLHoPu/BHbEckB
+23UHix6D7vwR2xHJdSBBAdt1B4seg+78EdsRyQHbc+91CYseg+78Edtz5IPBAoH9APP//4PRAY0U
+L4P9/HYPigJCiAdHSXX36WP///+QiwKDwgSJB4PHBIPpBHfxAc/pTP///16J97m7AAAAigdHLOg8
+AXf3gD8BdfKLB4pfBGbB6AjBwBCGxCn4gOvoAfCJB4PHBYnY4tmNvgDAAACLBwnAdDyLXwSNhDAw
+8QAAAfNQg8cI/5a88QAAlYoHRwjAdNyJ+VdI8q5V/5bA8QAACcB0B4kDg8ME6+H/lsTxAABh6Vhs
+//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgACAAAAIAAAgAUAAABgAACAAAAA
+AAAAAAAAAAAAAAABAG4AAAA4AACAAAAAAAAAAAAAAAAAAAABAAAAAABQAAAAMLEAAAgKAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAABABrAAAAkAAAgGwAAAC4AACAbQAAAOAAAIBuAAAACAEAgAAAAAAA
+AAAAAAAAAAAAAQAJBAAAqAAAADi7AACgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEACQQAANAA
+AADYvAAABAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAkEAAD4AAAA4L4AAFoCAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAQAJBAAAIAEAAEDBAABcAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAD0AQEA
+vAEBAAAAAAAAAAAAAAAAAAECAQDMAQEAAAAAAAAAAAAAAAAADgIBANQBAQAAAAAAAAAAAAAAAAAb
+AgEA3AEBAAAAAAAAAAAAAAAAACUCAQDkAQEAAAAAAAAAAAAAAAAAMAIBAOwBAQAAAAAAAAAAAAAA
+AAAAAAAAAAAAADoCAQBIAgEAWAIBAAAAAABmAgEAAAAAAHQCAQAAAAAAhAIBAAAAAACOAgEAAAAA
+AJQCAQAAAAAAS0VSTkVMMzIuRExMAEFEVkFQSTMyLmRsbABDT01DVEwzMi5kbGwAR0RJMzIuZGxs
+AE1TVkNSVC5kbGwAVVNFUjMyLmRsbAAATG9hZExpYnJhcnlBAABHZXRQcm9jQWRkcmVzcwAARXhp
+dFByb2Nlc3MAAABSZWdDbG9zZUtleQAAAFByb3BlcnR5U2hlZXRBAABUZXh0T3V0QQAAZXhpdAAA
+R2V0REMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAA=
+"""
+
+# --- EOF ---
diff --git a/lib-python/2.2/distutils/command/build.py b/lib-python/2.2/distutils/command/build.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build.py
@@ -0,0 +1,131 @@
+"""distutils.command.build
+
+Implements the Distutils 'build' command."""
+
+# created 1999/03/08, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os
+from distutils.core import Command
+from distutils.util import get_platform
+
+
+def show_compilers ():
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build (Command):
+
+    description = "build everything needed to install"
+
+    user_options = [
+        ('build-base=', 'b',
+         "base directory for build library"),
+        ('build-purelib=', None,
+         "build directory for platform-neutral distributions"),
+        ('build-platlib=', None,
+         "build directory for platform-specific distributions"),
+        ('build-lib=', None,
+         "build directory for all distribution (defaults to either " +
+         "build-purelib or build-platlib"),
+        ('build-scripts=', None,
+         "build directory for scripts"),
+        ('build-temp=', 't',
+         "temporary build directory"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ('debug', 'g',
+         "compile extensions and libraries with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['debug', 'force']
+
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        self.build_base = 'build'
+        # these are decided only after 'build_base' has its final value
+        # (unless overridden by the user or client)
+        self.build_purelib = None
+        self.build_platlib = None
+        self.build_lib = None
+        self.build_temp = None
+        self.build_scripts = None
+        self.compiler = None
+        self.debug = None
+        self.force = 0
+
+    def finalize_options (self):
+
+        plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+
+        # 'build_purelib' and 'build_platlib' just default to 'lib' and
+        # 'lib.<plat>' under the base build directory.  We only use one of
+        # them for a given distribution, though --
+        if self.build_purelib is None:
+            self.build_purelib = os.path.join(self.build_base, 'lib')
+        if self.build_platlib is None:
+            self.build_platlib = os.path.join(self.build_base,
+                                              'lib' + plat_specifier)
+
+        # 'build_lib' is the actual directory that we will use for this
+        # particular module distribution -- if user didn't supply it, pick
+        # one of 'build_purelib' or 'build_platlib'.
+        if self.build_lib is None:
+            if self.distribution.ext_modules:
+                self.build_lib = self.build_platlib
+            else:
+                self.build_lib = self.build_purelib
+
+        # 'build_temp' -- temporary directory for compiler turds,
+        # "build/temp.<plat>"
+        if self.build_temp is None:
+            self.build_temp = os.path.join(self.build_base,
+                                           'temp' + plat_specifier)
+        if self.build_scripts is None:
+            self.build_scripts = os.path.join(self.build_base,
+                                              'scripts-' + sys.version[0:3])
+
+    # finalize_options ()
+
+
+    def run (self):
+
+        # Run all relevant sub-commands.  This will be some subset of:
+        #  - build_py      - pure Python modules
+        #  - build_clib    - standalone C libraries
+        #  - build_ext     - Python extensions
+        #  - build_scripts - (Python) scripts
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+
+    # -- Predicates for the sub-command list ---------------------------
+
+    def has_pure_modules (self):
+        return self.distribution.has_pure_modules()
+
+    def has_c_libraries (self):
+        return self.distribution.has_c_libraries()
+
+    def has_ext_modules (self):
+        return self.distribution.has_ext_modules()
+
+    def has_scripts (self):
+        return self.distribution.has_scripts()
+
+
+    sub_commands = [('build_py',      has_pure_modules),
+                    ('build_clib',    has_c_libraries),
+                    ('build_ext',     has_ext_modules),
+                    ('build_scripts', has_scripts),
+                   ]
+
+# class build
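
For reference, the directory defaults computed by finalize_options() above
can be reproduced standalone.  This is a minimal sketch, not part of the
patch; the paths in the comments assume a Linux build of Python 2.2:

    import os, sys
    from distutils.util import get_platform

    build_base = 'build'  # the default set in initialize_options()
    plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])

    build_purelib = os.path.join(build_base, 'lib')                   # build/lib
    build_platlib = os.path.join(build_base, 'lib' + plat_specifier)  # e.g. build/lib.linux-i686-2.2
    build_temp    = os.path.join(build_base, 'temp' + plat_specifier)
    build_scripts = os.path.join(build_base, 'scripts-' + sys.version[0:3])
    # build_lib then resolves to build_platlib if the distribution has
    # ext_modules, and to build_purelib otherwise.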
diff --git a/lib-python/2.2/distutils/command/build_clib.py b/lib-python/2.2/distutils/command/build_clib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_clib.py
@@ -0,0 +1,240 @@
+"""distutils.command.build_clib
+
+Implements the Distutils 'build_clib' command, to build a C/C++ library
+that is included in the module distribution and needed by an extension
+module."""
+
+# created (an empty husk) 1999/12/18, Greg Ward
+# fleshed out 2000/02/03-04
+
+__revision__ = "$Id$"
+
+
+# XXX this module has *lots* of code ripped-off quite transparently from
+# build_ext.py -- not surprisingly really, as the work required to build
+# a static library from a collection of C source files is not really all
+# that different from what's required to build a shared object file from
+# a collection of C source files.  Nevertheless, I haven't done the
+# necessary refactoring to account for the overlap in code between the
+# two modules, mainly because a number of subtle details changed in the
+# cut 'n paste.  Sigh.
+
+import os, string
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+
+
+def show_compilers ():
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build_clib (Command):
+
+    description = "build C/C++ libraries used by Python extensions"
+
+    user_options = [
+        ('build-clib=', 'b',
+         "directory to build C/C++ libraries to"),
+        ('build-temp=', 't',
+         "directory to put temporary build by-products"),
+        ('debug', 'g',
+         "compile with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ]
+
+    boolean_options = ['debug', 'force']
+
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        self.build_clib = None
+        self.build_temp = None
+
+        # List of libraries to build
+        self.libraries = None
+
+        # Compilation options for all libraries
+        self.include_dirs = None
+        self.define = None
+        self.undef = None
+        self.debug = None
+        self.force = 0
+        self.compiler = None
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+
+        # This might be confusing: both build-clib and build-temp default
+        # to build-temp as defined by the "build" command.  This is because
+        # I think that C libraries are really just temporary build
+        # by-products, at least from the point of view of building Python
+        # extensions -- but I want to keep my options open.
+        self.set_undefined_options('build',
+                                   ('build_temp', 'build_clib'),
+                                   ('build_temp', 'build_temp'),
+                                   ('compiler', 'compiler'),
+                                   ('debug', 'debug'),
+                                   ('force', 'force'))
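+        # In effect (platform name hypothetical): both build_clib and
+        # build_temp default to the 'build' command's build_temp, e.g.
+        # build/temp.linux-i686-2.2, unless given explicitly.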
+
+        self.libraries = self.distribution.libraries
+        if self.libraries:
+            self.check_library_list(self.libraries)
+
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        if type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs,
+                                             os.pathsep)
+
+        # XXX same as for build_ext -- what about 'self.define' and
+        # 'self.undef' ?
+
+    # finalize_options()
+
+
+    def run (self):
+
+        if not self.libraries:
+            return
+
+        # Yech -- this is cut 'n pasted from build_ext.py!
+        from distutils.ccompiler import new_compiler
+        self.compiler = new_compiler(compiler=self.compiler,
+                                     verbose=self.verbose,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        customize_compiler(self.compiler)
+
+        if self.include_dirs is not None:
+            self.compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name,value) in self.define:
+                self.compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                self.compiler.undefine_macro(macro)
+
+        self.build_libraries(self.libraries)
+
+    # run()
+
+
+    def check_library_list (self, libraries):
+        """Ensure that the list of libraries (presumably provided as a
+           command option 'libraries') is valid, i.e. it is a list of
+           2-tuples, where the tuples are (library_name, build_info_dict).
+           Raise DistutilsSetupError if the structure is invalid anywhere;
+           just returns otherwise."""
+
+        # Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
+        # with only names changed to protect the innocent!
+
+        if type(libraries) is not ListType:
+            raise DistutilsSetupError, \
+                  "'libraries' option must be a list of tuples"
+
+        for lib in libraries:
+            if type(lib) is not TupleType or len(lib) != 2:
+                raise DistutilsSetupError, \
+                      "each element of 'libraries' must be a 2-tuple"
+
+            if type(lib[0]) is not StringType:
+                raise DistutilsSetupError, \
+                      "first element of each tuple in 'libraries' " + \
+                      "must be a string (the library name)"
+            if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
+                raise DistutilsSetupError, \
+                      ("bad library name '%s': " +
+                       "may not contain directory separators") % \
+                      lib[0]
+
+            if type(lib[1]) is not DictionaryType:
+                raise DistutilsSetupError, \
+                      "second element of each tuple in 'libraries' " + \
+                      "must be a dictionary (build info)"
+        # for lib
+
+    # check_library_list ()
+
+
+    def get_library_names (self):
+        # Assume the library list is valid -- 'check_library_list()' is
+        # called from 'finalize_options()', so it should be!
+
+        if not self.libraries:
+            return None
+
+        lib_names = []
+        for (lib_name, build_info) in self.libraries:
+            lib_names.append(lib_name)
+        return lib_names
+
+    # get_library_names ()
+
+
+    def get_source_files (self):
+        self.check_library_list(self.libraries)
+        filenames = []
+        for (lib_name, build_info) in self.libraries:
+            sources = build_info.get('sources')
+            if (sources is None or
+                type(sources) not in (ListType, TupleType) ):
+                raise DistutilsSetupError, \
+                      ("in 'libraries' option (library '%s'), "
+                       "'sources' must be present and must be "
+                       "a list of source filenames") % lib_name
+
+            filenames.extend(sources)
+
+        return filenames
+    # get_source_files ()
+
+
+    def build_libraries (self, libraries):
+
+        for (lib_name, build_info) in libraries:
+            sources = build_info.get('sources')
+            if sources is None or type(sources) not in (ListType, TupleType):
+                raise DistutilsSetupError, \
+                      ("in 'libraries' option (library '%s'), " +
+                       "'sources' must be present and must be " +
+                       "a list of source filenames") % lib_name
+            sources = list(sources)
+
+            self.announce("building '%s' library" % lib_name)
+
+            # First, compile the source code to object files in the library
+            # directory.  (This should probably change to putting object
+            # files in a temporary build directory.)
+            macros = build_info.get('macros')
+            include_dirs = build_info.get('include_dirs')
+            objects = self.compiler.compile(sources,
+                                            output_dir=self.build_temp,
+                                            macros=macros,
+                                            include_dirs=include_dirs,
+                                            debug=self.debug)
+
+            # Now "link" the object files together into a static library.
+            # (On Unix at least, this isn't really linking -- it just
+            # builds an archive.  Whatever.)
+            self.compiler.create_static_lib(objects, lib_name,
+                                            output_dir=self.build_clib,
+                                            debug=self.debug)
+
+        # for libraries
+
+    # build_libraries ()
+
+# class build_clib
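
A note on the data structure build_clib consumes: check_library_list() above
expects the 'libraries' setup keyword to be a list of
(library_name, build_info_dict) 2-tuples, with 'sources' required in each
dict.  A minimal sketch follows; all names below are illustrative, not from
this patch:

    # Hypothetical 'libraries' value, in the shape check_library_list() accepts:
    libraries = [
        ('sample', {'sources': ['src/foo.c', 'src/bar.c'],
                    'macros': [('NDEBUG', '1')],
                    'include_dirs': ['include']}),
    ]
    # Passed as setup(..., libraries=libraries); build_clib compiles each
    # 'sources' list into objects under build_temp and archives them with
    # create_static_lib() into the build_clib directory.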
diff --git a/lib-python/2.2/distutils/command/build_ext.py b/lib-python/2.2/distutils/command/build_ext.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_ext.py
@@ -0,0 +1,630 @@
+"""distutils.command.build_ext
+
+Implements the Distutils 'build_ext' command, for building extension
+modules (currently limited to C extensions, should accommodate C++
+extensions ASAP)."""
+
+# created 1999/08/09, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+from distutils.dep_util import newer_group
+from distutils.extension import Extension
+
+# An extension name is just a dot-separated list of Python NAMEs (ie.
+# the same as a fully-qualified module name).
+extension_name_re = re.compile \
+    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
+
+
+def show_compilers ():
+    from distutils.ccompiler import show_compilers
+    show_compilers()
+
+
+class build_ext (Command):
+
+    description = "build C/C++ extensions (compile/link to build directory)"
+
+    # XXX thoughts on how to deal with complex command-line options like
+    # these, i.e. how to make it so fancy_getopt can suck them off the
+    # command line and make it look like setup.py defined the appropriate
+    # lists of tuples of what-have-you.
+    #   - each command needs a callback to process its command-line options
+    #   - Command.__init__() needs access to its share of the whole
+    #     command line (must ultimately come from
+    #     Distribution.parse_command_line())
+    #   - it then calls the current command class' option-parsing
+    #     callback to deal with weird options like -D, which have to
+    #     parse the option text and churn out some custom data
+    #     structure
+    #   - that data structure (in this case, a list of 2-tuples)
+    #     will then be present in the command object by the time
+    #     we get to finalize_options() (i.e. the constructor
+    #     takes care of both command-line and client options
+    #     in between initialize_options() and finalize_options())
+
+    sep_by = " (separated by '%s')" % os.pathsep
+    user_options = [
+        ('build-lib=', 'b',
+         "directory for compiled extension modules"),
+        ('build-temp=', 't',
+         "directory for temporary files (build by-products)"),
+        ('inplace', 'i',
+         "ignore build-lib and put compiled extensions into the source " +
+         "directory alongside your pure Python modules"),
+        ('include-dirs=', 'I',
+         "list of directories to search for header files" + sep_by),
+        ('define=', 'D',
+         "C preprocessor macros to define"),
+        ('undef=', 'U',
+         "C preprocessor macros to undefine"),
+        ('libraries=', 'l',
+         "external C libraries to link with"),
+        ('library-dirs=', 'L',
+         "directories to search for external C libraries" + sep_by),
+        ('rpath=', 'R',
+         "directories to search for shared C libraries at runtime"),
+        ('link-objects=', 'O',
+         "extra explicit link objects to include in the link"),
+        ('debug', 'g',
+         "compile/link with debugging information"),
+        ('force', 'f',
+         "forcibly build everything (ignore file timestamps)"),
+        ('compiler=', 'c',
+         "specify the compiler type"),
+        ('swig-cpp', None,
+         "make SWIG create C++ files (default is C)"),
+        ]
+
+    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp']
+
+    help_options = [
+        ('help-compiler', None,
+         "list available compilers", show_compilers),
+        ]
+
+    def initialize_options (self):
+        self.extensions = None
+        self.build_lib = None
+        self.build_temp = None
+        self.inplace = 0
+        self.package = None
+
+        self.include_dirs = None
+        self.define = None
+        self.undef = None
+        self.libraries = None
+        self.library_dirs = None
+        self.rpath = None
+        self.link_objects = None
+        self.debug = None
+        self.force = None
+        self.compiler = None
+        self.swig_cpp = None
+
+
+    def finalize_options (self):
+        from distutils import sysconfig
+
+        self.set_undefined_options('build',
+                                   ('build_lib', 'build_lib'),
+                                   ('build_temp', 'build_temp'),
+                                   ('compiler', 'compiler'),
+                                   ('debug', 'debug'),
+                                   ('force', 'force'))
+
+        if self.package is None:
+            self.package = self.distribution.ext_package
+
+        self.extensions = self.distribution.ext_modules
+
+
+        # Make sure Python's include directories (for Python.h, pyconfig.h,
+        # etc.) are in the include search path.
+        py_include = sysconfig.get_python_inc()
+        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        if type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs, os.pathsep)
+
+        # Put the Python "system" include dir at the end, so that
+        # any local include dirs take precedence.
+        self.include_dirs.append(py_include)
+        if plat_py_include != py_include:
+            self.include_dirs.append(plat_py_include)
+
+        if type(self.libraries) is StringType:
+            self.libraries = [self.libraries]
+
+        # Life is easier if we're not forever checking for None, so
+        # simplify these options to empty lists if unset
+        if self.libraries is None:
+            self.libraries = []
+        if self.library_dirs is None:
+            self.library_dirs = []
+        elif type(self.library_dirs) is StringType:
+            self.library_dirs = string.split(self.library_dirs, os.pathsep)
+
+        if self.rpath is None:
+            self.rpath = []
+        elif type(self.rpath) is StringType:
+            self.rpath = string.split(self.rpath, os.pathsep)
+
+        # for extensions under windows use different directories
+        # for Release and Debug builds.
+        # also Python's library directory must be appended to library_dirs
+        if os.name == 'nt':
+            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
+            if self.debug:
+                self.build_temp = os.path.join(self.build_temp, "Debug")
+            else:
+                self.build_temp = os.path.join(self.build_temp, "Release")
+
+        # for extensions under Cygwin Python's library directory must be
+        # appended to library_dirs
+        if sys.platform[:6] == 'cygwin':
+            if string.find(sys.executable, sys.exec_prefix) != -1:
+                # building third party extensions
+                self.library_dirs.append(os.path.join(sys.prefix, "lib", "python" + sys.version[:3], "config"))
+            else:
+                # building python standard extensions
+                self.library_dirs.append('.')
+
+        # The argument parsing will result in self.define being a string, but
+        # it has to be a list of 2-tuples.  All the preprocessor symbols
+        # specified by the 'define' option will be set to '1'.  Multiple
+        # symbols can be separated with commas.
+
+        if self.define:
+            defines = string.split(self.define, ',')
+            self.define = map(lambda symbol: (symbol, '1'), defines)
+
+        # The option for macros to undefine is also a string from the
+        # option parsing, but has to be a list.  Multiple symbols can also
+        # be separated with commas here.
+        if self.undef:
+            self.undef = string.split(self.undef, ',')
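+        # Illustrative example (hypothetical values): the command-line option
+        # "--define FOO,BAR" arrives here as the string "FOO,BAR" and becomes
+        # [('FOO', '1'), ('BAR', '1')]; likewise "--undef FOO,BAR" becomes
+        # ['FOO', 'BAR'].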
+
+    # finalize_options ()
+
+
+    def run (self):
+
+        from distutils.ccompiler import new_compiler
+
+        # 'self.extensions', as supplied by setup.py, is a list of
+        # Extension instances.  See the documentation for Extension (in
+        # distutils.extension) for details.
+        #
+        # For backwards compatibility with Distutils 0.8.2 and earlier, we
+        # also allow the 'extensions' list to be a list of tuples:
+        #    (ext_name, build_info)
+        # where build_info is a dictionary containing everything that
+        # Extension instances do except the name, with a few things being
+        # differently named.  We convert these 2-tuples to Extension
+        # instances as needed.
+
+        if not self.extensions:
+            return
+
+        # If we were asked to build any C/C++ libraries, make sure that the
+        # directory where we put them is in the library search path for
+        # linking extensions.
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.libraries.extend(build_clib.get_library_names() or [])
+            self.library_dirs.append(build_clib.build_clib)
+
+        # Setup the CCompiler object that we'll use to do all the
+        # compiling and linking
+        self.compiler = new_compiler(compiler=self.compiler,
+                                     verbose=self.verbose,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        customize_compiler(self.compiler)
+
+        # And make sure that any compile/link-related options (which might
+        # come from the command-line or from the setup script) are set in
+        # that CCompiler object -- that way, they automatically apply to
+        # all compiling and linking done here.
+        if self.include_dirs is not None:
+            self.compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name,value) in self.define:
+                self.compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                self.compiler.undefine_macro(macro)
+        if self.libraries is not None:
+            self.compiler.set_libraries(self.libraries)
+        if self.library_dirs is not None:
+            self.compiler.set_library_dirs(self.library_dirs)
+        if self.rpath is not None:
+            self.compiler.set_runtime_library_dirs(self.rpath)
+        if self.link_objects is not None:
+            self.compiler.set_link_objects(self.link_objects)
+
+        # Now actually compile and link everything.
+        self.build_extensions()
+
+    # run ()
+
+
+    def check_extensions_list (self, extensions):
+        """Ensure that the list of extensions (presumably provided as a
+        command option 'extensions') is valid, i.e. it is a list of
+        Extension objects.  We also support the old-style list of 2-tuples,
+        where the tuples are (ext_name, build_info), which are converted to
+        Extension instances here.
+
+        Raise DistutilsSetupError if the structure is invalid anywhere;
+        just returns otherwise.
+        """
+        if type(extensions) is not ListType:
+            raise DistutilsSetupError, \
+                  "'ext_modules' option must be a list of Extension instances"
+
+        for i in range(len(extensions)):
+            ext = extensions[i]
+            if isinstance(ext, Extension):
+                continue                # OK! (assume type-checking done
+                                        # by Extension constructor)
+
+            if type(ext) is not TupleType or len(ext) != 2:
+                raise DistutilsSetupError, \
+                      ("each element of 'ext_modules' option must be an "
+                       "Extension instance or 2-tuple")
+
+            (ext_name, build_info) = ext
+            self.warn(("old-style (ext_name, build_info) tuple found in "
+                       "ext_modules for extension '%s' "
+                       "-- please convert to Extension instance" % ext_name))
+
+            if not (type(ext_name) is StringType and
+                    extension_name_re.match(ext_name)):
+                raise DistutilsSetupError, \
+                      ("first element of each tuple in 'ext_modules' "
+                       "must be the extension name (a string)")
+
+            if type(build_info) is not DictionaryType:
+                raise DistutilsSetupError, \
+                      ("second element of each tuple in 'ext_modules' "
+                       "must be a dictionary (build info)")
+
+            # OK, the (ext_name, build_info) dict is type-safe: convert it
+            # to an Extension instance.
+            ext = Extension(ext_name, build_info['sources'])
+
+            # Easy stuff: one-to-one mapping from dict elements to
+            # instance attributes.
+            for key in ('include_dirs',
+                        'library_dirs',
+                        'libraries',
+                        'extra_objects',
+                        'extra_compile_args',
+                        'extra_link_args'):
+                val = build_info.get(key)
+                if val is not None:
+                    setattr(ext, key, val)
+
+            # Medium-easy stuff: same syntax/semantics, different names.
+            ext.runtime_library_dirs = build_info.get('rpath')
+            if build_info.has_key('def_file'):
+                self.warn("'def_file' element of build info dict "
+                          "no longer supported")
+
+            # Non-trivial stuff: 'macros' split into 'define_macros'
+            # and 'undef_macros'.
+            macros = build_info.get('macros')
+            if macros:
+                ext.define_macros = []
+                ext.undef_macros = []
+                for macro in macros:
+                    if not (type(macro) is TupleType and
+                            1 <= len(macro) <= 2):
+                        raise DistutilsSetupError, \
+                              ("'macros' element of build info dict "
+                               "must be 1- or 2-tuple")
+                    if len(macro) == 1:
+                        ext.undef_macros.append(macro[0])
+                    elif len(macro) == 2:
+                        ext.define_macros.append(macro)
+
+            extensions[i] = ext
+
+        # for extensions
+
+    # check_extensions_list ()
+
+
+    def get_source_files (self):
+        self.check_extensions_list(self.extensions)
+        filenames = []
+
+        # Wouldn't it be neat if we knew the names of header files too...
+        for ext in self.extensions:
+            filenames.extend(ext.sources)
+
+        return filenames
+
+
+    def get_outputs (self):
+
+        # Sanity check the 'extensions' list -- can't assume this is being
+        # done in the same run as a 'build_extensions()' call (in fact, we
+        # can probably assume that it *isn't*!).
+        self.check_extensions_list(self.extensions)
+
+        # And build the list of output (built) filenames.  Note that this
+        # ignores the 'inplace' flag, and assumes everything goes in the
+        # "build" tree.
+        outputs = []
+        for ext in self.extensions:
+            fullname = self.get_ext_fullname(ext.name)
+            outputs.append(os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname)))
+        return outputs
+
+    # get_outputs ()
+
+    def build_extensions(self):
+
+        # First, sanity-check the 'extensions' list
+        self.check_extensions_list(self.extensions)
+
+        for ext in self.extensions:
+            self.build_extension(ext)
+
+    def build_extension(self, ext):
+
+        sources = ext.sources
+        if sources is None or type(sources) not in (ListType, TupleType):
+            raise DistutilsSetupError, \
+                  ("in 'ext_modules' option (extension '%s'), " +
+                   "'sources' must be present and must be " +
+                   "a list of source filenames") % ext.name
+        sources = list(sources)
+
+        fullname = self.get_ext_fullname(ext.name)
+        if self.inplace:
+            # ignore build-lib -- put the compiled extension into
+            # the source tree along with pure Python modules
+
+            modpath = string.split(fullname, '.')
+            package = string.join(modpath[0:-1], '.')
+            base = modpath[-1]
+
+            build_py = self.get_finalized_command('build_py')
+            package_dir = build_py.get_package_dir(package)
+            ext_filename = os.path.join(package_dir,
+                                        self.get_ext_filename(base))
+        else:
+            ext_filename = os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname))
+
+        if not (self.force or newer_group(sources, ext_filename, 'newer')):
+            self.announce("skipping '%s' extension (up-to-date)" %
+                          ext.name)
+            return
+        else:
+            self.announce("building '%s' extension" % ext.name)
+
+        # First, scan the sources for SWIG definition files (.i), run
+        # SWIG on 'em to create .c files, and modify the sources list
+        # accordingly.
+        sources = self.swig_sources(sources)
+
+        # Next, compile the source code to object files.
+
+        # XXX not honouring 'define_macros' or 'undef_macros' -- the
+        # CCompiler API needs to change to accommodate this, and I
+        # want to do one thing at a time!
+
+        # Two possible sources for extra compiler arguments:
+        #   - 'extra_compile_args' in Extension object
+        #   - CFLAGS environment variable (not particularly
+        #     elegant, but people seem to expect it and I
+        #     guess it's useful)
+        # The environment variable should take precedence, and
+        # any sensible compiler will give precedence to later
+        # command line args.  Hence we combine them in order:
+        extra_args = ext.extra_compile_args or []
+
+        macros = ext.define_macros[:]
+        for undef in ext.undef_macros:
+            macros.append((undef,))
+
+        # XXX and if we support CFLAGS, why not CC (compiler
+        # executable), CPPFLAGS (pre-processor options), and LDFLAGS
+        # (linker options) too?
+        # XXX should we use shlex to properly parse CFLAGS?
+
+        if os.environ.has_key('CFLAGS'):
+            extra_args.extend(string.split(os.environ['CFLAGS']))
+
+        objects = self.compiler.compile(sources,
+                                        output_dir=self.build_temp,
+                                        macros=macros,
+                                        include_dirs=ext.include_dirs,
+                                        debug=self.debug,
+                                        extra_postargs=extra_args)
+
+        # XXX -- this is a Vile HACK!
+        #
+        # The setup.py script for Python on Unix needs to be able to
+        # get this list so it can perform all the clean up needed to
+        # avoid keeping object files around when cleaning out a failed
+        # build of an extension module.  Since Distutils does not
+        # track dependencies, we have to get rid of intermediates to
+        # ensure all the intermediates will be properly re-built.
+        #
+        self._built_objects = objects[:]
+
+        # Now link the object files together into a "shared object" --
+        # of course, first we have to figure out all the other things
+        # that go into the mix.
+        if ext.extra_objects:
+            objects.extend(ext.extra_objects)
+        extra_args = ext.extra_link_args or []
+
+
+        self.compiler.link_shared_object(
+            objects, ext_filename,
+            libraries=self.get_libraries(ext),
+            library_dirs=ext.library_dirs,
+            runtime_library_dirs=ext.runtime_library_dirs,
+            extra_postargs=extra_args,
+            export_symbols=self.get_export_symbols(ext),
+            debug=self.debug,
+            build_temp=self.build_temp)
+
+
+    def swig_sources (self, sources):
+
+        """Walk the list of source files in 'sources', looking for SWIG
+        interface (.i) files.  Run SWIG on all that are found, and
+        return a modified 'sources' list with SWIG source files replaced
+        by the generated C (or C++) files.
+        """
+
+        new_sources = []
+        swig_sources = []
+        swig_targets = {}
+
+        # XXX this drops generated C/C++ files into the source tree, which
+        # is fine for developers who want to distribute the generated
+        # source -- but there should be an option to put SWIG output in
+        # the temp dir.
+
+        if self.swig_cpp:
+            target_ext = '.cpp'
+        else:
+            target_ext = '.c'
+
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == ".i":             # SWIG interface file
+                new_sources.append(base + target_ext)
+                swig_sources.append(source)
+                swig_targets[source] = new_sources[-1]
+            else:
+                new_sources.append(source)
+
+        if not swig_sources:
+            return new_sources
+
+        swig = self.find_swig()
+        swig_cmd = [swig, "-python", "-dnone", "-ISWIG"]
+        if self.swig_cpp:
+            swig_cmd.append("-c++")
+
+        for source in swig_sources:
+            target = swig_targets[source]
+            self.announce("swigging %s to %s" % (source, target))
+            self.spawn(swig_cmd + ["-o", target, source])
+
+        return new_sources
+
+    # swig_sources ()
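+    # e.g. (hypothetical file names): sources = ['spam.i', 'eggs.c'] comes
+    # back as ['spam.c', 'eggs.c'], with SWIG run on spam.i to generate
+    # spam.c.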
+
+    def find_swig (self):
+        """Return the name of the SWIG executable.  On Unix, this is
+        just "swig" -- it should be in the PATH.  Tries a bit harder on
+        Windows.
+        """
+
+        if os.name == "posix":
+            return "swig"
+        elif os.name == "nt":
+
+            # Look for SWIG in its standard installation directory on
+            # Windows (or so I presume!).  If we find it there, great;
+            # if not, act like Unix and assume it's in the PATH.
+            for vers in ("1.3", "1.2", "1.1"):
+                fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
+                if os.path.isfile(fn):
+                    return fn
+            else:
+                return "swig.exe"
+
+        else:
+            raise DistutilsPlatformError, \
+                  ("I don't know how to find (much less run) SWIG "
+                   "on platform '%s'") % os.name
+
+    # find_swig ()
+
+    # -- Name generators -----------------------------------------------
+    # (extension names, filenames, whatever)
+
+    def get_ext_fullname (self, ext_name):
+        if self.package is None:
+            return ext_name
+        else:
+            return self.package + '.' + ext_name
+
+    def get_ext_filename (self, ext_name):
+        r"""Convert the name of an extension (eg. "foo.bar") into the name
+        of the file from which it will be loaded (eg. "foo/bar.so", or
+        "foo\bar.pyd").
+        """
+
+        from distutils.sysconfig import get_config_var
+        ext_path = string.split(ext_name, '.')
+        # extensions in debug_mode are named 'module_d.pyd' under windows
+        so_ext = get_config_var('SO')
+        if os.name == 'nt' and self.debug:
+            return apply(os.path.join, ext_path) + '_d' + so_ext
+        return apply(os.path.join, ext_path) + so_ext
+
+    def get_export_symbols (self, ext):
+        """Return the list of symbols that a shared extension has to
+        export.  This either uses 'ext.export_symbols' or, if it's not
+        provided, "init" + module_name.  Only relevant on Windows, where
+        the .pyd file (DLL) must export the module "init" function.
+        """
+
+        initfunc_name = "init" + string.split(ext.name,'.')[-1]
+        if initfunc_name not in ext.export_symbols:
+            ext.export_symbols.append(initfunc_name)
+        return ext.export_symbols
+
+    def get_libraries (self, ext):
+        """Return the list of libraries to link against when building a
+        shared extension.  On most platforms, this is just 'ext.libraries';
+        on Windows, we add the Python library (eg. python20.dll).
+        """
+        # The python library is always needed on Windows.  For MSVC, this
+        # is redundant, since the library is mentioned in a pragma in
+        # pyconfig.h that MSVC groks.  The other Windows compilers all seem
+        # to need it mentioned explicitly, though, so that's what we do.
+        # Append '_d' to the python import library on debug builds.
+        from distutils.msvccompiler import MSVCCompiler
+        if sys.platform == "win32" and \
+           not isinstance(self.compiler, MSVCCompiler):
+            template = "python%d%d"
+            if self.debug:
+                template = template + '_d'
+            pythonlib = (template %
+                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+            # don't extend ext.libraries, it may be shared with other
+            # extensions, it is a reference to the original list
+            return ext.libraries + [pythonlib]
+        elif sys.platform[:6] == "cygwin":
+            template = "python%d.%d"
+            pythonlib = (template %
+                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+            # don't extend ext.libraries, it may be shared with other
+            # extensions, it is a reference to the original list
+            return ext.libraries + [pythonlib]
+        else:
+            return ext.libraries
+
+# class build_ext
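
To contrast the two spellings that check_extensions_list() accepts, here is
a sketch of an Extension instance and the old-style 2-tuple it replaces;
the module and file names are made up:

    from distutils.extension import Extension

    # Preferred form: an Extension instance.
    ext = Extension('pkg.spam',
                    sources=['spam.c'],
                    define_macros=[('NDEBUG', '1')],
                    undef_macros=['DEBUG'],
                    include_dirs=['include'])

    # Still accepted: an (ext_name, build_info) 2-tuple.  In the 'macros'
    # list a 2-tuple defines a macro and a 1-tuple undefines one.
    old_style = ('pkg.spam',
                 {'sources': ['spam.c'],
                  'macros': [('NDEBUG', '1'), ('DEBUG',)]})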
diff --git a/lib-python/2.2/distutils/command/build_py.py b/lib-python/2.2/distutils/command/build_py.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_py.py
@@ -0,0 +1,401 @@
+"""distutils.command.build_py
+
+Implements the Distutils 'build_py' command."""
+
+# created 1999/03/08, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, string, os
+from types import *
+from glob import glob
+
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import convert_path
+
+
+class build_py (Command):
+
+    description = "\"build\" pure Python modules (copy to build directory)"
+
+    user_options = [
+        ('build-lib=', 'd', "directory to \"build\" (copy) to"),
+        ('compile', 'c', "compile .py to .pyc"),
+        ('no-compile', None, "don't compile .py files [default]"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['compile', 'force']
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+        self.build_lib = None
+        self.py_modules = None
+        self.package = None
+        self.package_dir = None
+        self.compile = 0
+        self.optimize = 0
+        self.force = None
+
+    def finalize_options (self):
+        self.set_undefined_options('build',
+                                   ('build_lib', 'build_lib'),
+                                   ('force', 'force'))
+
+        # Get the distribution options that are aliases for build_py
+        # options -- list of packages and list of modules.
+        self.packages = self.distribution.packages
+        self.py_modules = self.distribution.py_modules
+        self.package_dir = {}
+        if self.distribution.package_dir:
+            for name, path in self.distribution.package_dir.items():
+                self.package_dir[name] = convert_path(path)
+
+        # Ick, copied straight from install_lib.py (fancy_getopt needs a
+        # type system!  Hell, *everything* needs a type system!!!)
+        if type(self.optimize) is not IntType:
+            try:
+                self.optimize = int(self.optimize)
+                assert 0 <= self.optimize <= 2
+            except (ValueError, AssertionError):
+                raise DistutilsOptionError, "optimize must be 0, 1, or 2"
+
+    def run (self):
+
+        # XXX copy_file by default preserves atime and mtime.  IMHO this is
+        # the right thing to do, but perhaps it should be an option -- in
+        # particular, a site administrator might want installed files to
+        # reflect the time of installation rather than the last
+        # modification time before the installed release.
+
+        # XXX copy_file by default preserves mode, which appears to be the
+        # wrong thing to do: if a file is read-only in the working
+        # directory, we want it to be installed read/write so that the next
+        # installation of the same module distribution can overwrite it
+        # without problems.  (This might be a Unix-specific issue.)  Thus
+        # we turn off 'preserve_mode' when copying to the build directory,
+        # since the build directory is supposed to be exactly what the
+        # installation will look like (ie. we preserve mode when
+        # installing).
+
+        # Two options control which modules will be installed: 'packages'
+        # and 'py_modules'.  The former lets us work with whole packages, not
+        # specifying individual modules at all; the latter is for
+        # specifying modules one-at-a-time.  Currently they are mutually
+        # exclusive: you can define one or the other (or neither), but not
+        # both.  It remains to be seen how limiting this is.
+
+        # Dispose of the two "unusual" cases first: no pure Python modules
+        # at all (no problem, just return silently), and over-specified
+        # 'packages' and 'py_modules' options.
+
+        if not self.py_modules and not self.packages:
+            return
+        if self.py_modules and self.packages:
+            raise DistutilsOptionError, \
+                  "build_py: supplying both 'packages' and 'py_modules' " + \
+                  "options is not allowed"
+
+        # Now we're down to two cases: 'py_modules' only and 'packages' only.
+        if self.py_modules:
+            self.build_modules()
+        else:
+            self.build_packages()
+
+        self.byte_compile(self.get_outputs(include_bytecode=0))
+
+    # run ()
+
+
+    def get_package_dir (self, package):
+        """Return the directory, relative to the top of the source
+           distribution, where package 'package' should be found
+           (at least according to the 'package_dir' option, if any)."""
+
+        path = string.split(package, '.')
+
+        if not self.package_dir:
+            if path:
+                return apply(os.path.join, path)
+            else:
+                return ''
+        else:
+            tail = []
+            while path:
+                try:
+                    pdir = self.package_dir[string.join(path, '.')]
+                except KeyError:
+                    tail.insert(0, path[-1])
+                    del path[-1]
+                else:
+                    tail.insert(0, pdir)
+                    return apply(os.path.join, tail)
+            else:
+                # Oops, got all the way through 'path' without finding a
+                # match in package_dir.  If package_dir defines a directory
+                # for the root (nameless) package, then fallback on it;
+                # otherwise, we might as well have not consulted
+                # package_dir at all, as we just use the directory implied
+                # by 'tail' (which should be the same as the original value
+                # of 'path' at this point).
+                pdir = self.package_dir.get('')
+                if pdir is not None:
+                    tail.insert(0, pdir)
+
+                if tail:
+                    return apply(os.path.join, tail)
+                else:
+                    return ''
+
+    # get_package_dir ()
+
+
+    def check_package (self, package, package_dir):
+
+        # Empty dir name means current directory, which we can probably
+        # assume exists.  Also, os.path.exists and isdir don't know about
+        # my "empty string means current dir" convention, so we have to
+        # circumvent them.
+        if package_dir != "":
+            if not os.path.exists(package_dir):
+                raise DistutilsFileError, \
+                      "package directory '%s' does not exist" % package_dir
+            if not os.path.isdir(package_dir):
+                raise DistutilsFileError, \
+                      ("supposed package directory '%s' exists, " +
+                       "but is not a directory") % package_dir
+
+        # Require __init__.py for all but the "root package"
+        if package:
+            init_py = os.path.join(package_dir, "__init__.py")
+            if os.path.isfile(init_py):
+                return init_py
+            else:
+                self.warn(("package init file '%s' not found " +
+                           "(or not a regular file)") % init_py)
+
+        # Either not in a package at all (__init__.py not expected), or
+        # __init__.py doesn't exist -- so don't return the filename.
+        return
+
+    # check_package ()
+
+
+    def check_module (self, module, module_file):
+        if not os.path.isfile(module_file):
+            self.warn("file %s (for module %s) not found" %
+                      (module_file, module))
+            return 0
+        else:
+            return 1
+
+    # check_module ()
+
+
+    def find_package_modules (self, package, package_dir):
+        self.check_package(package, package_dir)
+        module_files = glob(os.path.join(package_dir, "*.py"))
+        modules = []
+        setup_script = os.path.abspath(self.distribution.script_name)
+
+        for f in module_files:
+            abs_f = os.path.abspath(f)
+            if abs_f != setup_script:
+                module = os.path.splitext(os.path.basename(f))[0]
+                modules.append((package, module, f))
+            else:
+                self.debug_print("excluding %s" % setup_script)
+        return modules
+
+
+    def find_modules (self):
+        """Finds individually-specified Python modules, ie. those listed by
+        module name in 'self.py_modules'.  Returns a list of tuples (package,
+        module_base, filename): 'package' is the dotted name of the package
+        containing the module; 'module_base' is the bare (no
+        packages, no dots) module name, and 'filename' is the path to the
+        ".py" file (relative to the distribution root) that implements the
+        module.
+        """
+
+        # Map package names to tuples of useful info about the package:
+        #    (package_dir, checked)
+        # package_dir - the directory where we'll find source files for
+        #   this package
+        # checked - true if we have checked that the package directory
+        #   is valid (exists, contains __init__.py, ... ?)
+        packages = {}
+
+        # List of (package, module, filename) tuples to return
+        modules = []
+
+        # We treat modules-in-packages almost the same as toplevel modules,
+        # just the "package" for a toplevel is empty (either an empty
+        # string or empty list, depending on context).  Differences:
+        #   - don't check for __init__.py in directory for empty package
+
+        for module in self.py_modules:
+            path = string.split(module, '.')
+            package = string.join(path[0:-1], '.')
+            module_base = path[-1]
+
+            try:
+                (package_dir, checked) = packages[package]
+            except KeyError:
+                package_dir = self.get_package_dir(package)
+                checked = 0
+
+            if not checked:
+                init_py = self.check_package(package, package_dir)
+                packages[package] = (package_dir, 1)
+                if init_py:
+                    modules.append((package, "__init__", init_py))
+
+            # XXX perhaps we should also check for just .pyc files
+            # (so greedy closed-source bastards can distribute Python
+            # modules too)
+            module_file = os.path.join(package_dir, module_base + ".py")
+            if not self.check_module(module, module_file):
+                continue
+
+            modules.append((package, module_base, module_file))
+
+        return modules
+
+    # find_modules ()
+
+
+    def find_all_modules (self):
+        """Compute the list of all modules that will be built, whether
+        they are specified one-module-at-a-time ('self.py_modules') or
+        by whole packages ('self.packages').  Return a list of tuples
+        (package, module, module_file), just like 'find_modules()' and
+        'find_package_modules()' do."""
+
+        if self.py_modules:
+            modules = self.find_modules()
+        else:
+            modules = []
+            for package in self.packages:
+                package_dir = self.get_package_dir(package)
+                m = self.find_package_modules(package, package_dir)
+                modules.extend(m)
+
+        return modules
+
+    # find_all_modules ()
+
+
+    def get_source_files (self):
+
+        modules = self.find_all_modules()
+        filenames = []
+        for module in modules:
+            filenames.append(module[-1])
+
+        return filenames
+
+
+    def get_module_outfile (self, build_dir, package, module):
+        outfile_path = [build_dir] + list(package) + [module + ".py"]
+        return apply(os.path.join, outfile_path)
+
+
+    def get_outputs (self, include_bytecode=1):
+        modules = self.find_all_modules()
+        outputs = []
+        for (package, module, module_file) in modules:
+            package = string.split(package, '.')
+            filename = self.get_module_outfile(self.build_lib, package, module)
+            outputs.append(filename)
+            if include_bytecode:
+                if self.compile:
+                    outputs.append(filename + "c")
+                if self.optimize > 0:
+                    outputs.append(filename + "o")
+
+        return outputs
+
+
+    def build_module (self, module, module_file, package):
+        if type(package) is StringType:
+            package = string.split(package, '.')
+        elif type(package) not in (ListType, TupleType):
+            raise TypeError, \
+                  "'package' must be a string (dot-separated), list, or tuple"
+
+        # Now put the module source file into the "build" area -- this is
+        # easy, we just copy it somewhere under self.build_lib (the build
+        # directory for Python source).
+        outfile = self.get_module_outfile(self.build_lib, package, module)
+        dir = os.path.dirname(outfile)
+        self.mkpath(dir)
+        return self.copy_file(module_file, outfile, preserve_mode=0)
+
+
+    def build_modules (self):
+
+        modules = self.find_modules()
+        for (package, module, module_file) in modules:
+
+            # Now "build" the module -- ie. copy the source file to
+            # self.build_lib (the build directory for Python source).
+            # (Actually, it gets copied to the directory for this package
+            # under self.build_lib.)
+            self.build_module(module, module_file, package)
+
+    # build_modules ()
+
+
+    def build_packages (self):
+
+        for package in self.packages:
+
+            # Get list of (package, module, module_file) tuples based on
+            # scanning the package directory.  'package' is only included
+            # in the tuple so that 'find_modules()' and
+            # 'find_package_modules()' have a consistent interface; it's
+            # ignored here (apart from a sanity check).  Also, 'module' is
+            # the *unqualified* module name (ie. no dots, no package -- we
+            # already know its package!), and 'module_file' is the path to
+            # the .py file, relative to the current directory
+            # (ie. including 'package_dir').
+            package_dir = self.get_package_dir(package)
+            modules = self.find_package_modules(package, package_dir)
+
+            # Now loop over the modules we found, "building" each one (just
+            # copy it to self.build_lib).
+            for (package_, module, module_file) in modules:
+                assert package == package_
+                self.build_module(module, module_file, package)
+
+    # build_packages ()
+
+
+    def byte_compile (self, files):
+        from distutils.util import byte_compile
+        prefix = self.build_lib
+        if prefix[-1] != os.sep:
+            prefix = prefix + os.sep
+
+        # XXX this code is essentially the same as the 'byte_compile()
+        # method of the "install_lib" command, except for the determination
+        # of the 'prefix' string.  Hmmm.
+
+        if self.compile:
+            byte_compile(files, optimize=0,
+                         force=self.force,
+                         prefix=prefix,
+                         verbose=self.verbose, dry_run=self.dry_run)
+        if self.optimize > 0:
+            byte_compile(files, optimize=self.optimize,
+                         force=self.force,
+                         prefix=prefix,
+                         verbose=self.verbose, dry_run=self.dry_run)
+
+# class build_py
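
[Annotation: the trickiest part of build_py is get_package_dir(). A
self-contained restatement of its lookup order (longest dotted prefix first,
then the root '' entry, then the path implied by the name itself) may make it
easier to follow; the function name below is illustrative, not code from the
diff:

    import os

    def resolve_package_dir(package, package_dir):
        # Walk the dotted name from most- to least-specific, consulting
        # the 'package_dir' mapping; unmatched trailing components are
        # re-appended below whichever prefix (or root entry) matched.
        path = package.split('.')
        tail = []
        while path:
            key = '.'.join(path)
            if key in package_dir:
                tail.insert(0, package_dir[key])
                return os.path.join(*tail)
            tail.insert(0, path.pop())
        root = package_dir.get('')
        if root is not None:
            tail.insert(0, root)
        return os.path.join(*tail)

    # {'': 'src'} maps package 'foo.bar' to src/foo/bar
    assert resolve_package_dir('foo.bar', {'': 'src'}) == \
           os.path.join('src', 'foo', 'bar')
]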
diff --git a/lib-python/2.2/distutils/command/build_scripts.py b/lib-python/2.2/distutils/command/build_scripts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/build_scripts.py
@@ -0,0 +1,110 @@
+"""distutils.command.build_scripts
+
+Implements the Distutils 'build_scripts' command."""
+
+# created 2000/05/23, Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import sys, os, re
+from distutils import sysconfig
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.util import convert_path
+
+# check if Python is called on the first line with this expression
+first_line_re = re.compile(r'^#!.*python(\s+.*)?$')
+
+class build_scripts (Command):
+
+    description = "\"build\" scripts (copy and fixup #! line)"
+
+    user_options = [
+        ('build-dir=', 'd', "directory to \"build\" (copy) to"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ]
+
+    boolean_options = ['force']
+
+
+    def initialize_options (self):
+        self.build_dir = None
+        self.scripts = None
+        self.force = None
+        self.outfiles = None
+
+    def finalize_options (self):
+        self.set_undefined_options('build',
+                                   ('build_scripts', 'build_dir'),
+                                   ('force', 'force'))
+        self.scripts = self.distribution.scripts
+
+
+    def run (self):
+        if not self.scripts:
+            return
+        self.copy_scripts()
+
+
+    def copy_scripts (self):
+        """Copy each script listed in 'self.scripts'; if it's marked as a
+        Python script in the Unix way (first line matches 'first_line_re',
+        ie. starts with "#!" and contains "python"), then adjust the first
+        line to refer to the current Python interpreter as we copy.
+        """
+        self.mkpath(self.build_dir)
+        for script in self.scripts:
+            adjust = 0
+            script = convert_path(script)
+            outfile = os.path.join(self.build_dir, os.path.basename(script))
+
+            if not self.force and not newer(script, outfile):
+                self.announce("not copying %s (up-to-date)" % script)
+                continue
+
+            # Always open the file, but ignore failures in dry-run mode --
+            # that way, we'll get accurate feedback if we can read the
+            # script.
+            try:
+                f = open(script, "r")
+            except IOError:
+                if not self.dry_run:
+                    raise
+                f = None
+            else:
+                first_line = f.readline()
+                if not first_line:
+                    self.warn("%s is an empty file (skipping)" % script)
+                    continue
+
+                match = first_line_re.match(first_line)
+                if match:
+                    adjust = 1
+                    post_interp = match.group(1) or ''
+
+            if adjust:
+                self.announce("copying and adjusting %s -> %s" %
+                              (script, self.build_dir))
+                if not self.dry_run:
+                    outf = open(outfile, "w")
+                    if not sysconfig.python_build:
+                    outf.write("#!%s%s\n" %
+                                   (os.path.normpath(sys.executable),
+                                    post_interp))
+                    else:
+                        outf.write("#!%s%s\n" %
+                                   (os.path.join(
+                            sysconfig.get_config_var("BINDIR"),
+                            "python" + sysconfig.get_config_var("EXE")),
+                                    post_interp))
+                    outf.writelines(f.readlines())
+                    outf.close()
+                if f:
+                    f.close()
+            else:
+                if f:
+                    f.close()
+                self.copy_file(script, outfile)
+
+    # copy_scripts ()
+
+# class build_scripts
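
[Annotation: the shebang handling above hinges on first_line_re, whose
group(1) captures any interpreter arguments so they survive the rewrite. A
small standalone check (the sample line is illustrative):

    import os
    import re
    import sys

    first_line_re = re.compile(r'^#!.*python(\s+.*)?$')

    line = "#!/usr/bin/env python -u\n"
    match = first_line_re.match(line)
    assert match is not None
    post_interp = match.group(1) or ''   # here: " -u"
    # Rebuild the line against the running interpreter, keeping the options.
    new_line = "#!%s%s\n" % (os.path.normpath(sys.executable), post_interp)
    assert new_line.endswith(" -u\n")
]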
diff --git a/lib-python/2.2/distutils/command/clean.py b/lib-python/2.2/distutils/command/clean.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/clean.py
@@ -0,0 +1,79 @@
+"""distutils.command.clean
+
+Implements the Distutils 'clean' command."""
+
+# contributed by Bastian Kleineidam <calvin at cs.uni-sb.de>, added 2000-03-18
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+
+class clean (Command):
+
+    description = "clean up output of 'build' command"
+    user_options = [
+        ('build-base=', 'b',
+         "base build directory (default: 'build.build-base')"),
+        ('build-lib=', None,
+         "build directory for all modules (default: 'build.build-lib')"),
+        ('build-temp=', 't',
+         "temporary build directory (default: 'build.build-temp')"),
+        ('build-scripts=', None,
+         "build directory for scripts (default: 'build.build-scripts')"),
+        ('bdist-base=', None,
+         "temporary directory for built distributions"),
+        ('all', 'a',
+         "remove all build output, not just temporary by-products")
+    ]
+
+    boolean_options = ['all']
+
+    def initialize_options(self):
+        self.build_base = None
+        self.build_lib = None
+        self.build_temp = None
+        self.build_scripts = None
+        self.bdist_base = None
+        self.all = None
+
+    def finalize_options(self):
+        self.set_undefined_options('build',
+                                   ('build_base', 'build_base'),
+                                   ('build_lib', 'build_lib'),
+                                   ('build_scripts', 'build_scripts'),
+                                   ('build_temp', 'build_temp'))
+        self.set_undefined_options('bdist',
+                                   ('bdist_base', 'bdist_base'))
+
+    def run(self):
+        # remove the build/temp.<plat> directory (unless it's already
+        # gone)
+        if os.path.exists(self.build_temp):
+            remove_tree(self.build_temp, self.verbose, self.dry_run)
+        else:
+            self.warn("'%s' does not exist -- can't clean it" %
+                      self.build_temp)
+
+        if self.all:
+            # remove build directories
+            for directory in (self.build_lib,
+                              self.bdist_base,
+                              self.build_scripts):
+                if os.path.exists(directory):
+                    remove_tree(directory, self.verbose, self.dry_run)
+                else:
+                    self.warn("'%s' does not exist -- can't clean it" %
+                              directory)
+
+        # just for the heck of it, try to remove the base build directory:
+        # we might have emptied it right now, but if not we don't care
+        if not self.dry_run:
+            try:
+                os.rmdir(self.build_base)
+                self.announce("removing '%s'" % self.build_base)
+            except OSError:
+                pass
+
+# class clean
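
[Annotation: the tail of run() relies on os.rmdir() refusing to delete a
non-empty directory, which is what makes the "just for the heck of it"
cleanup safe. A sketch of the same idiom, with a hypothetical helper name:

    import os

    def try_rmdir(path):
        # os.rmdir() raises OSError both when 'path' is missing and when
        # it still has contents, so swallowing the error removes the
        # directory only if it exists and is empty.
        try:
            os.rmdir(path)
            return 1
        except OSError:
            return 0
]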
diff --git a/lib-python/2.2/distutils/command/command_template b/lib-python/2.2/distutils/command/command_template
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/command_template
@@ -0,0 +1,45 @@
+"""distutils.command.x
+
+Implements the Distutils 'x' command.
+"""
+
+# created 2000/mm/dd, John Doe
+
+__revision__ = "$Id$"
+
+from distutils.core import Command
+
+
+class x (Command):
+
+    # Brief (40-50 characters) description of the command
+    description = ""
+
+    # List of option tuples: long name, short name (None if no short
+    # name), and help string.
+    user_options = [('', '',
+                     ""),
+                   ]
+
+
+    def initialize_options (self):
+        self.x = None
+        # (one attribute per user option; 'x' is the template placeholder)
+
+    # initialize_options()
+
+
+    def finalize_options (self):
+        if self.x is None:
+            self.x = None  # fill in a sensible default here
+
+    # finalize_options()
+
+
+    def run (self):
+        pass
+
+    # run()
+
+# class x
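
[Annotation: filled in, the template above becomes a working command. A
minimal hypothetical example (the 'greet' command and its option are
inventions for illustration, not anything this changeset defines):

    from distutils.core import Command

    class greet (Command):

        description = "print a greeting (template filled in)"

        user_options = [('name=', 'n',
                         "who to greet [default: world]"),
                       ]

        def initialize_options (self):
            self.name = None

        def finalize_options (self):
            if self.name is None:
                self.name = "world"

        def run (self):
            self.announce("hello, %s" % self.name)
]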
diff --git a/lib-python/2.2/distutils/command/config.py b/lib-python/2.2/distutils/command/config.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/config.py
@@ -0,0 +1,366 @@
+"""distutils.command.config
+
+Implements the Distutils 'config' command, a (mostly) empty command class
+that exists mainly to be sub-classed by specific module distributions and
+applications.  The idea is that while every "config" command is different,
+at least they're all named the same, and users always see "config" in the
+list of standard commands.  Also, this is a good place to put common
+configure-like tasks: "try to compile this C code", or "figure out where
+this header file lives".
+"""
+
+# created 2000/05/29, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string, re
+from types import *
+from distutils.core import Command
+from distutils.errors import DistutilsExecError
+
+
+LANG_EXT = {'c': '.c',
+            'c++': '.cxx'}
+
+class config (Command):
+
+    description = "prepare to build"
+
+    user_options = [
+        ('compiler=', None,
+         "specify the compiler type"),
+        ('cc=', None,
+         "specify the compiler executable"),
+        ('include-dirs=', 'I',
+         "list of directories to search for header files"),
+        ('define=', 'D',
+         "C preprocessor macros to define"),
+        ('undef=', 'U',
+         "C preprocessor macros to undefine"),
+        ('libraries=', 'l',
+         "external C libraries to link with"),
+        ('library-dirs=', 'L',
+         "directories to search for external C libraries"),
+
+        ('noisy', None,
+         "show every action (compile, link, run, ...) taken"),
+        ('dump-source', None,
+         "dump generated source files before attempting to compile them"),
+        ]
+
+
+    # The three standard command methods: since the "config" command
+    # does nothing by default, these are empty.
+
+    def initialize_options (self):
+        self.compiler = None
+        self.cc = None
+        self.include_dirs = None
+        #self.define = None
+        #self.undef = None
+        self.libraries = None
+        self.library_dirs = None
+
+        # maximal output for now
+        self.noisy = 1
+        self.dump_source = 1
+
+        # list of temporary files generated along-the-way that we have
+        # to clean at some point
+        self.temp_files = []
+
+    def finalize_options (self):
+        if self.include_dirs is None:
+            self.include_dirs = self.distribution.include_dirs or []
+        elif type(self.include_dirs) is StringType:
+            self.include_dirs = string.split(self.include_dirs, os.pathsep)
+
+        if self.libraries is None:
+            self.libraries = []
+        elif type(self.libraries) is StringType:
+            self.libraries = [self.libraries]
+
+        if self.library_dirs is None:
+            self.library_dirs = []
+        elif type(self.library_dirs) is StringType:
+            self.library_dirs = string.split(self.library_dirs, os.pathsep)
+
+
+    def run (self):
+        pass
+
+
+    # Utility methods for actual "config" commands.  The interfaces are
+    # loosely based on Autoconf macros of similar names.  Sub-classes
+    # may use these freely.
+
+    def _check_compiler (self):
+        """Check that 'self.compiler' really is a CCompiler object;
+        if not, make it one.
+        """
+        # We do this late, and only on-demand, because this is an expensive
+        # import.
+        from distutils.ccompiler import CCompiler, new_compiler
+        if not isinstance(self.compiler, CCompiler):
+            self.compiler = new_compiler(compiler=self.compiler,
+                                         verbose=self.noisy,
+                                         dry_run=self.dry_run,
+                                         force=1)
+            if self.include_dirs:
+                self.compiler.set_include_dirs(self.include_dirs)
+            if self.libraries:
+                self.compiler.set_libraries(self.libraries)
+            if self.library_dirs:
+                self.compiler.set_library_dirs(self.library_dirs)
+
+
+    def _gen_temp_sourcefile (self, body, headers, lang):
+        filename = "_configtest" + LANG_EXT[lang]
+        file = open(filename, "w")
+        if headers:
+            for header in headers:
+                file.write("#include <%s>\n" % header)
+            file.write("\n")
+        file.write(body)
+        if body[-1] != "\n":
+            file.write("\n")
+        file.close()
+        return filename
+
+    def _preprocess (self, body, headers, include_dirs, lang):
+        src = self._gen_temp_sourcefile(body, headers, lang)
+        out = "_configtest.i"
+        self.temp_files.extend([src, out])
+        self.compiler.preprocess(src, out, include_dirs=include_dirs)
+        return (src, out)
+
+    def _compile (self, body, headers, include_dirs, lang):
+        src = self._gen_temp_sourcefile(body, headers, lang)
+        if self.dump_source:
+            dump_file(src, "compiling '%s':" % src)
+        (obj,) = self.compiler.object_filenames([src])
+        self.temp_files.extend([src, obj])
+        self.compiler.compile([src], include_dirs=include_dirs)
+        return (src, obj)
+
+    def _link (self, body,
+               headers, include_dirs,
+               libraries, library_dirs, lang):
+        (src, obj) = self._compile(body, headers, include_dirs, lang)
+        prog = os.path.splitext(os.path.basename(src))[0]
+        self.compiler.link_executable([obj], prog,
+                                      libraries=libraries,
+                                      library_dirs=library_dirs)
+
+        prog = prog + (self.compiler.exe_extension or "")
+        self.temp_files.append(prog)
+
+        return (src, obj, prog)
+
+    def _clean (self, *filenames):
+        if not filenames:
+            filenames = self.temp_files
+            self.temp_files = []
+        self.announce("removing: " + string.join(filenames))
+        for filename in filenames:
+            try:
+                os.remove(filename)
+            except OSError:
+                pass
+
+
+    # XXX these ignore the dry-run flag: what to do, what to do? even if
+    # you want a dry-run build, you still need some sort of configuration
+    # info.  My inclination is to make it up to the real config command to
+    # consult 'dry_run', and assume a default (minimal) configuration if
+    # true.  The problem with trying to do it here is that you'd have to
+    # return either true or false from all the 'try' methods, neither of
+    # which is correct.
+
+    # XXX need access to the header search path and maybe default macros.
+
+    def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"):
+        """Construct a source file from 'body' (a string containing lines
+        of C/C++ code) and 'headers' (a list of header files to include)
+        and run it through the preprocessor.  Return true if the
+        preprocessor succeeded, false if there were any errors.
+        ('body' probably isn't of much use, but what the heck.)
+        """
+        from distutils.ccompiler import CompileError
+        self._check_compiler()
+        ok = 1
+        try:
+            self._preprocess(body, headers, include_dirs, lang)
+        except CompileError:
+            ok = 0
+
+        self._clean()
+        return ok
+
+    def search_cpp (self, pattern, body=None,
+                    headers=None, include_dirs=None, lang="c"):
+        """Construct a source file (just like 'try_cpp()'), run it through
+        the preprocessor, and return true if any line of the output matches
+        'pattern'.  'pattern' should either be a compiled regex object or a
+        string containing a regex.  If both 'body' and 'headers' are None,
+        preprocesses an empty file -- which can be useful to determine the
+        symbols the preprocessor and compiler set by default.
+        """
+
+        self._check_compiler()
+        (src, out) = self._preprocess(body, headers, include_dirs, lang)
+
+        if type(pattern) is StringType:
+            pattern = re.compile(pattern)
+
+        file = open(out)
+        match = 0
+        while 1:
+            line = file.readline()
+            if line == '':
+                break
+            if pattern.search(line):
+                match = 1
+                break
+
+        file.close()
+        self._clean()
+        return match
+
+    def try_compile (self, body, headers=None, include_dirs=None, lang="c"):
+        """Try to compile a source file built from 'body' and 'headers'.
+        Return true on success, false otherwise.
+        """
+        from distutils.ccompiler import CompileError
+        self._check_compiler()
+        try:
+            self._compile(body, headers, include_dirs, lang)
+            ok = 1
+        except CompileError:
+            ok = 0
+
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+    def try_link (self, body,
+                  headers=None, include_dirs=None,
+                  libraries=None, library_dirs=None,
+                  lang="c"):
+        """Try to compile and link a source file, built from 'body' and
+        'headers', to executable form.  Return true on success, false
+        otherwise.
+        """
+        from distutils.ccompiler import CompileError, LinkError
+        self._check_compiler()
+        try:
+            self._link(body, headers, include_dirs,
+                       libraries, library_dirs, lang)
+            ok = 1
+        except (CompileError, LinkError):
+            ok = 0
+
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+    def try_run (self, body,
+                 headers=None, include_dirs=None,
+                 libraries=None, library_dirs=None,
+                 lang="c"):
+        """Try to compile, link to an executable, and run a program
+        built from 'body' and 'headers'.  Return true on success, false
+        otherwise.
+        """
+        from distutils.ccompiler import CompileError, LinkError
+        self._check_compiler()
+        try:
+            src, obj, exe = self._link(body, headers, include_dirs,
+                                       libraries, library_dirs, lang)
+            self.spawn([exe])
+            ok = 1
+        except (CompileError, LinkError, DistutilsExecError):
+            ok = 0
+
+        self.announce(ok and "success!" or "failure.")
+        self._clean()
+        return ok
+
+
+    # -- High-level methods --------------------------------------------
+    # (these are the ones that are actually likely to be useful
+    # when implementing a real-world config command!)
+
+    def check_func (self, func,
+                    headers=None, include_dirs=None,
+                    libraries=None, library_dirs=None,
+                    decl=0, call=0):
+
+        """Determine if function 'func' is available by constructing a
+        source file that refers to 'func', and compiles and links it.
+        If everything succeeds, returns true; otherwise returns false.
+
+        The constructed source file starts out by including the header
+        files listed in 'headers'.  If 'decl' is true, it then declares
+        'func' (as "int func()"); you probably shouldn't supply 'headers'
+        and set 'decl' true in the same call, or you might get errors about
+        conflicting declarations for 'func'.  Finally, the constructed
+        'main()' function either references 'func' or (if 'call' is true)
+        calls it.  'libraries' and 'library_dirs' are used when
+        linking.
+        """
+
+        self._check_compiler()
+        body = []
+        if decl:
+            body.append("int %s ();" % func)
+        body.append("int main () {")
+        if call:
+            body.append("  %s();" % func)
+        else:
+            body.append("  %s;" % func)
+        body.append("}")
+        body = string.join(body, "\n") + "\n"
+
+        return self.try_link(body, headers, include_dirs,
+                             libraries, library_dirs)
+
+    # check_func ()
+
+    def check_lib (self, library, library_dirs=None,
+                   headers=None, include_dirs=None, other_libraries=[]):
+        """Determine if 'library' is available to be linked against,
+        without actually checking that any particular symbols are provided
+        by it.  'headers' will be used in constructing the source file to
+        be compiled, but the only effect of this is to check if all the
+        header files listed are available.  Any libraries listed in
+        'other_libraries' will be included in the link, in case 'library'
+        has symbols that depend on other libraries.
+        """
+        self._check_compiler()
+        return self.try_link("int main (void) { }",
+                             headers, include_dirs,
+                             [library]+other_libraries, library_dirs)
+
+    def check_header (self, header, include_dirs=None,
+                      library_dirs=None, lang="c"):
+        """Determine if the system header file named by 'header'
+        exists and can be found by the preprocessor; return true if so,
+        false otherwise.
+        """
+        return self.try_cpp(body="/* No body */", headers=[header],
+                            include_dirs=include_dirs)
+
+
+# class config
+
+
+def dump_file (filename, head=None):
+    if head is None:
+        print filename + ":"
+    else:
+        print head
+
+    file = open(filename)
+    sys.stdout.write(file.read())
+    file.close()
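
[Annotation: a hypothetical sub-class shows how the high-level helpers above
are meant to be combined; the header and function names below are examples,
not anything this changeset requires:

    from distutils.command.config import config

    class my_config (config):

        def run (self):
            # check_header() drives try_cpp(); check_func() goes all the
            # way through compile and link.
            if self.check_header("stdlib.h"):
                self.announce("stdlib.h is usable")
            if self.check_func("strdup", headers=["string.h"], call=1):
                self.announce("strdup() is available")
]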
diff --git a/lib-python/2.2/distutils/command/install.py b/lib-python/2.2/distutils/command/install.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install.py
@@ -0,0 +1,598 @@
+"""distutils.command.install
+
+Implements the Distutils 'install' command."""
+
+# created 1999/03/13, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import *
+from distutils.core import Command, DEBUG
+from distutils.sysconfig import get_config_vars
+from distutils.errors import DistutilsPlatformError
+from distutils.file_util import write_file
+from distutils.util import convert_path, subst_vars, change_root
+from distutils.errors import DistutilsOptionError
+from glob import glob
+
+if sys.version < "2.2":
+    WINDOWS_SCHEME = {
+        'purelib': '$base',
+        'platlib': '$base',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+    }
+else:
+    WINDOWS_SCHEME = {
+        'purelib': '$base/Lib/site-packages',
+        'platlib': '$base/Lib/site-packages',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+    }
+
+INSTALL_SCHEMES = {
+    'unix_prefix': {
+        'purelib': '$base/lib/python$py_version_short/site-packages',
+        'platlib': '$platbase/lib/python$py_version_short/site-packages',
+        'headers': '$base/include/python$py_version_short/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'unix_home': {
+        'purelib': '$base/lib/python',
+        'platlib': '$base/lib/python',
+        'headers': '$base/include/python/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'nt': WINDOWS_SCHEME,
+    'mac': {
+        'purelib': '$base/Lib/site-packages',
+        'platlib': '$base/Lib/site-packages',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+        },
+    'java': {
+        'purelib': '$base/Lib',
+        'platlib': '$base/Lib',
+        'headers': '$base/Include/$dist_name',
+        'scripts': '$base/Scripts',
+        'data'   : '$base',
+        },
+    }
+
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every installation scheme above,
+# and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+
+
+class install (Command):
+
+    description = "install everything from build directory"
+
+    user_options = [
+        # Select installation scheme and set base director(y|ies)
+        ('prefix=', None,
+         "installation prefix"),
+        ('exec-prefix=', None,
+         "(Unix only) prefix for platform-specific files"),
+        ('home=', None,
+         "(Unix only) home directory to install under"),
+
+        # Or, just set the base director(y|ies)
+        ('install-base=', None,
+         "base installation directory (instead of --prefix or --home)"),
+        ('install-platbase=', None,
+         "base installation directory for platform-specific files " +
+         "(instead of --exec-prefix or --home)"),
+        ('root=', None,
+         "install everything relative to this alternate root directory"),
+
+        # Or, explicitly set the installation scheme
+        ('install-purelib=', None,
+         "installation directory for pure Python module distributions"),
+        ('install-platlib=', None,
+         "installation directory for non-pure module distributions"),
+        ('install-lib=', None,
+         "installation directory for all module distributions " +
+         "(overrides --install-purelib and --install-platlib)"),
+
+        ('install-headers=', None,
+         "installation directory for C/C++ headers"),
+        ('install-scripts=', None,
+         "installation directory for Python scripts"),
+        ('install-data=', None,
+         "installation directory for data files"),
+
+        # Byte-compilation options -- see install_lib.py for details, as
+        # these are duplicated from there (but only install_lib does
+        # anything with them).
+        ('compile', 'c', "compile .py to .pyc [default]"),
+        ('no-compile', None, "don't compile .py files"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+
+        # Miscellaneous control options
+        ('force', 'f',
+         "force installation (overwrite any existing files)"),
+        ('skip-build', None,
+         "skip rebuilding everything (for testing/debugging)"),
+
+        # Where to install documentation (eventually!)
+        #('doc-format=', None, "format of documentation to generate"),
+        #('install-man=', None, "directory for Unix man pages"),
+        #('install-html=', None, "directory for HTML documentation"),
+        #('install-info=', None, "directory for GNU info files"),
+
+        ('record=', None,
+         "filename in which to record list of installed files"),
+        ]
+
+    boolean_options = ['compile', 'force', 'skip-build']
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+
+        # High-level options: these select both an installation base
+        # and scheme.
+        self.prefix = None
+        self.exec_prefix = None
+        self.home = None
+
+        # These select only the installation base; it's up to the user to
+        # specify the installation scheme (currently, that means supplying
+        # the --install-{platlib,purelib,scripts,data} options).
+        self.install_base = None
+        self.install_platbase = None
+        self.root = None
+
+        # These options are the actual installation directories; if not
+        # supplied by the user, they are filled in using the installation
+        # scheme implied by prefix/exec-prefix/home and the contents of
+        # that installation scheme.
+        self.install_purelib = None     # for pure module distributions
+        self.install_platlib = None     # non-pure (dists w/ extensions)
+        self.install_headers = None     # for C/C++ headers
+        self.install_lib = None         # set to either purelib or platlib
+        self.install_scripts = None
+        self.install_data = None
+
+        self.compile = None
+        self.optimize = None
+
+        # These two are for putting non-packagized distributions into their
+        # own directory and creating a .pth file if it makes sense.
+        # 'extra_path' comes from the setup file; 'install_path_file' can
+        # be turned off if it makes no sense to install a .pth file.  (But
+        # better to install it uselessly than to guess wrong and not
+        # install it when it's necessary and would be used!)  Currently,
+        # 'install_path_file' is always true unless some outsider meddles
+        # with it.
+        self.extra_path = None
+        self.install_path_file = 1
+
+        # 'force' forces installation, even if target files are not
+        # out-of-date.  'skip_build' skips running the "build" command,
+        # handy if you know it's not necessary.  'warn_dir' (which is *not*
+        # a user option, it's just there so the bdist_* commands can turn
+        # it off) determines whether we warn about installing to a
+        # directory not in sys.path.
+        self.force = 0
+        self.skip_build = 0
+        self.warn_dir = 1
+
+        # These are only here as a conduit from the 'build' command to the
+        # 'install_*' commands that do the real work.  ('build_base' isn't
+        # actually used anywhere, but it might be useful in future.)  They
+        # are not user options, because if the user told the install
+        # command where the build directory is, that wouldn't affect the
+        # build command.
+        self.build_base = None
+        self.build_lib = None
+
+        # Not defined yet because we don't know anything about
+        # documentation yet.
+        #self.install_man = None
+        #self.install_html = None
+        #self.install_info = None
+
+        self.record = None
+
+
+    # -- Option finalizing methods -------------------------------------
+    # (This is rather more involved than for most commands,
+    # because this is where the policy for installing third-
+    # party Python modules on various platforms given a wide
+    # array of user input is decided.  Yes, it's quite complex!)
+
+    def finalize_options (self):
+
+        # This method (and its pliant slaves, like 'finalize_unix()',
+        # 'finalize_other()', and 'select_scheme()') is where the default
+        # installation directories for modules, extension modules, and
+        # anything else we care to install from a Python module
+        # distribution are determined.  Thus, this code makes a pretty
+        # important policy
+        # statement about how third-party stuff is added to a Python
+        # installation!  Note that the actual work of installation is done
+        # by the relatively simple 'install_*' commands; they just take
+        # their orders from the installation directory options determined
+        # here.
+
+        # Check for errors/inconsistencies in the options; first, stuff
+        # that's wrong on any platform.
+
+        if ((self.prefix or self.exec_prefix or self.home) and
+            (self.install_base or self.install_platbase)):
+            raise DistutilsOptionError, \
+                  ("must supply either prefix/exec-prefix/home or " +
+                   "install-base/install-platbase -- not both")
+
+        # Next, stuff that's wrong (or dubious) only on certain platforms.
+        if os.name == 'posix':
+            if self.home and (self.prefix or self.exec_prefix):
+                raise DistutilsOptionError, \
+                      ("must supply either home or prefix/exec-prefix -- " +
+                       "not both")
+        else:
+            if self.exec_prefix:
+                self.warn("exec-prefix option ignored on this platform")
+                self.exec_prefix = None
+            if self.home:
+                self.warn("home option ignored on this platform")
+                self.home = None
+
+        # Now the interesting logic -- so interesting that we farm it out
+        # to other methods.  The goal of these methods is to set the final
+        # values for the install_{lib,scripts,data,...}  options, using as
+        # input a heady brew of prefix, exec_prefix, home, install_base,
+        # install_platbase, user-supplied versions of
+        # install_{purelib,platlib,lib,scripts,data,...}, and the
+        # INSTALL_SCHEME dictionary above.  Phew!
+
+        self.dump_dirs("pre-finalize_{unix,other}")
+
+        if os.name == 'posix':
+            self.finalize_unix()
+        else:
+            self.finalize_other()
+
+        self.dump_dirs("post-finalize_{unix,other}()")
+
+        # Expand configuration variables, tilde, etc. in self.install_base
+        # and self.install_platbase -- that way, we can use $base or
+        # $platbase in the other installation directories and not worry
+        # about needing recursive variable expansion (shudder).
+
+        py_version = (string.split(sys.version))[0]
+        (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
+        self.config_vars = {'dist_name': self.distribution.get_name(),
+                            'dist_version': self.distribution.get_version(),
+                            'dist_fullname': self.distribution.get_fullname(),
+                            'py_version': py_version,
+                            'py_version_short': py_version[0:3],
+                            'sys_prefix': prefix,
+                            'prefix': prefix,
+                            'sys_exec_prefix': exec_prefix,
+                            'exec_prefix': exec_prefix,
+                           }
+        self.expand_basedirs()
+
+        self.dump_dirs("post-expand_basedirs()")
+
+        # Now define config vars for the base directories so we can expand
+        # everything else.
+        self.config_vars['base'] = self.install_base
+        self.config_vars['platbase'] = self.install_platbase
+
+        if DEBUG:
+            from pprint import pprint
+            print "config vars:"
+            pprint(self.config_vars)
+
+        # Expand "~" and configuration variables in the installation
+        # directories.
+        self.expand_dirs()
+
+        self.dump_dirs("post-expand_dirs()")
+
+        # Pick the actual directory to install all modules to: either
+        # install_purelib or install_platlib, depending on whether this
+        # module distribution is pure or not.  Of course, if the user
+        # already specified install_lib, use their selection.
+        if self.install_lib is None:
+            if self.distribution.ext_modules: # has extensions: non-pure
+                self.install_lib = self.install_platlib
+            else:
+                self.install_lib = self.install_purelib
+
+
+        # Convert directories from Unix /-separated syntax to the local
+        # convention.
+        self.convert_paths('lib', 'purelib', 'platlib',
+                           'scripts', 'data', 'headers')
+
+        # Well, we're not actually fully completely finalized yet: we still
+        # have to deal with 'extra_path', which is the hack for allowing
+        # non-packagized module distributions (hello, Numerical Python!) to
+        # get their own directories.
+        self.handle_extra_path()
+        self.install_libbase = self.install_lib # needed for .pth file
+        self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
+
+        # If a new root directory was supplied, make all the installation
+        # dirs relative to it.
+        if self.root is not None:
+            self.change_roots('libbase', 'lib', 'purelib', 'platlib',
+                              'scripts', 'data', 'headers')
+
+        self.dump_dirs("after prepending root")
+
+        # Find out the build directories, ie. where to install from.
+        self.set_undefined_options('build',
+                                   ('build_base', 'build_base'),
+                                   ('build_lib', 'build_lib'))
+
+        # Punt on doc directories for now -- after all, we're punting on
+        # documentation completely!
+
+    # finalize_options ()
+
+
+    def dump_dirs (self, msg):
+        if DEBUG:
+            from distutils.fancy_getopt import longopt_xlate
+            print msg + ":"
+            for opt in self.user_options:
+                opt_name = opt[0]
+                if opt_name[-1] == "=":
+                    opt_name = opt_name[0:-1]
+                opt_name = string.translate(opt_name, longopt_xlate)
+                val = getattr(self, opt_name)
+                print "  %s: %s" % (opt_name, val)
+
+
+    def finalize_unix (self):
+
+        if self.install_base is not None or self.install_platbase is not None:
+            if ((self.install_lib is None and
+                 self.install_purelib is None and
+                 self.install_platlib is None) or
+                self.install_headers is None or
+                self.install_scripts is None or
+                self.install_data is None):
+                raise DistutilsOptionError, \
+                      "install-base or install-platbase supplied, but " + \
+                      "installation scheme is incomplete"
+            return
+
+        if self.home is not None:
+            self.install_base = self.install_platbase = self.home
+            self.select_scheme("unix_home")
+        else:
+            if self.prefix is None:
+                if self.exec_prefix is not None:
+                    raise DistutilsOptionError, \
+                          "must not supply exec-prefix without prefix"
+
+                self.prefix = os.path.normpath(sys.prefix)
+                self.exec_prefix = os.path.normpath(sys.exec_prefix)
+
+            else:
+                if self.exec_prefix is None:
+                    self.exec_prefix = self.prefix
+
+            self.install_base = self.prefix
+            self.install_platbase = self.exec_prefix
+            self.select_scheme("unix_prefix")
+
+    # finalize_unix ()
+
+
+    def finalize_other (self):          # Windows and Mac OS for now
+
+        if self.prefix is None:
+            self.prefix = os.path.normpath(sys.prefix)
+
+        self.install_base = self.install_platbase = self.prefix
+        try:
+            self.select_scheme(os.name)
+        except KeyError:
+            raise DistutilsPlatformError, \
+                  "I don't know how to install stuff on '%s'" % os.name
+
+    # finalize_other ()
+
+
+    def select_scheme (self, name):
+        # it's the caller's problem if they supply a bad name!
+        scheme = INSTALL_SCHEMES[name]
+        for key in SCHEME_KEYS:
+            attrname = 'install_' + key
+            if getattr(self, attrname) is None:
+                setattr(self, attrname, scheme[key])
+
+
+    def _expand_attrs (self, attrs):
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                if os.name == 'posix':
+                    val = os.path.expanduser(val)
+                val = subst_vars(val, self.config_vars)
+                setattr(self, attr, val)
+
+
+    def expand_basedirs (self):
+        self._expand_attrs(['install_base',
+                            'install_platbase',
+                            'root'])
+
+    def expand_dirs (self):
+        self._expand_attrs(['install_purelib',
+                            'install_platlib',
+                            'install_lib',
+                            'install_headers',
+                            'install_scripts',
+                            'install_data',])
+
+
+    def convert_paths (self, *names):
+        for name in names:
+            attr = "install_" + name
+            setattr(self, attr, convert_path(getattr(self, attr)))
+
+
+    def handle_extra_path (self):
+
+        if self.extra_path is None:
+            self.extra_path = self.distribution.extra_path
+
+        if self.extra_path is not None:
+            if type(self.extra_path) is StringType:
+                self.extra_path = string.split(self.extra_path, ',')
+
+            if len(self.extra_path) == 1:
+                path_file = extra_dirs = self.extra_path[0]
+            elif len(self.extra_path) == 2:
+                (path_file, extra_dirs) = self.extra_path
+            else:
+                raise DistutilsOptionError, \
+                      "'extra_path' option must be a list, tuple, or " + \
+                      "comma-separated string with 1 or 2 elements"
+
+            # convert to local form in case Unix notation used (as it
+            # should be in setup scripts)
+            extra_dirs = convert_path(extra_dirs)
+
+        else:
+            path_file = None
+            extra_dirs = ''
+
+        # XXX should we warn if path_file and not extra_dirs? (in which
+        # case the path file would be harmless but pointless)
+        self.path_file = path_file
+        self.extra_dirs = extra_dirs
+
+    # handle_extra_path ()
+
+
+    def change_roots (self, *names):
+        for name in names:
+            attr = "install_" + name
+            setattr(self, attr, change_root(self.root, getattr(self, attr)))
+
+
+    # -- Command execution methods -------------------------------------
+
+    def run (self):
+
+        # Obviously have to build before we can install
+        if not self.skip_build:
+            self.run_command('build')
+
+        # Run all sub-commands (at least those that need to be run)
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+        if self.path_file:
+            self.create_path_file()
+
+        # write list of installed files, if requested.
+        if self.record:
+            outputs = self.get_outputs()
+            if self.root:               # strip any package prefix
+                root_len = len(self.root)
+                for counter in xrange(len(outputs)):
+                    outputs[counter] = outputs[counter][root_len:]
+            self.execute(write_file,
+                         (self.record, outputs),
+                         "writing list of installed files to '%s'" %
+                         self.record)
+
+        sys_path = map(os.path.normpath, sys.path)
+        sys_path = map(os.path.normcase, sys_path)
+        install_lib = os.path.normcase(os.path.normpath(self.install_lib))
+        if (self.warn_dir and
+            not (self.path_file and self.install_path_file) and
+            install_lib not in sys_path):
+            self.warn(("modules installed to '%s', which is not in " +
+                       "Python's module search path (sys.path) -- " +
+                       "you'll have to change the search path yourself") %
+                      self.install_lib)
+
+    # run ()
+
+    def create_path_file (self):
+        filename = os.path.join(self.install_libbase,
+                                self.path_file + ".pth")
+        if self.install_path_file:
+            self.execute(write_file,
+                         (filename, [self.extra_dirs]),
+                         "creating %s" % filename)
+        else:
+            self.warn("path file '%s' not created" % filename)
+
+
+    # -- Reporting methods ---------------------------------------------
+
+    def get_outputs (self):
+        # Assemble the outputs of all the sub-commands.
+        outputs = []
+        for cmd_name in self.get_sub_commands():
+            cmd = self.get_finalized_command(cmd_name)
+            # Add the contents of cmd.get_outputs(), ensuring
+            # that outputs doesn't contain duplicate entries
+            for filename in cmd.get_outputs():
+                if filename not in outputs:
+                    outputs.append(filename)
+
+        if self.path_file and self.install_path_file:
+            outputs.append(os.path.join(self.install_libbase,
+                                        self.path_file + ".pth"))
+
+        return outputs
+
+    def get_inputs (self):
+        # XXX gee, this looks familiar ;-(
+        inputs = []
+        for cmd_name in self.get_sub_commands():
+            cmd = self.get_finalized_command(cmd_name)
+            inputs.extend(cmd.get_inputs())
+
+        return inputs
+
+
+    # -- Predicates for sub-command list -------------------------------
+
+    def has_lib (self):
+        """Return true if the current distribution has any Python
+        modules to install."""
+        return (self.distribution.has_pure_modules() or
+                self.distribution.has_ext_modules())
+
+    def has_headers (self):
+        return self.distribution.has_headers()
+
+    def has_scripts (self):
+        return self.distribution.has_scripts()
+
+    def has_data (self):
+        return self.distribution.has_data_files()
+
+
+    # 'sub_commands': a list of commands this command might have to run to
+    # get its work done.  See cmd.py for more info.
+    sub_commands = [('install_lib',     has_lib),
+                    ('install_headers', has_headers),
+                    ('install_scripts', has_scripts),
+                    ('install_data',    has_data),
+                   ]
+
+# class install
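
The 'create_path_file()' method above writes a '<path_file>.pth' file whose
single line is 'extra_dirs'; at startup the stdlib 'site' module appends each
line of any '*.pth' file it finds in a site directory onto sys.path. A minimal
sketch of that effect (directory names are hypothetical, not taken from this
changeset):

    import os, sys

    site_packages = '/usr/lib/python2.2/site-packages'  # hypothetical path
    # A file 'mypkg.pth' in site_packages containing the single line
    # 'mypkg-extras' makes site.py do, in effect:
    sys.path.append(os.path.join(site_packages, 'mypkg-extras'))
    print sys.path[-1]
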
diff --git a/lib-python/2.2/distutils/command/install_data.py b/lib-python/2.2/distutils/command/install_data.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_data.py
@@ -0,0 +1,83 @@
+"""distutils.command.install_data
+
+Implements the Distutils 'install_data' command, for installing
+platform-independent data files."""
+
+# contributed by Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import os
+from types import StringType
+from distutils.core import Command
+from distutils.util import change_root, convert_path
+
+class install_data (Command):
+
+    description = "install data files"
+
+    user_options = [
+        ('install-dir=', 'd',
+         "base directory for installing data files "
+         "(default: installation base dir)"),
+        ('root=', None,
+         "install everything relative to this alternate root directory"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ]
+
+    boolean_options = ['force']
+
+    def initialize_options (self):
+        self.install_dir = None
+        self.outfiles = []
+        self.root = None
+        self.force = 0
+
+        self.data_files = self.distribution.data_files
+        self.warn_dir = 1
+
+    def finalize_options (self):
+        self.set_undefined_options('install',
+                                   ('install_data', 'install_dir'),
+                                   ('root', 'root'),
+                                   ('force', 'force'),
+                                  )
+
+    def run (self):
+        self.mkpath(self.install_dir)
+        for f in self.data_files:
+            if type(f) == StringType:
+                # it's a simple file, so copy it
+                f = convert_path(f)
+                if self.warn_dir:
+                    self.warn("setup script did not provide a directory for "
+                              "'%s' -- installing right in '%s'" %
+                              (f, self.install_dir))
+                (out, _) = self.copy_file(f, self.install_dir)
+                self.outfiles.append(out)
+            else:
+                # it's a tuple with path to install to and a list of files
+                dir = convert_path(f[0])
+                if not os.path.isabs(dir):
+                    dir = os.path.join(self.install_dir, dir)
+                elif self.root:
+                    dir = change_root(self.root, dir)
+                self.mkpath(dir)
+
+                if f[1] == []:
+                    # If there are no files listed, the user must be
+                    # trying to create an empty directory, so add the
+                    # directory to the list of output files.
+                    self.outfiles.append(dir)
+                else:
+                    # Copy files, adding them to the list of output files.
+                    for data in f[1]:
+                        data = convert_path(data)
+                        (out, _) = self.copy_file(data, dir)
+                        self.outfiles.append(out)
+
+    def get_inputs (self):
+        return self.data_files or []
+
+    def get_outputs (self):
+        return self.outfiles
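
Each entry in 'data_files' handled by 'run()' above is either a bare filename,
copied straight into the install directory with a warning, or a
'(directory, [files])' tuple. A short setup-script sketch exercising both
forms (package and file names are hypothetical):

    from distutils.core import setup

    setup(name='mypkg', version='1.0',
          data_files=['README.data',               # bare name: warned about
                      ('share/mypkg',              # relative dir: joined to
                       ['data/a.cfg', 'data/b.cfg'])])  # the install base
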
diff --git a/lib-python/2.2/distutils/command/install_headers.py b/lib-python/2.2/distutils/command/install_headers.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_headers.py
@@ -0,0 +1,53 @@
+"""distutils.command.install_headers
+
+Implements the Distutils 'install_headers' command, to install C/C++ header
+files to the Python include directory."""
+
+# created 2000/05/26, Greg Ward
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+
+
+class install_headers (Command):
+
+    description = "install C/C++ header files"
+
+    user_options = [('install-dir=', 'd',
+                     "directory to install header files to"),
+                    ('force', 'f',
+                     "force installation (overwrite existing files)"),
+                   ]
+
+    boolean_options = ['force']
+
+    def initialize_options (self):
+        self.install_dir = None
+        self.force = 0
+        self.outfiles = []
+
+    def finalize_options (self):
+        self.set_undefined_options('install',
+                                   ('install_headers', 'install_dir'),
+                                   ('force', 'force'))
+
+
+    def run (self):
+        headers = self.distribution.headers
+        if not headers:
+            return
+
+        self.mkpath(self.install_dir)
+        for header in headers:
+            (out, _) = self.copy_file(header, self.install_dir)
+            self.outfiles.append(out)
+
+    def get_inputs (self):
+        return self.distribution.headers or []
+
+    def get_outputs (self):
+        return self.outfiles
+
+# class install_headers
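
The 'headers' consumed here come straight from the 'headers' argument to
'setup()'; if none are given, 'run()' returns without creating the install
directory. An illustrative setup call (names hypothetical):

    from distutils.core import setup

    setup(name='mypkg', version='1.0',
          headers=['include/mypkg.h'])
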
diff --git a/lib-python/2.2/distutils/command/install_lib.py b/lib-python/2.2/distutils/command/install_lib.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_lib.py
@@ -0,0 +1,213 @@
+# created 1999/03/13, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import IntType
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+from distutils.dir_util import copy_tree
+
+class install_lib (Command):
+
+    description = "install all Python modules (extensions and pure Python)"
+
+    # The byte-compilation options are a tad confusing.  Here are the
+    # possible scenarios:
+    #   1) no compilation at all (--no-compile --no-optimize)
+    #   2) compile .pyc only (--compile --no-optimize; default)
+    #   3) compile .pyc and "level 1" .pyo (--compile --optimize)
+    #   4) compile "level 1" .pyo only (--no-compile --optimize)
+    #   5) compile .pyc and "level 2" .pyo (--compile --optimize-more)
+    #   6) compile "level 2" .pyo only (--no-compile --optimize-more)
+    #
+    # The UI for this is two options, 'compile' and 'optimize'.
+    # 'compile' is strictly boolean, and only decides whether to
+    # generate .pyc files.  'optimize' is three-way (0, 1, or 2), and
+    # decides both whether to generate .pyo files and what level of
+    # optimization to use.
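+    #
+    # For example (illustrative): "setup.py install_lib --no-compile
+    # --optimize=2" selects scenario 6 above, producing level-2 .pyo
+    # files only.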
+
+    user_options = [
+        ('install-dir=', 'd', "directory to install to"),
+        ('build-dir=','b', "build directory (where to install from)"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ('compile', 'c', "compile .py to .pyc [default]"),
+        ('no-compile', None, "don't compile .py files"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('skip-build', None, "skip the build steps"),
+        ]
+
+    boolean_options = ['force', 'compile', 'skip-build']
+    negative_opt = {'no-compile' : 'compile'}
+
+
+    def initialize_options (self):
+        # let the 'install' command dictate our installation directory
+        self.install_dir = None
+        self.build_dir = None
+        self.force = 0
+        self.compile = None
+        self.optimize = None
+        self.skip_build = None
+
+    def finalize_options (self):
+
+        # Get all the information we need to install pure Python modules
+        # from the umbrella 'install' command -- build (source) directory,
+        # install (target) directory, and whether to compile .py files.
+        self.set_undefined_options('install',
+                                   ('build_lib', 'build_dir'),
+                                   ('install_lib', 'install_dir'),
+                                   ('force', 'force'),
+                                   ('compile', 'compile'),
+                                   ('optimize', 'optimize'),
+                                   ('skip_build', 'skip_build'),
+                                  )
+
+        if self.compile is None:
+            self.compile = 1
+        if self.optimize is None:
+            self.optimize = 0
+
+        if type(self.optimize) is not IntType:
+            try:
+                self.optimize = int(self.optimize)
+                assert 0 <= self.optimize <= 2
+            except (ValueError, AssertionError):
+                raise DistutilsOptionError, "optimize must be 0, 1, or 2"
+
+    def run (self):
+
+        # Make sure we have built everything we need first
+        self.build()
+
+        # Install everything: simply dump the entire contents of the build
+        # directory to the installation directory (that's the beauty of
+        # having a build directory!)
+        outfiles = self.install()
+
+        # (Optionally) compile .py to .pyc
+        if outfiles is not None and self.distribution.has_pure_modules():
+            self.byte_compile(outfiles)
+
+    # run ()
+
+
+    # -- Top-level worker functions ------------------------------------
+    # (called from 'run()')
+
+    def build (self):
+        if not self.skip_build:
+            if self.distribution.has_pure_modules():
+                self.run_command('build_py')
+            if self.distribution.has_ext_modules():
+                self.run_command('build_ext')
+
+    def install (self):
+        if os.path.isdir(self.build_dir):
+            outfiles = self.copy_tree(self.build_dir, self.install_dir)
+        else:
+            self.warn("'%s' does not exist -- no Python modules to install" %
+                      self.build_dir)
+            return
+        return outfiles
+
+    def byte_compile (self, files):
+        from distutils.util import byte_compile
+
+        # Get the "--root" directory supplied to the "install" command,
+        # and use it as a prefix to strip off the purported filename
+        # encoded in bytecode files.  This is far from complete, but it
+        # should at least generate usable bytecode in RPM distributions.
+        install_root = self.get_finalized_command('install').root
+
+        if self.compile:
+            byte_compile(files, optimize=0,
+                         force=self.force,
+                         prefix=install_root,
+                         verbose=self.verbose, dry_run=self.dry_run)
+        if self.optimize > 0:
+            byte_compile(files, optimize=self.optimize,
+                         force=self.force,
+                         prefix=install_root,
+                         verbose=self.verbose, dry_run=self.dry_run)
+
+
+    # -- Utility methods -----------------------------------------------
+
+    def _mutate_outputs (self, has_any, build_cmd, cmd_option, output_dir):
+
+        if not has_any:
+            return []
+
+        build_cmd = self.get_finalized_command(build_cmd)
+        build_files = build_cmd.get_outputs()
+        build_dir = getattr(build_cmd, cmd_option)
+
+        prefix_len = len(build_dir) + len(os.sep)
+        outputs = []
+        for file in build_files:
+            outputs.append(os.path.join(output_dir, file[prefix_len:]))
+
+        return outputs
+
+    # _mutate_outputs ()
+
+    def _bytecode_filenames (self, py_filenames):
+        bytecode_files = []
+        for py_file in py_filenames:
+            if self.compile:
+                bytecode_files.append(py_file + "c")
+            if self.optimize > 0:
+                bytecode_files.append(py_file + "o")
+
+        return bytecode_files
+
+
+    # -- External interface --------------------------------------------
+    # (called by outsiders)
+
+    def get_outputs (self):
+        """Return the list of files that would be installed if this command
+        were actually run.  Not affected by the "dry-run" flag or whether
+        modules have actually been built yet.
+        """
+        pure_outputs = \
+            self._mutate_outputs(self.distribution.has_pure_modules(),
+                                 'build_py', 'build_lib',
+                                 self.install_dir)
+        if self.compile:
+            bytecode_outputs = self._bytecode_filenames(pure_outputs)
+        else:
+            bytecode_outputs = []
+
+        ext_outputs = \
+            self._mutate_outputs(self.distribution.has_ext_modules(),
+                                 'build_ext', 'build_lib',
+                                 self.install_dir)
+
+        return pure_outputs + bytecode_outputs + ext_outputs
+
+    # get_outputs ()
+
+    def get_inputs (self):
+        """Get the list of files that are input to this command, ie. the
+        files that get installed as they are named in the build tree.
+        The files in this list correspond one-to-one to the output
+        filenames returned by 'get_outputs()'.
+        """
+        inputs = []
+
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            inputs.extend(build_py.get_outputs())
+
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            inputs.extend(build_ext.get_outputs())
+
+        return inputs
+
+# class install_lib
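
'byte_compile()' in the method above strips the staging '--root' prefix so
the filename recorded inside each bytecode file matches the eventual run-time
path. A hedged, dry-run sketch of the same call (all paths hypothetical):

    from distutils.util import byte_compile

    # Staged install: files sit under /tmp/stage now but will live at
    # /usr/lib/python2.2/site-packages later, so the prefix is stripped
    # from the path encoded in each .pyc.
    byte_compile(['/tmp/stage/usr/lib/python2.2/site-packages/pkg/mod.py'],
                 optimize=0, force=1, prefix='/tmp/stage',
                 verbose=1, dry_run=1)
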
diff --git a/lib-python/2.2/distutils/command/install_scripts.py b/lib-python/2.2/distutils/command/install_scripts.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/install_scripts.py
@@ -0,0 +1,63 @@
+"""distutils.command.install_scripts
+
+Implements the Distutils 'install_scripts' command, for installing
+Python scripts."""
+
+# contributed by Bastian Kleineidam
+
+__revision__ = "$Id$"
+
+import os
+from distutils.core import Command
+from stat import ST_MODE
+
+class install_scripts (Command):
+
+    description = "install scripts (Python or otherwise)"
+
+    user_options = [
+        ('install-dir=', 'd', "directory to install scripts to"),
+        ('build-dir=','b', "build directory (where to install from)"),
+        ('force', 'f', "force installation (overwrite existing files)"),
+        ('skip-build', None, "skip the build steps"),
+    ]
+
+    boolean_options = ['force', 'skip-build']
+
+
+    def initialize_options (self):
+        self.install_dir = None
+        self.force = 0
+        self.build_dir = None
+        self.skip_build = None
+
+    def finalize_options (self):
+        self.set_undefined_options('build', ('build_scripts', 'build_dir'))
+        self.set_undefined_options('install',
+                                   ('install_scripts', 'install_dir'),
+                                   ('force', 'force'),
+                                   ('skip_build', 'skip_build'),
+                                  )
+
+    def run (self):
+        if not self.skip_build:
+            self.run_command('build_scripts')
+        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
+        if os.name == 'posix':
+            # Set the executable bits (owner, group, and world) on
+            # all the scripts we just installed.
+            for file in self.get_outputs():
+                if self.dry_run:
+                    self.announce("changing mode of %s" % file)
+                else:
+                    mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
+                    self.announce("changing mode of %s to %o" % (file, mode))
+                    os.chmod(file, mode)
+
+    def get_inputs (self):
+        return self.distribution.scripts or []
+
+    def get_outputs(self):
+        return self.outfiles or []
+
+# class install_scripts
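
The chmod line in 'run()' above ORs in 0555 (read and execute for owner,
group, and world) and masks with 07777 so only permission bits survive.
Worked out on a typical copied-file mode (value illustrative):

    mode = 0644                        # -rw-r--r--, as copied from the build tree
    print oct((mode | 0555) & 07777)   # prints 0755, i.e. -rwxr-xr-x
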
diff --git a/lib-python/2.2/distutils/command/sdist.py b/lib-python/2.2/distutils/command/sdist.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/command/sdist.py
@@ -0,0 +1,475 @@
+"""distutils.command.sdist
+
+Implements the Distutils 'sdist' command (create a source distribution)."""
+
+# created 1999/09/22, Greg Ward
+
+__revision__ = "$Id$"
+
+import sys, os, string
+from types import *
+from glob import glob
+from distutils.core import Command
+from distutils import dir_util, dep_util, file_util, archive_util
+from distutils.text_file import TextFile
+from distutils.errors import *
+from distutils.filelist import FileList
+
+
+def show_formats ():
+    """Print all possible values for the 'formats' option (used by
+    the "--help-formats" command-line option).
+    """
+    from distutils.fancy_getopt import FancyGetopt
+    from distutils.archive_util import ARCHIVE_FORMATS
+    formats = []
+    for format in ARCHIVE_FORMATS.keys():
+        formats.append(("formats=" + format, None,
+                        ARCHIVE_FORMATS[format][2]))
+    formats.sort()
+    pretty_printer = FancyGetopt(formats)
+    pretty_printer.print_help(
+        "List of available source distribution formats:")
+
+class sdist (Command):
+
+    description = "create a source distribution (tarball, zip file, etc.)"
+
+    user_options = [
+        ('template=', 't',
+         "name of manifest template file [default: MANIFEST.in]"),
+        ('manifest=', 'm',
+         "name of manifest file [default: MANIFEST]"),
+        ('use-defaults', None,
+         "include the default file set in the manifest "
+         "[default; disable with --no-defaults]"),
+        ('no-defaults', None,
+         "don't include the default file set"),
+        ('prune', None,
+         "specifically exclude files/directories that should not be "
+         "distributed (build tree, RCS/CVS dirs, etc.) "
+         "[default; disable with --no-prune]"),
+        ('no-prune', None,
+         "don't automatically exclude anything"),
+        ('manifest-only', 'o',
+         "just regenerate the manifest and then stop "
+         "(implies --force-manifest)"),
+        ('force-manifest', 'f',
+         "forcibly regenerate the manifest and carry on as usual"),
+        ('formats=', None,
+         "formats for source distribution (comma-separated list)"),
+        ('keep-temp', 'k',
+         "keep the distribution tree around after creating " +
+         "archive file(s)"),
+        ('dist-dir=', 'd',
+         "directory to put the source distribution archive(s) in "
+         "[default: dist]"),
+        ]
+
+    boolean_options = ['use-defaults', 'prune',
+                       'manifest-only', 'force-manifest',
+                       'keep-temp']
+
+    help_options = [
+        ('help-formats', None,
+         "list available distribution formats", show_formats),
+        ]
+
+    negative_opt = {'no-defaults': 'use-defaults',
+                    'no-prune': 'prune' }
+
+    default_format = { 'posix': 'gztar',
+                       'nt': 'zip' }
+
+    def initialize_options (self):
+        # 'template' and 'manifest' are, respectively, the names of
+        # the manifest template and manifest file.
+        self.template = None
+        self.manifest = None
+
+        # 'use_defaults': if true, we will include the default file set
+        # in the manifest
+        self.use_defaults = 1
+        self.prune = 1
+
+        self.manifest_only = 0
+        self.force_manifest = 0
+
+        self.formats = None
+        self.keep_temp = 0
+        self.dist_dir = None
+
+        self.archive_files = None
+
+
+    def finalize_options (self):
+        if self.manifest is None:
+            self.manifest = "MANIFEST"
+        if self.template is None:
+            self.template = "MANIFEST.in"
+
+        self.ensure_string_list('formats')
+        if self.formats is None:
+            try:
+                self.formats = [self.default_format[os.name]]
+            except KeyError:
+                raise DistutilsPlatformError, \
+                      "don't know how to create source distributions " + \
+                      "on platform %s" % os.name
+
+        bad_format = archive_util.check_archive_formats(self.formats)
+        if bad_format:
+            raise DistutilsOptionError, \
+                  "unknown archive format '%s'" % bad_format
+
+        if self.dist_dir is None:
+            self.dist_dir = "dist"
+
+
+    def run (self):
+
+        # 'filelist' contains the list of files that will make up the
+        # manifest
+        self.filelist = FileList()
+
+        # Ensure that all required meta-data is given; warn if not (but
+        # don't die, it's not *that* serious!)
+        self.check_metadata()
+
+        # Do whatever it takes to get the list of files to process
+        # (process the manifest template, read an existing manifest,
+        # whatever).  File list is accumulated in 'self.filelist'.
+        self.get_file_list()
+
+        # If user just wanted us to regenerate the manifest, stop now.
+        if self.manifest_only:
+            return
+
+        # Otherwise, go ahead and create the source distribution tarball,
+        # or zipfile, or whatever.
+        self.make_distribution()
+
+
+    def check_metadata (self):
+        """Ensure that all required elements of meta-data (name, version,
+        URL, (author and author_email) or (maintainer and
+        maintainer_email)) are supplied by the Distribution object; warn if
+        any are missing.
+        """
+        metadata = self.distribution.metadata
+
+        missing = []
+        for attr in ('name', 'version', 'url'):
+            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
+                missing.append(attr)
+
+        if missing:
+            self.warn("missing required meta-data: " +
+                      string.join(missing, ", "))
+
+        if metadata.author:
+            if not metadata.author_email:
+                self.warn("missing meta-data: if 'author' supplied, " +
+                          "'author_email' must be supplied too")
+        elif metadata.maintainer:
+            if not metadata.maintainer_email:
+                self.warn("missing meta-data: if 'maintainer' supplied, " +
+                          "'maintainer_email' must be supplied too")
+        else:
+            self.warn("missing meta-data: either (author and author_email) " +
+                      "or (maintainer and maintainer_email) " +
+                      "must be supplied")
+
+    # check_metadata ()
+
+
+    def get_file_list (self):
+        """Figure out the list of files to include in the source
+        distribution, and put it in 'self.filelist'.  This might involve
+        reading the manifest template (and writing the manifest), or just
+        reading the manifest, or just using the default file set -- it all
+        depends on the user's options and the state of the filesystem.
+        """
+
+        # If we have a manifest template, see if it's newer than the
+        # manifest; if so, we'll regenerate the manifest.
+        template_exists = os.path.isfile(self.template)
+        if template_exists:
+            template_newer = dep_util.newer(self.template, self.manifest)
+
+        # The contents of the manifest file almost certainly depend on the
+        # setup script as well as the manifest template -- so if the setup
+        # script is newer than the manifest, we'll regenerate the manifest
+        # from the template.  (Well, not quite: if we already have a
+        # manifest, but there's no template -- which will happen if the
+        # developer elects to generate a manifest some other way -- then we
+        # can't regenerate the manifest, so we don't.)
+        self.debug_print("checking if %s newer than %s" %
+                         (self.distribution.script_name, self.manifest))
+        setup_newer = dep_util.newer(self.distribution.script_name,
+                                     self.manifest)
+
+        # cases:
+        #   1) no manifest, template exists: generate manifest
+        #      (covered by 2a: no manifest == template newer)
+        #   2) manifest & template exist:
+        #      2a) template or setup script newer than manifest:
+        #          regenerate manifest
+        #      2b) manifest newer than both:
+        #          do nothing (unless --force or --manifest-only)
+        #   3) manifest exists, no template:
+        #      do nothing (unless --force or --manifest-only)
+        #   4) no manifest, no template: generate w/ warning ("defaults only")
+
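+        # Note: 'template_newer' is assigned only when 'template_exists'
+        # is true; the short-circuiting 'and' below keeps it from being
+        # read while unbound.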
+        manifest_outofdate = (template_exists and
+                              (template_newer or setup_newer))
+        force_regen = self.force_manifest or self.manifest_only
+        manifest_exists = os.path.isfile(self.manifest)
+        neither_exists = (not template_exists and not manifest_exists)
+
+        # Regenerate the manifest if necessary (or if explicitly told to)
+        if manifest_outofdate or neither_exists or force_regen:
+            if not template_exists:
+                self.warn(("manifest template '%s' does not exist " +
+                           "(using default file list)") %
+                          self.template)
+
+            self.filelist.findall()
+
+            # Add default file set to 'files'
+            if self.use_defaults:
+                self.add_defaults()
+
+            # Read manifest template if it exists
+            if template_exists:
+                self.read_template()
+
+            # Prune away any directories that don't belong in the source
+            # distribution
+            if self.prune:
+                self.prune_file_list()
+
+            # File list now complete -- sort it so that higher-level files
+            # come first
+            self.filelist.sort()
+
+            # Remove duplicates from the file list
+            self.filelist.remove_duplicates()
+
+            # And write complete file list (including default file set) to
+            # the manifest.
+            self.write_manifest()
+
+        # Don't regenerate the manifest, just read it in.
+        else:
+            self.read_manifest()
+
+    # get_file_list ()
+
+
+    def add_defaults (self):
+        """Add all the default files to self.filelist:
+          - README or README.txt
+          - setup.py
+          - test/test*.py
+          - all pure Python modules mentioned in setup script
+          - all C sources listed as part of extensions or C libraries
+            in the setup script (doesn't catch C headers!)
+        Warns if (README or README.txt) or setup.py are missing; everything
+        else is optional.
+        """
+
+        standards = [('README', 'README.txt'), self.distribution.script_name]
+        for fn in standards:
+            if type(fn) is TupleType:
+                alts = fn
+                got_it = 0
+                for fn in alts:
+                    if os.path.exists(fn):
+                        got_it = 1
+                        self.filelist.append(fn)
+                        break
+
+                if not got_it:
+                    self.warn("standard file not found: should have one of " +
+                              string.join(alts, ', '))
+            else:
+                if os.path.exists(fn):
+                    self.filelist.append(fn)
+                else:
+                    self.warn("standard file '%s' not found" % fn)
+
+        optional = ['test/test*.py', 'setup.cfg']
+        for pattern in optional:
+            files = filter(os.path.isfile, glob(pattern))
+            if files:
+                self.filelist.extend(files)
+
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            self.filelist.extend(build_py.get_source_files())
+
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            self.filelist.extend(build_ext.get_source_files())
+
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.filelist.extend(build_clib.get_source_files())
+
+    # add_defaults ()
+
+
+    def read_template (self):
+
+        """Read and parse the manifest template file named by
+        'self.template' (usually "MANIFEST.in").  The parsing and
+        processing is done by 'self.filelist', which updates itself
+        accordingly.
+        """
+        self.announce("reading manifest template '%s'" % self.template)
+        template = TextFile(self.template,
+                            strip_comments=1,
+                            skip_blanks=1,
+                            join_lines=1,
+                            lstrip_ws=1,
+                            rstrip_ws=1,
+                            collapse_join=1)
+
+        while 1:
+            line = template.readline()
+            if line is None:            # end of file
+                break
+
+            try:
+                self.filelist.process_template_line(line)
+            except DistutilsTemplateError, msg:
+                self.warn("%s, line %d: %s" % (template.filename,
+                                               template.current_line,
+                                               msg))
+
+    # read_template ()
+
+
+    def prune_file_list (self):
+        """Prune off branches that might slip into the file list as created
+        by 'read_template()', but really don't belong there:
+          * the build tree (typically "build")
+          * the release tree itself (only an issue if we ran "sdist"
+            previously with --keep-temp, or it aborted)
+          * any RCS or CVS directories
+        """
+        build = self.get_finalized_command('build')
+        base_dir = self.distribution.get_fullname()
+
+        self.filelist.exclude_pattern(None, prefix=build.build_base)
+        self.filelist.exclude_pattern(None, prefix=base_dir)
+        self.filelist.exclude_pattern(r'/(RCS|CVS)/.*', is_regex=1)
+
+
+    def write_manifest (self):
+        """Write the file list in 'self.filelist' (presumably as filled in
+        by 'add_defaults()' and 'read_template()') to the manifest file
+        named by 'self.manifest'.
+        """
+        self.execute(file_util.write_file,
+                     (self.manifest, self.filelist.files),
+                     "writing manifest file '%s'" % self.manifest)
+
+    # write_manifest ()
+
+
+    def read_manifest (self):
+        """Read the manifest file (named by 'self.manifest') and use it to
+        fill in 'self.filelist', the list of files to include in the source
+        distribution.
+        """
+        self.announce("reading manifest file '%s'" % self.manifest)
+        manifest = open(self.manifest)
+        while 1:
+            line = manifest.readline()
+            if line == '':              # end of file
+                break
+            if line[-1] == '\n':
+                line = line[0:-1]
+            self.filelist.append(line)
+
+    # read_manifest ()
+
+
+    def make_release_tree (self, base_dir, files):
+        """Create the directory tree that will become the source
+        distribution archive.  All directories implied by the filenames in
+        'files' are created under 'base_dir', and then we hard link or copy
+        (if hard linking is unavailable) those files into place.
+        Essentially, this duplicates the developer's source tree, but in a
+        directory named after the distribution, containing only the files
+        to be distributed.
+        """
+        # Create all the directories under 'base_dir' necessary to
+        # put 'files' there; the 'mkpath()' is just so we don't die
+        # if the manifest happens to be empty.
+        self.mkpath(base_dir)
+        dir_util.create_tree(base_dir, files,
+                             verbose=self.verbose, dry_run=self.dry_run)
+
+        # And walk over the list of files, either making a hard link (if
+        # os.link exists) to each one that doesn't already exist in its
+        # corresponding location under 'base_dir', or copying each file
+        # that's out-of-date in 'base_dir'.  (Usually, all files will be
+        # out-of-date, because by default we blow away 'base_dir' when
+        # we're done making the distribution archives.)
+
+        if hasattr(os, 'link'):        # can make hard links on this system
+            link = 'hard'
+            msg = "making hard links in %s..." % base_dir
+        else:                           # nope, have to copy
+            link = None
+            msg = "copying files to %s..." % base_dir
+
+        if not files:
+            self.warn("no files to distribute -- empty manifest?")
+        else:
+            self.announce(msg)
+        for file in files:
+            if not os.path.isfile(file):
+                self.warn("'%s' not a regular file -- skipping" % file)
+            else:
+                dest = os.path.join(base_dir, file)
+                self.copy_file(file, dest, link=link)
+
+        self.distribution.metadata.write_pkg_info(base_dir)
+
+    # make_release_tree ()
+
+    def make_distribution (self):
+        """Create the source distribution(s).  First, we create the release
+        tree with 'make_release_tree()'; then, we create all required
+        archive files (according to 'self.formats') from the release tree.
+        Finally, we clean up by blowing away the release tree (unless
+        'self.keep_temp' is true).  The list of archive files created is
+        stored so it can be retrieved later by 'get_archive_files()'.
+        """
+        # Don't warn about missing meta-data here -- should be (and is!)
+        # done elsewhere.
+        base_dir = self.distribution.get_fullname()
+        base_name = os.path.join(self.dist_dir, base_dir)
+
+        self.make_release_tree(base_dir, self.filelist.files)
+        archive_files = []              # remember names of files we create
+        for fmt in self.formats:
+            file = self.make_archive(base_name, fmt, base_dir=base_dir)
+            archive_files.append(file)
+
+        self.archive_files = archive_files
+
+        if not self.keep_temp:
+            dir_util.remove_tree(base_dir, self.verbose, self.dry_run)
+
+    def get_archive_files (self):
+        """Return the list of archive files created when the command
+        was run, or None if the command hasn't run yet.
+        """
+        return self.archive_files
+
+# class sdist
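
'read_template()' above hands each logical line of the manifest template to
'FileList.process_template_line()'. A small illustrative MANIFEST.in using
the standard template commands (patterns hypothetical):

    include README setup.py
    recursive-include mypkg *.py
    prune build
    global-exclude *.pyc
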
diff --git a/lib-python/2.2/distutils/core.py b/lib-python/2.2/distutils/core.py
new file mode 100644
--- /dev/null
+++ b/lib-python/2.2/distutils/core.py
@@ -0,0 +1,231 @@
+"""distutils.core
+
+The only module that needs to be imported to use the Distutils; provides
+the 'setup' function (which is to be called from the setup script).  Also
+indirectly provides the Distribution and Command classes, although they are
+really defined in distutils.dist and distutils.cmd.
+"""
+
+# creat